SDl 2.0 and ffmpeg

Hi,
I’m new to SDL2, I used to play with danger ffmpeg tutorials to stream
video to an SDL surface.

I’m updating my program incorporating SDL2.

Here is a piece of snippet and would appreciate your help to actually
draw the video image into the surface using the SDL2 api.
thanks


AVFrame* frame ;
AVPacket avpacket;
int frame_finished;
SDL_Event event;
int w = 640;
int h = 480;
int done = 0, color = 0;
SDL_Window * window = SDL_CreateWindow(“SDL”,
SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, w, h, 0);
SDL_Renderer * renderer = SDL_CreateRenderer(window, -1, 0);
SDL_Log("+++++ INIT DONE +++++");

 frame = avcodec_alloc_frame();

     	while (av_read_frame(format_context, &avpacket) >= 0) {
     		if (avpacket.stream_index == videostream) {

     			avcodec_decode_video2(codec_context, frame, &frame_finished, 

&avpacket);

     		}



 while (!done) {
 	SDL_WaitEvent(&event);

 	 switch(event.type) {
 			//case FF_ALLOC_EVENT:
                  //alloc_picture(event.user.data1);
              //    break;
              //case FF_REFRESH_EVENT:
                  //video_refresh_timer(event.user.data1);
                //  break;
        case SDL_KEYDOWN:
        case SDL_KEYUP:
        //case SDL_MOUSEBUTTONDOWN:
        case SDL_QUIT:
        	done=1;
         SDL_Log("Unhandled event type=%d", event.type);

     }

     //Draw Image into renderer.
     //We used to do it through SDL_DisplayYUVOverlay. How to do 

that with SDL2.0. Any help for this part ?

     SDL_RenderClear(renderer);
     SDL_RenderPresent(renderer);
     SDL_Delay(20);
 }

  }//while av_read_frame

 SDL_Log("+++++ FINISHED +++++");
 SDL_Quit();

}

Have a look at this.

It might help you

ZeniosOn Fri, Mar 9, 2012 at 12:09 PM, David martin wrote:

Hi,
I’m new to SDL2, I used to play with danger ffmpeg tutorials to stream video
to an SDL surface.

I’m updating my program incorporating SDL2.

Here is a piece of snippet and would appreciate your help to actually draw
the video image into the surface using the SDL2 api.
thanks


? ?AVFrame* frame ;
? ?AVPacket avpacket;
? ?int frame_finished;
? ?SDL_Event event;
? ?int w = 640;
? ?int h = 480;
? ?int done = 0, color = 0;
? ?SDL_Window * window = SDL_CreateWindow(“SDL”, SDL_WINDOWPOS_UNDEFINED,
SDL_WINDOWPOS_UNDEFINED, w, h, 0);
? ?SDL_Renderer * renderer = SDL_CreateRenderer(window, -1, 0);
? ?SDL_Log(“+++++ INIT DONE +++++”);

? ?frame = avcodec_alloc_frame();

? ? ? ? ? ? ? ?while (av_read_frame(format_context, &avpacket) >= 0) {
? ? ? ? ? ? ? ? ? ? ? ?if (avpacket.stream_index == videostream) {

? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ?avcodec_decode_video2(codec_context, frame,
&frame_finished, &avpacket);

? ? ? ? ? ? ? ? ? ? ? ?}

? ?while (!done) {
? ? ? ?SDL_WaitEvent(&event);

? ? ? ? switch(event.type) {
? ? ? ? ? ? ? ? ? ? ? ?//case FF_ALLOC_EVENT:
? ? ? ? ? ? ? ? ? ? //alloc_picture(event.user.data1);
? ? ? ? ? ? ? ? // ? ?break;
? ? ? ? ? ? ? ? //case FF_REFRESH_EVENT:
? ? ? ? ? ? ? ? ? ? //video_refresh_timer(event.user.data1);
? ? ? ? ? ? ? ? ? // ?break;
? ? ? ? ? case SDL_KEYDOWN:
? ? ? ? ? case SDL_KEYUP:
? ? ? ? ? //case SDL_MOUSEBUTTONDOWN:
? ? ? ? ? case SDL_QUIT:
? ? ? ? ? ? ? ?done=1;
? ? ? ? ? ?SDL_Log(“Unhandled event type=%d”, event.type);

? ? ? ?}

? ? ? ?//Draw Image into renderer.
? ? ? ?//We used to do it through SDL_DisplayYUVOverlay. How to do that with
SDL2.0. ? ? ? ?Any help for this part ?

? ? ? ?SDL_RenderClear(renderer);
? ? ? ?SDL_RenderPresent(renderer);
? ? ? ?SDL_Delay(20);
? ?}

? ? }//while av_read_frame

? ?SDL_Log(“+++++ FINISHED +++++”);
? ?SDL_Quit();
}


SDL mailing list
SDL at lists.libsdl.org
http://lists.libsdl.org/listinfo.cgi/sdl-libsdl.org

Thanks for the link.
I’m reading a video streaming (not a single BMp image) so not sure how
this applies to my code.
ffmpeg reads in in yuvp42 format (see output from ffmpeg dump_format)

~/workspace/test1/Release $ ./test2 toto.mpeg
Input #0, mpeg, from ‘toto.mpeg’:
Duration: 00:00:04.83, start: 1.000000, bitrate: 606 kb/s
Stream #0:0[0x1e0]: Video: mpeg1video, yuv420p, 352x288 [SAR
178:163 DAR 1958:1467], 104857 kb/s, 60 fps, 60 tbr, 90k tbn, 60 tbc

So the question is do i need to create a surface from each image read
frame ? How would you do that ?

thanks,On 03/09/2012 11:27 AM, Dimitris Zenios wrote:

Have a look at this.
Exploring the Galaxy: MPEG acceleration with GLSL

It might help you

Zenios

On Fri, Mar 9, 2012 at 12:09 PM, David martin<@David_martin1> wrote:

Hi,
I’m new to SDL2, I used to play with danger ffmpeg tutorials to stream video
to an SDL surface.

I’m updating my program incorporating SDL2.

Here is a piece of snippet and would appreciate your help to actually draw
the video image into the surface using the SDL2 api.
thanks


AVFrame* frame ;
AVPacket avpacket;
int frame_finished;
SDL_Event event;
int w = 640;
int h = 480;
int done = 0, color = 0;
SDL_Window * window = SDL_CreateWindow(“SDL”, SDL_WINDOWPOS_UNDEFINED,
SDL_WINDOWPOS_UNDEFINED, w, h, 0);
SDL_Renderer * renderer = SDL_CreateRenderer(window, -1, 0);
SDL_Log(“+++++ INIT DONE +++++”);

frame = avcodec_alloc_frame();

            while (av_read_frame(format_context,&avpacket)>= 0) {
                    if (avpacket.stream_index == videostream) {
                            avcodec_decode_video2(codec_context, frame,

&frame_finished,&avpacket);

                    }



while (!done) {
    SDL_WaitEvent(&event);

     switch(event.type) {
                    //case FF_ALLOC_EVENT:
                 //alloc_picture(event.user.data1);
             //    break;
             //case FF_REFRESH_EVENT:
                 //video_refresh_timer(event.user.data1);
               //  break;
       case SDL_KEYDOWN:
       case SDL_KEYUP:
       //case SDL_MOUSEBUTTONDOWN:
       case SDL_QUIT:
            done=1;
        SDL_Log("Unhandled event type=%d", event.type);

    }

    //Draw Image into renderer.
    //We used to do it through SDL_DisplayYUVOverlay. How to do that with

SDL2.0. Any help for this part ?

    SDL_RenderClear(renderer);
    SDL_RenderPresent(renderer);
    SDL_Delay(20);
}

 }//while av_read_frame

SDL_Log("+++++ FINISHED +++++");
SDL_Quit();

}


SDL mailing list
SDL at lists.libsdl.org
http://lists.libsdl.org/listinfo.cgi/sdl-libsdl.org

Thanks for the link.
I’m reading a video streaming (not a single BMp image) so not sure how
this applies to my code.
ffmpeg reads in in yuvp42 format (see output from ffmpeg dump_format)

~/workspace/test1/Release $ ./test2 toto.mpeg
Input #0, mpeg, from ‘toto.mpeg’:
Duration: 00:00:04.83, start: 1.000000, bitrate: 606 kb/s
Stream #0:0[0x1e0]: Video: mpeg1video, yuv420p, 352x288 [SAR 178:163
DAR 1958:1467], 104857 kb/s, 60 fps, 60 tbr, 90k tbn, 60 tbc

So the question is do i need to create a surface from each image read
frame ? How would you do that ?

You should create only a streaming texture, with a command like this:

texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_YV12,
SDL_TEXTUREACCESS_STREAMING, frame_width, frame_height))

Then you should copy the avpicture you get from ffmpeg with a function
similat to this one (was a test with an older 1.3 version of SDL I don’t
know if it still works with recent 2.0 builds):

void display_picture(AVPicture *picture)
{
unsigned char *pixels;
int pitch;

if(texture && !SDL_LockTexture(texture, NULL, (void **)&pixels, &pitch)

) {

    if(pitch == picture->linesize[0]) {
        int size = pitch * frame_height;

        memcpy(pixels, picture->data[0], size);
        memcpy(pixels + size, picture->data[2], size / 4);
        memcpy(pixels + size * 5 / 4, picture->data[1], size / 4);
    }
    else {
        register unsigned char *y1,*y2,*y3,*i1,*i2,*i3;
        int i;
        y1 = pixels;
        y3 = pixels + pitch * frame_height; // invertiti xche' avevo i

colori sballati!
y2 = pixels + pitch * frame_height * 5 / 4;

        i1=picture->data[0];
        i2=picture->data[1];
        i3=picture->data[2];

        for (i = 0; i<(frame_height/2); i++) {
            memcpy(y1,i1,pitch);
            i1+=picture->linesize[0];
            y1+=pitch;
            memcpy(y1,i1,pitch);

            memcpy(y2,i2,pitch / 2);
            memcpy(y3,i3,pitch / 2);

            y1+=pitch;
            y2+=pitch / 2;
            y3+=pitch / 2;
            i1+=picture->linesize[0];
            i2+=picture->linesize[1];
            i3+=picture->linesize[2];
        }
    }
    SDL_UnlockTexture(texture);
}

SDL_RenderCopy(renderer, texture, NULL, &rect);
SDL_RenderPresent(renderer);

}On Fri, Mar 9, 2012 at 3:02 PM, David martin wrote:


Bye,
Gabry

The problem is that in SDL1.2 picture relies on the function
SDL_LockYUVOverlay() that is not implemented in 2.0.
How to cope with that ?On 03/09/2012 04:15 PM, Gabriele Greco wrote:

On Fri, Mar 9, 2012 at 3:02 PM, David martin <@David_martin1 mailto:David_martin1> wrote:

Thanks for the link.
I'm reading a video streaming (not a single BMp image) so not sure
how this applies to my code.
ffmpeg reads in in yuvp42 format (see output from ffmpeg dump_format)

 > ~/workspace/test1/Release $ ./test2 toto.mpeg
Input #0, mpeg, from 'toto.mpeg':
  Duration: 00:00:04.83, start: 1.000000, bitrate: 606 kb/s
    Stream #0:0[0x1e0]: Video: mpeg1video, yuv420p, 352x288 [SAR
178:163 DAR 1958:1467], 104857 kb/s, 60 fps, 60 tbr, 90k tbn, 60 tbc


So the question is do i need to create a surface from each image
read frame ? How would you do that ?

You should create only a streaming texture, with a command like this:

texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_YV12,
SDL_TEXTUREACCESS_STREAMING, frame_width, frame_height))

Then you should copy the avpicture you get from ffmpeg with a function
similat to this one (was a test with an older 1.3 version of SDL I don’t
know if it still works with recent 2.0 builds):
void display_picture(AVPicture *picture)
{
unsigned char *pixels;
int pitch;

 if(texture && !SDL_LockTexture(texture, NULL, (void **)&pixels,

&pitch) ) {

     if(pitch == picture->linesize[0]) {
         int size = pitch * frame_height;

         memcpy(pixels, picture->data[0], size);
         memcpy(pixels + size, picture->data[2], size / 4);
         memcpy(pixels + size * 5 / 4, picture->data[1], size / 4);
     }
     else {
         register unsigned char *y1,*y2,*y3,*i1,*i2,*i3;
         int i;
         y1 = pixels;
         y3 = pixels + pitch * frame_height; // invertiti xche'

avevo i colori sballati!
y2 = pixels + pitch * frame_height * 5 / 4;

         i1=picture->data[0];
         i2=picture->data[1];
         i3=picture->data[2];

         for (i = 0; i<(frame_height/2); i++) {
             memcpy(y1,i1,pitch);
             i1+=picture->linesize[0];
             y1+=pitch;
             memcpy(y1,i1,pitch);

             memcpy(y2,i2,pitch / 2);
             memcpy(y3,i3,pitch / 2);

             y1+=pitch;
             y2+=pitch / 2;
             y3+=pitch / 2;
             i1+=picture->linesize[0];
             i2+=picture->linesize[1];
             i3+=picture->linesize[2];
         }
     }
     SDL_UnlockTexture(texture);
 }

 SDL_RenderCopy(renderer, texture, NULL, &rect);
 SDL_RenderPresent(renderer);

}


Bye,
Gabry


SDL mailing list
SDL at lists.libsdl.org
http://lists.libsdl.org/listinfo.cgi/sdl-libsdl.org

From the link that dimitris posted it seems that the problems of
SDL_LockYUVOverlay(bmp) can be fixed by using a surface implementation.

image = SDL_LoadBMP(argv[1]);
if (!image) {
fprintf(stderr, “Couldn’t load BMP file %s: %s\n”, argv[1],
SDL_GetError());
quit(3);
}
if (image->format->BytesPerPixel != 3) {
fprintf(stderr, “BMP must be 24-bit\n”);
quit(4);
}

 /* wxh for the V plane, and then w/2xh/2 for the U and V planes */
 imageYUV = SDL_malloc(image->w*image->h+(image->w*image->h)/2);
 ConvertRGBtoYV12(image->pixels, imageYUV, image->w, image->h, 0, 100);

Check his link for the details on the function.
Actually SDL_LoadBMP reads from an image file, in case of ffmpeg i don’t
know how to create a surface from an image stream.

Any idea ?On 03/09/2012 04:15 PM, Gabriele Greco wrote:

On Fri, Mar 9, 2012 at 3:02 PM, David martin <@David_martin1 mailto:David_martin1> wrote:

Thanks for the link.
I'm reading a video streaming (not a single BMp image) so not sure
how this applies to my code.
ffmpeg reads in in yuvp42 format (see output from ffmpeg dump_format)

 > ~/workspace/test1/Release $ ./test2 toto.mpeg
Input #0, mpeg, from 'toto.mpeg':
  Duration: 00:00:04.83, start: 1.000000, bitrate: 606 kb/s
    Stream #0:0[0x1e0]: Video: mpeg1video, yuv420p, 352x288 [SAR
178:163 DAR 1958:1467], 104857 kb/s, 60 fps, 60 tbr, 90k tbn, 60 tbc


So the question is do i need to create a surface from each image
read frame ? How would you do that ?

You should create only a streaming texture, with a command like this:

texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_YV12,
SDL_TEXTUREACCESS_STREAMING, frame_width, frame_height))

Then you should copy the avpicture you get from ffmpeg with a function
similat to this one (was a test with an older 1.3 version of SDL I don’t
know if it still works with recent 2.0 builds):
void display_picture(AVPicture *picture)
{
unsigned char *pixels;
int pitch;

 if(texture && !SDL_LockTexture(texture, NULL, (void **)&pixels,

&pitch) ) {

     if(pitch == picture->linesize[0]) {
         int size = pitch * frame_height;

         memcpy(pixels, picture->data[0], size);
         memcpy(pixels + size, picture->data[2], size / 4);
         memcpy(pixels + size * 5 / 4, picture->data[1], size / 4);
     }
     else {
         register unsigned char *y1,*y2,*y3,*i1,*i2,*i3;
         int i;
         y1 = pixels;
         y3 = pixels + pitch * frame_height; // invertiti xche'

avevo i colori sballati!
y2 = pixels + pitch * frame_height * 5 / 4;

         i1=picture->data[0];
         i2=picture->data[1];
         i3=picture->data[2];

         for (i = 0; i<(frame_height/2); i++) {
             memcpy(y1,i1,pitch);
             i1+=picture->linesize[0];
             y1+=pitch;
             memcpy(y1,i1,pitch);

             memcpy(y2,i2,pitch / 2);
             memcpy(y3,i3,pitch / 2);

             y1+=pitch;
             y2+=pitch / 2;
             y3+=pitch / 2;
             i1+=picture->linesize[0];
             i2+=picture->linesize[1];
             i3+=picture->linesize[2];
         }
     }
     SDL_UnlockTexture(texture);
 }

 SDL_RenderCopy(renderer, texture, NULL, &rect);
 SDL_RenderPresent(renderer);

}


Bye,
Gabry


SDL mailing list
SDL at lists.libsdl.org
http://lists.libsdl.org/listinfo.cgi/sdl-libsdl.org

I have managed to make some progress on the ffmpeg with SDL2.0.
Here is the code that is compiling and running, although can’t display
the image for some reason that i don’t know. i was wondering if you
could run it on your machines and let me know what the image is not
displayed.
You can copy the whole code and run it. For some reason the renderer
does not display the image. I would appreciate if somebody could have a
look as it is almost done. Note that the code is not optimized yet, just
wanted to have a functional code first…

Runit with a mpeg video for example like:
./test2 file.mpeg

thanks

/*

  • test2.c*

*/
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <SDL.h>
#include <stdio.h>

void sdl_init(AVFormatContext* format_context, AVCodecContext*
codec_context, int videostream ) {
SDL_Init(SDL_INIT_EVERYTHING);
AVFrame* frame;
AVFrame *pFrameRGB;
AVPacket avpacket;
AVPicture picture;

 // Convert the image from its native format to RGB
 struct SwsContext *img_convert_ctx =NULL;

 SDL_Event event;
 SDL_RendererInfo info;
 SDL_Texture *texture;
 SDL_Surface *image;
 Uint32 then, now , frames;
 int w = 640;
 int h = 480;
 int done = 0;
 SDL_Window * window = SDL_CreateWindow("SDL", 

SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, w, h, 0);
SDL_Renderer * renderer = SDL_CreateRenderer(window, -1, 0);

 SDL_GetRendererInfo(renderer, &info);
 printf("Using %s rendering\n", info.name);



 SDL_Log("+++++ INIT DONE +++++");


 frame = avcodec_alloc_frame();

 pFrameRGB = avcodec_alloc_frame();
     if (pFrameRGB == NULL)
     {
      printf("Cannot allocate pFrame\n");
      exit(-1);
     }

     unsigned char* pixels;
     int pitch;
     uint8_t *buffer;
     int numBytes;
     // Determine required buffer size and allocate buffer
     numBytes=avpicture_get_size(PIX_FMT_RGB24, codec_context->width,
     		codec_context->height);
     buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));


     // Assign appropriate parts of buffer to image planes in pFrameRGB
     // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
     // of AVPicture
     avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
     		codec_context->width, codec_context->height);

     while (av_read_frame(format_context, &avpacket) >= 0) {
     	if (avpacket.stream_index == videostream) {
     		// Video stream packet
     		int frame_finished;

     		avcodec_decode_video2(codec_context, frame, &frame_finished, 

&avpacket);

     		if(frame_finished)
     		{

     			picture.data[0] = frame->data[0];
     			picture.data[1] = frame->data[1];
     			picture.data[2] = frame->data[2];
     			picture.linesize[0] = frame->linesize[0];
     			picture.linesize[1] = frame->linesize[1];
     			picture.linesize[2] = frame->linesize[2];



     			img_convert_ctx  = sws_getCachedContext(img_convert_ctx,
     					codec_context->width,
     					codec_context->height,
     					PIX_FMT_YUV420P,
     					frame->width,
     					frame->height,
     					PIX_FMT_RGB24,
     					SWS_BICUBIC,
     					NULL,
     					NULL,
     					NULL);


     			if (img_convert_ctx == NULL)
     			{
     				fprintf(stderr, "Cannot initialize the conversion context!\n");
     				exit(1);
     			}





     			int s = sws_scale(
     					img_convert_ctx,
     					frame->data,
     					frame->linesize,
     					0,
     					codec_context->height,
     					picture.data,
     					picture.linesize);


     			texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_YV12, 

SDL_TEXTUREACCESS_STREAMING, frame->width, frame->height);//3 plane texture

     			if (!texture) {
     				fprintf(stderr, "Couldn't set create texture: %s\n", 

SDL_GetError());
exit(-1);
}

						if(texture && !SDL_LockTexture(texture, NULL, (void **)&pixels, 

&pitch) ) {

							if(pitch == picture.linesize[0]) {
								int size = pitch * frame->height;
								memcpy(pixels, picture.data[0], size);
								memcpy(pixels + size, picture.data[2], size / 4);
								memcpy(pixels + size * 5 / 4, picture.data[1], size / 4);
							}


							else {
								register unsigned char *y1,*y2,*y3,*i1,*i2,*i3;
								int i;
								y1 = pixels;
								y3 = pixels + pitch * frame->height; // invertiti xche' avevo i 

colori sballati!
y2 = pixels + pitch * frame->height * 5 / 4;

								i1=picture.data[0];
								i2=picture.data[1];
								i3=picture.data[2];

								for (i = 0; i<(frame->height/2); i++) {
									memcpy(y1,i1,pitch);
									i1+=picture.linesize[0];
									y1+=pitch;
									memcpy(y1,i1,pitch);

									memcpy(y2,i2,pitch / 2);
									memcpy(y3,i3,pitch / 2);

									y1+=pitch;
									y2+=pitch / 2;
									y3+=pitch / 2;
									i1+=picture.linesize[0];
									i2+=picture.linesize[1];
									i3+=picture.linesize[2];
								}
							}
						}//texture

     		}//frame
     av_free_packet(&avpacket);
     	}//avpacket
 frames = 0;
 then = SDL_GetTicks();

// while (!done) {
 	SDL_PollEvent(&event);

 	 switch(event.type) {

            if (event.key.keysym.sym == SDLK_ESCAPE) {
           //  done = 1;
             }
            break;

        case SDL_QUIT:
        	//done=1;
         SDL_Log("Unhandled event type=%d", event.type);
         break;
    // }
 	 /* Print out some timing information */
 	   // printf("%d\n",avpacket.dts);
     if (!SDL_UpdateTexture(texture, NULL, frame, frame->width)) {
         //SDL_UpdateTexture(texture, NULL, imageYUV, image->w);

             fprintf(stderr, "Couldn't  update texture: %s\n", 

SDL_GetError());
exit(-1);
}

 	      SDL_RenderCopy(renderer, texture, NULL, NULL);
 	      SDL_RenderPresent(renderer);
 	      SDL_Delay(20);
     		}

  }//while av_read_frame

 SDL_Log("+++++ FINISHED +++++");
 SDL_Quit();

}

int main(int argc, char * argv) {

AVCodecContext* codec_context;
int videostream;

if (argc < 2) {
		printf("Usage: %s filename\n", argv[0]);
		return 0;
	}

	// Register all available file formats and codecs
	av_register_all();

	int err;
	// Init SDL with video support
	err = SDL_Init(SDL_INIT_VIDEO);
	if (err < 0) {
		fprintf(stderr, "Unable to init SDL: %s\n", SDL_GetError());
		return -1;
	}

	// Open video file
	const char* filename = argv[1];
	AVFormatContext* format_context = NULL;
	err = avformat_open_input(&format_context, filename, NULL, NULL);
	if (err < 0) {
		fprintf(stderr, "ffmpeg: Unable to open input\n");
		return -1;
	}

	// Retrieve stream information
	err = avformat_find_stream_info(format_context, NULL);
	if (err < 0) {
		fprintf(stderr, "ffmpeg: Unable to find stream info\n");
		return -1;
	}

	// Dump information about file onto standard error
	av_dump_format(format_context, 0, argv[1], 0);

	// Find the first video stream

	for (videostream = 0; videostream < format_context->nb_streams; 

++videostream) {
if (format_context->streams[videostream]->codec->codec_type ==
AVMEDIA_TYPE_VIDEO) {
break;
}
}
if (videostream == format_context->nb_streams) {
fprintf(stderr, “ffmpeg: Unable to find video stream\n”);
return -1;
}

	codec_context = format_context->streams[videostream]->codec;
	AVCodec* codec = avcodec_find_decoder(codec_context->codec_id);

	avcodec_alloc_context3(codec);//FIXME
	//codec_context = avcodec_alloc_context3(codec);//FIXME

	 if (avcodec_open2(codec_context, codec, NULL) < 0)
	 {
		 fprintf(stderr, "ffmpeg: Unable to allocate codec context\n");
	 }

		else {
			printf("Codec initialized\n");

	}

	 /*
	 Initializing display
	 */
	

 sdl_init(format_context, codec_context,videostream);


 return 0;

}

// END OF MYSCRIPT

On 03/09/2012 11:09 AM, David martin wrote:

Hi,
I’m new to SDL2, I used to play with danger ffmpeg tutorials to stream
video to an SDL surface.

I’m updating my program incorporating SDL2.

Here is a piece of snippet and would appreciate your help to actually
draw the video image into the surface using the SDL2 api.
thanks


AVFrame* frame ;
AVPacket avpacket;
int frame_finished;
SDL_Event event;
int w = 640;
int h = 480;
int done = 0, color = 0;
SDL_Window * window = SDL_CreateWindow(“SDL”, SDL_WINDOWPOS_UNDEFINED,
SDL_WINDOWPOS_UNDEFINED, w, h, 0);
SDL_Renderer * renderer = SDL_CreateRenderer(window, -1, 0);
SDL_Log(“+++++ INIT DONE +++++”);

frame = avcodec_alloc_frame();

while (av_read_frame(format_context, &avpacket) >= 0) {
if (avpacket.stream_index == videostream) {

avcodec_decode_video2(codec_context, frame, &frame_finished, &avpacket);

}

while (!done) {
SDL_WaitEvent(&event);

switch(event.type) {
//case FF_ALLOC_EVENT:
//alloc_picture(event.user.data1);
// break;
//case FF_REFRESH_EVENT:
//video_refresh_timer(event.user.data1);
// break;
case SDL_KEYDOWN:
case SDL_KEYUP:
//case SDL_MOUSEBUTTONDOWN:
case SDL_QUIT:
done=1;
SDL_Log(“Unhandled event type=%d”, event.type);

}

//Draw Image into renderer.
//We used to do it through SDL_DisplayYUVOverlay. How to do that with
SDL2.0. Any help for this part ?

SDL_RenderClear(renderer);
SDL_RenderPresent(renderer);
SDL_Delay(20);
}

}//while av_read_frame

SDL_Log(“+++++ FINISHED +++++”);
SDL_Quit();
}

Sorry had a wrong loop on previous file. This should do it.
thanks
/*

  • test2.c*
  • Created on: Mar 9, 2012
  •  Author: dvi
    

*/
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <SDL.h>
#include <stdio.h>

void sdl_init(AVFormatContext* format_context, AVCodecContext*
codec_context, int videostream ) {
SDL_Init(SDL_INIT_EVERYTHING);
AVFrame* frame;
AVFrame *pFrameRGB;
AVPacket avpacket;
AVPicture picture;

 // Convert the image from its native format to RGB
 struct SwsContext *img_convert_ctx =NULL;

 SDL_Event event;
 SDL_RendererInfo info;
 SDL_Texture *texture;
 SDL_Surface *image;
 Uint32 then, now , frames;
 int w = 640;
 int h = 480;
 int done = 0;
 SDL_Window * window = SDL_CreateWindow("SDL", 

SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, w, h, 0);
SDL_Renderer * renderer = SDL_CreateRenderer(window, -1, 0);

 SDL_GetRendererInfo(renderer, &info);
 printf("Using %s rendering\n", info.name);



 SDL_Log("+++++ INIT DONE +++++");


 frame = avcodec_alloc_frame();

 pFrameRGB = avcodec_alloc_frame();
     if (pFrameRGB == NULL)
     {
      printf("Cannot allocate pFrame\n");
      exit(-1);
     }

     unsigned char* pixels;
     int pitch;
     uint8_t *buffer;
     int numBytes;
     // Determine required buffer size and allocate buffer
     numBytes=avpicture_get_size(PIX_FMT_RGB24, codec_context->width,
     		codec_context->height);
     buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));


     // Assign appropriate parts of buffer to image planes in pFrameRGB
     // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
     // of AVPicture
     avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
     		codec_context->width, codec_context->height);

     while (av_read_frame(format_context, &avpacket) >= 0) {
     	if (avpacket.stream_index == videostream) {
     		// Video stream packet
     		int frame_finished;

     		avcodec_decode_video2(codec_context, frame, &frame_finished, 

&avpacket);

     		if(frame_finished)
     		{

     			picture.data[0] = frame->data[0];
     			picture.data[1] = frame->data[1];
     			picture.data[2] = frame->data[2];
     			picture.linesize[0] = frame->linesize[0];
     			picture.linesize[1] = frame->linesize[1];
     			picture.linesize[2] = frame->linesize[2];



     			img_convert_ctx  = sws_getCachedContext(img_convert_ctx,
     					codec_context->width,
     					codec_context->height,
     					PIX_FMT_YUV420P,
     					frame->width,
     					frame->height,
     					PIX_FMT_RGB24,
     					SWS_BICUBIC,
     					NULL,
     					NULL,
     					NULL);


     			if (img_convert_ctx == NULL)
     			{
     				fprintf(stderr, "Cannot initialize the conversion context!\n");
     				exit(1);
     			}



     			int s = sws_scale(
     					img_convert_ctx,
     					frame->data,
     					frame->linesize,
     					0,
     					codec_context->height,
     					picture.data,
     					picture.linesize);


     			texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_YV12, 

SDL_TEXTUREACCESS_STREAMING, codec_context->width,
codec_context->height);//3 plane texture

     			if (!texture) {
     				fprintf(stderr, "Couldn't set create texture: %s\n", 

SDL_GetError());
exit(-1);
}

						if(texture && !SDL_LockTexture(texture, NULL, (void **)&pixels, 

&pitch) ) {

							if(pitch == picture.linesize[0]) {
								int size = pitch * frame->height;
								memcpy(pixels, picture.data[0], size);
								memcpy(pixels + size, picture.data[2], size / 4);
								memcpy(pixels + size * 5 / 4, picture.data[1], size / 4);
							}


							else {
								register unsigned char *y1,*y2,*y3,*i1,*i2,*i3;
								int i;
								y1 = pixels;
								y3 = pixels + pitch * frame->height; // invertiti xche' avevo i 

colori sballati!
y2 = pixels + pitch * frame->height * 5 / 4;

								i1=picture.data[0];
								i2=picture.data[1];
								i3=picture.data[2];

								for (i = 0; i<(frame->height/2); i++) {
									memcpy(y1,i1,pitch);
									i1+=picture.linesize[0];
									y1+=pitch;
									memcpy(y1,i1,pitch);

									memcpy(y2,i2,pitch / 2);
									memcpy(y3,i3,pitch / 2);

									y1+=pitch;
									y2+=pitch / 2;
									y3+=pitch / 2;
									i1+=picture.linesize[0];
									i2+=picture.linesize[1];
									i3+=picture.linesize[2];
								}
							}
						}//texture
						SDL_UnlockTexture(texture);
     		}//frame
     av_free_packet(&avpacket);
     	}//avpacket

 //while (!done) {
 	SDL_PollEvent(&event);

 	 switch(event.type) {

            if (event.key.keysym.sym == SDLK_ESCAPE) {
             done = 1;
            }
            break;

        case SDL_QUIT:
        	//done=1;
         SDL_Log("Unhandled event type=%d", event.type);
         done=1;
         break;
 	 }
 	 /* Print out some timing information */
 	  //  printf("TIMING %d\n",avpacket.dts);
 	 //SDL_UpdateTexture(texture, NULL, imageYUV, image->w)
     if (!SDL_UpdateTexture(texture, NULL, frame, frame->width)) {
         //SDL_UpdateTexture(texture, NULL, imageYUV, image->w);

             fprintf(stderr, "Couldn't  update texture: %s\n", 

SDL_GetError());
//exit(-1);
}

     SDL_RenderClear(renderer);
     SDL_RenderCopy(renderer, texture, NULL, NULL);
     SDL_RenderPresent(renderer);
     SDL_Delay(20);
 //}

// }

  }//while av_read_frame

 SDL_Log("+++++ FINISHED +++++");
 SDL_Quit();

}

int main(int argc, char * argv) {

AVCodecContext* codec_context;
int videostream;

if (argc < 2) {
		printf("Usage: %s filename\n", argv[0]);
		return 0;
	}

	// Register all available file formats and codecs
	av_register_all();

	int err;
	// Init SDL with video support
	err = SDL_Init(SDL_INIT_VIDEO);
	if (err < 0) {
		fprintf(stderr, "Unable to init SDL: %s\n", SDL_GetError());
		return -1;
	}

	// Open video file
	const char* filename = argv[1];
	AVFormatContext* format_context = NULL;
	err = avformat_open_input(&format_context, filename, NULL, NULL);
	if (err < 0) {
		fprintf(stderr, "ffmpeg: Unable to open input\n");
		return -1;
	}

	// Retrieve stream information
	err = avformat_find_stream_info(format_context, NULL);
	if (err < 0) {
		fprintf(stderr, "ffmpeg: Unable to find stream info\n");
		return -1;
	}

	// Dump information about file onto standard error
	av_dump_format(format_context, 0, argv[1], 0);

	// Find the first video stream

	for (videostream = 0; videostream < format_context->nb_streams; 

++videostream) {
if (format_context->streams[videostream]->codec->codec_type ==
AVMEDIA_TYPE_VIDEO) {
break;
}
}
if (videostream == format_context->nb_streams) {
fprintf(stderr, “ffmpeg: Unable to find video stream\n”);
return -1;
}

	codec_context = format_context->streams[videostream]->codec;
	AVCodec* codec = avcodec_find_decoder(codec_context->codec_id);

	avcodec_alloc_context3(codec);


	 if (avcodec_open2(codec_context, codec, NULL) < 0)
	 {
		 fprintf(stderr, "ffmpeg: Unable to allocate codec context\n");
	 }

		else {
			printf("Codec initialized\n");

	}

	 /*
	 Initializing display
	 */
	 printf("Width:%d\n",codec_context->width);
	 printf("height:%d\n",codec_context->height);
	 //exit(0);


 sdl_init(format_context, codec_context,videostream);


 return 0;

}

On 03/09/2012 11:09 AM, David martin wrote:

Hi,
I’m new to SDL2, I used to play with danger ffmpeg tutorials to stream
video to an SDL surface.

I’m updating my program incorporating SDL2.

Here is a piece of snippet and would appreciate your help to actually
draw the video image into the surface using the SDL2 api.
thanks


AVFrame* frame ;
AVPacket avpacket;
int frame_finished;
SDL_Event event;
int w = 640;
int h = 480;
int done = 0, color = 0;
SDL_Window * window = SDL_CreateWindow(“SDL”, SDL_WINDOWPOS_UNDEFINED,
SDL_WINDOWPOS_UNDEFINED, w, h, 0);
SDL_Renderer * renderer = SDL_CreateRenderer(window, -1, 0);
SDL_Log(“+++++ INIT DONE +++++”);

frame = avcodec_alloc_frame();

while (av_read_frame(format_context, &avpacket) >= 0) {
if (avpacket.stream_index == videostream) {

avcodec_decode_video2(codec_context, frame, &frame_finished, &avpacket);

}

while (!done) {
SDL_WaitEvent(&event);

switch(event.type) {
//case FF_ALLOC_EVENT:
//alloc_picture(event.user.data1);
// break;
//case FF_REFRESH_EVENT:
//video_refresh_timer(event.user.data1);
// break;
case SDL_KEYDOWN:
case SDL_KEYUP:
//case SDL_MOUSEBUTTONDOWN:
case SDL_QUIT:
done=1;
SDL_Log(“Unhandled event type=%d”, event.type);

}

//Draw Image into renderer.
//We used to do it through SDL_DisplayYUVOverlay. How to do that with
SDL2.0. Any help for this part ?

SDL_RenderClear(renderer);
SDL_RenderPresent(renderer);
SDL_Delay(20);
}

}//while av_read_frame

SDL_Log(“+++++ FINISHED +++++”);
SDL_Quit();
}

I haven’t got around to trying to make this work with newer versions of sdl yet, more interested right now in getting the ffmpeg wrapper framework working for both sdl players and non-sdl players.

But here’s a start that might interest some of you.

I suspect I will eventually make this work with sdl 2.0

right now it works with sdl 1.3 revision 5517.

Hi,

anyone has solved this issue?

[SDL] SDl 2.0 and ffmpeg

David martin vilanew at gmail.com
mailto:sdl%40lists.libsdl.org?Subject=Re%3A%20[SDL]%20SDl%202.0%20and%20ffmpeg&In-Reply-To=<jjl0ar%24bsp%242%40dough.gmane.org>
/Mon Mar 12 07:13:47 PDT 2012/

Sorry had a wrong loop on previous file. This should do it.
thanks
/*

  • test2.c
  • Created on: Mar 9, 2012
  •  Author: dvi
    

*/
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <SDL.h>
#include <stdio.h>

void sdl_init(AVFormatContext* format_context, AVCodecContext*
codec_context, int videostream ) {
SDL_Init(SDL_INIT_EVERYTHING);
AVFrame* frame;
AVFrame *pFrameRGB;
AVPacket avpacket;
AVPicture picture;

  // Convert the image from its native format to RGB
  struct SwsContext *img_convert_ctx =NULL;

  SDL_Event event;
  SDL_RendererInfo info;
  SDL_Texture *texture;
  SDL_Surface *image;
  Uint32 then, now , frames;
  int w = 640;
  int h = 480;
  int done = 0;
  SDL_Window * window = SDL_CreateWindow("SDL",

SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, w, h, 0);
SDL_Renderer * renderer = SDL_CreateRenderer(window, -1, 0);

  SDL_GetRendererInfo(renderer, &info);
  printf("Using %s rendering\n", info.name);



  SDL_Log("+++++ INIT DONE +++++");


  frame = avcodec_alloc_frame();

  pFrameRGB = avcodec_alloc_frame();
      if (pFrameRGB == NULL)
      {
       printf("Cannot allocate pFrame\n");
       exit(-1);
      }

      unsigned char* pixels;
      int pitch;
      uint8_t *buffer;
      int numBytes;
      // Determine required buffer size and allocate buffer
      numBytes=avpicture_get_size(PIX_FMT_RGB24, codec_context->width,
      		codec_context->height);
      buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));


      // Assign appropriate parts of buffer to image planes in pFrameRGB
      // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
      // of AVPicture
      avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
      		codec_context->width, codec_context->height);

      while (av_read_frame(format_context, &avpacket) >= 0) {
      	if (avpacket.stream_index == videostream) {
      		// Video stream packet
      		int frame_finished;

      		avcodec_decode_video2(codec_context, frame, &frame_finished,

&avpacket);

      		if(frame_finished)
      		{

      			picture.data[0] = frame->data[0];
      			picture.data[1] = frame->data[1];
      			picture.data[2] = frame->data[2];
      			picture.linesize[0] = frame->linesize[0];
      			picture.linesize[1] = frame->linesize[1];
      			picture.linesize[2] = frame->linesize[2];



      			img_convert_ctx  = sws_getCachedContext(img_convert_ctx,
      					codec_context->width,
      					codec_context->height,
      					PIX_FMT_YUV420P,
      					frame->width,
      					frame->height,
      					PIX_FMT_RGB24,
      					SWS_BICUBIC,
      					NULL,
      					NULL,
      					NULL);


      			if (img_convert_ctx == NULL)
      			{
      				fprintf(stderr, "Cannot initialize the conversion context!\n");
      				exit(1);
      			}



      			int s = sws_scale(
      					img_convert_ctx,
      					frame->data,
      					frame->linesize,
      					0,
      					codec_context->height,
      					picture.data,
      					picture.linesize);


      			texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_YV12,

SDL_TEXTUREACCESS_STREAMING, codec_context->width,
codec_context->height);//3 plane texture

      			if (!texture) {
      				fprintf(stderr, "Couldn't set create texture: %s\n",

SDL_GetError());
exit(-1);
}

						if(texture && !SDL_LockTexture(texture, NULL, (void **)&pixels,

&pitch) ) {

							if(pitch == picture.linesize[0]) {
								int size = pitch * frame->height;
								memcpy(pixels, picture.data[0], size);
								memcpy(pixels + size, picture.data[2], size / 4);
								memcpy(pixels + size * 5 / 4, picture.data[1], size / 4);
							}


							else {
								register unsigned char *y1,*y2,*y3,*i1,*i2,*i3;
								int i;
								y1 = pixels;
								y3 = pixels + pitch * frame->height; // invertiti xche' avevo i

colori sballati!
y2 = pixels + pitch * frame->height * 5 / 4;

								i1=picture.data[0];
								i2=picture.data[1];
								i3=picture.data[2];

								for (i = 0; i<(frame->height/2); i++) {
									memcpy(y1,i1,pitch);
									i1+=picture.linesize[0];
									y1+=pitch;
									memcpy(y1,i1,pitch);

									memcpy(y2,i2,pitch / 2);
									memcpy(y3,i3,pitch / 2);

									y1+=pitch;
									y2+=pitch / 2;
									y3+=pitch / 2;
									i1+=picture.linesize[0];
									i2+=picture.linesize[1];
									i3+=picture.linesize[2];
								}
							}
						}//texture
						SDL_UnlockTexture(texture);
      		}//frame
      av_free_packet(&avpacket);
      	}//avpacket

  //while (!done) {
  	SDL_PollEvent(&event);

  	 switch(event.type) {

             if (event.key.keysym.sym == SDLK_ESCAPE) {
              done = 1;
             }
             break;

         case SDL_QUIT:
         	//done=1;
          SDL_Log("Unhandled event type=%d", event.type);
          done=1;
          break;
  	 }
  	 /* Print out some timing information */
  	  //  printf("TIMING %d\n",avpacket.dts);
  	 //SDL_UpdateTexture(texture, NULL, imageYUV, image->w)
      if (!SDL_UpdateTexture(texture, NULL, frame, frame->width)) {
          //SDL_UpdateTexture(texture, NULL, imageYUV, image->w);

              fprintf(stderr, "Couldn't  update texture: %s\n",

SDL_GetError());
//exit(-1);
}

      SDL_RenderClear(renderer);
      SDL_RenderCopy(renderer, texture, NULL, NULL);
      SDL_RenderPresent(renderer);
      SDL_Delay(20);
  //}

// }

   }//while av_read_frame

  SDL_Log("+++++ FINISHED +++++");
  SDL_Quit();

}

int main(int argc, char * argv) {

AVCodecContext* codec_context;
int videostream;

if (argc < 2) {
		printf("Usage: %s filename\n", argv[0]);
		return 0;
	}

	// Register all available file formats and codecs
	av_register_all();

	int err;
	// Init SDL with video support
	err = SDL_Init(SDL_INIT_VIDEO);
	if (err < 0) {
		fprintf(stderr, "Unable to init SDL: %s\n", SDL_GetError());
		return -1;
	}

	// Open video file
	const char* filename = argv[1];
	AVFormatContext* format_context = NULL;
	err = avformat_open_input(&format_context, filename, NULL, NULL);
	if (err < 0) {
		fprintf(stderr, "ffmpeg: Unable to open input\n");
		return -1;
	}

	// Retrieve stream information
	err = avformat_find_stream_info(format_context, NULL);
	if (err < 0) {
		fprintf(stderr, "ffmpeg: Unable to find stream info\n");
		return -1;
	}

	// Dump information about file onto standard error
	av_dump_format(format_context, 0, argv[1], 0);

	// Find the first video stream

	for (videostream = 0; videostream < format_context->nb_streams;

++videostream) {
if (format_context->streams[videostream]->codec->codec_type ==
AVMEDIA_TYPE_VIDEO) {
break;
}
}
if (videostream == format_context->nb_streams) {
fprintf(stderr, “ffmpeg: Unable to find video stream\n”);
return -1;
}

	codec_context = format_context->streams[videostream]->codec;
	AVCodec* codec = avcodec_find_decoder(codec_context->codec_id);

	avcodec_alloc_context3(codec);


	 if (avcodec_open2(codec_context, codec, NULL) < 0)
	 {
		 fprintf(stderr, "ffmpeg: Unable to allocate codec context\n");
	 }

		else {
			printf("Codec initialized\n");

	}

	 /*
	 Initializing display
	 */
	 printf("Width:%d\n",codec_context->width);
	 printf("height:%d\n",codec_context->height);
	 //exit(0);


  sdl_init(format_context, codec_context,videostream);


  return 0;

}

On 03/09/2012 11:09 AM, David martin wrote:

/ Hi,
/>/ I’m new to SDL2, I used to play with danger ffmpeg tutorials to stream
/>/ video to an SDL surface.
/>/
/>/ I’m updating my program incorporating SDL2.
/>/
/>/
/>/ Here is a piece of snippet and would appreciate your help to actually
/>/ draw the video image into the surface using the SDL2 api.
/>/ thanks
/>/
/>/
/>/
/>/ …
/>/ AVFrame* frame ;
/>/ AVPacket avpacket;
/>/ int frame_finished;
/>/ SDL_Event event;
/>/ int w = 640;
/>/ int h = 480;
/>/ int done = 0, color = 0;
/>/ SDL_Window * window = SDL_CreateWindow(“SDL”, SDL_WINDOWPOS_UNDEFINED,
/>/ SDL_WINDOWPOS_UNDEFINED, w, h, 0);
/>/ SDL_Renderer * renderer = SDL_CreateRenderer(window, -1, 0);
/>/ SDL_Log(“+++++ INIT DONE +++++”);
/>/
/>/ frame = avcodec_alloc_frame();
/>/
/>/ while (av_read_frame(format_context, &avpacket) >= 0) {
/>/ if (avpacket.stream_index == videostream) {
/>/
/>/ avcodec_decode_video2(codec_context, frame, &frame_finished, &avpacket);
/>/
/>/
/>/
/>/ }
/>/
/>/
/>/
/>/ while (!done) {
/>/ SDL_WaitEvent(&event);
/>/
/>/ switch(event.type) {
/>/ //case FF_ALLOC_EVENT:
/>/ //alloc_picture(event.user.data1);
/>/ // break;
/>/ //case FF_REFRESH_EVENT:
/>/ //video_refresh_timer(event.user.data1);
/>/ // break;
/>/ case SDL_KEYDOWN:
/>/ case SDL_KEYUP:
/>/ //case SDL_MOUSEBUTTONDOWN:
/>/ case SDL_QUIT:
/>/ done=1;
/>/ SDL_Log(“Unhandled event type=%d”, event.type);
/>/
/>/ }
/>/
/>/ //Draw Image into renderer.
/>/ //We used to do it through SDL_DisplayYUVOverlay. How to do that with
/>/ SDL2.0. Any help for this part ?
/>/
/>/ SDL_RenderClear(renderer);
/>/ SDL_RenderPresent(renderer);
/>/ SDL_Delay(20);
/>/ }
/>/
/>/ }//while av_read_frame
/>/
/>/ SDL_Log(“+++++ FINISHED +++++”);
/>/ SDL_Quit();
/>/ }
/

Best

Simone