ffmpeg yuv支持yuyv422转yuv420p吗

2010年12月 C/C++大版内专家分月排行榜第三
2011年11月 专题开发/技术/项目大版内专家分月排行榜第二2011年8月 专题开发/技术/项目大版内专家分月排行榜第二
2011年11月 专题开发/技术/项目大版内专家分月排行榜第二2011年8月 专题开发/技术/项目大版内专家分月排行榜第二
本帖子已过去太久远了,不再提供回复功能。

FFMPEG:H264解码 - SDL显示(RGB32、RGB24、YUV420P、YUV422)
FFMpeg 对视频文件进行解码的大致流程:
1. 注册所有容器格式: av_register_all()
2. 打开文件: av_open_input_file()
3. 从文件中提取流信息: av_find_stream_info()
4. 穷举所有的流,查找其中种类为 CODEC_TYPE_VIDEO 的视频流
5. 查找对应的解码器: avcodec_find_decoder()
6. 打开编解码器: avcodec_open()
7. 为解码帧分配内存: avcodec_alloc_frame()
8. 不停地从码流中提取出帧数据: av_read_frame()
9. 判断帧的类型,对于视频帧调用: avcodec_decode_video()
10. 解码完后,释放解码器: avcodec_close()
11. 关闭输入文件: av_close_input_file()
//添加的库:avcodec.lib avdevice.lib avfilter.lib avformat.lib avutil.lib swscale.lib
SDL.lib extern "C"{ #include &libavcodec/avcodec.h& #include &libavformat/avformat.h& #include &libswscale/swscale.h& #include &libsdl/SDL.h& #include &libsdl/SDL_thread.h&};void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame) { FILE *pF char szFilename[32];
// Open file sprintf(szFilename, "frame%d.ppm", iFrame); pFile=fopen(szFilename, "wb"); if(pFile==NULL)
// Write header fprintf(pFile, "P6\n%d %d\n255\n", width, height);
// Write pixel data for(y=0; y& y++)
fwrite(pFrame-&data[0]+y*pFrame-&linesize[0], 1, width*3, pFile);
// Close file fclose(pFile);}void CTest0Dlg::OnButton1() { // TODO: Add your control notification handler code here
AVFormatContext *pFormatC
AVCodecContext
*pFrameRGB;
static int sws_flags = SWS_BICUBIC;
struct SwsContext *img_convert_
char argv[100] = "d:\\temp\\VIDEO"; // argv[1] = "d:\\temp\\ff.mpg";
// /*注册所有可用的格式和编解码器*/
av_register_all();
// Open video file /*以输入方式打开一个媒体文件,也即源文件,codecs并没有打开,只读取了文件的头信息*/
if(av_open_input_file(&pFormatCtx, argv, NULL, 0, NULL)!=0) // Couldn't open file
// Retrieve stream information/*通过读取媒体文件的中的包来获取媒体文件中的流信息,对于没有头信息的文件如(mpeg)是非常有用的,// 该函数通常重算类似mpeg-2帧模式的真实帧率,该函数并未改变逻辑文件的position.*/
if(av_find_stream_info(pFormatCtx)&0) // Couldn't find stream information
// Dump information about file onto standard error//该函数的作用就是检查下初始化过程中设置的参数是否符合规范
dump_format(pFormatCtx, 0, argv, 0);
// Find the first video stream
videoStream=-1;
printf("%d\n",pFormatCtx-&nb_streams);
getchar();
for(i=0; i&pFormatCtx-&nb_ i++)
if(pFormatCtx-&streams[i]-&codec-&codec_type==CODEC_TYPE_VIDEO&&videoStream & 0) {
videoStream=i;
if(videoStream==-1) // Didn't find a video stream
// Get a pointer to the codec context for the video stream
pCodecCtx=pFormatCtx-&streams[videoStream]-&
// Find the decoder for the video stream
pCodec=avcodec_find_decoder(pCodecCtx-&codec_id);/*通过code ID查找一个已经注册的音视频编码器,查找编码器之前,必须先调用av_register_all注册所有支持的编码器音视频编码器保存在一个链表中,查找过程中,函数从头到尾遍历链表,通过比较编码器的ID来查找*/
if(pCodec==NULL) {
fprintf(stderr, "Unsupported codec!\n"); // Codec not found
// Open codec//使用给定的AVCodec初始化AVCodecContext
if(avcodec_open(pCodecCtx, pCodec)&0) // Could not open codec
//printf("name %s\n",pCodec-&name);
//getchar();
// Allocate video frame
pFrame=avcodec_alloc_frame();
// Allocate an AVFrame structure
pFrameRGB=avcodec_alloc_frame();
if(pFrameRGB==NULL)
// Determine required buffer size and allocate buffer
numBytes=avpicture_get_size(PIX_FMT_RGB32, pCodecCtx-&width,
pCodecCtx-&height);
buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
// Assign appropriate parts of buffer to image planes in pFrameRGB
// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
// of AVPicture
avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB32,
pCodecCtx-&width, pCodecCtx-&height);
// Read frames and save first five frames to disk
////////////////////////////////////////////////////////////////
if (SDL_Init(SDL_INIT_VIDEO) & 0)
fprintf(stderr, "can not initialize SDL:%s\n", SDL_GetError());
atexit(SDL_Quit);
SDL_Surface *
screen = SDL_SetVideoMode(720, 576, 32, SDL_SWSURFACE|SDL_ANYFORMAT);
if ( screen == NULL )
SDL_Surface *
Uint32 rmask, gmask, bmask,
/* SDL interprets each pixel as a 32-bit number, so our masks must depend
on the endianness (byte order) of the machine */#if 0//SDL_BYTEORDER == SDL_BIG_ENDIAN
rmask = 0xff000000;
gmask = 0x00ff0000;
bmask = 0x0000ff00;
amask = 0x000000#else
rmask = 0x000000
gmask = 0x0000ff00;
bmask = 0x00ff0000;
amask = 0xff000000;#endif image = SDL_CreateRGBSurface(SDL_SWSURFACE, 720, 576, 0,
rmask, gmask, bmask, NULL);
if(image == NULL)
//fprintf(stderr, "CreateRGBSurface failed: %s\n", SDL_GetError());
//////////////////////////////////////////////////////////////////
while(av_read_frame(pFormatCtx, &packet)&=0)
// Is this a packet from the video stream?
if(packet.stream_index==videoStream)
// Decode video frame
avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
packet.data, packet.size);
// Did we get a video frame?
if(frameFinished)
// Convert the image from its native format to RGB
img_convert_ctx = sws_getContext( pCodecCtx-&width,
pCodecCtx-&height,
pCodecCtx-&pix_fmt,
pCodecCtx-&width,
pCodecCtx-&height,
PIX_FMT_RGB32,
sws_flags, NULL, NULL, NULL);
sws_scale(img_convert_ctx,pFrame-&data,pFrame-&linesize,0,pCodecCtx-&height,pFrameRGB-&data,pFrameRGB-&linesize);
sws_freeContext(img_convert_ctx);
////////////////////////////////////////////////////////////////
memcpy(screen-&pixels,buffer,720*576*4);
SDL_UpdateRect(screen, 0, 0, image-&w, image-&h);
/* Free the allocated BMP surface */
SDL_FreeSurface(image);
/////////////////////////////////////////////////////////////////
// Save the frame to disk
if((++i&=5))
SaveFrame(pFrameRGB, pCodecCtx-&width, pCodecCtx-&height, i);
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
// Free the RGB image
av_free(buffer);
av_free(pFrameRGB);
// Free the YUV frame
av_free(pFrame);
// Close the codec
avcodec_close(pCodecCtx);
// Close the video file
av_close_input_file(pFormatCtx);
MessageBox("over");}void CTest0Dlg::OnButton2() { // TODO: Add your control notification handler code here
AVFormatContext *pFormatC
AVCodecContext
*pFrameRGB;
static int sws_flags = SWS_BICUBIC;
struct SwsContext *img_convert_
char argv[100] = "d:\\temp\\VIDEO"; // argv[1] = "d:\\temp\\ff.mpg";
// /*注册所有可用的格式和编解码器*/
av_register_all();
// Open video file /*以输入方式打开一个媒体文件,也即源文件,codecs并没有打开,只读取了文件的头信息*/
if(av_open_input_file(&pFormatCtx, argv, NULL, 0, NULL)!=0) // Couldn't open file
// Retrieve stream information/*通过读取媒体文件的中的包来获取媒体文件中的流信息,对于没有头信息的文件如(mpeg)是非常有用的,// 该函数通常重算类似mpeg-2帧模式的真实帧率,该函数并未改变逻辑文件的position.*/
if(av_find_stream_info(pFormatCtx)&0) // Couldn't find stream information
// Dump information about file onto standard error//该函数的作用就是检查下初始化过程中设置的参数是否符合规范
dump_format(pFormatCtx, 0, argv, 0);
// Find the first video stream
videoStream=-1;
printf("%d\n",pFormatCtx-&nb_streams);
getchar();
for(i=0; i&pFormatCtx-&nb_ i++)
if(pFormatCtx-&streams[i]-&codec-&codec_type==CODEC_TYPE_VIDEO&&videoStream & 0) {
videoStream=i;
if(videoStream==-1) // Didn't find a video stream
// Get a pointer to the codec context for the video stream
pCodecCtx=pFormatCtx-&streams[videoStream]-&
// Find the decoder for the video stream
pCodec=avcodec_find_decoder(pCodecCtx-&codec_id);/*通过code ID查找一个已经注册的音视频编码器,查找编码器之前,必须先调用av_register_all注册所有支持的编码器音视频编码器保存在一个链表中,查找过程中,函数从头到尾遍历链表,通过比较编码器的ID来查找*/
if(pCodec==NULL) {
fprintf(stderr, "Unsupported codec!\n"); // Codec not found
// Open codec//使用给定的AVCodec初始化AVCodecContext
if(avcodec_open(pCodecCtx, pCodec)&0) // Could not open codec
//printf("name %s\n",pCodec-&name);
//getchar();
// Allocate video frame
pFrame=avcodec_alloc_frame();
// Allocate an AVFrame structure
pFrameRGB=avcodec_alloc_frame();
if(pFrameRGB==NULL)
// Determine required buffer size and allocate buffer
numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx-&width,
pCodecCtx-&height);
buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
// Assign appropriate parts of buffer to image planes in pFrameRGB
// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
// of AVPicture
avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
pCodecCtx-&width, pCodecCtx-&height);
// Read frames and save first five frames to disk
////////////////////////////////////////////////////////////////
if (SDL_Init(SDL_INIT_VIDEO) & 0)
fprintf(stderr, "can not initialize SDL:%s\n", SDL_GetError());
atexit(SDL_Quit);
SDL_Surface *
screen = SDL_SetVideoMode(720, 576, 24, SDL_SWSURFACE|SDL_ANYFORMAT);
if ( screen == NULL )
SDL_Surface *
Uint32 rmask, gmask, bmask,
/* SDL interprets each pixel as a 32-bit number, so our masks must depend
on the endianness (byte order) of the machine */#if 0//SDL_BYTEORDER == SDL_BIG_ENDIAN
rmask = 0xff000000;
gmask = 0x00ff0000;
bmask = 0x0000ff00;
amask = 0x000000#else
rmask = 0x000000
gmask = 0x0000ff00;
bmask = 0x00ff0000;
amask = 0xff000000;#endif image = SDL_CreateRGBSurface(SDL_SWSURFACE, 720, 576, 0,
rmask, gmask, bmask, NULL);
if(image == NULL)
//fprintf(stderr, "CreateRGBSurface failed: %s\n", SDL_GetError());
//////////////////////////////////////////////////////////////////
while(av_read_frame(pFormatCtx, &packet)&=0)
// Is this a packet from the video stream?
if(packet.stream_index==videoStream)
// Decode video frame
avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
packet.data, packet.size);
// Did we get a video frame?
if(frameFinished)
// Convert the image from its native format to RGB
img_convert_ctx = sws_getContext( pCodecCtx-&width,
pCodecCtx-&height,
pCodecCtx-&pix_fmt,
pCodecCtx-&width,
pCodecCtx-&height,
PIX_FMT_BGR24,
sws_flags, NULL, NULL, NULL);
sws_scale(img_convert_ctx,pFrame-&data,pFrame-&linesize,0,pCodecCtx-&height,pFrameRGB-&data,pFrameRGB-&linesize);
sws_freeContext(img_convert_ctx);
////////////////////////////////////////////////////////////////
memcpy(screen-&pixels,buffer,720*576*3);
SDL_UpdateRect(screen, 0, 0, image-&w, image-&h);
/* Free the allocated BMP surface */
SDL_FreeSurface(image);
/////////////////////////////////////////////////////////////////
// Save the frame to disk
if((++i&=5))
SaveFrame(pFrameRGB, pCodecCtx-&width, pCodecCtx-&height, i);
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
// Free the RGB image
av_free(buffer);
av_free(pFrameRGB);
// Free the YUV frame
av_free(pFrame);
// Close the codec
avcodec_close(pCodecCtx);
// Close the video file
av_close_input_file(pFormatCtx);
MessageBox("over");}void CTest0Dlg::OnButton3() { // TODO: Add your control notification handler code here
// TODO: Add your control notification handler code here
AVFormatContext *pFormatC
AVCodecContext
*pFrameYUV;
static int sws_flags = SWS_BICUBIC;
struct SwsContext *img_convert_
char argv[100] = "d:\\temp\\VIDEO"; // argv[1] = "d:\\temp\\ff.mpg";
// /*注册所有可用的格式和编解码器*/
av_register_all();
// Open video file /*以输入方式打开一个媒体文件,也即源文件,codecs并没有打开,只读取了文件的头信息*/
if(av_open_input_file(&pFormatCtx, argv, NULL, 0, NULL)!=0) // Couldn't open file
// Retrieve stream information/*通过读取媒体文件的中的包来获取媒体文件中的流信息,对于没有头信息的文件如(mpeg)是非常有用的,// 该函数通常重算类似mpeg-2帧模式的真实帧率,该函数并未改变逻辑文件的position.*/
if(av_find_stream_info(pFormatCtx)&0) // Couldn't find stream information
// Dump information about file onto standard error//该函数的作用就是检查下初始化过程中设置的参数是否符合规范
dump_format(pFormatCtx, 0, argv, 0);
// Find the first video stream
videoStream=-1;
printf("%d\n",pFormatCtx-&nb_streams);
getchar();
for(i=0; i&pFormatCtx-&nb_ i++)
if(pFormatCtx-&streams[i]-&codec-&codec_type==CODEC_TYPE_VIDEO&&videoStream & 0) {
videoStream=i;
if(videoStream==-1) // Didn't find a video stream
// Get a pointer to the codec context for the video stream
pCodecCtx=pFormatCtx-&streams[videoStream]-&
// Find the decoder for the video stream
pCodec=avcodec_find_decoder(pCodecCtx-&codec_id);/*通过code ID查找一个已经注册的音视频编码器,查找编码器之前,必须先调用av_register_all注册所有支持的编码器音视频编码器保存在一个链表中,查找过程中,函数从头到尾遍历链表,通过比较编码器的ID来查找*/
if(pCodec==NULL) {
fprintf(stderr, "Unsupported codec!\n"); // Codec not found
// Open codec//使用给定的AVCodec初始化AVCodecContext
if(avcodec_open(pCodecCtx, pCodec)&0) // Could not open codec
//printf("name %s\n",pCodec-&name);
//getchar();
// Allocate video frame
pFrame=avcodec_alloc_frame();
// Allocate an AVFrame structure
pFrameYUV=avcodec_alloc_frame();
if(pFrameYUV==NULL)
// Determine required buffer size and allocate buffer
numBytes=avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx-&width,
pCodecCtx-&height);
buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
// Assign appropriate parts of buffer to image planes in pFrameRGB
// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
// of AVPicture
avpicture_fill((AVPicture *)pFrameYUV, buffer, PIX_FMT_YUV420P,
pCodecCtx-&width, pCodecCtx-&height);
// Read frames and save first five frames to disk
////////////////////////////////////////////////////////////////
if (SDL_Init(SDL_INIT_VIDEO) & 0)
fprintf(stderr, "can not initialize SDL:%s\n", SDL_GetError());
atexit(SDL_Quit);
SDL_Surface *
screen = SDL_SetVideoMode(720, 576, 24, SDL_SWSURFACE|SDL_ANYFORMAT);
if ( screen == NULL )
SDL_Overlay * image = SDL_CreateYUVOverlay(pCodecCtx-&width,
pCodecCtx-&height,
SDL_YV12_OVERLAY,
if(image == NULL)
//fprintf(stderr, "CreateRGBSurface failed: %s\n", SDL_GetError());
//////////////////////////////////////////////////////////////////
while(av_read_frame(pFormatCtx, &packet)&=0)
// Is this a packet from the video stream?
if(packet.stream_index==videoStream)
// Decode video frame
avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
packet.data, packet.size);
// Did we get a video frame?
if(frameFinished)
// Convert the image from its native format to RGB
SDL_LockYUVOverlay(image);//
pFrameYUV-&data[0] = image-&pixels[0];//预先改变指针数据区,不用copy//
pFrameYUV-&data[1] = image-&pixels[2];//
pFrameYUV-&data[2] = image-&pixels[1];//
pFrameYUV-&linesize[0] = image-&pitches[0];//
pFrameYUV-&linesize[1] = image-&pitches[2];//
pFrameYUV-&linesize[2] = image-&pitches[1];
img_convert_ctx = sws_getContext( pCodecCtx-&width,
pCodecCtx-&height,
pCodecCtx-&pix_fmt,
pCodecCtx-&width,
pCodecCtx-&height,
PIX_FMT_YUV420P,
sws_flags, NULL, NULL, NULL);
sws_scale(img_convert_ctx,pFrame-&data,pFrame-&linesize,0,pCodecCtx-&height,pFrameYUV-&data,pFrameYUV-&linesize);
sws_freeContext(img_convert_ctx);
memcpy(image-&pixels[0], pFrameYUV-&data[0],720*576);//拷贝数据yuv420,也可预先改变指针
memcpy(image-&pixels[2], pFrameYUV-&data[1],720*576/4);
memcpy(image-&pixels[1], pFrameYUV-&data[2],720*576/4);
SDL_UnlockYUVOverlay(image);
rect.x = 0;
rect.y = 0;
rect.w = pCodecCtx-&
rect.h = pCodecCtx-& SDL_DisplayYUVOverlay(image, &rect);
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
// Free the RGB image
av_free(buffer);
av_free(pFrameYUV);
// Free the YUV frame
av_free(pFrame);
// Close the codec
avcodec_close(pCodecCtx);
// Close the video file
av_close_input_file(pFormatCtx);
MessageBox("over");}void CTest0Dlg::OnButton4() { // TODO: Add your control notification handler code here
// TODO: Add your control notification handler code here
AVFormatContext *pFormatC
AVCodecContext
*pFrameYUV;
static int sws_flags = SWS_BICUBIC;
struct SwsContext *img_convert_
char argv[100] = "d:\\temp\\VIDEO"; // argv[1] = "d:\\temp\\ff.mpg";
// /*注册所有可用的格式和编解码器*/
av_register_all();
// Open video file /*以输入方式打开一个媒体文件,也即源文件,codecs并没有打开,只读取了文件的头信息*/
if(av_open_input_file(&pFormatCtx, argv, NULL, 0, NULL)!=0) // Couldn't open file
// Retrieve stream information/*通过读取媒体文件的中的包来获取媒体文件中的流信息,对于没有头信息的文件如(mpeg)是非常有用的,// 该函数通常重算类似mpeg-2帧模式的真实帧率,该函数并未改变逻辑文件的position.*/
if(av_find_stream_info(pFormatCtx)&0) // Couldn't find stream information
// Dump information about file onto standard error//该函数的作用就是检查下初始化过程中设置的参数是否符合规范
dump_format(pFormatCtx, 0, argv, 0);
// Find the first video stream
videoStream=-1;
printf("%d\n",pFormatCtx-&nb_streams);
getchar();
for(i=0; i&pFormatCtx-&nb_ i++)
if(pFormatCtx-&streams[i]-&codec-&codec_type==CODEC_TYPE_VIDEO&&videoStream & 0) {
videoStream=i;
if(videoStream==-1) // Didn't find a video stream
// Get a pointer to the codec context for the video stream
pCodecCtx=pFormatCtx-&streams[videoStream]-&
// Find the decoder for the video stream
pCodec=avcodec_find_decoder(pCodecCtx-&codec_id);/*通过code ID查找一个已经注册的音视频编码器,查找编码器之前,必须先调用av_register_all注册所有支持的编码器音视频编码器保存在一个链表中,查找过程中,函数从头到尾遍历链表,通过比较编码器的ID来查找*/
if(pCodec==NULL) {
fprintf(stderr, "Unsupported codec!\n"); // Codec not found
// Open codec//使用给定的AVCodec初始化AVCodecContext
if(avcodec_open(pCodecCtx, pCodec)&0) // Could not open codec
//printf("name %s\n",pCodec-&name);
//getchar();
// Allocate video frame
pFrame=avcodec_alloc_frame();
// Allocate an AVFrame structure
pFrameYUV=avcodec_alloc_frame();
if(pFrameYUV==NULL)
// Determine required buffer size and allocate buffer
numBytes=avpicture_get_size(PIX_FMT_YUV422, pCodecCtx-&width,
pCodecCtx-&height);
buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
// Assign appropriate parts of buffer to image planes in pFrameRGB
// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
// of AVPicture
avpicture_fill((AVPicture *)pFrameYUV, buffer, PIX_FMT_YUV422,
pCodecCtx-&width, pCodecCtx-&height);
// Read frames and save first five frames to disk
////////////////////////////////////////////////////////////////
if (SDL_Init(SDL_INIT_VIDEO) & 0)
fprintf(stderr, "can not initialize SDL:%s\n", SDL_GetError());
atexit(SDL_Quit);
SDL_Surface *
screen = SDL_SetVideoMode(720, 576, 24, SDL_SWSURFACE|SDL_ANYFORMAT);
if ( screen == NULL )
SDL_Overlay * image = SDL_CreateYUVOverlay(pCodecCtx-&width,
pCodecCtx-&height,
SDL_YUY2_OVERLAY,
if(image == NULL)
//fprintf(stderr, "CreateRGBSurface failed: %s\n", SDL_GetError());
//////////////////////////////////////////////////////////////////
while(av_read_frame(pFormatCtx, &packet)&=0)
// Is this a packet from the video stream?
if(packet.stream_index==videoStream)
// Decode video frame
avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
packet.data, packet.size);
// Did we get a video frame?
if(frameFinished)
// Convert the image from its native format to RGB
SDL_LockYUVOverlay(image);//
pFrameYUV-&data[0] = image-&pixels[0];//
pFrameYUV-&data[1] = image-&pixels[2];//
pFrameYUV-&data[2] = image-&pixels[1];//
pFrameYUV-&linesize[0] = image-&pitches[0];//
pFrameYUV-&linesize[1] = image-&pitches[2];//
pFrameYUV-&linesize[2] = image-&pitches[1];
img_convert_ctx = sws_getContext( pCodecCtx-&width,
pCodecCtx-&height,
pCodecCtx-&pix_fmt,
pCodecCtx-&width,
pCodecCtx-&height,
PIX_FMT_YUV422,
sws_flags, NULL, NULL, NULL);
sws_scale(img_convert_ctx,pFrame-&data,pFrame-&linesize,0,pCodecCtx-&height,pFrameYUV-&data,pFrameYUV-&linesize);
sws_freeContext(img_convert_ctx);
memcpy(image-&pixels[0], pFrameYUV-&data[0],720*576*2);//拷贝数据yuv422
SDL_UnlockYUVOverlay(image);
rect.x = 0;
rect.y = 0;
rect.w = pCodecCtx-&
rect.h = pCodecCtx-& SDL_DisplayYUVOverlay(image, &rect);
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
// Free the RGB image
av_free(buffer);
av_free(pFrameYUV);
// Free the YUV frame
av_free(pFrame);
// Close the codec
avcodec_close(pCodecCtx);
// Close the video file
av_close_input_file(pFormatCtx);
MessageBox("over");}
http://download.csdn.net/detail/mao
版权声明:本文内容由互联网用户自发贡献,本社区不拥有所有权,也不承担相关法律责任。如果您发现本社区中有涉嫌抄袭的内容,欢迎发送邮件至: 进行举报,并提供相关证据,一经查实,本社区将立刻删除涉嫌侵权内容。
用云栖社区APP,舒服~
【云栖快讯】红轴机械键盘、无线鼠标等753个大奖,先到先得,云栖社区首届博主招募大赛9月21日-11月20日限时开启,为你再添一个高端技术交流场所&&
为您提供简单高效、处理能力可弹性伸缩的计算服务,帮助您快速构建更稳定、安全的应用,提升运维效率,降低 IT 成本...
RDS是一种稳定可靠、可弹性伸缩的在线数据库服务。支持MySQL、SQL Server、PostgreSQL、高...
MaxCompute75折抢购
Loading...}

我要回帖

更多关于 ffmpeg yuv 的文章

更多推荐

版权声明:文章内容来源于网络,版权归原作者所有,如有侵权请点击这里与我们联系,我们将及时删除。

点击添加站长微信