Linux learning notes
2010-08-04 14:07:14
source: http://imkongguxiaoren.blog.51cto.com/488693/101859
Copyright notice: this is an original work. Reposting is permitted, but the original source must be cited as a hyperlink, together with the author information and this notice; otherwise legal liability will be pursued. http://imkongguxiaoren.blog.51cto.com/488693/101859
The program below is adapted, with small modifications, from a program on the Zhonghua Video website (中华视频网), and it compiles on Red Hat 9. The test input was the sample_h264_100kbit.mp4 file that ships with a Darwin Streaming Server installation. The demo reads compressed video frames from the file, decodes them, converts the decoded YUV data to YUV422 format, and writes the result to an output file (*.yuv), which can be viewed with YUVviewerPlus.exe. The original file is 976 KB; the decompressed output is 186 MB.
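As a sanity check on that size difference: a YUV422P frame stores a full-resolution Y plane plus U and V planes at half horizontal resolution, so each raw frame takes 2 × width × height bytes, which is why the decoded output is roughly two orders of magnitude larger than the compressed input. A minimal sketch of the calculation (the 320×240 dimensions below are placeholders for illustration, not the actual size of the sample clip):

#include <stdio.h>

/* Bytes in one YUV422P frame: Y is w*h, U and V are each (w/2)*h. */
static long yuv422p_frame_size(int w, int h)
{
    return (long)w * h + 2L * (w / 2) * h;
}

int main(void)
{
    int w = 320, h = 240; /* hypothetical dimensions, for illustration only */
    printf("one YUV422P frame: %ld bytes\n", yuv422p_frame_size(w, h));
    return 0;
}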
#define HAVE_AV_CONFIG_H
#include <stdio.h>
#include <string.h>
#include <time.h>
#include "avformat.h"
#undef strcat
#undef sprintf
#define PIC_FMT PIX_FMT_YUV422P
/* new types */
enum bool{false=0,true};
typedef enum bool bool;
int g_yuv_index = 1;
static void smartAV_dump_yuv(char *file_name, AVPicture *pic, int width, int height)
{
    FILE *fp = 0;
    char filename[128], index_name[32];
    int i, j, shift;
    uint8_t *yuv_factor;
    strcpy(filename, file_name);
    sprintf(index_name, "new_yuv_dump_%d.yuv", g_yuv_index);
    strcat(filename, index_name);
    fp = fopen(filename, "wb");
    if (fp) {
        for (i = 0; i < 3; i++) {
            shift = (i == 0 ? 0 : 1);
            yuv_factor = pic->data[i];
            for (j = 0; j < (height >> shift); j++) {
                fwrite(yuv_factor, (width >> shift), 1, fp);
                yuv_factor += pic->linesize[i];
            }
        }
        fclose(fp);
        g_yuv_index++;
    }
}
static bool GetNextFrame(AVFormatContext *pFormatCtx, AVCodecContext *pCodecCtx, int videoStream, AVFrame *pFrame)
{
    static AVPacket packet;
    static int bytesRemaining = 0;
    static uint8_t *rawData;
    static bool fFirstTime = true;
    int bytesDecoded;
    int frameFinished;
    // First time we're called, set packet.data to NULL to indicate it
    // doesn't have to be freed
    if (fFirstTime) {
        fFirstTime = false;
        packet.data = NULL;
    }
    // Decode packets until we have decoded a complete frame
    while (true)
    {
        // Work on the current packet until we have decoded all of it
        while (bytesRemaining > 0)
        {
            // Decode the next chunk of data
            bytesDecoded = avcodec_decode_video(pCodecCtx, pFrame,
                &frameFinished, rawData, bytesRemaining);
            // Was there an error?
            if (bytesDecoded < 0) {
                fprintf(stderr, "Error while decoding frame\n");
                return false;
            }
            bytesRemaining -= bytesDecoded;
            rawData += bytesDecoded;
            // Did we finish the current frame? Then we can return
            if (frameFinished)
                return true;
        }
        // Read the next packet, skipping all packets that aren't for this
        // stream
        do {
            // Free old packet
            if (packet.data != NULL)
                av_free_packet(&packet);
            // Read new packet
            //if (av_read_packet(pFormatCtx, &packet) < 0)
            //    goto loop_exit;
            if (av_read_frame(pFormatCtx, &packet) < 0)
                goto loop_exit;
        } while (packet.stream_index != videoStream);
        bytesRemaining = packet.size;
        rawData = packet.data;
    }
loop_exit:
    // Decode the rest of the last frame
    bytesDecoded = avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
        rawData, bytesRemaining);
    // Free last packet
    if (packet.data != NULL)
        av_free_packet(&packet);
    return frameFinished != 0;
}
int main(int argc, char **argv)
{
    AVFormatContext *pFormatCtx;
    int i, videoStream;
    AVCodecContext *pCodecCtx;
    AVCodec *pCodec;
    AVFrame *pFrame;
    AVFrame *pFrameYUV;
    clock_t t;
    double fps;
    int y_size, i_frame = 0;
    int numBytes;
    uint8_t *buffer;
    //char* infile="test.mp4";
    //char* outfile="outfile.yuv";
    const char *infile;
    const char *outfile;
    if (argc != 3) {
        printf("usage: %s input_file.mp4 output_file.yuv\n"
               "NOTICE: this is a demo program; it reads the video frames\n"
               "compressed in the H.264 standard from input_file.mp4,\n"
               "decodes them, and finally writes the raw pictures (YUV422P)\n"
               "to output_file.yuv.\n"
               "\n", argv[0]);
        return -1;
    }
    infile = argv[1];
    outfile = argv[2];
    if (match_ext(outfile, "yuv") == 0)
    {
        fprintf(stderr, "\nthe output file must be *.yuv\n");
        return -1;
    }
    FILE *fp = fopen(outfile, "wb");
    if (fp == NULL) {
        fprintf(stderr, "\nCan't open file %s!", outfile);
        return -1;
    }
    // Register all formats and codecs
    av_register_all();
    // Open video file
    if (av_open_input_file(&pFormatCtx, infile, NULL, 0, NULL) != 0)
        return -1; // Couldn't open file
    // Retrieve stream information
    if (av_find_stream_info(pFormatCtx) < 0)
        return -1; // Couldn't find stream information
    // Dump information about the file to stderr
    dump_format(pFormatCtx, 0, infile, false);
    t = clock();
    // Find the first video stream
    videoStream = -1;
    for (i = 0; i < pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) {
            videoStream = i;
            break;
        }
    printf("videoStream: %d\n", videoStream);
    if (videoStream == -1)
        return -1; // Didn't find a video stream
    // Get a pointer to the codec context for the video stream
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;
    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL)
        return -1; // Codec not found
    printf("codec_id: %d (28 is ok)\n", pCodecCtx->codec_id);
    /*
     * You should not directly set CODEC_FLAG_TRUNCATED here,
     * because libavformat has already split the stream into packets.
     */
#if 0
    // Inform the codec that we can handle truncated bitstreams -- i.e.,
    // bitstreams where frame boundaries can fall in the middle of packets
    if (pCodec->capabilities & CODEC_CAP_TRUNCATED)
        pCodecCtx->flags |= CODEC_FLAG_TRUNCATED;
#endif
    // Open codec
    if (avcodec_open(pCodecCtx, pCodec) < 0)
        return -1; // Could not open codec
    // Allocate video frame
    pFrame = avcodec_alloc_frame();
    // Allocate an AVFrame structure for the converted picture
    pFrameYUV = avcodec_alloc_frame();
    if (pFrameYUV == NULL)
        return -1;
    // Determine required buffer size and allocate buffer
    numBytes = avpicture_get_size(PIC_FMT, pCodecCtx->width,
                                  pCodecCtx->height);
    //buffer = (uint8_t*)malloc(numBytes);
    buffer = (uint8_t *)av_malloc(numBytes);
    // Assign appropriate parts of buffer to image planes in pFrameYUV
    avpicture_fill((AVPicture *)pFrameYUV, buffer, PIC_FMT,
                   pCodecCtx->width, pCodecCtx->height);
    // Read frames
    while (GetNextFrame(pFormatCtx, pCodecCtx, videoStream, pFrame))
    {
        img_convert((AVPicture *)pFrameYUV, PIC_FMT, (AVPicture *)pFrame,
                    pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
        i_frame++;
        y_size = pCodecCtx->width * pCodecCtx->height;
#if 1
        if (i_frame == 1) // print the plane strides once, for the first frame
        {
            printf("\npFrame->linesize[0]=%d, pFrame->linesize[1]=%d, pFrame->linesize[2]=%d!\n",
                   pFrame->linesize[0], pFrame->linesize[1], pFrame->linesize[2]);
            printf("\npFrameYUV->linesize[0]=%d, pFrameYUV->linesize[1]=%d, pFrameYUV->linesize[2]=%d!",
                   pFrameYUV->linesize[0], pFrameYUV->linesize[1], pFrameYUV->linesize[2]);
        }
        // Write the Y plane, then the half-size U and V planes (YUV422P)
        fwrite(pFrameYUV->data[0], 1, y_size, fp);
        fwrite(pFrameYUV->data[1], 1, (y_size / 2), fp);
        fwrite(pFrameYUV->data[2], 1, (y_size / 2), fp);
#endif
        // if (i_frame > 5)
        //     break;
        // smartAV_dump_yuv(outfile, (AVPicture *)pFrame, pCodecCtx->width, pCodecCtx->height);
    }
    fclose(fp);
    // Calculate the decode rate
    t = clock() - t;
    fps = (double)(t) / CLOCKS_PER_SEC;
    fps = i_frame / fps;
    printf("\n==> Decode rate %.4f fps!\n", fps);
    // Free the YUV image buffer
    //free(buffer);
    av_free(buffer);
    av_free(pFrameYUV);
    // Free the YUV frame
    av_free(pFrame);
    // Close the codec
    avcodec_close(pCodecCtx);
    // Close the video file
    av_close_input_file(pFormatCtx);
    return 0;
}
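A side note on portability: img_convert was removed from later FFmpeg releases, where the same pixel-format conversion is normally done with libswscale. The following is only a rough sketch of how the img_convert call in the loop above might be replaced, assuming a newer FFmpeg with libswscale available; it is not part of the original demo and has not been tested against it.

#include <libswscale/swscale.h>

/* Convert pFrame (decoder output) to YUV422P in pFrameYUV, as img_convert did.
 * PIC_FMT is the demo's PIX_FMT_YUV422P; newer headers name it AV_PIX_FMT_YUV422P. */
struct SwsContext *sws = sws_getContext(pCodecCtx->width, pCodecCtx->height,
                                        pCodecCtx->pix_fmt,
                                        pCodecCtx->width, pCodecCtx->height,
                                        PIC_FMT, SWS_BILINEAR,
                                        NULL, NULL, NULL);
if (sws) {
    sws_scale(sws, (const uint8_t * const *)pFrame->data, pFrame->linesize,
              0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
    sws_freeContext(sws);
}

In a real program the SwsContext would be created once before the read loop and freed after it, rather than per frame.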