
Category: C/C++

2014-11-01 11:43:12

    This example implements a simple pipeline: decode a video, re-encode it to H.264, and then mux the result into an MP4 file. It is mainly meant as a record of how the muxing is done.
    Here are the details:
    As everyone knows, decoded frames come out in presentation (playback) order: the decoder takes data stored in coding order and outputs it again in presentation order. The encoder does the reverse, reordering frames into the order the decoder will need before storing them.
    Of course, this only matters when B-frames are present; with only I- and P-frames, coding order and presentation order are the same.
    For example: if the decoded (presentation-order) sequence is IBBP, then after re-encoding, the stored (coding-order) sequence will be IPBB.
    This reordering happens internally, so as users of the FFmpeg libraries we do not have to care much about it. But take note: when we push frames into the encoder, each frame must carry a timestamp that matches its playback order. In practice this just means making sure the pts of every frame you hand to the encoder is strictly increasing; otherwise the encoder reports "non-strictly-monotonic pts at frame". The reason is that the encoder requires the incoming frames to be increasing in time; why exactly it needs that, you would have to dig into the encoder itself to find out.

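To make the reordering concrete, here is one possible timestamp layout for that four-frame example (the numbers are just frame indices I made up, not real time-base ticks):

    decoded / display order:  I  B  B  P    pts: 0 1 2 3
    encoded / stored order:   I  P  B  B    pts: 0 3 1 2

The decoder reads the stored sequence I P B B and, using the pts, emits the pictures again as 0 1 2 3. The "non-strictly-monotonic pts" warning mentioned above comes from a check like the one below, taken from x264's command-line code: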
if( pic.i_pts <= largest_pts )
{
    if( cli_log_level >= X264_LOG_DEBUG || pts_warning_cnt < MAX_PTS_WARNING )
        x264_cli_log( "x264", X264_LOG_WARNING, "non-strictly-monotonic pts at frame %d (%"PRId64" <= %"PRId64")\n",
                      i_frame, pic.i_pts, largest_pts );
    else if( pts_warning_cnt == MAX_PTS_WARNING )
        x264_cli_log( "x264", X264_LOG_WARNING, "too many nonmonotonic pts warnings, suppressing further ones\n" );
    pts_warning_cnt++;
    pic.i_pts = largest_pts + ticks_per_frame;
}

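As a minimal sketch of that point (my own example, not from the post; it assumes enc_ctx is an H.264 encoder context already opened with avcodec_open2, and it uses the same old avcodec_encode_video2 API as the rest of this article), a plain counter is already enough to keep the pts strictly increasing. As explained below, when the result has to be muxed against the original audio you would carry over the decoded pkt_pts instead of a bare counter:

// Sketch only: feed one frame to an opened encoder with a strictly increasing pts,
// counted in enc_ctx->time_base units. next_pts starts at 0 and is never reused.
static int encode_one(AVCodecContext *enc_ctx, AVFrame *frame, int64_t *next_pts)
{
    frame->pts = (*next_pts)++;      // strictly monotonic: 0, 1, 2, ...

    AVPacket out_pkt;
    av_init_packet(&out_pkt);
    out_pkt.data = NULL;             // let the encoder allocate the output buffer
    out_pkt.size = 0;

    int got_packet = 0;
    int ret = avcodec_encode_video2(enc_ctx, &out_pkt, frame, &got_packet);
    if (ret < 0)
        return ret;                  // encoding error
    if (got_packet)
        av_free_packet(&out_pkt);    // a real program would mux the packet here
    return 0;
}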
    After the frames are sent to the encoder, each output packet carries its own pts, dts and so on. But remember: those values are derived from the pts we fed in ourselves. So when this re-encoded video is muxed with the original audio, the result can be badly out of sync. Thinking about it now, the problem is easy to understand: the pts on the two sides differ greatly, so of course they drift far apart when the player synchronizes them after decoding.
    In fact, when FFmpeg decodes, it stores the presentation timestamp of each decoded frame in the frame's pkt_pts member. So we can simply assign that value to the frame's pts before sending it to the encoder, and the timestamps in the encoded packets will line up with the original audio again.

ret = avcodec_decode_video2(video_dec_ctx, pFrame, &got_picture, pkt);
if (ret < 0)
{
    delete pkt;
    return 0;
}
pFrame->pts = pFrame->pkt_pts;  // carry the decoded presentation timestamp over to the frame we re-encode
    Finally, mux the result into an MP4 file and we are done.
    During the muxing there is an API, av_rescale_q_rnd, which is used to convert pts values. The time_base configured for the MP4 output is not the same as in the original file, so this function is needed to recompute the pts, dts and duration of each packet in terms of the new MP4 time base. I will keep digging into the exact reasons later.
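As a small illustration of what this call does (my numbers, not from the post): av_rescale_q_rnd just rescales a tick count from one time base to another with the given rounding mode, so 480 ticks in a 1/1000 time base (e.g. a Matroska input) is the same instant as 43200 ticks in a 1/90000 time base:

// Illustrative only: 480/1000 s and 43200/90000 s are the same point in time.
AVRational tb_in  = { 1, 1000 };    // time base of the input stream (example value)
AVRational tb_out = { 1, 90000 };   // time base of the output stream (example value)
int64_t pts_in  = 480;
int64_t pts_out = av_rescale_q_rnd(pts_in, tb_in, tb_out, AV_ROUND_NEAR_INF);
// pts_out == 43200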
  
  Here is the code:


// Headers assumed for this example (the original post did not show them).
#include <cstdio>
#include <cstring>
extern "C"
{
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
}

const char* SRC_FILE = "1.mkv";
const char* OUT_FILE = "outfile.h264";
const char* OUT_FMT_FILE = "outfmtfile.mp4";
int main()
{
    av_register_all();

    // Open the input file and read its stream information.
    AVFormatContext* pFormat = NULL;
    if (avformat_open_input(&pFormat, SRC_FILE, NULL, NULL) < 0)
    {
        return 0;
    }
    AVCodecContext* video_dec_ctx = NULL;
    AVCodec* video_dec = NULL;
    if (avformat_find_stream_info(pFormat, NULL) < 0)
    {
        return 0;
    }
    av_dump_format(pFormat, 0, SRC_FILE, 0);

    // Open the decoder for the first (video) stream.
    video_dec_ctx = pFormat->streams[0]->codec;
    video_dec = avcodec_find_decoder(video_dec_ctx->codec_id);
    if (avcodec_open2(video_dec_ctx, video_dec, NULL) < 0)
    {
        return 0;
    }

    // Raw H.264 output context, used here mainly to host the encoder stream.
    AVFormatContext* pOFormat = NULL;
    AVOutputFormat* ofmt = NULL;
    if (avformat_alloc_output_context2(&pOFormat, NULL, NULL, OUT_FILE) < 0)
    {
        return 0;
    }
    ofmt = pOFormat->oformat;
    if (avio_open(&(pOFormat->pb), OUT_FILE, AVIO_FLAG_READ_WRITE) < 0)
    {
        return 0;
    }

    // Set up the H.264 encoder.
    AVCodecContext *video_enc_ctx = NULL;
    AVCodec *video_enc = NULL;
    video_enc = avcodec_find_encoder(AV_CODEC_ID_H264);
    AVStream *video_st = avformat_new_stream(pOFormat, video_enc);
    if (!video_st)
        return 0;
    video_enc_ctx = video_st->codec;
    video_enc_ctx->width = video_dec_ctx->width;
    video_enc_ctx->height = video_dec_ctx->height;
    video_enc_ctx->pix_fmt = PIX_FMT_YUV420P;
    video_enc_ctx->time_base.num = 1;
    video_enc_ctx->time_base.den = 25;
    video_enc_ctx->bit_rate = video_dec_ctx->bit_rate;
    video_enc_ctx->gop_size = 250;
    video_enc_ctx->max_b_frames = 10;
    //H264
    //pCodecCtx->me_range = 16;
    //pCodecCtx->max_qdiff = 4;
    video_enc_ctx->qmin = 10;
    video_enc_ctx->qmax = 51;
    if (avcodec_open2(video_enc_ctx, video_enc, NULL) < 0)
    {
        printf("Failed to open the encoder!\n");
        return 0;
    }
    printf("Output264video Information====================\n");
    av_dump_format(pOFormat, 0, OUT_FILE, 1);
    printf("Output264video Information====================\n");

    //mp4 file: copy the stream layout of the input.
    AVFormatContext* pMp4Format = NULL;
    AVOutputFormat* pMp4OFormat = NULL;
    if (avformat_alloc_output_context2(&pMp4Format, NULL, NULL, OUT_FMT_FILE) < 0)
    {
        return 0;
    }
    pMp4OFormat = pMp4Format->oformat;
    if (avio_open(&(pMp4Format->pb), OUT_FMT_FILE, AVIO_FLAG_READ_WRITE) < 0)
    {
        return 0;
    }

    for (int i = 0; i < pFormat->nb_streams; i++) {
        AVStream *in_stream = pFormat->streams[i];
        AVStream *out_stream = avformat_new_stream(pMp4Format, in_stream->codec->codec);
        if (!out_stream) {
            return 0;
        }
        int ret = 0;
        ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
        if (ret < 0) {
            fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
            return 0;
        }
        out_stream->codec->codec_tag = 0;
        if (pMp4Format->oformat->flags & AVFMT_GLOBALHEADER)
            out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }

    av_dump_format(pMp4Format, 0, OUT_FMT_FILE, 1);

    if (avformat_write_header(pMp4Format, NULL) < 0)
    {
        return 0;
    }

    ////

    // x264 private options: fast preset, no lookahead delay.
    av_opt_set(video_enc_ctx->priv_data, "preset", "superfast", 0);
    av_opt_set(video_enc_ctx->priv_data, "tune", "zerolatency", 0);
    avformat_write_header(pOFormat, NULL);
    AVPacket *pkt = new AVPacket();
    av_init_packet(pkt);
    AVFrame *pFrame = avcodec_alloc_frame();
    int ts = 0;
    while (1)
    {
        if (av_read_frame(pFormat, pkt) < 0)
        {
            avio_close(pOFormat->pb);
            av_write_trailer(pMp4Format);
            avio_close(pMp4Format->pb);
            delete pkt;
            return 0;
        }
        if (pkt->stream_index == 0)
        {
            // Video: decode, re-encode to H.264, then mux into the MP4.
            int got_picture = 0, ret = 0;
            ret = avcodec_decode_video2(video_dec_ctx, pFrame, &got_picture, pkt);
            if (ret < 0)
            {
                delete pkt;
                return 0;
            }
            pFrame->pts = pFrame->pkt_pts; // use the decoded pts (a bare counter such as ts++ here caused A/V desync)
            if (got_picture)
            {
                AVPacket *tmppkt = new AVPacket;
                av_init_packet(tmppkt);
                int size = video_enc_ctx->width*video_enc_ctx->height * 3 / 2;
                char* buf = new char[size];
                memset(buf, 0, size);
                tmppkt->data = (uint8_t*)buf;
                tmppkt->size = size;
                ret = avcodec_encode_video2(video_enc_ctx, tmppkt, pFrame, &got_picture);
                if (ret < 0)
                {
                    avio_close(pOFormat->pb);
                    delete[] buf;
                    return 0;
                }
                if (got_picture)
                {
                    //ret = av_interleaved_write_frame(pOFormat, tmppkt);
                    AVStream *in_stream = pFormat->streams[pkt->stream_index];
                    AVStream *out_stream = pMp4Format->streams[pkt->stream_index];

                    // Rescale timestamps into the MP4 stream's time base.
                    tmppkt->pts = av_rescale_q_rnd(tmppkt->pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF);
                    tmppkt->dts = av_rescale_q_rnd(tmppkt->dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF);
                    tmppkt->duration = av_rescale_q(tmppkt->duration, in_stream->time_base, out_stream->time_base);
                    tmppkt->pos = -1;
                    ret = av_interleaved_write_frame(pMp4Format, tmppkt);
                    if (ret < 0)
                        return 0;
                    delete tmppkt;
                    delete[] buf;
                }
            }
            //avcodec_free_frame(&pFrame);
        }
        else if (pkt->stream_index == 1)
        {
            // Audio: no re-encoding, just rescale the timestamps and remux.
            AVStream *in_stream = pFormat->streams[pkt->stream_index];
            AVStream *out_stream = pMp4Format->streams[pkt->stream_index];

            pkt->pts = av_rescale_q_rnd(pkt->pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF);
            pkt->dts = av_rescale_q_rnd(pkt->dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF);
            pkt->duration = av_rescale_q(pkt->duration, in_stream->time_base, out_stream->time_base);
            pkt->pos = -1;
            if (av_interleaved_write_frame(pMp4Format, pkt) < 0)
                return 0;
        }
    }
    avcodec_free_frame(&pFrame);
    return 0;
}
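One thing the listing above does not do is drain the encoder at end of file: with max_b_frames set to 10, libx264 keeps a number of frames buffered internally, so the last few packets never reach the MP4. A rough sketch of that flush (my addition, reusing video_enc_ctx, pFormat and pMp4Format from the listing and the same rescaling convention as the main loop) would run right before av_write_trailer:

// Sketch only: pull the delayed packets out of the encoder before writing the trailer.
AVStream *in_stream  = pFormat->streams[0];
AVStream *out_stream = pMp4Format->streams[0];
while (1)
{
    AVPacket flushpkt;
    av_init_packet(&flushpkt);
    flushpkt.data = NULL;    // let the encoder allocate the buffer
    flushpkt.size = 0;
    int got_packet = 0;
    if (avcodec_encode_video2(video_enc_ctx, &flushpkt, NULL, &got_packet) < 0 || !got_packet)
        break;               // nothing left inside the encoder
    flushpkt.pts = av_rescale_q_rnd(flushpkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF);
    flushpkt.dts = av_rescale_q_rnd(flushpkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF);
    flushpkt.pos = -1;
    av_interleaved_write_frame(pMp4Format, &flushpkt);
    av_free_packet(&flushpkt);
}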
