Category: C/C++

2015-01-28 15:40:31



av_seek_frame takes four parameters:

av_seek_frame(fmt_ctx, -1, 20 * AV_TIME_BASE, AVSEEK_FLAG_ANY);

Parameter 1: fmt_ctx, the container (format) context;
Parameter 2: stream_index, the index of the stream to seek on;
Parameter 3: the timestamp to seek to;
Parameter 4: the seek flags.

The container is the AVFormatContext, which holds the information demuxed from the container format: the number of streams, the stream types, the AVCodec, subtitle information, and so on.
stream_index: the container holds the streams themselves: audio streams, video streams (a 3D movie can carry several video streams), subtitle streams, and so on.
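
If you pass a real stream index instead of -1, the timestamp must be expressed in that stream's time_base rather than in AV_TIME_BASE units. Below is a minimal sketch of that conversion (the helper name seek_to_seconds is just for illustration, not part of the example that follows):

#include <libavformat/avformat.h>
#include <libavutil/mathematics.h>

/* Seek to target_sec seconds on a specific stream: rescale the target
 * from AV_TIME_BASE units into the stream's own time_base first. */
static int seek_to_seconds(AVFormatContext *fmt_ctx, int stream_index, double target_sec)
{
    int64_t ts = av_rescale_q((int64_t)(target_sec * AV_TIME_BASE),
                              AV_TIME_BASE_Q,
                              fmt_ctx->streams[stream_index]->time_base);
    /* AVSEEK_FLAG_BACKWARD lands on the nearest keyframe at or before the
     * target, avoiding the garbled output AVSEEK_FLAG_ANY can produce when
     * decoding starts on a non-key frame. */
    return av_seek_frame(fmt_ctx, stream_index, ts, AVSEEK_FLAG_BACKWARD);
}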

To seek to a given timestamp, then, four things are needed:
1. Obtain the AVFormatContext.
2. Obtain the stream_index.
3. Choose the target timestamp.
4. Set the seek flags according to your needs.

Putting these four points together, the code boils down to the following calls:
av_register_all();
avformat_open_input
avformat_find_stream_info
open_codec_context
av_seek_frame
These are the basic seek steps; the remaining details, such as the AVPacket handling, appear in the sketch and the full example below.
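
As a compact sketch of just these calls (error handling trimmed, and "input.mp4" is only a placeholder path), before the full demuxing example that follows:

#include <stdio.h>
#include <libavformat/avformat.h>

int main(void)
{
    AVFormatContext *fmt_ctx = NULL;
    int video_idx;

    av_register_all();
    avformat_open_input(&fmt_ctx, "input.mp4", NULL, NULL);      /* 1. AVFormatContext */
    avformat_find_stream_info(fmt_ctx, NULL);
    video_idx = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO,
                                    -1, -1, NULL, 0);            /* 2. stream_index */
    printf("video stream index: %d\n", video_idx);
    /* 3 + 4: seek to 20 s; with stream_index -1 the timestamp is in AV_TIME_BASE units */
    av_seek_frame(fmt_ctx, -1, 20 * (int64_t)AV_TIME_BASE, AVSEEK_FLAG_ANY);

    /* ... the av_read_frame() / decode loop goes here ... */

    avformat_close_input(&fmt_ctx);
    return 0;
}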

The test case below is compiled with:


gcc -g doc/examples/demuxing.c -o demuxing -lavcodec -lavdevice -lavfilter -lavformat -lavutil -lswscale -lswresample -lpostproc -lx264 -lmp3lame -lz -liconv -lbz2
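
The resulting binary takes an input file and two output paths (the file names below are only placeholders); when it finishes it prints the matching ffplay commands for the raw outputs:

./demuxing input.mp4 out.yuv out.pcm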


#include <libavutil/imgutils.h>
#include <libavutil/samplefmt.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>

static AVFormatContext *fmt_ctx = NULL;
static AVCodecContext *video_dec_ctx = NULL, *audio_dec_ctx;
static AVStream *video_stream = NULL, *audio_stream = NULL;
static const char *src_filename = NULL;
static const char *video_dst_filename = NULL;
static const char *audio_dst_filename = NULL;
static FILE *video_dst_file = NULL;
static FILE *audio_dst_file = NULL;

static uint8_t *video_dst_data[4] = {NULL};
static int video_dst_linesize[4];
static int video_dst_bufsize;

static int video_stream_idx = -1, audio_stream_idx = -1;
static AVFrame *frame = NULL;
static AVPacket pkt;
static int video_frame_count = 0;
static int audio_frame_count = 0;

static int decode_packet(int *got_frame, int cached)
{
    int ret = 0;
    int decoded = pkt.size;
    uint8_t *dst_data[4];
    int dst_linesize[4];
    if (pkt.stream_index == video_stream_idx) {
        /* decode video frame */
        ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error decoding video frame\n");
            return ret;
        }

        if (*got_frame) {
            printf("video_frame%s n:%d coded_n:%d pts:%s\n",
                   cached ? "(cached)" : "",
                   video_frame_count++, frame->coded_picture_number,
                   av_ts2timestr(frame->pts, &video_dec_ctx->time_base));

            /* copy decoded frame to destination buffer:
             * this is required since rawvideo expects non aligned data */
            av_image_copy(video_dst_data, video_dst_linesize,
                          (const uint8_t **)(frame->data), frame->linesize,
                          video_dec_ctx->pix_fmt, video_dec_ctx->width, video_dec_ctx->height);

            /* write to rawvideo file */
            fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
        }
    } else if (pkt.stream_index == audio_stream_idx) {
        /* decode audio frame */
        ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error decoding audio frame\n");
            return ret;
        }
        /* Some audio decoders decode only part of the packet, and have to be
         * called again with the remainder of the packet data.
         * Sample: fate-suite/lossless-audio/luckynight-partial.shn
         * Also, some decoders might over-read the packet. */
        decoded = FFMIN(ret, pkt.size);

        if (*got_frame) {
            size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
            printf("audio_frame%s n:%d nb_samples:%d pts:%s\n",
                   cached ? "(cached)" : "",
                   audio_frame_count++, frame->nb_samples,
                   av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));

            /* Write the raw audio data samples of the first plane. This works
             * fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
             * most audio decoders output planar audio, which uses a separate
             * plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
             * In other words, this code will write only the first audio channel
             * in these cases.
             * You should use libswresample or libavfilter to convert the frame
             * to packed data. */
            fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
        }
    }
    return decoded;
}

static int open_codec_context(int *stream_idx,
                              AVFormatContext *fmt_ctx, enum AVMediaType type)
{
    int ret;
    AVStream *st;
    AVCodecContext *dec_ctx = NULL;
    AVCodec *dec = NULL;

    ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not find %s stream in input file '%s'\n",
                av_get_media_type_string(type), src_filename);
        return ret;
    } else {
        *stream_idx = ret;
        st = fmt_ctx->streams[*stream_idx];

        /* find decoder for the stream */
        dec_ctx = st->codec;
        dec = avcodec_find_decoder(dec_ctx->codec_id);
        if (!dec) {
            fprintf(stderr, "Failed to find %s codec\n",
                    av_get_media_type_string(type));
            return AVERROR(EINVAL);
        }

        if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
            fprintf(stderr, "Failed to open %s codec\n",
                    av_get_media_type_string(type));
            return ret;
        }
    }

    return 0;
}

static int get_format_from_sample_fmt(const char **fmt,
                                      enum AVSampleFormat sample_fmt)
{
    int i;
    struct sample_fmt_entry {
        enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
    } sample_fmt_entries[] = {
        { AV_SAMPLE_FMT_U8,  "u8",    "u8"    },
        { AV_SAMPLE_FMT_S16, "s16be", "s16le" },
        { AV_SAMPLE_FMT_S32, "s32be", "s32le" },
        { AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
        { AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
    };
    *fmt = NULL;

    for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
        struct sample_fmt_entry *entry = &sample_fmt_entries[i];
        if (sample_fmt == entry->sample_fmt) {
            *fmt = AV_NE(entry->fmt_be, entry->fmt_le);
            return 0;
        }
    }

    fprintf(stderr,
            "sample format %s is not supported as output format\n",
            av_get_sample_fmt_name(sample_fmt));
    return -1;
}

int main (int argc, char **argv)
{
    int ret = 0, got_frame;

    if (argc != 4) {
        fprintf(stderr, "usage: %s input_file video_output_file audio_output_file\n"
                "API example program to show how to read frames from an input file.\n"
                "This program reads frames from a file, decodes them, and writes decoded\n"
                "video frames to a rawvideo file named video_output_file, and decoded\n"
                "audio frames to a rawaudio file named audio_output_file.\n"
                "\n", argv[0]);
        exit(1);
    }
    src_filename = argv[1];
    video_dst_filename = argv[2];
    audio_dst_filename = argv[3];

    /* register all formats and codecs */
    av_register_all();

    /* open input file, and allocate format context */
    if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
        fprintf(stderr, "Could not open source file %s\n", src_filename);
        exit(1);
    }

    /* retrieve stream information */
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        fprintf(stderr, "Could not find stream information\n");
        exit(1);
    }

    if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
        video_stream = fmt_ctx->streams[video_stream_idx];
        video_dec_ctx = video_stream->codec;

        video_dst_file = fopen(video_dst_filename, "wb");
        if (!video_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
            ret = 1;
            goto end;
        }

        /* allocate image where the decoded image will be put */
        ret = av_image_alloc(video_dst_data, video_dst_linesize,
                             video_dec_ctx->width, video_dec_ctx->height,
                             video_dec_ctx->pix_fmt, 1);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate raw video buffer\n");
            goto end;
        }
        video_dst_bufsize = ret;
    }

    if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
        audio_stream = fmt_ctx->streams[audio_stream_idx];
        audio_dec_ctx = audio_stream->codec;
        audio_dst_file = fopen(audio_dst_filename, "wb");
        if (!audio_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", audio_dst_filename);
            ret = 1;
            goto end;
        }
    }

    /* dump input information to stderr */
    av_dump_format(fmt_ctx, 0, src_filename, 0);

    if (!audio_stream && !video_stream) {
        fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
        ret = 1;
        goto end;
    }

    frame = avcodec_alloc_frame();
    if (!frame) {
        fprintf(stderr, "Could not allocate frame\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* initialize packet, set data to NULL, let the demuxer fill it */
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    if (video_stream)
        printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
    if (audio_stream)
        printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);
    ///////////////////////////////////////////////////////////////////
    // The critical call: seek to the 20-second mark before reading.
    ///////////////////////////////////////////////////////////////////
    av_seek_frame(fmt_ctx, -1, 20 * AV_TIME_BASE, AVSEEK_FLAG_ANY);

    /* read frames from the file */
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        AVPacket orig_pkt = pkt;
        do {
            ret = decode_packet(&got_frame, 0);
            if (ret < 0)
                break;
            pkt.data += ret;
            pkt.size -= ret;
        } while (pkt.size > 0);
        av_free_packet(&orig_pkt);
    }

    /* flush cached frames */
    pkt.data = NULL;
    pkt.size = 0;
    do {
        decode_packet(&got_frame, 1);
    } while (got_frame);

    printf("Demuxing succeeded.\n");

    if (video_stream) {
        printf("Play the output video file with the command:\n"
               "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
               av_get_pix_fmt_name(video_dec_ctx->pix_fmt), video_dec_ctx->width, video_dec_ctx->height,
               video_dst_filename);
    }

    if (audio_stream) {
        enum AVSampleFormat sfmt = audio_dec_ctx->sample_fmt;
        int n_channels = audio_dec_ctx->channels;
        const char *fmt;

        if (av_sample_fmt_is_planar(sfmt)) {
            const char *packed = av_get_sample_fmt_name(sfmt);
            printf("Warning: the sample format the decoder produced is planar "
                   "(%s). This example will output the first channel only.\n",
                   packed ? packed : "?");
            sfmt = av_get_packed_sample_fmt(sfmt);
            n_channels = 1;
        }

        if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0)
            goto end;

        printf("Play the output audio file with the command:\n"
               "ffplay -f %s -ac %d -ar %d %s\n",
               fmt, n_channels, audio_dec_ctx->sample_rate,
               audio_dst_filename);
    }

end:
    if (video_dec_ctx)
        avcodec_close(video_dec_ctx);
    if (audio_dec_ctx)
        avcodec_close(audio_dec_ctx);
    avformat_close_input(&fmt_ctx);
    if (video_dst_file)
        fclose(video_dst_file);
    if (audio_dst_file)
        fclose(audio_dst_file);
    av_free(frame);
    av_free(video_dst_data[0]);

    return ret < 0;
}

Test result:
