Chinaunix首页 | 论坛 | 博客
  • 博客访问: 1166254
  • 博文数量: 173
  • 博客积分: 4048
  • 博客等级:
  • 技术积分: 2679
  • 用 户 组: 普通用户
  • 注册时间: 2010-09-12 18:53
文章分类

全部博文(173)

文章存档

2018年(1)

2016年(1)

2013年(1)

2012年(118)

2011年(52)

分类: 嵌入式

2012-04-23 10:15:09

编译命令:
gcc -o tutorial07 tutorial07.c -lavformat -lavcodec -lz -lm -lswscale `sdl-config --cflags --libs`


点击(此处)折叠或打开

  1. // tutorial07.c
  2. // A pedagogical video player that really works! Now with seeking features.
  3. //
  4. // Code based on FFplay, Copyright (c) 2003 Fabrice Bellard,
  5. // and a tutorial by Martin Bohme (boehme@inb.uni-luebeckREMOVETHIS.de)
  6. // Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1
  7. // Use
  8. //
  9. // gcc -o tutorial07 tutorial07.c -lavformat -lavcodec -lswscale -lz -lm `sdl-config --cflags --libs`
  10. // to build (assuming libavformat and libavcodec are correctly installed,
  11. // and assuming you have sdl-config. Please refer to SDL docs for your installation.)
  12. //
  13. // Run using
  14. // tutorial07 myvideofile.mpg
  15. //
  16. // to play the video.

  17. #include <libavcodec/avcodec.h>
  18. #include <libavformat/avformat.h>
  19. #include <libswscale/swscale.h>

  20. #include <SDL.h>
  21. #include <SDL_thread.h>
  22. #ifdef __MINGW32__
  23. #undef main /* Prevents SDL from overriding main() */
  24. #endif
  25. #include <stdio.h>
  26. #include <math.h>

  27. #define SDL_AUDIO_BUFFER_SIZE 1024
  28. #define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
  29. #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
  30. #define AV_SYNC_THRESHOLD 0.01
  31. #define AV_NOSYNC_THRESHOLD 10.0
  32. #define SAMPLE_CORRECTION_PERCENT_MAX 10
  33. #define AUDIO_DIFF_AVG_NB 20
  34. #define FF_ALLOC_EVENT (SDL_USEREVENT)
  35. #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
  36. #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
  37. #define VIDEO_PICTURE_QUEUE_SIZE 1
  38. #define DEFAULT_AV_SYNC_TYPE AV_SYNC_VIDEO_MASTER

/* Thread-safe FIFO of demuxed AVPackets, shared between the demux
 * thread (producer) and the audio/video decoding consumers. */
typedef struct PacketQueue {
  AVPacketList *first_pkt, *last_pkt;  /* linked-list head / tail */
  int nb_packets;                      /* number of packets queued */
  int size;                            /* total payload bytes queued */
  SDL_mutex *mutex;                    /* guards all fields above */
  SDL_cond *cond;                      /* signalled when a packet arrives */
} PacketQueue;
/* One slot of the decoded-picture queue: an SDL overlay plus the
 * metadata needed to display it at the right time. */
typedef struct VideoPicture {
  SDL_Overlay *bmp;  /* YUV overlay holding the converted frame */
  int width, height; /* source height & width */
  int allocated;     /* nonzero once bmp has been (re)created */
  double pts;        /* presentation time of this picture, in seconds */
} VideoPicture;

/* The "Big Struct": all per-file playback state, shared between the
 * demux thread, video thread, SDL audio callback and main thread. */
typedef struct VideoState {
  AVFormatContext *pFormatCtx;   /* demuxer context for the open file */
  int videoStream, audioStream;  /* stream indices, -1 when absent */

  int av_sync_type;              /* which clock is master (see enum below) */
  double external_clock; /* external clock base */
  int64_t external_clock_time;
  int seek_req;                  /* nonzero while a seek request is pending */
  int seek_flags;                /* AVSEEK_FLAG_* for the pending seek */
  int64_t seek_pos;              /* seek target, AV_TIME_BASE units */

  double audio_clock;            /* pts of the END of the decoded audio data */
  AVStream *audio_st;
  PacketQueue audioq;
  uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
  unsigned int audio_buf_size;   /* valid bytes in audio_buf */
  unsigned int audio_buf_index;  /* bytes already handed to SDL */
  AVPacket audio_pkt;            /* audio packet currently being decoded */
  uint8_t *audio_pkt_data;       /* read cursor inside audio_pkt */
  int audio_pkt_size;            /* bytes remaining in audio_pkt */
  int audio_hw_buf_size;
  double audio_diff_cum; /* used for AV difference average computation */
  double audio_diff_avg_coef;
  double audio_diff_threshold;
  int audio_diff_avg_count;
  double frame_timer;            /* absolute time the next frame is due */
  double frame_last_pts;
  double frame_last_delay;
  double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
  double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
  int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
  AVStream *video_st;
  PacketQueue videoq;
  VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];  /* picture ring buffer */
  int pictq_size, pictq_rindex, pictq_windex;    /* fill count, read/write indices */
  SDL_mutex *pictq_mutex;        /* guard pictq bookkeeping */
  SDL_cond *pictq_cond;          /* signalled on alloc / free slot */
  SDL_Thread *parse_tid;         /* demux thread (decode_thread) */
  SDL_Thread *video_tid;         /* video decode thread */

  char filename[1024];
  int quit;                      /* set to request every thread to exit */
  struct SwsContext *img_convert_ctx;  /* converter to YUV420P for SDL */
} VideoState;

/* Possible master clocks for A/V synchronization. */
enum {
  AV_SYNC_AUDIO_MASTER,     /* video syncs to the audio clock */
  AV_SYNC_VIDEO_MASTER,     /* audio syncs to the video clock */
  AV_SYNC_EXTERNAL_MASTER,  /* both sync to an external (wall) clock */
};

SDL_Surface *screen;  /* SDL display surface, created in main() */

/* Since we only have one decoding thread, the Big Struct
   can be global in case we need it. */
VideoState *global_video_state;
/* Sentinel packet pushed into the queues after a seek; decoders that
   see its data pointer flush their internal buffers. */
AVPacket flush_pkt;

  103. void packet_queue_init(PacketQueue *q) {
  104.   memset(q, 0, sizeof(PacketQueue));
  105.   q->mutex = SDL_CreateMutex();
  106.   q->cond = SDL_CreateCond();
  107. }
  108. int packet_queue_put(PacketQueue *q, AVPacket *pkt) {

  109.   AVPacketList *pkt1;
  110.   if(pkt != &flush_pkt && av_dup_packet(pkt) < 0) {
  111.     return -1;
  112.   }
  113.   pkt1 = av_malloc(sizeof(AVPacketList));
  114.   if (!pkt1)
  115.     return -1;
  116.   pkt1->pkt = *pkt;
  117.   pkt1->next = NULL;
  118.   
  119.   SDL_LockMutex(q->mutex);

  120.   if (!q->last_pkt)
  121.     q->first_pkt = pkt1;
  122.   else
  123.     q->last_pkt->next = pkt1;
  124.   q->last_pkt = pkt1;
  125.   q->nb_packets++;
  126.   q->size += pkt1->pkt.size;
  127.   SDL_CondSignal(q->cond);
  128.   
  129.   SDL_UnlockMutex(q->mutex);
  130.   return 0;
  131. }
/* Pop the oldest packet from the queue into *pkt.
 * If `block` is nonzero, wait on the condition variable until a packet
 * arrives or global quit is requested.
 * Returns 1 when a packet was delivered, 0 when empty and non-blocking,
 * -1 on quit. */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
  AVPacketList *pkt1;
  int ret;

  SDL_LockMutex(q->mutex);
  
  for(;;) {
    
    /* bail out promptly when shutdown is requested */
    if(global_video_state->quit) {
      ret = -1;
      break;
    }

    pkt1 = q->first_pkt;
    if (pkt1) {
      /* unlink the head node and update the byte/packet counters */
      q->first_pkt = pkt1->next;
      if (!q->first_pkt)
    q->last_pkt = NULL;
      q->nb_packets--;
      q->size -= pkt1->pkt.size;
      *pkt = pkt1->pkt;
      av_free(pkt1);
      ret = 1;
      break;
    } else if (!block) {
      ret = 0;
      break;
    } else {
      /* empty: sleep until packet_queue_put() signals us */
      SDL_CondWait(q->cond, q->mutex);
    }
  }
  SDL_UnlockMutex(q->mutex);
  return ret;
}
  165. static void packet_queue_flush(PacketQueue *q) {
  166.   AVPacketList *pkt, *pkt1;

  167.   SDL_LockMutex(q->mutex);
  168.   for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
  169.     pkt1 = pkt->next;
  170.     av_free_packet(&pkt->pkt);
  171.     av_freep(&pkt);
  172.   }
  173.   q->last_pkt = NULL;
  174.   q->first_pkt = NULL;
  175.   q->nb_packets = 0;
  176.   q->size = 0;
  177.   SDL_UnlockMutex(q->mutex);
  178. }
  179. double get_audio_clock(VideoState *is) {
  180.   double pts;
  181.   int hw_buf_size, bytes_per_sec, n;

  182.   pts = is->audio_clock; /* maintained in the audio thread */
  183.   hw_buf_size = is->audio_buf_size - is->audio_buf_index;
  184.   bytes_per_sec = 0;
  185.   n = is->audio_st->codec->channels * 2;
  186.   if(is->audio_st) {
  187.     bytes_per_sec = is->audio_st->codec->sample_rate * n;
  188.   }
  189.   if(bytes_per_sec) {
  190.     pts -= (double)hw_buf_size / bytes_per_sec;
  191.   }
  192.   return pts;
  193. }
  194. double get_video_clock(VideoState *is) {
  195.   double delta;

  196.   delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
  197.   return is->video_current_pts + delta;
  198. }
  199. double get_external_clock(VideoState *is) {
  200.   return av_gettime() / 1000000.0;
  201. }
  202. double get_master_clock(VideoState *is) {
  203.   if(is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
  204.     return get_video_clock(is);
  205.   } else if(is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
  206.     return get_audio_clock(is);
  207.   } else {
  208.     return get_external_clock(is);
  209.   }
  210. }
  211. /* Add or subtract samples to get a better sync, return new
  212.    audio buffer size */
  213. int synchronize_audio(VideoState *is, short *samples,
  214.          int samples_size, double pts) {
  215.   int n;
  216.   double ref_clock;
  217.   
  218.   n = 2 * is->audio_st->codec->channels;
  219.   
  220.   if(is->av_sync_type != AV_SYNC_AUDIO_MASTER) {
  221.     double diff, avg_diff;
  222.     int wanted_size, min_size, max_size, nb_samples;
  223.     
  224.     ref_clock = get_master_clock(is);
  225.     diff = get_audio_clock(is) - ref_clock;

  226.     if(diff < AV_NOSYNC_THRESHOLD) {
  227.       // accumulate the diffs
  228.       is->audio_diff_cum = diff + is->audio_diff_avg_coef
  229.     * is->audio_diff_cum;
  230.       if(is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
  231.     is->audio_diff_avg_count++;
  232.       } else {
  233.     avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
  234.     if(fabs(avg_diff) >= is->audio_diff_threshold) {
  235.      wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
  236.      min_size = samples_size * ((100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100);
  237.      max_size = samples_size * ((100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100);
  238.      if(wanted_size < min_size) {
  239.      wanted_size = min_size;
  240.      } else if (wanted_size > max_size) {
  241.      wanted_size = max_size;
  242.      }
  243.      if(wanted_size < samples_size) {
  244.      /* remove samples */
  245.      samples_size = wanted_size;
  246.      } else if(wanted_size > samples_size) {
  247.      uint8_t *samples_end, *q;
  248.      int nb;

  249.      /* add samples by copying final sample*/
  250.      nb = (samples_size - wanted_size);
  251.      samples_end = (uint8_t *)samples + samples_size - n;
  252.      q = samples_end + n;
  253.      while(nb > 0) {
  254.      memcpy(q, samples_end, n);
  255.      q += n;
  256.      nb -= n;
  257.      }
  258.      samples_size = wanted_size;
  259.      }
  260.     }
  261.       }
  262.     } else {
  263.       /* difference is TOO big; reset diff stuff */
  264.       is->audio_diff_avg_count = 0;
  265.       is->audio_diff_cum = 0;
  266.     }
  267.   }
  268.   return samples_size;
  269. }

/* Decode audio packets into audio_buf until a frame of PCM data is
 * produced.  Writes the pts of the decoded data to *pts_ptr.
 * Returns the number of bytes written, or -1 on quit/queue error. */
int audio_decode_frame(VideoState *is, uint8_t *audio_buf, int buf_size, double *pts_ptr) {

  int len1, data_size, n;
  AVPacket *pkt = &is->audio_pkt;
  double pts;

  for(;;) {
    /* keep decoding while the current packet has data left */
    while(is->audio_pkt_size > 0) {
      data_size = buf_size;
      len1 = avcodec_decode_audio2(is->audio_st->codec,
                 (int16_t *)audio_buf, &data_size,
                 is->audio_pkt_data, is->audio_pkt_size);
      if(len1 < 0) {
    /* if error, skip frame */
    is->audio_pkt_size = 0;
    break;
      }
      is->audio_pkt_data += len1;
      is->audio_pkt_size -= len1;
      if(data_size <= 0) {
    /* No data yet, get more frames */
    continue;
      }
      pts = is->audio_clock;
      *pts_ptr = pts;
      /* advance the audio clock by the duration of the decoded data
         (2 bytes per sample per channel) */
      n = 2 * is->audio_st->codec->channels;
      is->audio_clock += (double)data_size /
    (double)(n * is->audio_st->codec->sample_rate);

      /* We have data, return it and come back for more later */
      return data_size;
    }
    if(pkt->data)
      av_free_packet(pkt);

    if(is->quit) {
      return -1;
    }
    /* fetch the next packet (blocking) */
    if(packet_queue_get(&is->audioq, pkt, 1) < 0) {
      return -1;
    }
    /* the flush sentinel means a seek happened: reset the decoder */
    if(pkt->data == flush_pkt.data) {
      avcodec_flush_buffers(is->audio_st->codec);
      continue;
    }
    is->audio_pkt_data = pkt->data;
    is->audio_pkt_size = pkt->size;
    /* if the packet carries a pts, resync the audio clock to it */
    if(pkt->pts != AV_NOPTS_VALUE) {
      is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
    }
  }
}

/* SDL audio callback: fill `stream` with `len` bytes of decoded,
 * sync-adjusted audio, decoding further frames as needed.
 * Runs on SDL's audio thread. */
void audio_callback(void *userdata, Uint8 *stream, int len) {

  VideoState *is = (VideoState *)userdata;
  int len1, audio_size;
  double pts;

  while(len > 0) {
    if(is->audio_buf_index >= is->audio_buf_size) {
      /* We have already sent all our data; get more */
      audio_size = audio_decode_frame(is, is->audio_buf, sizeof(is->audio_buf), &pts);
      if(audio_size < 0) {
    /* If error, output silence */
    is->audio_buf_size = 1024;
    memset(is->audio_buf, 0, is->audio_buf_size);
      } else {
    /* stretch/shrink the frame to stay in sync with the master clock */
    audio_size = synchronize_audio(is, (int16_t *)is->audio_buf,
                 audio_size, pts);
    is->audio_buf_size = audio_size;
      }
      is->audio_buf_index = 0;
    }
    /* copy as much as still fits into SDL's buffer */
    len1 = is->audio_buf_size - is->audio_buf_index;
    if(len1 > len)
      len1 = len;
    memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
    len -= len1;
    stream += len1;
    is->audio_buf_index += len1;
  }
}

  348. static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque) {
  349.   SDL_Event event;
  350.   event.type = FF_REFRESH_EVENT;
  351.   event.user.data1 = opaque;
  352.   SDL_PushEvent(&event);
  353.   return 0; /* 0 means stop timer */
  354. }

/* Schedule a video refresh: arm a one-shot SDL timer that will post a
 * FF_REFRESH_EVENT for `is` after `delay` milliseconds. */
static void schedule_refresh(VideoState *is, int delay) {
  SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
}

  359. void video_display(VideoState *is) {

  360.   SDL_Rect rect;
  361.   VideoPicture *vp;
  362.   AVPicture pict;
  363.   float aspect_ratio;
  364.   int w, h, x, y;
  365.   int i;

  366.   vp = &is->pictq[is->pictq_rindex];
  367.   if(vp->bmp) {
  368.     if(is->video_st->codec->sample_aspect_ratio.num == 0) {
  369.       aspect_ratio = 0;
  370.     } else {
  371.       aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio) *
  372.     is->video_st->codec->width / is->video_st->codec->height;
  373.     }
  374.     if(aspect_ratio <= 0.0) {
  375.       aspect_ratio = (float)is->video_st->codec->width /
  376.     (float)is->video_st->codec->height;
  377.     }
  378.     h = screen->h;
  379.     w = ((int)rint(h * aspect_ratio)) & -3;
  380.     if(w > screen->w) {
  381.       w = screen->w;
  382.       h = ((int)rint(w / aspect_ratio)) & -3;
  383.     }
  384.     x = (screen->w - w) / 2;
  385.     y = (screen->h - h) / 2;
  386.     
  387.     rect.x = x;
  388.     rect.y = y;
  389.     rect.w = w;
  390.     rect.h = h;
  391.     SDL_DisplayYUVOverlay(vp->bmp, &rect);
  392.   }
  393. }

/* Handler for FF_REFRESH_EVENT (main thread): display the next queued
 * picture, compute when the following frame is due — syncing to the
 * master clock when video is not the master — and schedule the next
 * refresh event. */
void video_refresh_timer(void *userdata) {

  VideoState *is = (VideoState *)userdata;
  VideoPicture *vp;
  double actual_delay, delay, sync_threshold, ref_clock, diff;
  
  if(is->video_st) {
    if(is->pictq_size == 0) {
      /* nothing decoded yet: poll again very soon */
      schedule_refresh(is, 1);
    } else {
      vp = &is->pictq[is->pictq_rindex];

      /* record the displayed pts so get_video_clock() can extrapolate */
      is->video_current_pts = vp->pts;
      is->video_current_pts_time = av_gettime();

      delay = vp->pts - is->frame_last_pts; /* the pts from last time */
      if(delay <= 0 || delay >= 1.0) {
    /* if incorrect delay, use previous one */
    delay = is->frame_last_delay;
      }
      /* save for next time */
      is->frame_last_delay = delay;
      is->frame_last_pts = vp->pts;

      /* update delay to sync to audio if not master source */
      if(is->av_sync_type != AV_SYNC_VIDEO_MASTER) {
    ref_clock = get_master_clock(is);
    diff = vp->pts - ref_clock;
    
    /* Skip or repeat the frame. Take delay into account
     FFPlay still doesn't "know if this is the best guess." */
    sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;
    if(fabs(diff) < AV_NOSYNC_THRESHOLD) {
     if(diff <= -sync_threshold) {
     delay = 0; /* behind: show immediately */
     } else if(diff >= sync_threshold) {
     delay = 2 * delay; /* ahead: hold the frame longer */
     }
    }
      }

      is->frame_timer += delay;
      /* compute the REAL delay relative to wall-clock time */
      actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
      if(actual_delay < 0.010) {
    /* Really it should skip the picture instead */
    actual_delay = 0.010;
      }
      schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));

      /* show the picture */
      video_display(is);
      
      /* advance the read index of the picture ring */
      if(++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) {
    is->pictq_rindex = 0;
      }
      SDL_LockMutex(is->pictq_mutex);
      is->pictq_size--;
      /* wake a decoder waiting for free space in pictq */
      SDL_CondSignal(is->pictq_cond);
      SDL_UnlockMutex(is->pictq_mutex);
    }
  } else {
    /* no video stream opened yet: check back later */
    schedule_refresh(is, 100);
  }
}
  454.       
/* Handler for FF_ALLOC_EVENT — runs on the main thread, where SDL
 * surface operations must happen: (re)create the YUV overlay for the
 * slot at the write index, then signal the decoder blocked in
 * queue_picture(). */
void alloc_picture(void *userdata) {

  VideoState *is = (VideoState *)userdata;
  VideoPicture *vp;

  vp = &is->pictq[is->pictq_windex];
  if(vp->bmp) {
    // we already have one make another, bigger/smaller
    SDL_FreeYUVOverlay(vp->bmp);
  }
  // Allocate a place to put our YUV image on that screen
  vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
                 is->video_st->codec->height,
                 SDL_YV12_OVERLAY,
                 screen);
  vp->width = is->video_st->codec->width;
  vp->height = is->video_st->codec->height;
  
  /* publish the new overlay and wake queue_picture() */
  SDL_LockMutex(is->pictq_mutex);
  vp->allocated = 1;
  SDL_CondSignal(is->pictq_cond);
  SDL_UnlockMutex(is->pictq_mutex);

}

/* Convert a decoded frame to YUV420P, copy it into the next free slot
 * of the picture queue, and tag it with its pts.  Blocks until a slot
 * is free.  Returns 0 on success, -1 when quitting. */
int queue_picture(VideoState *is, AVFrame *pFrame, double pts) {

  VideoPicture *vp;
  int dst_pix_fmt;
  AVPicture pict;

  /* wait until we have space for a new pic */
  SDL_LockMutex(is->pictq_mutex);
  while(is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
    !is->quit) {
    SDL_CondWait(is->pictq_cond, is->pictq_mutex);
  }
  SDL_UnlockMutex(is->pictq_mutex);

  if(is->quit)
    return -1;

  // windex is set to 0 initially
  vp = &is->pictq[is->pictq_windex];

  /* allocate or resize the overlay if it is missing or the wrong size */
  if(!vp->bmp ||
     vp->width != is->video_st->codec->width ||
     vp->height != is->video_st->codec->height) {
    SDL_Event event;

    vp->allocated = 0;
    /* we have to do it in the main thread */
    event.type = FF_ALLOC_EVENT;
    event.user.data1 = is;
    SDL_PushEvent(&event);

    /* wait until we have a picture allocated */
    SDL_LockMutex(is->pictq_mutex);
    while(!vp->allocated && !is->quit) {
      SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);
    if(is->quit) {
      return -1;
    }
  }
  /* We have a place to put our picture on the queue */
  /* If we are skipping a frame, do we set this to null
     but still return vp->allocated = 1? */


  if(vp->bmp) {

    SDL_LockYUVOverlay(vp->bmp);
    
    dst_pix_fmt = PIX_FMT_YUV420P;
    /* point pict at the queue; planes 1 and 2 are swapped because the
       overlay was created as SDL_YV12_OVERLAY (V plane before U) */

    pict.data[0] = vp->bmp->pixels[0];
    pict.data[1] = vp->bmp->pixels[2];
    pict.data[2] = vp->bmp->pixels[1];
    
    pict.linesize[0] = vp->bmp->pitches[0];
    pict.linesize[1] = vp->bmp->pitches[2];
    pict.linesize[2] = vp->bmp->pitches[1];
    
    // Convert the image into YUV format that SDL uses
    /*
    img_convert(&pict, dst_pix_fmt,
        (AVPicture *)pFrame, is->video_st->codec->pix_fmt,
        is->video_st->codec->width, is->video_st->codec->height);
    */
    sws_scale(is->img_convert_ctx,
          pFrame->data,
          pFrame->linesize, 0,
          is->video_st->codec->height,
          pict.data,
          pict.linesize);
          
    SDL_UnlockYUVOverlay(vp->bmp);
    vp->pts = pts;

    /* now we inform our display thread that we have a pic ready */
    if(++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE) {
      is->pictq_windex = 0;
    }
    SDL_LockMutex(is->pictq_mutex);
    is->pictq_size++;
    SDL_UnlockMutex(is->pictq_mutex);
  }
  return 0;
}

  552. double synchronize_video(VideoState *is, AVFrame *src_frame, double pts) {

  553.   double frame_delay;

  554.   if(pts != 0) {
  555.     /* if we have pts, set video clock to it */
  556.     is->video_clock = pts;
  557.   } else {
  558.     /* if we aren't given a pts, set it to the clock */
  559.     pts = is->video_clock;
  560.   }
  561.   /* update the video clock */
  562.   frame_delay = av_q2d(is->video_st->codec->time_base);
  563.   /* if we are repeating a frame, adjust clock accordingly */
  564.   frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
  565.   is->video_clock += frame_delay;
  566.   return pts;
  567. }

  568. uint64_t global_video_pkt_pts = AV_NOPTS_VALUE;

  569. /* These are called whenever we allocate a frame
  570.  * buffer. We use this to store the global_pts in
  571.  * a frame at the time it is allocated.
  572.  */
  573. int our_get_buffer(struct AVCodecContext *c, AVFrame *pic) {
  574.   int ret = avcodec_default_get_buffer(c, pic);
  575.   uint64_t *pts = av_malloc(sizeof(uint64_t));
  576.   *pts = global_video_pkt_pts;
  577.   pic->opaque = pts;
  578.   return ret;
  579. }
  580. void our_release_buffer(struct AVCodecContext *c, AVFrame *pic) {
  581.   if(pic) av_freep(&pic->opaque);
  582.   avcodec_default_release_buffer(c, pic);
  583. }

/* Video decoding thread: pull packets from videoq, decode them,
 * determine each frame's pts, and hand finished frames to
 * queue_picture().  Exits (returning 0) on quit or queue error. */
int video_thread(void *arg) {
  VideoState *is = (VideoState *)arg;
  AVPacket pkt1, *packet = &pkt1;
  int len1, frameFinished;
  AVFrame *pFrame;
  double pts;

  pFrame = avcodec_alloc_frame();

  for(;;) {
    if(packet_queue_get(&is->videoq, packet, 1) < 0) {
      // means we quit getting packets
      break;
    }
    /* flush sentinel after a seek: drop the codec's internal buffers */
    if(packet->data == flush_pkt.data) {
      avcodec_flush_buffers(is->video_st->codec);
      continue;
    }
    pts = 0;

    // Save global pts to be stored in pFrame in first call
    global_video_pkt_pts = packet->pts;
    // Decode video frame
    len1 = avcodec_decode_video(is->video_st->codec, pFrame, &frameFinished,
                packet->data, packet->size);
    /* prefer the pts stashed by our_get_buffer, fall back to dts */
    if(packet->dts == AV_NOPTS_VALUE
       && pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE) {
      pts = *(uint64_t *)pFrame->opaque;
    } else if(packet->dts != AV_NOPTS_VALUE) {
      pts = packet->dts;
    } else {
      pts = 0;
    }
    /* convert from stream time-base units to seconds */
    pts *= av_q2d(is->video_st->time_base);

    // Did we get a video frame?
    if(frameFinished) {
      pts = synchronize_video(is, pFrame, pts);
      if(queue_picture(is, pFrame, pts) < 0) {
    break;
      }
    }
    av_free_packet(packet);
  }
  av_free(pFrame);
  return 0;
}
  627. int stream_component_open(VideoState *is, int stream_index) {

  628.   AVFormatContext *pFormatCtx = is->pFormatCtx;
  629.   AVCodecContext *codecCtx;
  630.   AVCodec *codec;
  631.   SDL_AudioSpec wanted_spec, spec;

  632.   if(stream_index < 0 || stream_index >= pFormatCtx->nb_streams) {
  633.     return -1;
  634.   }

  635.   // Get a pointer to the codec context for the video stream
  636.   codecCtx = pFormatCtx->streams[stream_index]->codec;
  637.     
  638.     is->img_convert_ctx = sws_getContext(codecCtx->width,
  639.                    codecCtx->height,
  640.                    codecCtx->pix_fmt,
  641.                    codecCtx->width,
  642.                    codecCtx->height,
  643.                    PIX_FMT_YUV420P,
  644.                    SWS_BICUBIC,
  645.                    NULL, NULL, NULL);
  646.     
  647.   if(codecCtx->codec_type == CODEC_TYPE_AUDIO) {
  648.     // Set audio settings from codec info
  649.     wanted_spec.freq = codecCtx->sample_rate;
  650.     wanted_spec.format = AUDIO_S16SYS;
  651.     wanted_spec.channels = codecCtx->channels;
  652.     wanted_spec.silence = 0;
  653.     wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
  654.     wanted_spec.callback = audio_callback;
  655.     wanted_spec.userdata = is;
  656.     
  657.     if(SDL_OpenAudio(&wanted_spec, &spec) < 0) {
  658.       fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
  659.       return -1;
  660.     }
  661.     is->audio_hw_buf_size = spec.size;
  662.   }
  663.   codec = avcodec_find_decoder(codecCtx->codec_id);
  664.   if(!codec || (avcodec_open(codecCtx, codec) < 0)) {
  665.     fprintf(stderr, "Unsupported codec!\n");
  666.     return -1;
  667.   }

  668.   switch(codecCtx->codec_type) {
  669.   case CODEC_TYPE_AUDIO:
  670.     is->audioStream = stream_index;
  671.     is->audio_st = pFormatCtx->streams[stream_index];
  672.     is->audio_buf_size = 0;
  673.     is->audio_buf_index = 0;
  674.     
  675.     /* averaging filter for audio sync */
  676.     is->audio_diff_avg_coef = exp(log(0.01 / AUDIO_DIFF_AVG_NB));
  677.     is->audio_diff_avg_count = 0;
  678.     /* Correct audio only if larger error than this */
  679.     is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / codecCtx->sample_rate;

  680.     memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
  681.     packet_queue_init(&is->audioq);
  682.     SDL_PauseAudio(0);
  683.     break;
  684.   case CODEC_TYPE_VIDEO:
  685.     is->videoStream = stream_index;
  686.     is->video_st = pFormatCtx->streams[stream_index];

  687.     is->frame_timer = (double)av_gettime() / 1000000.0;
  688.     is->frame_last_delay = 40e-3;
  689.     is->video_current_pts_time = av_gettime();

  690.     packet_queue_init(&is->videoq);
  691.     is->video_tid = SDL_CreateThread(video_thread, is);
  692.     codecCtx->get_buffer = our_get_buffer;
  693.     codecCtx->release_buffer = our_release_buffer;

  694.     break;
  695.   default:
  696.     break;
  697.   }


  698. }

  699. int decode_interrupt_cb(void) {
  700.   return (global_video_state && global_video_state->quit);
  701. }
  702. int decode_thread(void *arg) {

  703.   VideoState *is = (VideoState *)arg;
  704.   AVFormatContext *pFormatCtx;
  705.   AVPacket pkt1, *packet = &pkt1;

  706.   int video_index = -1;
  707.   int audio_index = -1;
  708.   int i;

  709.   is->videoStream=-1;
  710.   is->audioStream=-1;

  711.   global_video_state = is;
  712.   // will interrupt blocking functions if we
  713.   url_set_interrupt_cb(decode_interrupt_cb);

  714.   // Open video file
  715.   if(av_open_input_file(&pFormatCtx, is->filename, NULL, 0, NULL)!=0)
  716.     return -1; // Couldn't open file

  717.   is->pFormatCtx = pFormatCtx;
  718.   
  719.   // Retrieve stream information
  720.   if(av_find_stream_info(pFormatCtx)<0)
  721.     return -1; // Couldn't find stream information
  722.   
  723.   // Dump information about file onto standard error
  724.   dump_format(pFormatCtx, 0, is->filename, 0);
  725.   
  726.   // Find the first video stream
  727.   for(i=0; i<pFormatCtx->nb_streams; i++) {
  728.     if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO &&
  729.        video_index < 0) {
  730.       video_index=i;
  731.     }
  732.     if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_AUDIO &&
  733.        audio_index < 0) {
  734.       audio_index=i;
  735.     }
  736.   }
  737.   if(audio_index >= 0) {
  738.     stream_component_open(is, audio_index);
  739.   }
  740.   if(video_index >= 0) {
  741.     stream_component_open(is, video_index);
  742.   }

  743.   if(is->videoStream < 0 || is->audioStream < 0) {
  744.     fprintf(stderr, "%s: could not open codecs\n", is->filename);
  745.     goto fail;
  746.   }

  747.   // main decode loop

  748.   for(;;) {
  749.     if(is->quit) {
  750.       break;
  751.     }
  752.     // seek stuff goes here
  753.     if(is->seek_req) {
  754.       int stream_index= -1;
  755.       int64_t seek_target = is->seek_pos;

  756.       if (is->videoStream >= 0) stream_index = is->videoStream;
  757.       else if(is->audioStream >= 0) stream_index = is->audioStream;

  758.       if(stream_index>=0){
  759.     seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, pFormatCtx->streams[stream_index]->time_base);
  760.       }
  761.       if(!av_seek_frame(is->pFormatCtx, stream_index, seek_target, is->seek_flags)) {
  762.     fprintf(stderr, "%s: error while seeking\n", is->pFormatCtx->filename);
  763.       } else {
  764.     if(is->audioStream >= 0) {
  765.      packet_queue_flush(&is->audioq);
  766.      packet_queue_put(&is->audioq, &flush_pkt);
  767.     }
  768.     if(is->videoStream >= 0) {
  769.      packet_queue_flush(&is->videoq);
  770.      packet_queue_put(&is->videoq, &flush_pkt);
  771.     }
  772.       }
  773.       is->seek_req = 0;
  774.     }

  775.     if(is->audioq.size > MAX_AUDIOQ_SIZE ||
  776.        is->videoq.size > MAX_VIDEOQ_SIZE) {
  777.       SDL_Delay(10);
  778.       continue;
  779.     }
  780.     if(av_read_frame(is->pFormatCtx, packet) < 0) {
  781.       if(url_ferror(&pFormatCtx->pb) == 0) {
  782.     SDL_Delay(100); /* no error; wait for user input */
  783.     continue;
  784.       } else {
  785.     break;
  786.       }
  787.     }
  788.     // Is this a packet from the video stream?
  789.     if(packet->stream_index == is->videoStream) {
  790.       packet_queue_put(&is->videoq, packet);
  791.     } else if(packet->stream_index == is->audioStream) {
  792.       packet_queue_put(&is->audioq, packet);
  793.     } else {
  794.       av_free_packet(packet);
  795.     }
  796.   }
  797.   /* all done - wait for it */
  798.   while(!is->quit) {
  799.     SDL_Delay(100);
  800.   }
  801.  fail:
  802.   {
  803.     SDL_Event event;
  804.     event.type = FF_QUIT_EVENT;
  805.     event.user.data1 = is;
  806.     SDL_PushEvent(&event);
  807.   }
  808.   return 0;
  809. }

  810. void stream_seek(VideoState *is, int64_t pos, int rel) {

  811.   if(!is->seek_req) {
  812.     is->seek_pos = pos;
  813.     is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
  814.     is->seek_req = 1;
  815.   }
  816. }
  817. int main(int argc, char *argv[]) {

  818.   SDL_Event event;
  819.   double pts;
  820.   VideoState *is;

  821.   is = av_mallocz(sizeof(VideoState));

  822.   if(argc < 2) {
  823.     fprintf(stderr, "Usage: test \n");
  824.     exit(1);
  825.   }
  826.   // Register all formats and codecs
  827.   av_register_all();
  828.   
  829.   if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
  830.     fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
  831.     exit(1);
  832.   }

  833.   // Make a screen to put our video
  834. #ifndef __DARWIN__
  835.   screen = SDL_SetVideoMode(640, 480, 0, 0);
  836. #else
  837.   screen = SDL_SetVideoMode(640, 480, 24, 0);
  838. #endif
  839.   if(!screen) {
  840.     fprintf(stderr, "SDL: could not set video mode - exiting\n");
  841.     exit(1);
  842.   }

  843.   av_strlcpy(is->filename, argv[1], sizeof(is->filename));

  844.   is->pictq_mutex = SDL_CreateMutex();
  845.   is->pictq_cond = SDL_CreateCond();

  846.   schedule_refresh(is, 40);

  847.   is->av_sync_type = DEFAULT_AV_SYNC_TYPE;
  848.   is->parse_tid = SDL_CreateThread(decode_thread, is);
  849.   if(!is->parse_tid) {
  850.     av_free(is);
  851.     return -1;
  852.   }

  853.   av_init_packet(&flush_pkt);
  854.   flush_pkt.data = "FLUSH";
  855.   
  856.   for(;;) {
  857.     double incr, pos;
  858.     SDL_WaitEvent(&event);
  859.     switch(event.type) {
  860.     case SDL_KEYDOWN:
  861.       switch(event.key.keysym.sym) {
  862.       case SDLK_LEFT:
  863.     incr = -10.0;
  864.     goto do_seek;
  865.       case SDLK_RIGHT:
  866.     incr = 10.0;
  867.     goto do_seek;
  868.       case SDLK_UP:
  869.     incr = 60.0;
  870.     goto do_seek;
  871.       case SDLK_DOWN:
  872.     incr = -60.0;
  873.     goto do_seek;
  874.       do_seek:
  875.     if(global_video_state) {
  876.      pos = get_master_clock(global_video_state);
  877.      pos += incr;
  878.      stream_seek(global_video_state, (int64_t)(pos * AV_TIME_BASE), incr);
  879.     }
  880.     break;
  881.       default:
  882.     break;
  883.       }
  884.       break;
  885.     case FF_QUIT_EVENT:
  886.     case SDL_QUIT:
  887.       is->quit = 1;
  888.       SDL_Quit();
  889.       exit(0);
  890.       break;
  891.     case FF_ALLOC_EVENT:
  892.       alloc_picture(event.user.data1);
  893.       break;
  894.     case FF_REFRESH_EVENT:
  895.       video_refresh_timer(event.user.data1);
  896.       break;
  897.     default:
  898.       break;
  899.     }
  900.   }
  901.   return 0;
  902. }


阅读(1421) | 评论(0) | 转发(1) |
0

上一篇:ffmpeg tutorial 7:快进快退

下一篇:赛马问题

给主人留下些什么吧!~~