Chinaunix首页 | 论坛 | 博客
  • 博客访问: 1170260
  • 博文数量: 173
  • 博客积分: 4048
  • 博客等级:
  • 技术积分: 2679
  • 用 户 组: 普通用户
  • 注册时间: 2010-09-12 18:53
文章分类

全部博文(173)

文章存档

2018年(1)

2016年(1)

2013年(1)

2012年(118)

2011年(52)

分类: 嵌入式

2012-04-17 15:16:07

编译命令:
gcc -o tutorial06 tutorial06.c -lavformat -lavcodec -lz -lm -lswscale `sdl-config --cflags --libs`


点击(此处)折叠或打开

  1. // tutorial06.c
  2. // A pedagogical video player that really works!
  3. //
  4. // Code based on FFplay, Copyright (c) 2003 Fabrice Bellard,
  5. // and a tutorial by Martin Bohme (boehme@inb.uni-luebeckREMOVETHIS.de)
  6. // Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1
  7. // Use
  8. //
  9. // gcc -o tutorial06 tutorial06.c -lavformat -lavcodec -lz -lm -lswscale `sdl-config --cflags --libs`
  10. // to build (assuming libavformat and libavcodec are correctly installed,
  11. // and assuming you have sdl-config. Please refer to SDL docs for your installation.)
  12. //
  13. // Run using
  14. // tutorial06 myvideofile.mpg
  15. //
  16. // to play the video.

  17. #include <libavcodec/avcodec.h>
  18. #include <libavformat/avformat.h>
  19. #include <libswscale/swscale.h>

  20. #include <SDL.h>
  21. #include <SDL_thread.h>

  22. #ifdef __MINGW32__
  23. #undef main /* Prevents SDL from overriding main() */
  24. #endif

  25. #include <stdio.h>
  26. #include <math.h>

  27. #define SDL_AUDIO_BUFFER_SIZE 1024

  28. #define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
  29. #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)

  30. #define AV_SYNC_THRESHOLD 0.01
  31. #define AV_NOSYNC_THRESHOLD 10.0

  32. #define SAMPLE_CORRECTION_PERCENT_MAX 10
  33. #define AUDIO_DIFF_AVG_NB 20

  34. #define FF_ALLOC_EVENT (SDL_USEREVENT)
  35. #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
  36. #define FF_QUIT_EVENT (SDL_USEREVENT + 2)

  37. #define VIDEO_PICTURE_QUEUE_SIZE 1

  38. #define DEFAULT_AV_SYNC_TYPE AV_SYNC_VIDEO_MASTER

  39. typedef struct PacketQueue {
  40.   AVPacketList *first_pkt, *last_pkt;
  41.   int nb_packets;
  42.   int size;
  43.   SDL_mutex *mutex;
  44.   SDL_cond *cond;
  45. } PacketQueue;


  46. typedef struct VideoPicture {
  47.   SDL_Overlay *bmp;
  48.   int width, height; /* source height & width */
  49.   int allocated;
  50.   double pts;
  51. } VideoPicture;

  52. typedef struct VideoState {

  53.   AVFormatContext *pFormatCtx;
  54.   int videoStream, audioStream;

  55.   int av_sync_type;
  56.   double external_clock; /* external clock base */
  57.   int64_t external_clock_time;

  58.   double audio_clock;
  59.   AVStream *audio_st;
  60.   PacketQueue audioq;
  61.   uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
  62.   unsigned int audio_buf_size;
  63.   unsigned int audio_buf_index;
  64.   AVPacket audio_pkt;
  65.   uint8_t *audio_pkt_data;
  66.   int audio_pkt_size;
  67.   int audio_hw_buf_size;
  68.   double audio_diff_cum; /* used for AV difference average computation */
  69.   double audio_diff_avg_coef;
  70.   double audio_diff_threshold;
  71.   int audio_diff_avg_count;
  72.   double frame_timer;
  73.   double frame_last_pts;
  74.   double frame_last_delay;
  75.   double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
  76.   double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
  77.   int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
  78.   AVStream *video_st;
  79.   PacketQueue videoq;

  80.   VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
  81.   int pictq_size, pictq_rindex, pictq_windex;
  82.   SDL_mutex *pictq_mutex;
  83.   SDL_cond *pictq_cond;
  84.   
  85.   SDL_Thread *parse_tid;
  86.   SDL_Thread *video_tid;

  87.   char filename[1024];
  88.   int quit;
  89.   struct SwsContext *img_convert_ctx;
  90. } VideoState;

  91. enum {
  92.   AV_SYNC_AUDIO_MASTER,
  93.   AV_SYNC_VIDEO_MASTER,
  94.   AV_SYNC_EXTERNAL_MASTER,
  95. };

  96. SDL_Surface *screen;

  97. /* Since we only have one decoding thread, the Big Struct
  98.    can be global in case we need it. */
  99. VideoState *global_video_state;

  100. void packet_queue_init(PacketQueue *q) {
  101.   memset(q, 0, sizeof(PacketQueue));
  102.   q->mutex = SDL_CreateMutex();
  103.   q->cond = SDL_CreateCond();
  104. }
  105. int packet_queue_put(PacketQueue *q, AVPacket *pkt) {

  106.   AVPacketList *pkt1;
  107.   if(av_dup_packet(pkt) < 0) {
  108.     return -1;
  109.   }
  110.   pkt1 = av_malloc(sizeof(AVPacketList));
  111.   if (!pkt1)
  112.     return -1;
  113.   pkt1->pkt = *pkt;
  114.   pkt1->next = NULL;
  115.   
  116.   SDL_LockMutex(q->mutex);

  117.   if (!q->last_pkt)
  118.     q->first_pkt = pkt1;
  119.   else
  120.     q->last_pkt->next = pkt1;
  121.   q->last_pkt = pkt1;
  122.   q->nb_packets++;
  123.   q->size += pkt1->pkt.size;
  124.   SDL_CondSignal(q->cond);
  125.   
  126.   SDL_UnlockMutex(q->mutex);
  127.   return 0;
  128. }
  129. static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
  130. {
  131.   AVPacketList *pkt1;
  132.   int ret;

  133.   SDL_LockMutex(q->mutex);
  134.   
  135.   for(;;) {
  136.     
  137.     if(global_video_state->quit) {
  138.       ret = -1;
  139.       break;
  140.     }

  141.     pkt1 = q->first_pkt;
  142.     if (pkt1) {
  143.       q->first_pkt = pkt1->next;
  144.       if (!q->first_pkt)
  145.     q->last_pkt = NULL;
  146.       q->nb_packets--;
  147.       q->size -= pkt1->pkt.size;
  148.       *pkt = pkt1->pkt;
  149.       av_free(pkt1);
  150.       ret = 1;
  151.       break;
  152.     } else if (!block) {
  153.       ret = 0;
  154.       break;
  155.     } else {
  156.       SDL_CondWait(q->cond, q->mutex);
  157.     }
  158.   }
  159.   SDL_UnlockMutex(q->mutex);
  160.   return ret;
  161. }
  162. double get_audio_clock(VideoState *is) {
  163.   double pts;
  164.   int hw_buf_size, bytes_per_sec, n;

  165.   pts = is->audio_clock; /* maintained in the audio thread */
  166.   hw_buf_size = is->audio_buf_size - is->audio_buf_index;
  167.   bytes_per_sec = 0;
  168.   n = is->audio_st->codec->channels * 2;
  169.   if(is->audio_st) {
  170.     bytes_per_sec = is->audio_st->codec->sample_rate * n;
  171.   }
  172.   if(bytes_per_sec) {
  173.     pts -= (double)hw_buf_size / bytes_per_sec;
  174.   }
  175.   return pts;
  176. }
  177. double get_video_clock(VideoState *is) {
  178.   double delta;

  179.   delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
  180.   return is->video_current_pts + delta;
  181. }
  182. double get_external_clock(VideoState *is) {
  183.   return av_gettime() / 1000000.0;
  184. }

  185. double get_master_clock(VideoState *is) {
  186.   if(is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
  187.     return get_video_clock(is);
  188.   } else if(is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
  189.     return get_audio_clock(is);
  190.   } else {
  191.     return get_external_clock(is);
  192.   }
  193. }
  194. /* Add or subtract samples to get a better sync, return new
  195.    audio buffer size */
  196. int synchronize_audio(VideoState *is, short *samples,
  197.          int samples_size, double pts) {
  198.   int n;
  199.   double ref_clock;

  200.   n = 2 * is->audio_st->codec->channels;
  201.   
  202.   if(is->av_sync_type != AV_SYNC_AUDIO_MASTER) {
  203.     double diff, avg_diff;
  204.     int wanted_size, min_size, max_size, nb_samples;
  205.     
  206.     ref_clock = get_master_clock(is);
  207.     diff = get_audio_clock(is) - ref_clock;

  208.     if(diff < AV_NOSYNC_THRESHOLD) {
  209.       // accumulate the diffs
  210.       is->audio_diff_cum = diff + is->audio_diff_avg_coef
  211.     * is->audio_diff_cum;
  212.       if(is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
  213.     is->audio_diff_avg_count++;
  214.       } else {
  215.     avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
  216.     if(fabs(avg_diff) >= is->audio_diff_threshold) {
  217.      wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
  218.      min_size = samples_size * ((100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100);
  219.      max_size = samples_size * ((100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100);
  220.      if(wanted_size < min_size) {
  221.      wanted_size = min_size;
  222.      } else if (wanted_size > max_size) {
  223.      wanted_size = max_size;
  224.      }
  225.      if(wanted_size < samples_size) {
  226.      /* remove samples */
  227.      samples_size = wanted_size;
  228.      } else if(wanted_size > samples_size) {
  229.      uint8_t *samples_end, *q;
  230.      int nb;

  231.      /* add samples by copying final sample*/
  232.      nb = (samples_size - wanted_size);
  233.      samples_end = (uint8_t *)samples + samples_size - n;
  234.      q = samples_end + n;
  235.      while(nb > 0) {
  236.      memcpy(q, samples_end, n);
  237.      q += n;
  238.      nb -= n;
  239.      }
  240.      samples_size = wanted_size;
  241.      }
  242.     }
  243.       }
  244.     } else {
  245.       /* difference is TOO big; reset diff stuff */
  246.       is->audio_diff_avg_count = 0;
  247.       is->audio_diff_cum = 0;
  248.     }
  249.   }
  250.   return samples_size;
  251. }

  252. int audio_decode_frame(VideoState *is, uint8_t *audio_buf, int buf_size, double *pts_ptr) {

  253.   int len1, data_size, n;
  254.   AVPacket *pkt = &is->audio_pkt;
  255.   double pts;

  256.   for(;;) {
  257.     while(is->audio_pkt_size > 0) {
  258.       data_size = buf_size;
  259.       len1 = avcodec_decode_audio2(is->audio_st->codec,
  260.                  (int16_t *)audio_buf, &data_size,
  261.                  is->audio_pkt_data, is->audio_pkt_size);
  262.       if(len1 < 0) {
  263.     /* if error, skip frame */
  264.     is->audio_pkt_size = 0;
  265.     break;
  266.       }
  267.       is->audio_pkt_data += len1;
  268.       is->audio_pkt_size -= len1;
  269.       if(data_size <= 0) {
  270.     /* No data yet, get more frames */
  271.     continue;
  272.       }
  273.       pts = is->audio_clock;
  274.       *pts_ptr = pts;
  275.       n = 2 * is->audio_st->codec->channels;
  276.       is->audio_clock += (double)data_size /
  277.     (double)(n * is->audio_st->codec->sample_rate);

  278.       /* We have data, return it and come back for more later */
  279.       return data_size;
  280.     }
  281.     if(pkt->data)
  282.       av_free_packet(pkt);

  283.     if(is->quit) {
  284.       return -1;
  285.     }
  286.     /* next packet */
  287.     if(packet_queue_get(&is->audioq, pkt, 1) < 0) {
  288.       return -1;
  289.     }
  290.     is->audio_pkt_data = pkt->data;
  291.     is->audio_pkt_size = pkt->size;
  292.     /* if update, update the audio clock w/pts */
  293.     if(pkt->pts != AV_NOPTS_VALUE) {
  294.       is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
  295.     }
  296.   }
  297. }

  298. void audio_callback(void *userdata, Uint8 *stream, int len) {

  299.   VideoState *is = (VideoState *)userdata;
  300.   int len1, audio_size;
  301.   double pts;

  302.   while(len > 0) {
  303.     if(is->audio_buf_index >= is->audio_buf_size) {
  304.       /* We have already sent all our data; get more */
  305.       audio_size = audio_decode_frame(is, is->audio_buf, sizeof(is->audio_buf), &pts);
  306.       if(audio_size < 0) {
  307.     /* If error, output silence */
  308.     is->audio_buf_size = 1024;
  309.     memset(is->audio_buf, 0, is->audio_buf_size);
  310.       } else {
  311.     audio_size = synchronize_audio(is, (int16_t *)is->audio_buf,
  312.                  audio_size, pts);
  313.     is->audio_buf_size = audio_size;
  314.       }
  315.       is->audio_buf_index = 0;
  316.     }
  317.     len1 = is->audio_buf_size - is->audio_buf_index;
  318.     if(len1 > len)
  319.       len1 = len;
  320.     memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
  321.     len -= len1;
  322.     stream += len1;
  323.     is->audio_buf_index += len1;
  324.   }
  325. }

  326. static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque) {
  327.   SDL_Event event;
  328.   event.type = FF_REFRESH_EVENT;
  329.   event.user.data1 = opaque;
  330.   SDL_PushEvent(&event);
  331.   return 0; /* 0 means stop timer */
  332. }

  333. /* schedule a video refresh in 'delay' ms */
  334. static void schedule_refresh(VideoState *is, int delay) {
  335.   SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
  336. }

  337. void video_display(VideoState *is) {

  338.   SDL_Rect rect;
  339.   VideoPicture *vp;
  340.   AVPicture pict;
  341.   float aspect_ratio;
  342.   int w, h, x, y;
  343.   int i;

  344.   vp = &is->pictq[is->pictq_rindex];
  345.   if(vp->bmp) {
  346.     if(is->video_st->codec->sample_aspect_ratio.num == 0) {
  347.       aspect_ratio = 0;
  348.     } else {
  349.       aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio) *
  350.     is->video_st->codec->width / is->video_st->codec->height;
  351.     }
  352.     if(aspect_ratio <= 0.0) {
  353.       aspect_ratio = (float)is->video_st->codec->width /
  354.     (float)is->video_st->codec->height;
  355.     }
  356.     h = screen->h;
  357.     w = ((int)rint(h * aspect_ratio)) & -3;
  358.     if(w > screen->w) {
  359.       w = screen->w;
  360.       h = ((int)rint(w / aspect_ratio)) & -3;
  361.     }
  362.     x = (screen->w - w) / 2;
  363.     y = (screen->h - h) / 2;
  364.     
  365.     rect.x = x;
  366.     rect.y = y;
  367.     rect.w = w;
  368.     rect.h = h;
  369.     SDL_DisplayYUVOverlay(vp->bmp, &rect);
  370.   }
  371. }

  372. void video_refresh_timer(void *userdata) {

  373.   VideoState *is = (VideoState *)userdata;
  374.   VideoPicture *vp;
  375.   double actual_delay, delay, sync_threshold, ref_clock, diff;
  376.   
  377.   if(is->video_st) {
  378.     if(is->pictq_size == 0) {
  379.       schedule_refresh(is, 1);
  380.     } else {
  381.       vp = &is->pictq[is->pictq_rindex];

  382.       is->video_current_pts = vp->pts;
  383.       is->video_current_pts_time = av_gettime();

  384.       delay = vp->pts - is->frame_last_pts; /* the pts from last time */
  385.       if(delay <= 0 || delay >= 1.0) {
  386.     /* if incorrect delay, use previous one */
  387.     delay = is->frame_last_delay;
  388.       }
  389.       /* save for next time */
  390.       is->frame_last_delay = delay;
  391.       is->frame_last_pts = vp->pts;

  392.       /* update delay to sync to audio if not master source */
  393.       if(is->av_sync_type != AV_SYNC_VIDEO_MASTER) {
  394.     ref_clock = get_master_clock(is);
  395.     diff = vp->pts - ref_clock;
  396.     
  397.     /* Skip or repeat the frame. Take delay into account
  398.      FFPlay still doesn't "know if this is the best guess." */
  399.     sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;
  400.     if(fabs(diff) < AV_NOSYNC_THRESHOLD) {
  401.      if(diff <= -sync_threshold) {
  402.      delay = 0;
  403.      } else if(diff >= sync_threshold) {
  404.      delay = 2 * delay;
  405.      }
  406.     }
  407.       }

  408.       is->frame_timer += delay;
  409.       /* computer the REAL delay */
  410.       actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
  411.       if(actual_delay < 0.010) {
  412.     /* Really it should skip the picture instead */
  413.     actual_delay = 0.010;
  414.       }
  415.       schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));

  416.       /* show the */
  417.       video_display(is);
  418.       
  419.       /* update queue for next */
  420.       if(++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) {
  421.     is->pictq_rindex = 0;
  422.       }
  423.       SDL_LockMutex(is->pictq_mutex);
  424.       is->pictq_size--;
  425.       SDL_CondSignal(is->pictq_cond);
  426.       SDL_UnlockMutex(is->pictq_mutex);
  427.     }
  428.   } else {
  429.     schedule_refresh(is, 100);
  430.   }
  431. }
  432.       
  433. void alloc_picture(void *userdata) {

  434.   VideoState *is = (VideoState *)userdata;
  435.   VideoPicture *vp;

  436.   vp = &is->pictq[is->pictq_windex];
  437.   if(vp->bmp) {
  438.     // we already have one make another, bigger/smaller
  439.     SDL_FreeYUVOverlay(vp->bmp);
  440.   }
  441.   // Allocate a place to put our YUV image on that screen
  442.   vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
  443.                  is->video_st->codec->height,
  444.                  SDL_YV12_OVERLAY,
  445.                  screen);
  446.   vp->width = is->video_st->codec->width;
  447.   vp->height = is->video_st->codec->height;
  448.   
  449.   SDL_LockMutex(is->pictq_mutex);
  450.   vp->allocated = 1;
  451.   SDL_CondSignal(is->pictq_cond);
  452.   SDL_UnlockMutex(is->pictq_mutex);

  453. }

  454. int queue_picture(VideoState *is, AVFrame *pFrame, double pts) {

  455.   VideoPicture *vp;
  456.   int dst_pix_fmt;
  457.   AVPicture pict;

  458.   /* wait until we have space for a new pic */
  459.   SDL_LockMutex(is->pictq_mutex);
  460.   while(is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
  461.     !is->quit) {
  462.     SDL_CondWait(is->pictq_cond, is->pictq_mutex);
  463.   }
  464.   SDL_UnlockMutex(is->pictq_mutex);

  465.   if(is->quit)
  466.     return -1;

  467.   // windex is set to 0 initially
  468.   vp = &is->pictq[is->pictq_windex];

  469.   /* allocate or resize the */
  470.   if(!vp->bmp ||
  471.      vp->width != is->video_st->codec->width ||
  472.      vp->height != is->video_st->codec->height) {
  473.     SDL_Event event;

  474.     vp->allocated = 0;
  475.     /* we have to do it in the main thread */
  476.     event.type = FF_ALLOC_EVENT;
  477.     event.user.data1 = is;
  478.     SDL_PushEvent(&event);

  479.     /* wait until we have a picture allocated */
  480.     SDL_LockMutex(is->pictq_mutex);
  481.     while(!vp->allocated && !is->quit) {
  482.       SDL_CondWait(is->pictq_cond, is->pictq_mutex);
  483.     }
  484.     SDL_UnlockMutex(is->pictq_mutex);
  485.     if(is->quit) {
  486.       return -1;
  487.     }
  488.   }
  489.   /* We have a place to put our picture on the queue */
  490.   /* If we are skipping a frame, do we set this to null
  491.      but still return vp->allocated = 1? */


  492.   if(vp->bmp) {

  493.     SDL_LockYUVOverlay(vp->bmp);
  494.     
  495.     dst_pix_fmt = PIX_FMT_YUV420P;
  496.     /* point pict at the queue */

  497.     pict.data[0] = vp->bmp->pixels[0];
  498.     pict.data[1] = vp->bmp->pixels[2];
  499.     pict.data[2] = vp->bmp->pixels[1];
  500.     
  501.     pict.linesize[0] = vp->bmp->pitches[0];
  502.     pict.linesize[1] = vp->bmp->pitches[2];
  503.     pict.linesize[2] = vp->bmp->pitches[1];
  504.     
  505.     // Convert the image into YUV format that SDL uses
  506.     /*
  507.     img_convert(&pict, dst_pix_fmt,
  508.         (AVPicture *)pFrame, is->video_st->codec->pix_fmt,
  509.         is->video_st->codec->width, is->video_st->codec->height);
  510.     */
  511.     sws_scale(is->img_convert_ctx,
  512.           pFrame->data,
  513.           pFrame->linesize, 0,
  514.           is->video_st->codec->height,
  515.           pict.data,
  516.           pict.linesize);
  517.     
  518.     SDL_UnlockYUVOverlay(vp->bmp);
  519.     vp->pts = pts;

  520.     /* now we inform our display thread that we have a pic ready */
  521.     if(++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE) {
  522.       is->pictq_windex = 0;
  523.     }
  524.     SDL_LockMutex(is->pictq_mutex);
  525.     is->pictq_size++;
  526.     SDL_UnlockMutex(is->pictq_mutex);
  527.   }
  528.   return 0;
  529. }

  530. double synchronize_video(VideoState *is, AVFrame *src_frame, double pts) {

  531.   double frame_delay;

  532.   if(pts != 0) {
  533.     /* if we have pts, set video clock to it */
  534.     is->video_clock = pts;
  535.   } else {
  536.     /* if we aren't given a pts, set it to the clock */
  537.     pts = is->video_clock;
  538.   }
  539.   /* update the video clock */
  540.   frame_delay = av_q2d(is->video_st->codec->time_base);
  541.   /* if we are repeating a frame, adjust clock accordingly */
  542.   frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
  543.   is->video_clock += frame_delay;
  544.   return pts;
  545. }

  546. uint64_t global_video_pkt_pts = AV_NOPTS_VALUE;

  547. /* These are called whenever we allocate a frame
  548.  * buffer. We use this to store the global_pts in
  549.  * a frame at the time it is allocated.
  550.  */
  551. int our_get_buffer(struct AVCodecContext *c, AVFrame *pic) {
  552.   int ret = avcodec_default_get_buffer(c, pic);
  553.   uint64_t *pts = av_malloc(sizeof(uint64_t));
  554.   *pts = global_video_pkt_pts;
  555.   pic->opaque = pts;
  556.   return ret;
  557. }
  558. void our_release_buffer(struct AVCodecContext *c, AVFrame *pic) {
  559.   if(pic) av_freep(&pic->opaque);
  560.   avcodec_default_release_buffer(c, pic);
  561. }

  562. int video_thread(void *arg) {
  563.   VideoState *is = (VideoState *)arg;
  564.   AVPacket pkt1, *packet = &pkt1;
  565.   int len1, frameFinished;
  566.   AVFrame *pFrame;
  567.   double pts;

  568.   pFrame = avcodec_alloc_frame();

  569.   for(;;) {
  570.     if(packet_queue_get(&is->videoq, packet, 1) < 0) {
  571.       // means we quit getting packets
  572.       break;
  573.     }
  574.     pts = 0;

  575.     // Save global pts to be stored in pFrame in first call
  576.     global_video_pkt_pts = packet->pts;
  577.     // Decode video frame
  578.     len1 = avcodec_decode_video(is->video_st->codec, pFrame, &frameFinished,
  579.                 packet->data, packet->size);
  580.     if(packet->dts == AV_NOPTS_VALUE
  581.        && pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE) {
  582.       pts = *(uint64_t *)pFrame->opaque;
  583.     } else if(packet->dts != AV_NOPTS_VALUE) {
  584.       pts = packet->dts;
  585.     } else {
  586.       pts = 0;
  587.     }
  588.     pts *= av_q2d(is->video_st->time_base);

  589.     // Did we get a video frame?
  590.     if(frameFinished) {
  591.       pts = synchronize_video(is, pFrame, pts);
  592.       if(queue_picture(is, pFrame, pts) < 0) {
  593.     break;
  594.       }
  595.     }
  596.     av_free_packet(packet);
  597.   }
  598.   av_free(pFrame);
  599.   return 0;
  600. }

  601. int stream_component_open(VideoState *is, int stream_index) {

  602.   AVFormatContext *pFormatCtx = is->pFormatCtx;
  603.   AVCodecContext *codecCtx;
  604.   AVCodec *codec;
  605.   SDL_AudioSpec wanted_spec, spec;

  606.   if(stream_index < 0 || stream_index >= pFormatCtx->nb_streams) {
  607.     return -1;
  608.   }

  609.   // Get a pointer to the codec context for the video stream
  610.   codecCtx = pFormatCtx->streams[stream_index]->codec;
  611.     
  612.     is->img_convert_ctx = sws_getContext(codecCtx->width,
  613.                    codecCtx->height,
  614.                    codecCtx->pix_fmt,
  615.                    codecCtx->width,
  616.                    codecCtx->height,
  617.                    PIX_FMT_YUV420P,
  618.                    SWS_BICUBIC,
  619.                    NULL, NULL, NULL);
  620.     
  621.   if(codecCtx->codec_type == CODEC_TYPE_AUDIO) {
  622.     // Set audio settings from codec info
  623.     wanted_spec.freq = codecCtx->sample_rate;
  624.     wanted_spec.format = AUDIO_S16SYS;
  625.     wanted_spec.channels = codecCtx->channels;
  626.     wanted_spec.silence = 0;
  627.     wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
  628.     wanted_spec.callback = audio_callback;
  629.     wanted_spec.userdata = is;
  630.     
  631.     if(SDL_OpenAudio(&wanted_spec, &spec) < 0) {
  632.       fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
  633.       return -1;
  634.     }
  635.     is->audio_hw_buf_size = spec.size;
  636.   }
  637.   codec = avcodec_find_decoder(codecCtx->codec_id);
  638.   if(!codec || (avcodec_open(codecCtx, codec) < 0)) {
  639.     fprintf(stderr, "Unsupported codec!\n");
  640.     return -1;
  641.   }

  642.   switch(codecCtx->codec_type) {
  643.   case CODEC_TYPE_AUDIO:
  644.     is->audioStream = stream_index;
  645.     is->audio_st = pFormatCtx->streams[stream_index];
  646.     is->audio_buf_size = 0;
  647.     is->audio_buf_index = 0;
  648.     
  649.     /* averaging filter for audio sync */
  650.     is->audio_diff_avg_coef = exp(log(0.01 / AUDIO_DIFF_AVG_NB));
  651.     is->audio_diff_avg_count = 0;
  652.     /* Correct audio only if larger error than this */
  653.     is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / codecCtx->sample_rate;

  654.     memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
  655.     packet_queue_init(&is->audioq);
  656.     SDL_PauseAudio(0);
  657.     break;
  658.   case CODEC_TYPE_VIDEO:
  659.     is->videoStream = stream_index;
  660.     is->video_st = pFormatCtx->streams[stream_index];

  661.     is->frame_timer = (double)av_gettime() / 1000000.0;
  662.     is->frame_last_delay = 40e-3;
  663.     is->video_current_pts_time = av_gettime();

  664.     packet_queue_init(&is->videoq);
  665.     is->video_tid = SDL_CreateThread(video_thread, is);
  666.     codecCtx->get_buffer = our_get_buffer;
  667.     codecCtx->release_buffer = our_release_buffer;
  668.     break;
  669.   default:
  670.     break;
  671.   }


  672. }

  673. int decode_interrupt_cb(void) {
  674.   return (global_video_state && global_video_state->quit);
  675. }

  676. int decode_thread(void *arg) {

  677.   VideoState *is = (VideoState *)arg;
  678.   AVFormatContext *pFormatCtx;
  679.   AVPacket pkt1, *packet = &pkt1;

  680.   int video_index = -1;
  681.   int audio_index = -1;
  682.   int i;

  683.   is->videoStream=-1;
  684.   is->audioStream=-1;

  685.   global_video_state = is;
  686.   // will interrupt blocking functions if we
  687.   url_set_interrupt_cb(decode_interrupt_cb);

  688.   // Open video file
  689.   if(av_open_input_file(&pFormatCtx, is->filename, NULL, 0, NULL)!=0)
  690.     return -1; // Couldn't open file

  691.   is->pFormatCtx = pFormatCtx;
  692.   
  693.   // Retrieve stream information
  694.   if(av_find_stream_info(pFormatCtx)<0)
  695.     return -1; // Couldn't find stream information
  696.   
  697.   // Dump information about file onto standard error
  698.   dump_format(pFormatCtx, 0, is->filename, 0);
  699.   
  700.   // Find the first video stream

  701.   for(i=0; i<pFormatCtx->nb_streams; i++) {
  702.     if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO &&
  703.        video_index < 0) {
  704.       video_index=i;
  705.     }
  706.     if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_AUDIO &&
  707.        audio_index < 0) {
  708.       audio_index=i;
  709.     }
  710.   }
  711.   if(audio_index >= 0) {
  712.     stream_component_open(is, audio_index);
  713.   }
  714.   if(video_index >= 0) {
  715.     stream_component_open(is, video_index);
  716.   }

  717.   if(is->videoStream < 0 || is->audioStream < 0) {
  718.     fprintf(stderr, "%s: could not open codecs\n", is->filename);
  719.     goto fail;
  720.   }

  721.   // main decode loop

  722.   for(;;) {
  723.     if(is->quit) {
  724.       break;
  725.     }
  726.     // seek stuff goes here
  727.     if(is->audioq.size > MAX_AUDIOQ_SIZE ||
  728.        is->videoq.size > MAX_VIDEOQ_SIZE) {
  729.       SDL_Delay(10);
  730.       continue;
  731.     }
  732.     if(av_read_frame(is->pFormatCtx, packet) < 0) {
  733.       if(url_ferror(&pFormatCtx->pb) == 0) {
  734.     SDL_Delay(100); /* no error; wait for user input */
  735.     continue;
  736.       } else {
  737.     break;
  738.       }
  739.     }
  740.     // Is this a packet from the video stream?
  741.     if(packet->stream_index == is->videoStream) {
  742.       packet_queue_put(&is->videoq, packet);
  743.     } else if(packet->stream_index == is->audioStream) {
  744.       packet_queue_put(&is->audioq, packet);
  745.     } else {
  746.       av_free_packet(packet);
  747.     }
  748.   }
  749.   /* all done - wait for it */
  750.   while(!is->quit) {
  751.     SDL_Delay(100);
  752.   }

  753.  fail:
  754.   {
  755.     SDL_Event event;
  756.     event.type = FF_QUIT_EVENT;
  757.     event.user.data1 = is;
  758.     SDL_PushEvent(&event);
  759.   }
  760.   return 0;
  761. }

  762. int main(int argc, char *argv[]) {

  763.   SDL_Event event;

  764.   VideoState *is;

  765.   is = av_mallocz(sizeof(VideoState));

  766.   if(argc < 2) {
  767.     fprintf(stderr, "Usage: test \n");
  768.     exit(1);
  769.   }
  770.   // Register all formats and codecs
  771.   av_register_all();
  772.   
  773.   if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
  774.     fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
  775.     exit(1);
  776.   }

  777.   // Make a screen to put our video
  778. #ifndef __DARWIN__
  779.         screen = SDL_SetVideoMode(640, 480, 0, 0);
  780. #else
  781.         screen = SDL_SetVideoMode(640, 480, 24, 0);
  782. #endif
  783.   if(!screen) {
  784.     fprintf(stderr, "SDL: could not set video mode - exiting\n");
  785.     exit(1);
  786.   }

  787.   av_strlcpy(is->filename, argv[1], sizeof(is->filename));

  788.   is->pictq_mutex = SDL_CreateMutex();
  789.   is->pictq_cond = SDL_CreateCond();

  790.   schedule_refresh(is, 40);

  791.   is->av_sync_type = DEFAULT_AV_SYNC_TYPE;
  792.   is->parse_tid = SDL_CreateThread(decode_thread, is);
  793.   if(!is->parse_tid) {
  794.     av_free(is);
  795.     return -1;
  796.   }
  797.   for(;;) {

  798.     SDL_WaitEvent(&event);
  799.     switch(event.type) {
  800.     case FF_QUIT_EVENT:
  801.     case SDL_QUIT:
  802.       is->quit = 1;
  803.       SDL_Quit();
  804.       exit(0);
  805.       break;
  806.     case FF_ALLOC_EVENT:
  807.       alloc_picture(event.user.data1);
  808.       break;
  809.     case FF_REFRESH_EVENT:
  810.       video_refresh_timer(event.user.data1);
  811.       break;
  812.     default:
  813.       break;
  814.     }
  815.   }
  816.   return 0;

  817. }


阅读(1422) | 评论(0) | 转发(0) |
给主人留下些什么吧!~~