00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022 #include "config.h"
00023 #include <inttypes.h>
00024 #include <math.h>
00025 #include <limits.h>
00026 #include "libavutil/avstring.h"
00027 #include "libavutil/pixdesc.h"
00028 #include "libavformat/avformat.h"
00029 #include "libavdevice/avdevice.h"
00030 #include "libswscale/swscale.h"
00031 #include "libavcodec/audioconvert.h"
00032 #include "libavcodec/colorspace.h"
00033 #include "libavcodec/opt.h"
00034 #include "libavcodec/avfft.h"
00035
00036 #if CONFIG_AVFILTER
00037 # include "libavfilter/avfilter.h"
00038 # include "libavfilter/avfiltergraph.h"
00039 # include "libavfilter/graphparser.h"
00040 #endif
00041
00042 #include "cmdutils.h"
00043
00044 #include <SDL.h>
00045 #include <SDL_thread.h>
00046
00047 #ifdef __MINGW32__
00048 #undef main
00049 #endif
00050
00051 #include <unistd.h>
00052 #include <assert.h>
00053
const char program_name[] = "FFplay";
const int program_birth_year = 2003;

/* Demuxer buffering limits (bytes / packets); used by the read loop
 * (outside this view) to decide when to stop reading ahead. */
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
#define MIN_FRAMES 5

/* Requested SDL audio callback buffer size — presumably in samples,
 * per SDL_AudioSpec; the open code is outside this view. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* No A/V sync correction is done if the clock error is below this (seconds). */
#define AV_SYNC_THRESHOLD 0.01

/* No A/V correction is attempted at all if the error exceeds this (seconds). */
#define AV_NOSYNC_THRESHOLD 10.0

/* Growth factor applied to the frame-skip estimate each time a frame is late
 * (see video_refresh_timer). */
#define FRAME_SKIP_FACTOR 0.05

/* Maximum audio speed change to get correct sync, in percent. */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* Averaging window used for the audio clock difference. */
#define AUDIO_DIFF_AVG_NB 20

/* Size of the ring buffer of decoded samples kept for visualization
 * (see VideoState.sample_array and video_audio_display). */
#define SAMPLE_ARRAY_SIZE (2*65536)

#if !CONFIG_AVFILTER
/* Scaler quality flags for swscale (only used without libavfilter). */
static int sws_flags = SWS_BICUBIC;
#endif
00086
/* Thread-safe FIFO of demuxed packets, shared between the demuxer thread
 * (producer) and a decoder thread (consumer). */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* singly linked list head / tail */
    int nb_packets;                     /* number of packets currently queued */
    int size;                           /* total bytes queued (payload + node overhead) */
    int abort_request;                  /* when set, packet_queue_get() returns -1 */
    SDL_mutex *mutex;                   /* protects all fields above */
    SDL_cond *cond;                     /* signalled on put and on abort */
} PacketQueue;
00095
#define VIDEO_PICTURE_QUEUE_SIZE 2
#define SUBPICTURE_QUEUE_SIZE 4

/* One decoded video frame waiting in the display queue. */
typedef struct VideoPicture {
    double pts;          /* presentation timestamp of this picture (seconds) */
    double target_clock; /* wall-clock time (av_gettime()/1e6) at which to display it */
    int64_t pos;         /* byte position of the packet in the input file */
    SDL_Overlay *bmp;    /* SDL YUV overlay holding the pixels */
    int width, height;   /* source picture dimensions */
    int allocated;       /* non-zero once the overlay has been allocated */
    enum PixelFormat pix_fmt;

#if CONFIG_AVFILTER
    AVFilterPicRef *picref; /* reference to the filter-graph output picture */
#endif
} VideoPicture;

/* One decoded subtitle plus the pts it was decoded at. */
typedef struct SubPicture {
    double pts; /* presentation timestamp used with start/end_display_time */
    AVSubtitle sub;
} SubPicture;

/* Which clock drives A/V synchronisation (see get_master_clock). */
enum {
    AV_SYNC_AUDIO_MASTER, /* default: follow the audio clock */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK,
};
00123
/* Whole state of one playback session (one opened input). */
typedef struct VideoState {
    SDL_Thread *parse_tid;    /* demuxer thread */
    SDL_Thread *video_tid;    /* video decoder thread */
    SDL_Thread *refresh_tid;  /* thread pushing FF_REFRESH_EVENTs */
    AVInputFormat *iformat;
    int no_background;
    int abort_request;        /* set to request teardown of all threads */
    int paused;
    int last_paused;
    int seek_req;             /* a seek is pending (see stream_seek) */
    int seek_flags;           /* AVSEEK_FLAG_* for the pending seek */
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;    /* return value of av_read_pause (checked in stream_pause) */
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;         /* index of the selected audio stream */

    int av_sync_type;         /* one of the AV_SYNC_* values above */
    double external_clock;    /* external clock base, seconds */
    int64_t external_clock_time; /* av_gettime() when external_clock was set */

    double audio_clock;       /* audio clock, seconds — maintained by the audio path (outside this view) */
    double audio_diff_cum;    /* used for averaged A/V difference computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;    /* size of the SDL hardware audio buffer, bytes */

    /* Decoded-sample buffers; oversized to leave room for sync compensation. */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;       /* points at audio_buf1 or audio_buf2 */
    unsigned int audio_buf_size; /* bytes valid in audio_buf */
    int audio_buf_index;      /* bytes already consumed from audio_buf */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    int show_audio;           /* visualization mode: 1 = waveform, 2 = spectrum (rdft) */
    int16_t sample_array[SAMPLE_ARRAY_SIZE]; /* ring buffer of samples for visualization */
    int sample_array_index;
    int last_i_start;         /* last waveform start index (frozen while paused) */
    RDFTContext *rdft;        /* FFT context for the spectrum display */
    int rdft_bits;
    int xpos;                 /* current x column of the scrolling spectrum */

    SDL_Thread *subtitle_tid; /* subtitle decoder thread */
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE]; /* decoded subtitle ring buffer */
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;       /* accumulated target display time (see compute_target_time) */
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;       /* pts of last decoded frame / predicted next pts */
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts; /* pts of the currently displayed frame */
    double video_current_pts_drift; /* video_current_pts - wall clock at last update */
    int64_t video_current_pos; /* byte position of the currently displayed frame */
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE]; /* decoded picture ring buffer */
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    /* Window geometry. */
    char filename[1024];
    int width, height, xleft, ytop;

    /* Timestamp-fault detection counters (shown in the status line). */
    int64_t faulty_pts;
    int64_t faulty_dts;
    int64_t last_dts_for_fault_detection;
    int64_t last_pts_for_fault_detection;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter; /* output filter of the video chain */
#endif

    float skip_frames;        /* frame-drop estimate, grown by FRAME_SKIP_FACTOR when late */
    float skip_frames_index;
    int refresh;              /* a refresh event is already pending */
} VideoState;
00220
static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* Options specified by the user (set by the command-line parser,
 * outside this view). */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
/* Requested stream index per media type; -1 = automatic selection. */
static int wanted_stream[AVMEDIA_TYPE_NB]={
    [AVMEDIA_TYPE_AUDIO]=-1,
    [AVMEDIA_TYPE_VIDEO]=-1,
    [AVMEDIA_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1;    /* -1 = decide from the container */
static int display_disable;
static int show_status = 1;     /* print the status line on stdout */
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int autoexit;
static int loop=1;
static int framedrop=1;         /* drop late frames (see video_refresh_timer) */

static int rdftspeed=20;        /* spectrum refresh period, milliseconds */
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif

/* Current global display/playback state. */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time; /* av_gettime() at last audio callback */

static AVPacket flush_pkt;      /* sentinel packet: flush decoder after seek */

/* Custom SDL event types. */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;

static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
00286
00287
00288 static void packet_queue_init(PacketQueue *q)
00289 {
00290 memset(q, 0, sizeof(PacketQueue));
00291 q->mutex = SDL_CreateMutex();
00292 q->cond = SDL_CreateCond();
00293 packet_queue_put(q, &flush_pkt);
00294 }
00295
00296 static void packet_queue_flush(PacketQueue *q)
00297 {
00298 AVPacketList *pkt, *pkt1;
00299
00300 SDL_LockMutex(q->mutex);
00301 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
00302 pkt1 = pkt->next;
00303 av_free_packet(&pkt->pkt);
00304 av_freep(&pkt);
00305 }
00306 q->last_pkt = NULL;
00307 q->first_pkt = NULL;
00308 q->nb_packets = 0;
00309 q->size = 0;
00310 SDL_UnlockMutex(q->mutex);
00311 }
00312
00313 static void packet_queue_end(PacketQueue *q)
00314 {
00315 packet_queue_flush(q);
00316 SDL_DestroyMutex(q->mutex);
00317 SDL_DestroyCond(q->cond);
00318 }
00319
00320 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
00321 {
00322 AVPacketList *pkt1;
00323
00324
00325 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
00326 return -1;
00327
00328 pkt1 = av_malloc(sizeof(AVPacketList));
00329 if (!pkt1)
00330 return -1;
00331 pkt1->pkt = *pkt;
00332 pkt1->next = NULL;
00333
00334
00335 SDL_LockMutex(q->mutex);
00336
00337 if (!q->last_pkt)
00338
00339 q->first_pkt = pkt1;
00340 else
00341 q->last_pkt->next = pkt1;
00342 q->last_pkt = pkt1;
00343 q->nb_packets++;
00344 q->size += pkt1->pkt.size + sizeof(*pkt1);
00345
00346 SDL_CondSignal(q->cond);
00347
00348 SDL_UnlockMutex(q->mutex);
00349 return 0;
00350 }
00351
00352 static void packet_queue_abort(PacketQueue *q)
00353 {
00354 SDL_LockMutex(q->mutex);
00355
00356 q->abort_request = 1;
00357
00358 SDL_CondSignal(q->cond);
00359
00360 SDL_UnlockMutex(q->mutex);
00361 }
00362
00363
00364 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
00365 {
00366 AVPacketList *pkt1;
00367 int ret;
00368
00369 SDL_LockMutex(q->mutex);
00370
00371 for(;;) {
00372 if (q->abort_request) {
00373 ret = -1;
00374 break;
00375 }
00376
00377 pkt1 = q->first_pkt;
00378 if (pkt1) {
00379 q->first_pkt = pkt1->next;
00380 if (!q->first_pkt)
00381 q->last_pkt = NULL;
00382 q->nb_packets--;
00383 q->size -= pkt1->pkt.size + sizeof(*pkt1);
00384 *pkt = pkt1->pkt;
00385 av_free(pkt1);
00386 ret = 1;
00387 break;
00388 } else if (!block) {
00389 ret = 0;
00390 break;
00391 } else {
00392 SDL_CondWait(q->cond, q->mutex);
00393 }
00394 }
00395 SDL_UnlockMutex(q->mutex);
00396 return ret;
00397 }
00398
00399 static inline void fill_rectangle(SDL_Surface *screen,
00400 int x, int y, int w, int h, int color)
00401 {
00402 SDL_Rect rect;
00403 rect.x = x;
00404 rect.y = y;
00405 rect.w = w;
00406 rect.h = h;
00407 SDL_FillRect(screen, &rect, color);
00408 }
00409
#if 0
/* Disabled: paint the four border strips around the video area with a
 * solid color (left, right, top, bottom), clamping each strip width to 0. */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* strip widths/heights, clamped to zero when the video overflows */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif
00447
/* Blend newp over oldp with alpha a; s scales accumulated sums
 * (0 = one sample, 1 = sum of two, 2 = sum of four). */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack a 32-bit ARGB pixel into its components. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the palette entry of an 8-bit pixel and unpack it as AYUV. */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack AYUV components back into a 32-bit word. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}

/* Bytes per source pixel: subtitle bitmaps are 8-bit palettized. */
#define BPP 1
00476
00477 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
00478 {
00479 int wrap, wrap3, width2, skip2;
00480 int y, u, v, a, u1, v1, a1, w, h;
00481 uint8_t *lum, *cb, *cr;
00482 const uint8_t *p;
00483 const uint32_t *pal;
00484 int dstx, dsty, dstw, dsth;
00485
00486 dstw = av_clip(rect->w, 0, imgw);
00487 dsth = av_clip(rect->h, 0, imgh);
00488 dstx = av_clip(rect->x, 0, imgw - dstw);
00489 dsty = av_clip(rect->y, 0, imgh - dsth);
00490 lum = dst->data[0] + dsty * dst->linesize[0];
00491 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
00492 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
00493
00494 width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
00495 skip2 = dstx >> 1;
00496 wrap = dst->linesize[0];
00497 wrap3 = rect->pict.linesize[0];
00498 p = rect->pict.data[0];
00499 pal = (const uint32_t *)rect->pict.data[1];
00500
00501 if (dsty & 1) {
00502 lum += dstx;
00503 cb += skip2;
00504 cr += skip2;
00505
00506 if (dstx & 1) {
00507 YUVA_IN(y, u, v, a, p, pal);
00508 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00509 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00510 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00511 cb++;
00512 cr++;
00513 lum++;
00514 p += BPP;
00515 }
00516 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
00517 YUVA_IN(y, u, v, a, p, pal);
00518 u1 = u;
00519 v1 = v;
00520 a1 = a;
00521 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00522
00523 YUVA_IN(y, u, v, a, p + BPP, pal);
00524 u1 += u;
00525 v1 += v;
00526 a1 += a;
00527 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00528 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
00529 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
00530 cb++;
00531 cr++;
00532 p += 2 * BPP;
00533 lum += 2;
00534 }
00535 if (w) {
00536 YUVA_IN(y, u, v, a, p, pal);
00537 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00538 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00539 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00540 p++;
00541 lum++;
00542 }
00543 p += wrap3 - dstw * BPP;
00544 lum += wrap - dstw - dstx;
00545 cb += dst->linesize[1] - width2 - skip2;
00546 cr += dst->linesize[2] - width2 - skip2;
00547 }
00548 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
00549 lum += dstx;
00550 cb += skip2;
00551 cr += skip2;
00552
00553 if (dstx & 1) {
00554 YUVA_IN(y, u, v, a, p, pal);
00555 u1 = u;
00556 v1 = v;
00557 a1 = a;
00558 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00559 p += wrap3;
00560 lum += wrap;
00561 YUVA_IN(y, u, v, a, p, pal);
00562 u1 += u;
00563 v1 += v;
00564 a1 += a;
00565 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00566 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
00567 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
00568 cb++;
00569 cr++;
00570 p += -wrap3 + BPP;
00571 lum += -wrap + 1;
00572 }
00573 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
00574 YUVA_IN(y, u, v, a, p, pal);
00575 u1 = u;
00576 v1 = v;
00577 a1 = a;
00578 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00579
00580 YUVA_IN(y, u, v, a, p + BPP, pal);
00581 u1 += u;
00582 v1 += v;
00583 a1 += a;
00584 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00585 p += wrap3;
00586 lum += wrap;
00587
00588 YUVA_IN(y, u, v, a, p, pal);
00589 u1 += u;
00590 v1 += v;
00591 a1 += a;
00592 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00593
00594 YUVA_IN(y, u, v, a, p + BPP, pal);
00595 u1 += u;
00596 v1 += v;
00597 a1 += a;
00598 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00599
00600 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
00601 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
00602
00603 cb++;
00604 cr++;
00605 p += -wrap3 + 2 * BPP;
00606 lum += -wrap + 2;
00607 }
00608 if (w) {
00609 YUVA_IN(y, u, v, a, p, pal);
00610 u1 = u;
00611 v1 = v;
00612 a1 = a;
00613 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00614 p += wrap3;
00615 lum += wrap;
00616 YUVA_IN(y, u, v, a, p, pal);
00617 u1 += u;
00618 v1 += v;
00619 a1 += a;
00620 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00621 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
00622 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
00623 cb++;
00624 cr++;
00625 p += -wrap3 + BPP;
00626 lum += -wrap + 1;
00627 }
00628 p += wrap3 + (wrap3 - dstw * BPP);
00629 lum += wrap + (wrap - dstw - dstx);
00630 cb += dst->linesize[1] - width2 - skip2;
00631 cr += dst->linesize[2] - width2 - skip2;
00632 }
00633
00634 if (h) {
00635 lum += dstx;
00636 cb += skip2;
00637 cr += skip2;
00638
00639 if (dstx & 1) {
00640 YUVA_IN(y, u, v, a, p, pal);
00641 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00642 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00643 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00644 cb++;
00645 cr++;
00646 lum++;
00647 p += BPP;
00648 }
00649 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
00650 YUVA_IN(y, u, v, a, p, pal);
00651 u1 = u;
00652 v1 = v;
00653 a1 = a;
00654 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00655
00656 YUVA_IN(y, u, v, a, p + BPP, pal);
00657 u1 += u;
00658 v1 += v;
00659 a1 += a;
00660 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00661 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
00662 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
00663 cb++;
00664 cr++;
00665 p += 2 * BPP;
00666 lum += 2;
00667 }
00668 if (w) {
00669 YUVA_IN(y, u, v, a, p, pal);
00670 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00671 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00672 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00673 }
00674 }
00675 }
00676
00677 static void free_subpicture(SubPicture *sp)
00678 {
00679 int i;
00680
00681 for (i = 0; i < sp->sub.num_rects; i++)
00682 {
00683 av_freep(&sp->sub.rects[i]->pict.data[0]);
00684 av_freep(&sp->sub.rects[i]->pict.data[1]);
00685 av_freep(&sp->sub.rects[i]);
00686 }
00687
00688 av_free(sp->sub.rects);
00689
00690 memset(&sp->sub, 0, sizeof(AVSubtitle));
00691 }
00692
/* Display the picture at the read index of the picture queue: blend any due
 * subtitle into the YUV overlay, compute the letterboxed destination
 * rectangle from the aspect ratio, and blit the overlay. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
        /* pixel aspect ratio from the filter-graph output picture */
        if (vp->picref->pixel_aspect.num == 0)
            aspect_ratio = 0;
        else
            aspect_ratio = av_q2d(vp->picref->pixel_aspect);
#else
        /* pixel aspect ratio: prefer the container's value, fall back to the
         * codec's, then to "unspecified" (0) */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* convert sample aspect ratio to display aspect ratio */
        aspect_ratio *= (float)vp->width / (float)vp->height;

        /* disabled: DTG active-format (AFD) handling */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch (is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* blend the subtitle once its start_display_time has passed */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* note: planes 1 and 2 are swapped between the SDL
                     * overlay layout and AVPicture's U/V order */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }

        /* letterbox: fit the picture in the window keeping the aspect ratio,
         * with even width/height */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background (currently nothing is drawn) */
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
00814
/* Mathematical modulo: result is always in [0, b) for b > 0,
 * unlike C's % which keeps the sign of a. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r < 0 ? r + b : r;
}
00823
00824 static void video_audio_display(VideoState *s)
00825 {
00826 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
00827 int ch, channels, h, h2, bgcolor, fgcolor;
00828 int16_t time_diff;
00829 int rdft_bits, nb_freq;
00830
00831 for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
00832 ;
00833 nb_freq= 1<<(rdft_bits-1);
00834
00835
00836 channels = s->audio_st->codec->channels;
00837 nb_display_channels = channels;
00838 if (!s->paused) {
00839 int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
00840 n = 2 * channels;
00841 delay = audio_write_get_buf_size(s);
00842 delay /= n;
00843
00844
00845
00846 if (audio_callback_time) {
00847 time_diff = av_gettime() - audio_callback_time;
00848 delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
00849 }
00850
00851 delay += 2*data_used;
00852 if (delay < data_used)
00853 delay = data_used;
00854
00855 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
00856 if(s->show_audio==1){
00857 h= INT_MIN;
00858 for(i=0; i<1000; i+=channels){
00859 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
00860 int a= s->sample_array[idx];
00861 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
00862 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
00863 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
00864 int score= a-d;
00865 if(h<score && (b^c)<0){
00866 h= score;
00867 i_start= idx;
00868 }
00869 }
00870 }
00871
00872 s->last_i_start = i_start;
00873 } else {
00874 i_start = s->last_i_start;
00875 }
00876
00877 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
00878 if(s->show_audio==1){
00879 fill_rectangle(screen,
00880 s->xleft, s->ytop, s->width, s->height,
00881 bgcolor);
00882
00883 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
00884
00885
00886 h = s->height / nb_display_channels;
00887
00888 h2 = (h * 9) / 20;
00889 for(ch = 0;ch < nb_display_channels; ch++) {
00890 i = i_start + ch;
00891 y1 = s->ytop + ch * h + (h / 2);
00892 for(x = 0; x < s->width; x++) {
00893 y = (s->sample_array[i] * h2) >> 15;
00894 if (y < 0) {
00895 y = -y;
00896 ys = y1 - y;
00897 } else {
00898 ys = y1;
00899 }
00900 fill_rectangle(screen,
00901 s->xleft + x, ys, 1, y,
00902 fgcolor);
00903 i += channels;
00904 if (i >= SAMPLE_ARRAY_SIZE)
00905 i -= SAMPLE_ARRAY_SIZE;
00906 }
00907 }
00908
00909 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
00910
00911 for(ch = 1;ch < nb_display_channels; ch++) {
00912 y = s->ytop + ch * h;
00913 fill_rectangle(screen,
00914 s->xleft, y, s->width, 1,
00915 fgcolor);
00916 }
00917 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
00918 }else{
00919 nb_display_channels= FFMIN(nb_display_channels, 2);
00920 if(rdft_bits != s->rdft_bits){
00921 av_rdft_end(s->rdft);
00922 s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
00923 s->rdft_bits= rdft_bits;
00924 }
00925 {
00926 FFTSample data[2][2*nb_freq];
00927 for(ch = 0;ch < nb_display_channels; ch++) {
00928 i = i_start + ch;
00929 for(x = 0; x < 2*nb_freq; x++) {
00930 double w= (x-nb_freq)*(1.0/nb_freq);
00931 data[ch][x]= s->sample_array[i]*(1.0-w*w);
00932 i += channels;
00933 if (i >= SAMPLE_ARRAY_SIZE)
00934 i -= SAMPLE_ARRAY_SIZE;
00935 }
00936 av_rdft_calc(s->rdft, data[ch]);
00937 }
00938
00939 for(y=0; y<s->height; y++){
00940 double w= 1/sqrt(nb_freq);
00941 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
00942 int b= sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0] + data[1][2*y+1]*data[1][2*y+1]));
00943 a= FFMIN(a,255);
00944 b= FFMIN(b,255);
00945 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
00946
00947 fill_rectangle(screen,
00948 s->xpos, s->height-y, 1, 1,
00949 fgcolor);
00950 }
00951 }
00952 SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
00953 s->xpos++;
00954 if(s->xpos >= s->width)
00955 s->xpos= s->xleft;
00956 }
00957 }
00958
/* (Re)create the SDL output window. Size priority: forced fullscreen size,
 * user-requested size, filter/codec picture size, 640x480 fallback.
 * Returns 0 on success (including "nothing to do"), -1 on SDL failure. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        /* size of the filter chain's output picture */
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* window already has the wanted size: keep it */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank display on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
01008
01009
01010 static void video_display(VideoState *is)
01011 {
01012 if(!screen)
01013 video_open(cur_stream);
01014 if (is->audio_st && is->show_audio)
01015 video_audio_display(is);
01016 else if (is->video_st)
01017 video_image_display(is);
01018 }
01019
01020 static int refresh_thread(void *opaque)
01021 {
01022 VideoState *is= opaque;
01023 while(!is->abort_request){
01024 SDL_Event event;
01025 event.type = FF_REFRESH_EVENT;
01026 event.user.data1 = opaque;
01027 if(!is->refresh){
01028 is->refresh=1;
01029 SDL_PushEvent(&event);
01030 }
01031 usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000);
01032 }
01033 return 0;
01034 }
01035
01036
01037 static double get_audio_clock(VideoState *is)
01038 {
01039 double pts;
01040 int hw_buf_size, bytes_per_sec;
01041 pts = is->audio_clock;
01042 hw_buf_size = audio_write_get_buf_size(is);
01043 bytes_per_sec = 0;
01044 if (is->audio_st) {
01045 bytes_per_sec = is->audio_st->codec->sample_rate *
01046 2 * is->audio_st->codec->channels;
01047 }
01048 if (bytes_per_sec)
01049 pts -= (double)hw_buf_size / bytes_per_sec;
01050 return pts;
01051 }
01052
01053
01054 static double get_video_clock(VideoState *is)
01055 {
01056 if (is->paused) {
01057 return is->video_current_pts;
01058 } else {
01059 return is->video_current_pts_drift + av_gettime() / 1000000.0;
01060 }
01061 }
01062
01063
01064 static double get_external_clock(VideoState *is)
01065 {
01066 int64_t ti;
01067 ti = av_gettime();
01068 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
01069 }
01070
01071
01072 static double get_master_clock(VideoState *is)
01073 {
01074 double val;
01075
01076 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
01077 if (is->video_st)
01078 val = get_video_clock(is);
01079 else
01080 val = get_audio_clock(is);
01081 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
01082 if (is->audio_st)
01083 val = get_audio_clock(is);
01084 else
01085 val = get_video_clock(is);
01086 } else {
01087 val = get_external_clock(is);
01088 }
01089 return val;
01090 }
01091
01092
01093 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
01094 {
01095 if (!is->seek_req) {
01096 is->seek_pos = pos;
01097 is->seek_rel = rel;
01098 is->seek_flags &= ~AVSEEK_FLAG_BYTE;
01099 if (seek_by_bytes)
01100 is->seek_flags |= AVSEEK_FLAG_BYTE;
01101 is->seek_req = 1;
01102 }
01103 }
01104
01105
/* Toggle pause. When resuming, advance frame_timer by the paused duration
 * and refresh the video clock bookkeeping so get_video_clock() does not
 * jump over the pause gap. */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* resuming: account for the wall-clock time spent paused */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        /* read_pause_return is set by the demuxer thread's av_read_pause call
           (outside this view); ENOSYS means the protocol cannot pause */
        if(is->read_pause_return != AVERROR(ENOSYS)){
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
01117
/* Compute the wall-clock time at which the frame with pts frame_current_pts
 * should be displayed, accumulating into is->frame_timer. The inter-frame
 * delay is taken from consecutive pts and nudged towards the master clock
 * when audio or the external clock is the sync master. */
static double compute_target_time(double frame_current_pts, VideoState *is)
{
    double delay, sync_threshold, diff;

    /* compute nominal delay from the pts difference to the previous frame */
    delay = frame_current_pts - is->frame_last_pts;
    if (delay <= 0 || delay >= 10.0) {
        /* incorrect delay: reuse the previous one */
        delay = is->frame_last_delay;
    } else {
        is->frame_last_delay = delay;
    }
    is->frame_last_pts = frame_current_pts;

    /* update delay to follow the master synchronisation source */
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        diff = get_video_clock(is) - get_master_clock(is);

        /* skip or repeat frame: we take into account the delay to compute
           the threshold; we still duplicate or delete a frame if the
           difference is beyond it */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold)
                delay = 0;           /* video is late: show at once */
            else if (diff >= sync_threshold)
                delay = 2 * delay;   /* video is early: hold longer */
        }
    }
    is->frame_timer += delay;
#if defined(DEBUG_SYNC)
    /* NOTE(review): actual_delay is not defined in this function — this
       printf would not compile if DEBUG_SYNC were enabled */
    printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
           delay, actual_delay, frame_current_pts, -diff);
#endif

    return is->frame_timer;
}
01158
01159
/* Handler for FF_REFRESH_EVENT: show the next picture when its target time
 * has arrived, drop late frames if framedrop is enabled, retire expired
 * subtitles, and periodically print the status line. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            /* nothing to do: no picture queued for display */
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not yet time for this frame: wait for a later refresh */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            /* when will the frame after this one be due? */
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                /* estimate from the predicted pts of the next frame */
                next_target= vp->target_clock + is->video_clock - vp->pts;
            }
            if(framedrop && time > next_target){
                /* we are late: raise the drop estimate, and skip this frame
                   when another one is already queued (or we are very late) */
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* stream switch: drop every queued subtitle */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* retire the subtitle when past its end time, or when
                           the following subtitle has already started */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* audio-only stream: just refresh the visualization; a real timing
           scheme driven by the audio output buffer is not implemented */
        video_display(is);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* print the status line at most every 30 ms */
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
01297
01298
01299
01300 static void alloc_picture(void *opaque)
01301 {
01302 VideoState *is = opaque;
01303 VideoPicture *vp;
01304
01305 vp = &is->pictq[is->pictq_windex];
01306
01307 if (vp->bmp)
01308 SDL_FreeYUVOverlay(vp->bmp);
01309
01310 #if CONFIG_AVFILTER
01311 if (vp->picref)
01312 avfilter_unref_pic(vp->picref);
01313 vp->picref = NULL;
01314
01315 vp->width = is->out_video_filter->inputs[0]->w;
01316 vp->height = is->out_video_filter->inputs[0]->h;
01317 vp->pix_fmt = is->out_video_filter->inputs[0]->format;
01318 #else
01319 vp->width = is->video_st->codec->width;
01320 vp->height = is->video_st->codec->height;
01321 vp->pix_fmt = is->video_st->codec->pix_fmt;
01322 #endif
01323
01324 vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
01325 SDL_YV12_OVERLAY,
01326 screen);
01327
01328 SDL_LockMutex(is->pictq_mutex);
01329 vp->allocated = 1;
01330 SDL_CondSignal(is->pictq_cond);
01331 SDL_UnlockMutex(is->pictq_mutex);
01332 }
01333
/* Copy (or hand over) a decoded frame into the next free slot of the picture
 * queue, converting it into the SDL YV12 overlay.  Blocks until a slot is
 * free.  Returns 0 on success, -1 if the video queue was aborted. */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
    int dst_pix_fmt;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#endif

    SDL_LockMutex(is->pictq_mutex);

    /* queue full while the refresh timer is idle: raise the frame-drop rate */
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));

    /* wait until we have space to put a new picture */
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* allocate or resize the overlay if it does not match the frame size */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread: post an event and
         * wait for alloc_picture() to signal completion */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, and we can display it, copy it in */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        /* take ownership of the filter picture reference carried in opaque */
        if(vp->picref)
            avfilter_unref_pic(vp->picref);
        vp->picref = src_frame->opaque;
#endif

        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        /* SDL's YV12 stores planes as Y,V,U, so U and V are swapped here */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        /* the filter graph already outputs the display format (see
         * output_query_formats), so a plane copy suffices */
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        /* convert whatever the decoder produced into YUV420P for SDL */
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif

        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* advance the write index and publish the picture under the lock */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        vp->target_clock= compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
01455
01460 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
01461 {
01462 double frame_delay, pts;
01463
01464 pts = pts1;
01465
01466 if (pts != 0) {
01467
01468 is->video_clock = pts;
01469 } else {
01470 pts = is->video_clock;
01471 }
01472
01473 frame_delay = av_q2d(is->video_st->codec->time_base);
01474
01475
01476 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
01477 is->video_clock += frame_delay;
01478
01479 #if defined(DEBUG_SYNC) && 0
01480 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
01481 av_get_pict_type_char(src_frame->pict_type), pts, pts1);
01482 #endif
01483 return queue_picture(is, src_frame, pts, pos);
01484 }
01485
/* Pull one packet from the video queue and decode it.
 * Returns 1 when a frame should be displayed, 0 when no displayable frame
 * was produced (flush, decoder delay, or frame-skipping), -1 on abort.
 * On success *pts receives the chosen timestamp in stream time_base units. */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

    if (packet_queue_get(&is->videoq, pkt, 1) < 0)
        return -1;

    /* seek flush marker: reset decoder and all per-stream timing state */
    if(pkt->data == flush_pkt.data){
        avcodec_flush_buffers(is->video_st->codec);

        SDL_LockMutex(is->pictq_mutex);
        /* make all queued pictures display immediately so they drain */
        for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
            is->pictq[i].target_clock= 0;
        }
        while (is->pictq_size && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        is->video_current_pos= -1;
        SDL_UnlockMutex(is->pictq_mutex);

        is->last_dts_for_fault_detection=
        is->last_pts_for_fault_detection= INT64_MIN;
        is->frame_last_pts= AV_NOPTS_VALUE;
        is->frame_last_delay = 0;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->skip_frames= 1;
        is->skip_frames_index= 0;
        return 0;
    }

    /* smuggle the packet pts through the decoder so it comes back attached
     * to the frame it belongs to (decoders may reorder frames) */
    is->video_st->codec->reordered_opaque= pkt->pts;
    len1 = avcodec_decode_video2(is->video_st->codec,
                                 frame, &got_picture,
                                 pkt);

    if (got_picture) {
        /* count non-monotonic dts/pts to decide which timestamp to trust */
        if(pkt->dts != AV_NOPTS_VALUE){
            is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
            is->last_dts_for_fault_detection= pkt->dts;
        }
        if(frame->reordered_opaque != AV_NOPTS_VALUE){
            is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
            is->last_pts_for_fault_detection= frame->reordered_opaque;
        }
    }

    /* prefer the reordered pts when forced, when it misbehaves less often
     * than dts, or when no dts exists; otherwise fall back to dts, then 0 */
    if(   (   decoder_reorder_pts==1
           || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
           || pkt->dts == AV_NOPTS_VALUE)
       && frame->reordered_opaque != AV_NOPTS_VALUE)
        *pts= frame->reordered_opaque;
    else if(pkt->dts != AV_NOPTS_VALUE)
        *pts= pkt->dts;
    else
        *pts= 0;

    /* honor the frame-drop ratio accumulated by the display side */
    if (got_picture){
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
01557
01558 #if CONFIG_AVFILTER
/* Private state of the "ffplay_input" source filter (see input_filter). */
typedef struct {
    VideoState *is;   /* owning player context */
    AVFrame *frame;   /* scratch frame reused for every decode */
    int use_dr1;      /* non-zero when decoding directly into filter buffers */
} FilterPriv;
01564
/* get_buffer callback used when DR1 is enabled: allocate the decoder's
 * output frame directly from the filter graph so no copy is needed later.
 * Returns 0 on success, -1 if no buffer could be obtained. */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterPicRef  *ref;
    int perms = AV_PERM_WRITE;
    int w, h, stride[4];
    unsigned edge;

    /* translate the decoder's buffer hints into filter permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    /* over-allocate: aligned dimensions plus an edge border on every side
     * (unless the codec emulates edges itself) */
    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    /* the visible picture is the unpadded codec size */
    ref->w = codec->width;
    ref->h = codec->height;
    for(int i = 0; i < 3; i ++) {
        unsigned hshift = i == 0 ? 0 : av_pix_fmt_descriptors[ref->pic->format].log2_chroma_w;
        unsigned vshift = i == 0 ? 0 : av_pix_fmt_descriptors[ref->pic->format].log2_chroma_h;

        /* offset each plane pointer past the edge border, with chroma
         * planes shifted by their subsampling factors */
        ref->data[i] += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        pic->data[i] = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    pic->opaque = ref;        /* carry the filter reference with the frame */
    pic->age = INT_MAX;
    pic->type = FF_BUFFER_TYPE_USER;
    return 0;
}
01605
01606 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
01607 {
01608 memset(pic->data, 0, sizeof(pic->data));
01609 avfilter_unref_pic(pic->opaque);
01610 }
01611
01612 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
01613 {
01614 FilterPriv *priv = ctx->priv;
01615 AVCodecContext *codec;
01616 if(!opaque) return -1;
01617
01618 priv->is = opaque;
01619 codec = priv->is->video_st->codec;
01620 codec->opaque = ctx;
01621 if(codec->codec->capabilities & CODEC_CAP_DR1) {
01622 priv->use_dr1 = 1;
01623 codec->get_buffer = input_get_buffer;
01624 codec->release_buffer = input_release_buffer;
01625 }
01626
01627 priv->frame = avcodec_alloc_frame();
01628
01629 return 0;
01630 }
01631
01632 static void input_uninit(AVFilterContext *ctx)
01633 {
01634 FilterPriv *priv = ctx->priv;
01635 av_free(priv->frame);
01636 }
01637
/* request_frame callback: decode video until a displayable frame appears,
 * wrap it in an AVFilterPicRef and push it down the link.
 * Returns 0 on success, -1 on abort/error. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterPicRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* loop over packets that yield no frame (ret == 0), freeing each one */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if(priv->use_dr1) {
        /* DR1: the frame already lives in a filter buffer; just re-ref it */
        picref = avfilter_ref_pic(priv->frame->opaque, ~0);
    } else {
        /* otherwise allocate a filter buffer and copy the planes over */
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_picture_copy((AVPicture *)&picref->data, (AVPicture *)priv->frame,
                        picref->pic->format, link->w, link->h);
    }
    av_free_packet(&pkt);

    picref->pts = pts;
    /* NOTE(review): pkt.pos is read after av_free_packet(); that call only
     * releases pkt.data, so pos should still be valid — confirm against the
     * av_free_packet implementation in this lavf version */
    picref->pos = pkt.pos;
    picref->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    avfilter_start_frame(link, picref);
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    return 0;
}
01669
01670 static int input_query_formats(AVFilterContext *ctx)
01671 {
01672 FilterPriv *priv = ctx->priv;
01673 enum PixelFormat pix_fmts[] = {
01674 priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
01675 };
01676
01677 avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
01678 return 0;
01679 }
01680
01681 static int input_config_props(AVFilterLink *link)
01682 {
01683 FilterPriv *priv = link->src->priv;
01684 AVCodecContext *c = priv->is->video_st->codec;
01685
01686 link->w = c->width;
01687 link->h = c->height;
01688
01689 return 0;
01690 }
01691
/* Source filter that injects decoded frames from the video decoder into the
 * filter graph.  It has no input pads; frames are produced on demand through
 * the output pad's request_frame callback (input_request_frame above). */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    /* no inputs: this is a pure source */
    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
01710
/* end_frame callback of the sink pad: intentionally empty.  Completed
 * pictures are pulled from the link by get_filtered_video_frame() instead
 * of being pushed onward. */
static void output_end_frame(AVFilterLink *link)
{
}
01714
01715 static int output_query_formats(AVFilterContext *ctx)
01716 {
01717 enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
01718
01719 avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
01720 return 0;
01721 }
01722
01723 static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
01724 int64_t *pts, int64_t *pos)
01725 {
01726 AVFilterPicRef *pic;
01727
01728 if(avfilter_request_frame(ctx->inputs[0]))
01729 return -1;
01730 if(!(pic = ctx->inputs[0]->cur_pic))
01731 return -1;
01732 ctx->inputs[0]->cur_pic = NULL;
01733
01734 frame->opaque = pic;
01735 *pts = pic->pts;
01736 *pos = pic->pos;
01737
01738 memcpy(frame->data, pic->data, sizeof(frame->data));
01739 memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));
01740
01741 return 1;
01742 }
01743
/* Sink filter at the end of the graph.  It does nothing on its own
 * (end_frame is a no-op); the video thread pulls frames from its input
 * link via get_filtered_video_frame(). */
static AVFilter output_filter =
{
    .name      = "ffplay_output",

    .query_formats = output_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .end_frame = output_end_frame,
                                    .min_perms = AV_PERM_READ, },
                                  { .name = NULL }},
    /* no outputs: this is a pure sink */
    .outputs   = (AVFilterPad[]) {{ .name = NULL }},
};
01757 #endif
01758
/* Video decoding thread: builds the (optional) filter graph, then loops
 * pulling decoded frames and handing them to output_picture2() until the
 * video packet queue is aborted.  Always returns 0. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    int64_t pos;
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
    graph->scale_sws_opts = av_strdup("sws_flags=bilinear");

    /* source = decoder wrapper, sink = frame collector */
    if(!(filt_src = avfilter_open(&input_filter, "src"))) goto the_end;
    if(!(filt_out = avfilter_open(&output_filter, "out"))) goto the_end;

    if(avfilter_init_filter(filt_src, NULL, is)) goto the_end;
    if(avfilter_init_filter(filt_out, NULL, frame)) goto the_end;

    /* splice the user's -vf chain between source and sink, or link them
     * directly when no filter string was given */
    if(vfilters) {
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs = av_malloc(sizeof(AVFilterInOut));

        outputs->name = av_strdup("in");
        outputs->filter = filt_src;
        outputs->pad_idx = 0;
        outputs->next = NULL;

        inputs->name = av_strdup("out");
        inputs->filter = filt_out;
        inputs->pad_idx = 0;
        inputs->next = NULL;

        if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
            goto the_end;
        av_freep(&vfilters);
    } else {
        if(avfilter_link(filt_src, 0, filt_out, 0) < 0) goto the_end;
    }
    avfilter_graph_add_filter(graph, filt_src);
    avfilter_graph_add_filter(graph, filt_out);

    if(avfilter_graph_check_validity(graph, NULL)) goto the_end;
    if(avfilter_graph_config_formats(graph, NULL)) goto the_end;
    if(avfilter_graph_config_links(graph, NULL)) goto the_end;

    /* alloc_picture/queue_picture read frame dimensions from this filter */
    is->out_video_filter = filt_out;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#endif
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &pts_int, &pos);
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        /* ret == 0: no displayable frame this round (flush or skip) */
        if (!ret)
            continue;

        /* convert stream time_base units to seconds */
        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts, pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* single-step mode: pause again after each displayed frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_destroy(graph);
    av_freep(&graph);
#endif
    av_free(frame);
    return 0;
}
01850
/* Subtitle decoding thread: decodes subtitle packets, converts bitmap
 * palettes from RGBA to YUVA for blending, and queues them in subpq.
 * Runs until the subtitle packet queue is aborted.  Always returns 0. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* seek flush marker: just reset the decoder */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subpicture queue */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

        /* convert the packet pts to seconds; 0 if absent */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                        &sp->sub, &got_subtitle,
                                        pkt);

        /* only bitmap subtitles (format == 0) are handled for display */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each rect's RGBA palette to CCIR YUVA in place */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* publish the subpicture under the queue lock */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
    }
 the_end:
    return 0;
}
01925
01926
01927 static void update_sample_display(VideoState *is, short *samples, int samples_size)
01928 {
01929 int size, len, channels;
01930
01931 channels = is->audio_st->codec->channels;
01932
01933 size = samples_size / sizeof(short);
01934 while (size > 0) {
01935 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
01936 if (len > size)
01937 len = size;
01938 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
01939 samples += len;
01940 is->sample_array_index += len;
01941 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
01942 is->sample_array_index = 0;
01943 size -= len;
01944 }
01945 }
01946
01947
01948
01949 static int synchronize_audio(VideoState *is, short *samples,
01950 int samples_size1, double pts)
01951 {
01952 int n, samples_size;
01953 double ref_clock;
01954
01955 n = 2 * is->audio_st->codec->channels;
01956 samples_size = samples_size1;
01957
01958
01959 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
01960 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
01961 double diff, avg_diff;
01962 int wanted_size, min_size, max_size, nb_samples;
01963
01964 ref_clock = get_master_clock(is);
01965 diff = get_audio_clock(is) - ref_clock;
01966
01967 if (diff < AV_NOSYNC_THRESHOLD) {
01968 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
01969 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
01970
01971 is->audio_diff_avg_count++;
01972 } else {
01973
01974 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
01975
01976 if (fabs(avg_diff) >= is->audio_diff_threshold) {
01977 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
01978 nb_samples = samples_size / n;
01979
01980 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
01981 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
01982 if (wanted_size < min_size)
01983 wanted_size = min_size;
01984 else if (wanted_size > max_size)
01985 wanted_size = max_size;
01986
01987
01988 if (wanted_size < samples_size) {
01989
01990 samples_size = wanted_size;
01991 } else if (wanted_size > samples_size) {
01992 uint8_t *samples_end, *q;
01993 int nb;
01994
01995
01996 nb = (samples_size - wanted_size);
01997 samples_end = (uint8_t *)samples + samples_size - n;
01998 q = samples_end + n;
01999 while (nb > 0) {
02000 memcpy(q, samples_end, n);
02001 q += n;
02002 nb -= n;
02003 }
02004 samples_size = wanted_size;
02005 }
02006 }
02007 #if 0
02008 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
02009 diff, avg_diff, samples_size - samples_size1,
02010 is->audio_clock, is->video_clock, is->audio_diff_threshold);
02011 #endif
02012 }
02013 } else {
02014
02015
02016 is->audio_diff_avg_count = 0;
02017 is->audio_diff_cum = 0;
02018 }
02019 }
02020
02021 return samples_size;
02022 }
02023
02024
/* Decode one audio frame and store it (as interleaved S16) in is->audio_buf.
 * Converts the decoder's sample format with av_audio_convert when needed.
 * On success returns the decoded byte count and sets *pts_ptr to the
 * presentation time of the data; returns -1 on pause or abort. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;   /* remaining slice of pkt */
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* decode error: skip the rest of this packet */
                pkt_temp->size = 0;
                break;
            }

            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* sample format changed (or first frame): rebuild the converter */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                    break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert audio_buf1 -> audio_buf2 as packed S16 */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* output is always 2 bytes per sample (S16) */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it from the running audio clock */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* current packet exhausted: free it and fetch the next one */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet (blocking) */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* resync the audio clock from the packet timestamp if present */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
02126
02127
02128
02129 static int audio_write_get_buf_size(VideoState *is)
02130 {
02131 return is->audio_buf_size - is->audio_buf_index;
02132 }
02133
02134
02135
02136 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
02137 {
02138 VideoState *is = opaque;
02139 int audio_size, len1;
02140 double pts;
02141
02142 audio_callback_time = av_gettime();
02143
02144 while (len > 0) {
02145 if (is->audio_buf_index >= is->audio_buf_size) {
02146 audio_size = audio_decode_frame(is, &pts);
02147 if (audio_size < 0) {
02148
02149 is->audio_buf = is->audio_buf1;
02150 is->audio_buf_size = 1024;
02151 memset(is->audio_buf, 0, is->audio_buf_size);
02152 } else {
02153 if (is->show_audio)
02154 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
02155 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
02156 pts);
02157 is->audio_buf_size = audio_size;
02158 }
02159 is->audio_buf_index = 0;
02160 }
02161 len1 = is->audio_buf_size - is->audio_buf_index;
02162 if (len1 > len)
02163 len1 = len;
02164 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
02165 len -= len1;
02166 stream += len1;
02167 is->audio_buf_index += len1;
02168 }
02169 }
02170
02171
/* Open the decoder for stream stream_index of is->ic and start the matching
 * consumer: SDL audio device, video thread, or subtitle thread.
 * Returns 0 on success, -1 on any failure. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* ask the decoder for at most stereo, since that is what SDL plays */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(avctx->codec_id);
    /* apply the command-line decoding options to the codec context */
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avcodec_thread_init(avctx, thread_count);

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* audio: open the SDL device before unpausing playback */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter for the A-V clock difference */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* corrections smaller than one SDL buffer are ignored */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
02270
/* Stop and tear down the consumer for stream stream_index, then close its
 * decoder and clear the corresponding VideoState fields.  Each branch first
 * aborts the packet queue and wakes any waiter so the worker can exit. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* closing the device stops the SDL callback thread */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           subtitle thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* stop demuxing this stream and release the decoder */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
02342
02343
02344
/* Player instance consulted by the I/O interrupt callback below; the
 * url_set_interrupt_cb API takes a parameterless callback, hence a global. */
static VideoState *global_video_state;

/* Called by lavf during blocking I/O; a non-zero return aborts the
 * operation so quit/teardown is not stuck behind a slow read. */
static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}
02351
02352
02353 static int decode_thread(void *arg)
02354 {
02355 VideoState *is = arg;
02356 AVFormatContext *ic;
02357 int err, i, ret;
02358 int st_index[AVMEDIA_TYPE_NB];
02359 int st_count[AVMEDIA_TYPE_NB]={0};
02360 int st_best_packet_count[AVMEDIA_TYPE_NB];
02361 AVPacket pkt1, *pkt = &pkt1;
02362 AVFormatParameters params, *ap = ¶ms;
02363 int eof=0;
02364 int pkt_in_play_range = 0;
02365
02366 ic = avformat_alloc_context();
02367
02368 memset(st_index, -1, sizeof(st_index));
02369 memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
02370 is->video_stream = -1;
02371 is->audio_stream = -1;
02372 is->subtitle_stream = -1;
02373
02374 global_video_state = is;
02375 url_set_interrupt_cb(decode_interrupt_cb);
02376
02377 memset(ap, 0, sizeof(*ap));
02378
02379 ap->prealloced_context = 1;
02380 ap->width = frame_width;
02381 ap->height= frame_height;
02382 ap->time_base= (AVRational){1, 25};
02383 ap->pix_fmt = frame_pix_fmt;
02384
02385 set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);
02386
02387 err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
02388 if (err < 0) {
02389 print_error(is->filename, err);
02390 ret = -1;
02391 goto fail;
02392 }
02393 is->ic = ic;
02394
02395 if(genpts)
02396 ic->flags |= AVFMT_FLAG_GENPTS;
02397
02398 err = av_find_stream_info(ic);
02399 if (err < 0) {
02400 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
02401 ret = -1;
02402 goto fail;
02403 }
02404 if(ic->pb)
02405 ic->pb->eof_reached= 0;
02406
02407 if(seek_by_bytes<0)
02408 seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
02409
02410
02411 if (start_time != AV_NOPTS_VALUE) {
02412 int64_t timestamp;
02413
02414 timestamp = start_time;
02415
02416 if (ic->start_time != AV_NOPTS_VALUE)
02417 timestamp += ic->start_time;
02418 ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
02419 if (ret < 0) {
02420 fprintf(stderr, "%s: could not seek to position %0.3f\n",
02421 is->filename, (double)timestamp / AV_TIME_BASE);
02422 }
02423 }
02424
02425 for(i = 0; i < ic->nb_streams; i++) {
02426 AVStream *st= ic->streams[i];
02427 AVCodecContext *avctx = st->codec;
02428 ic->streams[i]->discard = AVDISCARD_ALL;
02429 if(avctx->codec_type >= (unsigned)AVMEDIA_TYPE_NB)
02430 continue;
02431 if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
02432 continue;
02433
02434 if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
02435 continue;
02436 st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;
02437
02438 switch(avctx->codec_type) {
02439 case AVMEDIA_TYPE_AUDIO:
02440 if (!audio_disable)
02441 st_index[AVMEDIA_TYPE_AUDIO] = i;
02442 break;
02443 case AVMEDIA_TYPE_VIDEO:
02444 case AVMEDIA_TYPE_SUBTITLE:
02445 if (!video_disable)
02446 st_index[avctx->codec_type] = i;
02447 break;
02448 default:
02449 break;
02450 }
02451 }
02452 if (show_status) {
02453 dump_format(ic, 0, is->filename, 0);
02454 }
02455
02456
02457 if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
02458 stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
02459 }
02460
02461 ret=-1;
02462 if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
02463 ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
02464 }
02465 is->refresh_tid = SDL_CreateThread(refresh_thread, is);
02466 if(ret<0) {
02467 if (!display_disable)
02468 is->show_audio = 2;
02469 }
02470
02471 if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
02472 stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
02473 }
02474
02475 if (is->video_stream < 0 && is->audio_stream < 0) {
02476 fprintf(stderr, "%s: could not open codecs\n", is->filename);
02477 ret = -1;
02478 goto fail;
02479 }
02480
02481 for(;;) {
02482 if (is->abort_request)
02483 break;
02484 if (is->paused != is->last_paused) {
02485 is->last_paused = is->paused;
02486 if (is->paused)
02487 is->read_pause_return= av_read_pause(ic);
02488 else
02489 av_read_play(ic);
02490 }
02491 #if CONFIG_RTSP_DEMUXER
02492 if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
02493
02494
02495 SDL_Delay(10);
02496 continue;
02497 }
02498 #endif
02499 if (is->seek_req) {
02500 int64_t seek_target= is->seek_pos;
02501 int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
02502 int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
02503
02504
02505
02506 ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
02507 if (ret < 0) {
02508 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
02509 }else{
02510 if (is->audio_stream >= 0) {
02511 packet_queue_flush(&is->audioq);
02512 packet_queue_put(&is->audioq, &flush_pkt);
02513 }
02514 if (is->subtitle_stream >= 0) {
02515 packet_queue_flush(&is->subtitleq);
02516 packet_queue_put(&is->subtitleq, &flush_pkt);
02517 }
02518 if (is->video_stream >= 0) {
02519 packet_queue_flush(&is->videoq);
02520 packet_queue_put(&is->videoq, &flush_pkt);
02521 }
02522 }
02523 is->seek_req = 0;
02524 eof= 0;
02525 }
02526
02527
02528 if ( is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
02529 || ( (is->audioq .size > MIN_AUDIOQ_SIZE || is->audio_stream<0)
02530 && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream<0)
02531 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
02532
02533 SDL_Delay(10);
02534 continue;
02535 }
02536 if(url_feof(ic->pb) || eof) {
02537 if(is->video_stream >= 0){
02538 av_init_packet(pkt);
02539 pkt->data=NULL;
02540 pkt->size=0;
02541 pkt->stream_index= is->video_stream;
02542 packet_queue_put(&is->videoq, pkt);
02543 }
02544 SDL_Delay(10);
02545 if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
02546 if(loop!=1 && (!loop || --loop)){
02547 stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
02548 }else if(autoexit){
02549 ret=AVERROR_EOF;
02550 goto fail;
02551 }
02552 }
02553 continue;
02554 }
02555 ret = av_read_frame(ic, pkt);
02556 if (ret < 0) {
02557 if (ret == AVERROR_EOF)
02558 eof=1;
02559 if (url_ferror(ic->pb))
02560 break;
02561 SDL_Delay(100);
02562 continue;
02563 }
02564
02565 pkt_in_play_range = duration == AV_NOPTS_VALUE ||
02566 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
02567 av_q2d(ic->streams[pkt->stream_index]->time_base) -
02568 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
02569 <= ((double)duration/1000000);
02570 if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
02571 packet_queue_put(&is->audioq, pkt);
02572 } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
02573 packet_queue_put(&is->videoq, pkt);
02574 } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
02575 packet_queue_put(&is->subtitleq, pkt);
02576 } else {
02577 av_free_packet(pkt);
02578 }
02579 }
02580
02581 while (!is->abort_request) {
02582 SDL_Delay(100);
02583 }
02584
02585 ret = 0;
02586 fail:
02587
02588 global_video_state = NULL;
02589
02590
02591 if (is->audio_stream >= 0)
02592 stream_component_close(is, is->audio_stream);
02593 if (is->video_stream >= 0)
02594 stream_component_close(is, is->video_stream);
02595 if (is->subtitle_stream >= 0)
02596 stream_component_close(is, is->subtitle_stream);
02597 if (is->ic) {
02598 av_close_input_file(is->ic);
02599 is->ic = NULL;
02600 }
02601 url_set_interrupt_cb(NULL);
02602
02603 if (ret != 0) {
02604 SDL_Event event;
02605
02606 event.type = FF_QUIT_EVENT;
02607 event.user.data1 = is;
02608 SDL_PushEvent(&event);
02609 }
02610 return 0;
02611 }
02612
02613 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
02614 {
02615 VideoState *is;
02616
02617 is = av_mallocz(sizeof(VideoState));
02618 if (!is)
02619 return NULL;
02620 av_strlcpy(is->filename, filename, sizeof(is->filename));
02621 is->iformat = iformat;
02622 is->ytop = 0;
02623 is->xleft = 0;
02624
02625
02626 is->pictq_mutex = SDL_CreateMutex();
02627 is->pictq_cond = SDL_CreateCond();
02628
02629 is->subpq_mutex = SDL_CreateMutex();
02630 is->subpq_cond = SDL_CreateCond();
02631
02632 is->av_sync_type = av_sync_type;
02633 is->parse_tid = SDL_CreateThread(decode_thread, is);
02634 if (!is->parse_tid) {
02635 av_free(is);
02636 return NULL;
02637 }
02638 return is;
02639 }
02640
/* Tear down a VideoState created by stream_open(): stop the worker
   threads, free queued pictures, destroy the sync primitives and free
   the state itself. */
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;

    /* request abort, then join both worker threads before freeing
       anything they might still touch */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);
    SDL_WaitThread(is->refresh_tid, NULL);

    /* free all pictures still sitting in the display queue */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
#if CONFIG_AVFILTER
        if (vp->picref) {
            avfilter_unref_pic(vp->picref);
            vp->picref = NULL;
        }
#endif
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
02674
/* Switch to the next stream of the given media type, wrapping around.
   For subtitles an extra "off" state (index -1) is part of the cycle. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == AVMEDIA_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* nothing to cycle if no stream of this type is active
       (subtitles additionally allow starting from the off state) */
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
            {
                /* wrapped past the last stream: turn subtitles off */
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* came all the way around without finding an alternative */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that the candidate has usable parameters */
            switch(codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
02723
02724
02725 static void toggle_full_screen(void)
02726 {
02727 is_full_screen = !is_full_screen;
02728 if (!fs_screen_width) {
02729
02730
02731 }
02732 video_open(cur_stream);
02733 }
02734
02735 static void toggle_pause(void)
02736 {
02737 if (cur_stream)
02738 stream_pause(cur_stream);
02739 step = 0;
02740 }
02741
02742 static void step_to_next_frame(void)
02743 {
02744 if (cur_stream) {
02745
02746 if (cur_stream->paused)
02747 stream_pause(cur_stream);
02748 }
02749 step = 1;
02750 }
02751
/* Shut everything down and exit the process: close the stream first
   (joins worker threads), then free global option contexts, then quit
   SDL. Never returns. */
static void do_exit(void)
{
    int i;
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    for (i = 0; i < AVMEDIA_TYPE_NB; i++)
        av_free(avcodec_opts[i]);
    av_free(avformat_opts);
    av_free(sws_opts);
#if CONFIG_AVFILTER
    avfilter_uninit();
#endif
    if (show_status)
        printf("\n");
    SDL_Quit();
    exit(0);
}
02771
02772 static void toggle_audio_display(void)
02773 {
02774 if (cur_stream) {
02775 int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
02776 cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
02777 fill_rectangle(screen,
02778 cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
02779 bgcolor);
02780 SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
02781 }
02782 }
02783
02784
/* Main SDL event loop: dispatches keyboard, mouse, resize and the
   custom FF_* events. Runs forever; exit happens via do_exit(). */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: /* step to next frame */
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* byte-based seek: pick a current byte position
                           and scale the increment by the bitrate (or a
                           fallback constant when the bitrate is unknown) */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = url_ftell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        /* time-based seek relative to the master clock */
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                /* only drag-seeks: ignore motion without a pressed button */
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    uint64_t size= url_fsize(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    /* seek to the fraction of the total duration given by
                       the horizontal click position */
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}
02922
02923 static void opt_frame_size(const char *arg)
02924 {
02925 if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
02926 fprintf(stderr, "Incorrect frame size\n");
02927 exit(1);
02928 }
02929 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
02930 fprintf(stderr, "Frame size must be a multiple of 2\n");
02931 exit(1);
02932 }
02933 }
02934
02935 static int opt_width(const char *opt, const char *arg)
02936 {
02937 screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
02938 return 0;
02939 }
02940
02941 static int opt_height(const char *opt, const char *arg)
02942 {
02943 screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
02944 return 0;
02945 }
02946
02947 static void opt_format(const char *arg)
02948 {
02949 file_iformat = av_find_input_format(arg);
02950 if (!file_iformat) {
02951 fprintf(stderr, "Unknown input format: %s\n", arg);
02952 exit(1);
02953 }
02954 }
02955
02956 static void opt_frame_pix_fmt(const char *arg)
02957 {
02958 frame_pix_fmt = av_get_pix_fmt(arg);
02959 }
02960
02961 static int opt_sync(const char *opt, const char *arg)
02962 {
02963 if (!strcmp(arg, "audio"))
02964 av_sync_type = AV_SYNC_AUDIO_MASTER;
02965 else if (!strcmp(arg, "video"))
02966 av_sync_type = AV_SYNC_VIDEO_MASTER;
02967 else if (!strcmp(arg, "ext"))
02968 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
02969 else {
02970 fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
02971 exit(1);
02972 }
02973 return 0;
02974 }
02975
02976 static int opt_seek(const char *opt, const char *arg)
02977 {
02978 start_time = parse_time_or_die(opt, arg, 1);
02979 return 0;
02980 }
02981
02982 static int opt_duration(const char *opt, const char *arg)
02983 {
02984 duration = parse_time_or_die(opt, arg, 1);
02985 return 0;
02986 }
02987
02988 static int opt_debug(const char *opt, const char *arg)
02989 {
02990 av_log_set_level(99);
02991 debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
02992 return 0;
02993 }
02994
02995 static int opt_vismv(const char *opt, const char *arg)
02996 {
02997 debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
02998 return 0;
02999 }
03000
03001 static int opt_thread_count(const char *opt, const char *arg)
03002 {
03003 thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
03004 #if !HAVE_THREADS
03005 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
03006 #endif
03007 return 0;
03008 }
03009
/* Command-line option table; terminated by a NULL entry. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    /* display geometry */
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    /* stream selection */
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    /* playback range and seeking */
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    /* debugging / decoder tweaks */
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)", "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
    /* playback behaviour */
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vfilters", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};
03054
/* Print the one-line program synopsis. */
static void show_usage(void)
{
    printf("Simple media player\n");
    printf("usage: ffplay [options] input_file\n");
    printf("\n");
}
03061
/* Print usage, both option tables and the interactive key bindings. */
static void show_help(void)
{
    show_usage();
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC quit\n"
           "f toggle full screen\n"
           "p, SPC pause\n"
           "a cycle audio channel\n"
           "v cycle video channel\n"
           "t cycle subtitle channel\n"
           "w show audio waves\n"
           "s activate frame-step mode\n"
           "left/right seek backward/forward 10 seconds\n"
           "down/up seek backward/forward 1 minute\n"
           "mouse click seek to percentage in file corresponding to fraction of width\n"
           );
}
03083
03084 static void opt_input_file(const char *filename)
03085 {
03086 if (input_filename) {
03087 fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
03088 filename, input_filename);
03089 exit(1);
03090 }
03091 if (!strcmp(filename, "-"))
03092 filename = "pipe:";
03093 input_filename = filename;
03094 }
03095
03096
/* Program entry point: register libav components, parse options,
   initialise SDL, open the stream and run the event loop. */
int main(int argc, char **argv)
{
    int flags, i;

    /* register all codecs, demuxers, devices and (optionally) filters */
    avcodec_register_all();
    avdevice_register_all();
#if CONFIG_AVFILTER
    avfilter_register_all();
#endif
    av_register_all();

    /* allocate per-media-type codec option contexts */
    for(i=0; i<AVMEDIA_TYPE_NB; i++){
        avcodec_opts[i]= avcodec_alloc_context2(i);
    }
    avformat_opts = avformat_alloc_context();
#if !CONFIG_AVFILTER
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
#endif

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_usage();
        fprintf(stderr, "An input file must be specified\n");
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
        exit(1);
    }

    /* -nodisp implies no video decoding at all */
    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* not available on win32 or darwin builds */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        /* remember the desktop resolution for fullscreen mode */
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    /* marker packet pushed into the queues after a seek so the decoder
       threads know to flush their state */
    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns: event_loop() exits via do_exit() */

    return 0;
}