path: root/ffplay.c
Diffstat (limited to 'ffplay.c')
-rw-r--r--   ffplay.c   499
1 file changed, 235 insertions, 264 deletions
diff --git a/ffplay.c b/ffplay.c
index 73b30c400b..599b288529 100644
--- a/ffplay.c
+++ b/ffplay.c
@@ -1,21 +1,21 @@
/*
- * ffplay : Simple Media Player based on the Libav libraries
+ * ffplay : Simple Media Player based on the FFmpeg libraries
* Copyright (c) 2003 Fabrice Bellard
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -30,6 +30,7 @@
#include "libavutil/dict.h"
#include "libavutil/parseutils.h"
#include "libavutil/samplefmt.h"
+#include "libavutil/avassert.h"
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
@@ -38,18 +39,16 @@
#include "libavcodec/avfft.h"
#if CONFIG_AVFILTER
+# include "libavfilter/avcodec.h"
# include "libavfilter/avfilter.h"
# include "libavfilter/avfiltergraph.h"
+# include "libavfilter/vsink_buffer.h"
#endif
-#include "cmdutils.h"
-
#include <SDL.h>
#include <SDL_thread.h>
-#ifdef __MINGW32__
-#undef main /* We don't want SDL to override our main() */
-#endif
+#include "cmdutils.h"
#include <unistd.h>
#include <assert.h>
@@ -121,7 +120,7 @@ enum {
};
typedef struct VideoState {
- SDL_Thread *parse_tid;
+ SDL_Thread *read_tid;
SDL_Thread *video_tid;
SDL_Thread *refresh_tid;
AVInputFormat *iformat;
@@ -135,7 +134,6 @@ typedef struct VideoState {
int64_t seek_rel;
int read_pause_return;
AVFormatContext *ic;
- int dtg_active_format;
int audio_stream;
@@ -163,7 +161,9 @@ typedef struct VideoState {
enum AVSampleFormat audio_src_fmt;
AVAudioConvert *reformat_ctx;
- int show_audio; /* if true, display audio samples */
+ enum ShowMode {
+ SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
+ } show_mode;
int16_t sample_array[SAMPLE_ARRAY_SIZE];
int sample_array_index;
int last_i_start;
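The tristate int show_audio (0 = video, 1 = waveform, 2 = RDFT spectrum) is replaced by a named enum stored in VideoState. A standalone sketch of the wrap-around cycling that toggle_audio_display() performs later in this patch, with the enum copied from the struct above:

    enum ShowMode { SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0,
                    SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB };

    /* VIDEO -> WAVES -> RDFT -> VIDEO; SHOW_MODE_NONE is only the
     * "not chosen yet" default that read_thread() resolves */
    static enum ShowMode next_mode(enum ShowMode m)
    {
        return (m + 1) % SHOW_MODE_NB;
    }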
@@ -200,12 +200,9 @@ typedef struct VideoState {
struct SwsContext *img_convert_ctx;
#endif
- // QETimer *video_timer;
char filename[1024];
int width, height, xleft, ytop;
- PtsCorrectionContext pts_ctx;
-
#if CONFIG_AVFILTER
AVFilterContext *out_video_filter; ///<the last filter in the video chain
#endif
@@ -215,7 +212,7 @@ typedef struct VideoState {
int refresh;
} VideoState;
-static void show_help(void);
+static int opt_help(const char *opt, const char *arg);
/* options specified by the user */
static AVInputFormat *file_iformat;
@@ -241,8 +238,6 @@ static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
-static int debug = 0;
-static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
@@ -260,7 +255,8 @@ static int autoexit;
static int exit_on_keydown;
static int exit_on_mousedown;
static int loop=1;
-static int framedrop=1;
+static int framedrop=-1;
+static enum ShowMode show_mode = SHOW_MODE_NONE;
static int rdftspeed=20;
#if CONFIG_AVFILTER
@@ -280,7 +276,37 @@ static AVPacket flush_pkt;
static SDL_Surface *screen;
-static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
+static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
+{
+ AVPacketList *pkt1;
+
+ /* duplicate the packet */
+ if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
+ return -1;
+
+ pkt1 = av_malloc(sizeof(AVPacketList));
+ if (!pkt1)
+ return -1;
+ pkt1->pkt = *pkt;
+ pkt1->next = NULL;
+
+
+ SDL_LockMutex(q->mutex);
+
+ if (!q->last_pkt)
+
+ q->first_pkt = pkt1;
+ else
+ q->last_pkt->next = pkt1;
+ q->last_pkt = pkt1;
+ q->nb_packets++;
+ q->size += pkt1->pkt.size + sizeof(*pkt1);
+ /* XXX: should duplicate packet data in DV case */
+ SDL_CondSignal(q->cond);
+
+ SDL_UnlockMutex(q->mutex);
+ return 0;
+}
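For orientation, the queue above pairs with a blocking consumer: the existing packet_queue_get() (not modified in the hunks shown here) waits on the same condition variable that SDL_CondSignal() wakes. A minimal sketch of that consumer side, illustrative only and not the code in ffplay.c:

    /* minimal consumer sketch for the queue above -- illustrative only */
    static int queue_get_sketch(PacketQueue *q, AVPacket *pkt)
    {
        AVPacketList *pkt1;
        int ret = -1;

        SDL_LockMutex(q->mutex);
        while (!(pkt1 = q->first_pkt) && !q->abort_request)
            SDL_CondWait(q->cond, q->mutex);   /* woken by SDL_CondSignal() in put */
        if (pkt1) {
            q->first_pkt = pkt1->next;
            if (!q->first_pkt)
                q->last_pkt = NULL;
            q->nb_packets--;
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
            *pkt = pkt1->pkt;
            av_free(pkt1);
            ret = 1;
        }
        SDL_UnlockMutex(q->mutex);
        return ret;
    }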
/* packet queue handling */
static void packet_queue_init(PacketQueue *q)
@@ -315,38 +341,6 @@ static void packet_queue_end(PacketQueue *q)
SDL_DestroyCond(q->cond);
}
-static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
-{
- AVPacketList *pkt1;
-
- /* duplicate the packet */
- if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
- return -1;
-
- pkt1 = av_malloc(sizeof(AVPacketList));
- if (!pkt1)
- return -1;
- pkt1->pkt = *pkt;
- pkt1->next = NULL;
-
-
- SDL_LockMutex(q->mutex);
-
- if (!q->last_pkt)
-
- q->first_pkt = pkt1;
- else
- q->last_pkt->next = pkt1;
- q->last_pkt = pkt1;
- q->nb_packets++;
- q->size += pkt1->pkt.size + sizeof(*pkt1);
- /* XXX: should duplicate packet data in DV case */
- SDL_CondSignal(q->cond);
-
- SDL_UnlockMutex(q->mutex);
- return 0;
-}
-
static void packet_queue_abort(PacketQueue *q)
{
SDL_LockMutex(q->mutex);
@@ -652,10 +646,10 @@ static void video_image_display(VideoState *is)
vp = &is->pictq[is->pictq_rindex];
if (vp->bmp) {
#if CONFIG_AVFILTER
- if (vp->picref->video->pixel_aspect.num == 0)
+ if (vp->picref->video->sample_aspect_ratio.num == 0)
aspect_ratio = 0;
else
- aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
+ aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
#else
/* XXX: use variable in the frame */
@@ -670,14 +664,11 @@ static void video_image_display(VideoState *is)
aspect_ratio = 1.0;
aspect_ratio *= (float)vp->width / (float)vp->height;
- if (is->subtitle_st)
- {
- if (is->subpq_size > 0)
- {
+ if (is->subtitle_st) {
+ if (is->subpq_size > 0) {
sp = &is->subpq[is->subpq_rindex];
- if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
- {
+ if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
SDL_LockYUVOverlay (vp->bmp);
pict.data[0] = vp->bmp->pixels[0];
@@ -710,8 +701,8 @@ static void video_image_display(VideoState *is)
is->no_background = 0;
rect.x = is->xleft + x;
rect.y = is->ytop + y;
- rect.w = width;
- rect.h = height;
+ rect.w = FFMAX(width, 1);
+ rect.h = FFMAX(height, 1);
SDL_DisplayYUVOverlay(vp->bmp, &rect);
}
}
@@ -725,11 +716,7 @@ static int audio_write_get_buf_size(VideoState *is)
static inline int compute_mod(int a, int b)
{
- a = a % b;
- if (a >= 0)
- return a;
- else
- return a + b;
+ return a < 0 ? a%b + b : a%b;
}
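The rewritten one-liner leans on C's remainder keeping the sign of the dividend, so adding b once restores a non-negative index. A quick standalone check with illustrative values:

    /* -3 % 16 == -3 in C, so the a < 0 branch adds b to get the
     * positive residue the sample-array indexing expects */
    #include <assert.h>

    static inline int compute_mod(int a, int b)
    {
        return a < 0 ? a % b + b : a % b;
    }

    int main(void)
    {
        assert(compute_mod( 35, 16) ==  3);
        assert(compute_mod( -3, 16) == 13);
        assert(compute_mod(-35, 16) == 13);
        return 0;
    }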
static void video_audio_display(VideoState *s)
@@ -747,7 +734,7 @@ static void video_audio_display(VideoState *s)
channels = s->audio_st->codec->channels;
nb_display_channels = channels;
if (!s->paused) {
- int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
+ int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
n = 2 * channels;
delay = audio_write_get_buf_size(s);
delay /= n;
@@ -764,7 +751,7 @@ static void video_audio_display(VideoState *s)
delay = data_used;
i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
- if(s->show_audio==1){
+ if (s->show_mode == SHOW_MODE_WAVES) {
h= INT_MIN;
for(i=0; i<1000; i+=channels){
int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
@@ -786,7 +773,7 @@ static void video_audio_display(VideoState *s)
}
bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
- if(s->show_audio==1){
+ if (s->show_mode == SHOW_MODE_WAVES) {
fill_rectangle(screen,
s->xleft, s->ytop, s->width, s->height,
bgcolor);
@@ -871,6 +858,57 @@ static void video_audio_display(VideoState *s)
}
}
+static void stream_close(VideoState *is)
+{
+ VideoPicture *vp;
+ int i;
+ /* XXX: use a special url_shutdown call to abort parse cleanly */
+ is->abort_request = 1;
+ SDL_WaitThread(is->read_tid, NULL);
+ SDL_WaitThread(is->refresh_tid, NULL);
+
+ /* free all pictures */
+ for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
+ vp = &is->pictq[i];
+#if CONFIG_AVFILTER
+ if (vp->picref) {
+ avfilter_unref_buffer(vp->picref);
+ vp->picref = NULL;
+ }
+#endif
+ if (vp->bmp) {
+ SDL_FreeYUVOverlay(vp->bmp);
+ vp->bmp = NULL;
+ }
+ }
+ SDL_DestroyMutex(is->pictq_mutex);
+ SDL_DestroyCond(is->pictq_cond);
+ SDL_DestroyMutex(is->subpq_mutex);
+ SDL_DestroyCond(is->subpq_cond);
+#if !CONFIG_AVFILTER
+ if (is->img_convert_ctx)
+ sws_freeContext(is->img_convert_ctx);
+#endif
+ av_free(is);
+}
+
+static void do_exit(void)
+{
+ if (cur_stream) {
+ stream_close(cur_stream);
+ cur_stream = NULL;
+ }
+ uninit_opts();
+#if CONFIG_AVFILTER
+ avfilter_uninit();
+#endif
+ if (show_status)
+ printf("\n");
+ SDL_Quit();
+ av_log(NULL, AV_LOG_QUIET, "%s", "");
+ exit(0);
+}
+
static int video_open(VideoState *is){
int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
int w,h;
@@ -909,7 +947,7 @@ static int video_open(VideoState *is){
#endif
if (!screen) {
fprintf(stderr, "SDL: could not set video mode - exiting\n");
- return -1;
+ do_exit();
}
if (!window_title)
window_title = input_filename;
@@ -926,7 +964,7 @@ static void video_display(VideoState *is)
{
if(!screen)
video_open(cur_stream);
- if (is->audio_st && is->show_audio)
+ if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
video_audio_display(is);
else if (is->video_st)
video_image_display(is);
@@ -943,7 +981,8 @@ static int refresh_thread(void *opaque)
is->refresh=1;
SDL_PushEvent(&event);
}
- usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
+ //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
+ usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
}
return 0;
}
@@ -1018,7 +1057,7 @@ static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_by
}
/* pause or resume the video */
-static void stream_pause(VideoState *is)
+static void stream_toggle_pause(VideoState *is)
{
if (is->paused) {
is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
@@ -1071,7 +1110,7 @@ static double compute_target_time(double frame_current_pts, VideoState *is)
}
/* called to display each frame */
-static void video_refresh_timer(void *opaque)
+static void video_refresh(void *opaque)
{
VideoState *is = opaque;
VideoPicture *vp;
@@ -1101,7 +1140,7 @@ retry:
}else{
next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
}
- if(framedrop && time > next_target){
+ if((framedrop>0 || (framedrop && is->audio_st)) && time > next_target){
is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
if(is->pictq_size > 1 || time > next_target + 0.5){
/* update queue size and signal for next picture */
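Together with the default change from 1 to -1 earlier in this patch, the drop test above now has an auto mode. A hedged restatement as a standalone helper, not code from this file:

    /* framedrop > 0  : always allow dropping late frames
     * framedrop == 0 : never drop
     * framedrop < 0  : (new default -1) drop only when an audio stream
     *                  is present to sync against */
    static int may_drop_frame(int framedrop, int has_audio_stream)
    {
        return framedrop > 0 || (framedrop && has_audio_stream);
    }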
@@ -1204,64 +1243,20 @@ retry:
if (is->audio_st && is->video_st)
av_diff = get_audio_clock(is) - get_video_clock(is);
printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
- get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
+ get_master_clock(is),
+ av_diff,
+ FFMAX(is->skip_frames-1, 0),
+ aqsize / 1024,
+ vqsize / 1024,
+ sqsize,
+ is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
+ is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
fflush(stdout);
last_time = cur_time;
}
}
}
-static void stream_close(VideoState *is)
-{
- VideoPicture *vp;
- int i;
- /* XXX: use a special url_shutdown call to abort parse cleanly */
- is->abort_request = 1;
- SDL_WaitThread(is->parse_tid, NULL);
- SDL_WaitThread(is->refresh_tid, NULL);
-
- /* free all pictures */
- for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
- vp = &is->pictq[i];
-#if CONFIG_AVFILTER
- if (vp->picref) {
- avfilter_unref_buffer(vp->picref);
- vp->picref = NULL;
- }
-#endif
- if (vp->bmp) {
- SDL_FreeYUVOverlay(vp->bmp);
- vp->bmp = NULL;
- }
- }
- SDL_DestroyMutex(is->pictq_mutex);
- SDL_DestroyCond(is->pictq_cond);
- SDL_DestroyMutex(is->subpq_mutex);
- SDL_DestroyCond(is->subpq_cond);
-#if !CONFIG_AVFILTER
- if (is->img_convert_ctx)
- sws_freeContext(is->img_convert_ctx);
-#endif
- av_free(is);
-}
-
-static void do_exit(void)
-{
- if (cur_stream) {
- stream_close(cur_stream);
- cur_stream = NULL;
- }
- uninit_opts();
-#if CONFIG_AVFILTER
- avfilter_uninit();
-#endif
- if (show_status)
- printf("\n");
- SDL_Quit();
- av_log(NULL, AV_LOG_QUIET, "");
- exit(0);
-}
-
/* allocate a picture (needs to do that in main thread to avoid
potential locking problems */
static void alloc_picture(void *opaque)
@@ -1306,18 +1301,31 @@ static void alloc_picture(void *opaque)
SDL_UnlockMutex(is->pictq_mutex);
}
-/**
- *
- * @param pts the dts of the pkt / pts of the frame and guessed if not known
- */
-static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
+static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
{
VideoPicture *vp;
-#if CONFIG_AVFILTER
- AVPicture pict_src;
-#else
- int dst_pix_fmt = PIX_FMT_YUV420P;
+ double frame_delay, pts = pts1;
+
+ /* compute the exact PTS for the picture if it is omitted in the stream
+ * pts1 is the dts of the pkt / pts of the frame */
+ if (pts != 0) {
+ /* update video clock with pts, if present */
+ is->video_clock = pts;
+ } else {
+ pts = is->video_clock;
+ }
+ /* update video clock for next frame */
+ frame_delay = av_q2d(is->video_st->codec->time_base);
+ /* for MPEG2, the frame can be repeated, so we update the
+ clock accordingly */
+ frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
+ is->video_clock += frame_delay;
+
+#if defined(DEBUG_SYNC) && 0
+ printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
+ av_get_picture_type_char(src_frame->pict_type), pts, pts1);
#endif
+
/* wait until we have space to put a new picture */
SDL_LockMutex(is->pictq_mutex);
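The clock bookkeeping formerly done in output_picture2() (removed further down) now sits at the top of queue_picture(). A worked example with illustrative numbers, not values from a real stream:

    /* with a codec time_base of 1/25 (25 fps), frame_delay is 0.04 s;
     * repeat_pict == 1 adds half a frame, so the clock advances by 0.06 s */
    static double advance_video_clock(double video_clock, double time_base_d,
                                      int repeat_pict)
    {
        double frame_delay = time_base_d;
        frame_delay += repeat_pict * (frame_delay * 0.5);
        return video_clock + frame_delay;
    }
    /* advance_video_clock(10.0, 1.0 / 25, 1) -> 10.06 */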
@@ -1387,22 +1395,14 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t
pict.linesize[2] = vp->bmp->pitches[1];
#if CONFIG_AVFILTER
- pict_src.data[0] = src_frame->data[0];
- pict_src.data[1] = src_frame->data[1];
- pict_src.data[2] = src_frame->data[2];
-
- pict_src.linesize[0] = src_frame->linesize[0];
- pict_src.linesize[1] = src_frame->linesize[1];
- pict_src.linesize[2] = src_frame->linesize[2];
-
//FIXME use direct rendering
- av_picture_copy(&pict, &pict_src,
+ av_picture_copy(&pict, (AVPicture *)src_frame,
vp->pix_fmt, vp->width, vp->height);
#else
sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
- dst_pix_fmt, sws_flags, NULL, NULL, NULL);
+ PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
if (is->img_convert_ctx == NULL) {
fprintf(stderr, "Cannot initialize the conversion context\n");
exit(1);
@@ -1428,35 +1428,9 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t
return 0;
}
-/**
- * compute the exact PTS for the picture if it is omitted in the stream
- * @param pts1 the dts of the pkt / pts of the frame
- */
-static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
-{
- double frame_delay, pts;
-
- pts = pts1;
-
- if (pts != 0) {
- /* update video clock with pts, if present */
- is->video_clock = pts;
- } else {
- pts = is->video_clock;
- }
- /* update video clock for next frame */
- frame_delay = av_q2d(is->video_st->codec->time_base);
- /* for MPEG2, the frame can be repeated, so we update the
- clock accordingly */
- frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
- is->video_clock += frame_delay;
-
- return queue_picture(is, src_frame, pts, pos);
-}
-
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
- int len1, got_picture, i;
+ int len1 av_unused, got_picture, i;
if (packet_queue_get(&is->videoq, pkt, 1) < 0)
return -1;
@@ -1475,7 +1449,6 @@ static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacke
is->video_current_pos = -1;
SDL_UnlockMutex(is->pictq_mutex);
- init_pts_correction(&is->pts_ctx);
is->frame_last_pts = AV_NOPTS_VALUE;
is->frame_last_delay = 0;
is->frame_timer = (double)av_gettime() / 1000000.0;
@@ -1490,7 +1463,7 @@ static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacke
if (got_picture) {
if (decoder_reorder_pts == -1) {
- *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
+ *pts = frame->best_effort_timestamp;
} else if (decoder_reorder_pts) {
*pts = frame->pkt_pts;
} else {
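With the per-stream PtsCorrectionContext gone, the default drp mode (-1) now takes libavcodec's best_effort_timestamp directly. A hedged restatement of the selection above; the final else branch lies outside this hunk and is unchanged, so it is only stubbed here:

    /* illustrative only -- not the code in ffplay.c */
    static int64_t pick_pts_sketch(const AVFrame *frame, int decoder_reorder_pts)
    {
        if (decoder_reorder_pts == -1)
            return frame->best_effort_timestamp; /* decoder's own estimate */
        if (decoder_reorder_pts)
            return frame->pkt_pts;
        return AV_NOPTS_VALUE;                   /* stand-in for the unchanged branch */
    }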
@@ -1527,6 +1500,8 @@ static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
unsigned edge;
int pixel_size;
+ av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
+
if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
perms |= AV_PERM_NEG_LINESIZES;
@@ -1539,6 +1514,10 @@ static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
w = codec->width;
h = codec->height;
+
+ if(av_image_check_size(w, h, 0, codec))
+ return -1;
+
avcodec_align_dimensions2(codec, &w, &h, stride);
edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
w += edge << 1;
@@ -1605,7 +1584,9 @@ static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
priv->is = opaque;
codec = priv->is->video_st->codec;
codec->opaque = ctx;
- if(codec->codec->capabilities & CODEC_CAP_DR1) {
+ if((codec->codec->capabilities & CODEC_CAP_DR1)
+ ) {
+ av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
priv->use_dr1 = 1;
codec->get_buffer = input_get_buffer;
codec->release_buffer = input_release_buffer;
@@ -1637,7 +1618,7 @@ static int input_request_frame(AVFilterLink *link)
if (ret < 0)
return -1;
- if(priv->use_dr1) {
+ if(priv->use_dr1 && priv->frame->opaque) {
picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
} else {
picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
@@ -1647,9 +1628,9 @@ static int input_request_frame(AVFilterLink *link)
}
av_free_packet(&pkt);
+ avfilter_copy_frame_props(picref, priv->frame);
picref->pts = pts;
- picref->pos = pkt.pos;
- picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
+
avfilter_start_frame(link, picref);
avfilter_draw_slice(link, 0, link->h, 1);
avfilter_end_frame(link);
@@ -1664,7 +1645,7 @@ static int input_query_formats(AVFilterContext *ctx)
priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
};
- avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
+ avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
return 0;
}
@@ -1703,7 +1684,7 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
{
char sws_flags_str[128];
int ret;
- FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
+ enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
AVFilterContext *filt_src = NULL, *filt_out = NULL;
snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
graph->scale_sws_opts = av_strdup(sws_flags_str);
@@ -1711,13 +1692,13 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
NULL, is, graph)) < 0)
goto the_end;
- if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
- NULL, &ffsink_ctx, graph)) < 0)
+ if ((ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
+ NULL, pix_fmts, graph)) < 0)
goto the_end;
if(vfilters) {
- AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
- AVFilterInOut *inputs = av_malloc(sizeof(AVFilterInOut));
+ AVFilterInOut *outputs = avfilter_inout_alloc();
+ AVFilterInOut *inputs = avfilter_inout_alloc();
outputs->name = av_strdup("in");
outputs->filter_ctx = filt_src;
@@ -1729,7 +1710,7 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
inputs->pad_idx = 0;
inputs->next = NULL;
- if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
+ if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
goto the_end;
av_freep(&vfilters);
} else {
@@ -1751,14 +1732,13 @@ static int video_thread(void *arg)
{
VideoState *is = arg;
AVFrame *frame= avcodec_alloc_frame();
- int64_t pts_int;
+ int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
double pts;
int ret;
#if CONFIG_AVFILTER
AVFilterGraph *graph = avfilter_graph_alloc();
AVFilterContext *filt_out = NULL;
- int64_t pos;
if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
goto the_end;
@@ -1770,13 +1750,14 @@ static int video_thread(void *arg)
AVPacket pkt;
#else
AVFilterBufferRef *picref;
- AVRational tb;
+ AVRational tb = filt_out->inputs[0]->time_base;
#endif
while (is->paused && !is->videoq.abort_request)
SDL_Delay(10);
#if CONFIG_AVFILTER
- ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
+ ret = av_vsink_buffer_get_video_buffer_ref(filt_out, &picref, 0);
if (picref) {
+ avfilter_fill_frame_from_video_buffer_ref(frame, picref);
pts_int = picref->pts;
pos = picref->pos;
frame->opaque = picref;
@@ -1792,27 +1773,25 @@ static int video_thread(void *arg)
}
#else
ret = get_video_frame(is, frame, &pts_int, &pkt);
+ pos = pkt.pos;
+ av_free_packet(&pkt);
#endif
if (ret < 0) goto the_end;
- if (!ret)
+ if (!picref)
continue;
pts = pts_int*av_q2d(is->video_st->time_base);
-#if CONFIG_AVFILTER
- ret = output_picture2(is, frame, pts, pos);
-#else
- ret = output_picture2(is, frame, pts, pkt.pos);
- av_free_packet(&pkt);
-#endif
+ ret = queue_picture(is, frame, pts, pos);
+
if (ret < 0)
goto the_end;
if (step)
if (cur_stream)
- stream_pause(cur_stream);
+ stream_toggle_pause(cur_stream);
}
the_end:
#if CONFIG_AVFILTER
@@ -1827,7 +1806,7 @@ static int subtitle_thread(void *arg)
VideoState *is = arg;
SubPicture *sp;
AVPacket pkt1, *pkt = &pkt1;
- int len1, got_subtitle;
+ int len1 av_unused, got_subtitle;
double pts;
int i, j;
int r, g, b, y, u, v, a;
@@ -1971,9 +1950,11 @@ static int synchronize_audio(VideoState *is, short *samples,
samples_size = wanted_size;
}
}
- av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
- diff, avg_diff, samples_size - samples_size1,
- is->audio_clock, is->video_clock, is->audio_diff_threshold);
+#if 0
+ printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
+ diff, avg_diff, samples_size - samples_size1,
+ is->audio_clock, is->video_clock, is->audio_diff_threshold);
+#endif
}
} else {
/* too big difference : may be initial PTS errors, so
@@ -2107,7 +2088,7 @@ static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
is->audio_buf_size = 1024;
memset(is->audio_buf, 0, is->audio_buf_size);
} else {
- if (is->show_audio)
+ if (is->show_mode != SHOW_MODE_VIDEO)
update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
pts);
@@ -2147,8 +2128,9 @@ static int stream_component_open(VideoState *is, int stream_index)
}
codec = avcodec_find_decoder(avctx->codec_id);
- avctx->debug_mv = debug_mv;
- avctx->debug = debug;
+ if (!codec)
+ return -1;
+
avctx->workaround_bugs = workaround_bugs;
avctx->lowres = lowres;
if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
@@ -2163,12 +2145,18 @@ static int stream_component_open(VideoState *is, int stream_index)
set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
- if (!codec ||
- avcodec_open(avctx, codec) < 0)
+ if(codec->capabilities & CODEC_CAP_DR1)
+ avctx->flags |= CODEC_FLAG_EMU_EDGE;
+
+ if (avcodec_open(avctx, codec) < 0)
return -1;
/* prepare audio output */
if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
+ if(avctx->sample_rate <= 0 || avctx->channels <= 0){
+ fprintf(stderr, "Invalid sample rate or channel count\n");
+ return -1;
+ }
wanted_spec.freq = avctx->sample_rate;
wanted_spec.format = AUDIO_S16SYS;
wanted_spec.channels = avctx->channels;
@@ -2305,7 +2293,7 @@ static int decode_interrupt_cb(void)
}
/* this thread gets the stream from the disk or the network */
-static int decode_thread(void *arg)
+static int read_thread(void *arg)
{
VideoState *is = arg;
AVFormatContext *ic = NULL;
@@ -2340,23 +2328,6 @@ static int decode_thread(void *arg)
if(genpts)
ic->flags |= AVFMT_FLAG_GENPTS;
- /* Set AVCodecContext options so they will be seen by av_find_stream_info() */
- for (i = 0; i < ic->nb_streams; i++) {
- AVCodecContext *dec = ic->streams[i]->codec;
- switch (dec->codec_type) {
- case AVMEDIA_TYPE_AUDIO:
- set_context_opts(dec, avcodec_opts[AVMEDIA_TYPE_AUDIO],
- AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM,
- NULL);
- break;
- case AVMEDIA_TYPE_VIDEO:
- set_context_opts(dec, avcodec_opts[AVMEDIA_TYPE_VIDEO],
- AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM,
- NULL);
- break;
- }
- }
-
err = av_find_stream_info(ic);
if (err < 0) {
fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
@@ -2408,6 +2379,8 @@ static int decode_thread(void *arg)
av_dump_format(ic, 0, is->filename, 0);
}
+ is->show_mode = show_mode;
+
/* open the streams */
if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
@@ -2418,10 +2391,8 @@ static int decode_thread(void *arg)
ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
}
is->refresh_tid = SDL_CreateThread(refresh_thread, is);
- if(ret<0) {
- if (!display_disable)
- is->show_audio = 2;
- }
+ if (is->show_mode == SHOW_MODE_NONE)
+ is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
@@ -2505,11 +2476,12 @@ static int decode_thread(void *arg)
goto fail;
}
}
+ eof=0;
continue;
}
ret = av_read_frame(ic, pkt);
if (ret < 0) {
- if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
+ if (ret == AVERROR_EOF || url_feof(ic->pb))
eof=1;
if (ic->pb && ic->pb->error)
break;
@@ -2585,8 +2557,8 @@ static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
is->subpq_cond = SDL_CreateCond();
is->av_sync_type = av_sync_type;
- is->parse_tid = SDL_CreateThread(decode_thread, is);
- if (!is->parse_tid) {
+ is->read_tid = SDL_CreateThread(read_thread, is);
+ if (!is->read_tid) {
av_free(is);
return NULL;
}
@@ -2652,7 +2624,7 @@ static void toggle_full_screen(void)
static void toggle_pause(void)
{
if (cur_stream)
- stream_pause(cur_stream);
+ stream_toggle_pause(cur_stream);
step = 0;
}
@@ -2661,7 +2633,7 @@ static void step_to_next_frame(void)
if (cur_stream) {
/* if the stream is paused unpause it, then step */
if (cur_stream->paused)
- stream_pause(cur_stream);
+ stream_toggle_pause(cur_stream);
}
step = 1;
}
@@ -2670,7 +2642,7 @@ static void toggle_audio_display(void)
{
if (cur_stream) {
int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
- cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
+ cur_stream->show_mode = (cur_stream->show_mode + 1) % SHOW_MODE_NB;
fill_rectangle(screen,
cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
bgcolor);
@@ -2816,7 +2788,7 @@ static void event_loop(void)
alloc_picture(event.user.data1);
break;
case FF_REFRESH_EVENT:
- video_refresh_timer(event.user.data1);
+ video_refresh(event.user.data1);
cur_stream->refresh=0;
break;
default:
@@ -2893,25 +2865,34 @@ static int opt_duration(const char *opt, const char *arg)
return 0;
}
-static int opt_debug(const char *opt, const char *arg)
+static int opt_thread_count(const char *opt, const char *arg)
{
- av_log_set_level(99);
- debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
+ thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
+#if !HAVE_THREADS
+ fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
+#endif
return 0;
}
-static int opt_vismv(const char *opt, const char *arg)
+static int opt_show_mode(const char *opt, const char *arg)
{
- debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
+ show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
+ !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
+ !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
+ parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
return 0;
}
-static int opt_thread_count(const char *opt, const char *arg)
+static int opt_input_file(const char *opt, const char *filename)
{
- thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
-#if !HAVE_THREADS
- fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
-#endif
+ if (input_filename) {
+ fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
+ filename, input_filename);
+ exit(1);
+ }
+ if (!strcmp(filename, "-"))
+ filename = "pipe:";
+ input_filename = filename;
return 0;
}
@@ -2933,9 +2914,7 @@ static const OptionDef options[] = {
{ "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
{ "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
{ "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
- { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
{ "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
- { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
{ "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
{ "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
{ "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
@@ -2958,8 +2937,9 @@ static const OptionDef options[] = {
{ "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
{ "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
+ { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
{ "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
- { "i", 0, {NULL}, "ffmpeg compatibility dummy option", ""},
+ { "i", HAS_ARG, {(void *)opt_input_file}, "read specified file", "input_file"},
{ NULL, },
};
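For reference, hypothetical invocations of the two option changes above (file names are placeholders): -showmode takes a mode name or the numeric value it maps to, and -i now reads the given file instead of acting as an ffmpeg-compatibility dummy:

    ffplay -showmode waves input.wav     (same as: ffplay -showmode 1 input.wav)
    ffplay -i input.mkv                  (the file is read via -i)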
@@ -2970,7 +2950,7 @@ static void show_usage(void)
printf("\n");
}
-static void show_help(void)
+static int opt_help(const char *opt, const char *arg)
{
av_log_set_callback(log_callback_help);
show_usage();
@@ -3002,18 +2982,7 @@ static void show_help(void)
"down/up seek backward/forward 1 minute\n"
"mouse click seek to percentage in file corresponding to fraction of width\n"
);
-}
-
-static void opt_input_file(const char *filename)
-{
- if (input_filename) {
- fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
- filename, input_filename);
- exit(1);
- }
- if (!strcmp(filename, "-"))
- filename = "pipe:";
- input_filename = filename;
+ return 0;
}
/* Called from the main */
@@ -3050,6 +3019,8 @@ int main(int argc, char **argv)
video_disable = 1;
}
flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
+ if (audio_disable)
+ flags &= ~SDL_INIT_AUDIO;
#if !defined(__MINGW32__) && !defined(__APPLE__)
flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif