-rw-r--r--  cmdutils.c | 8
-rwxr-xr-x  configure | 35
-rw-r--r--  doc/APIchanges | 4
-rw-r--r--  doc/demuxers.texi | 6
-rw-r--r--  doc/faq.texi | 38
-rw-r--r--  doc/ffprobe.xsd | 1
-rw-r--r--  doc/filters.texi | 27
-rw-r--r--  doc/scaler.texi | 20
-rw-r--r--  doc/t2h.init | 2
-rw-r--r--  ffmpeg.c | 2
-rw-r--r--  ffmpeg_opt.c | 8
-rw-r--r--  ffprobe.c | 9
-rw-r--r--  libavcodec/aac_adtstoasc_bsf.c | 6
-rw-r--r--  libavcodec/alac.c | 3
-rw-r--r--  libavcodec/atrac.c | 82
-rw-r--r--  libavcodec/atrac.h | 67
-rw-r--r--  libavcodec/atrac3.c | 105
-rw-r--r--  libavcodec/avcodec.h | 2
-rw-r--r--  libavcodec/chomp_bsf.c | 5
-rw-r--r--  libavcodec/dump_extradata_bsf.c | 5
-rw-r--r--  libavcodec/exif.c | 2
-rw-r--r--  libavcodec/fraps.c | 5
-rw-r--r--  libavcodec/get_bits.h | 1
-rw-r--r--  libavcodec/h264.c | 2
-rw-r--r--  libavcodec/h264_mp4toannexb_bsf.c | 6
-rw-r--r--  libavcodec/imgconvert.c | 3
-rw-r--r--  libavcodec/imx_dump_header_bsf.c | 5
-rw-r--r--  libavcodec/mjpega_dump_header_bsf.c | 5
-rw-r--r--  libavcodec/movsub_bsf.c | 10
-rw-r--r--  libavcodec/mp3_header_compress_bsf.c | 5
-rw-r--r--  libavcodec/mp3_header_decompress_bsf.c | 5
-rw-r--r--  libavcodec/noise_bsf.c | 6
-rw-r--r--  libavcodec/pcx.c | 10
-rw-r--r--  libavcodec/pngdec.c | 6
-rw-r--r--  libavcodec/remove_extradata_bsf.c | 5
-rw-r--r--  libavcodec/rpza.c | 2
-rw-r--r--  libavcodec/tiff_common.c | 39
-rw-r--r--  libavcodec/tiff_common.h | 6
-rw-r--r--  libavcodec/vdpau_vc1.c | 4
-rw-r--r--  libavcodec/wmalosslessdec.c | 5
-rw-r--r--  libavcodec/xan.c | 44
-rw-r--r--  libavcodec/xxan.c | 13
-rw-r--r--  libavfilter/af_afade.c | 56
-rw-r--r--  libavfilter/af_silencedetect.c | 33
-rw-r--r--  libavfilter/avf_showspectrum.c | 186
-rw-r--r--  libavfilter/dualinput.c | 8
-rw-r--r--  libavfilter/dualinput.h | 3
-rw-r--r--  libavfilter/vf_blend.c | 14
-rw-r--r--  libavfilter/vf_hflip.c | 74
-rw-r--r--  libavfilter/vf_histogram.c | 107
-rw-r--r--  libavfilter/vf_interlace.c | 9
-rw-r--r--  libavfilter/vf_lut3d.c | 14
-rw-r--r--  libavfilter/vf_overlay.c | 14
-rw-r--r--  libavfilter/vf_psnr.c | 14
-rw-r--r--  libavfilter/vf_removelogo.c | 76
-rw-r--r--  libavfilter/vf_rotate.c | 155
-rw-r--r--  libavformat/amr.c | 2
-rw-r--r--  libavformat/asfdec.c | 4
-rw-r--r--  libavformat/avformat.h | 30
-rw-r--r--  libavformat/avidec.c | 3
-rw-r--r--  libavformat/bfi.c | 13
-rw-r--r--  libavformat/electronicarts.c | 2
-rw-r--r--  libavformat/mov.c | 7
-rw-r--r--  libavformat/mpegenc.c | 10
-rw-r--r--  libavformat/mtv.c | 4
-rw-r--r--  libavformat/mvi.c | 6
-rw-r--r--  libavformat/mxfdec.c | 8
-rw-r--r--  libavformat/mxfenc.c | 2
-rw-r--r--  libavformat/oma.c | 14
-rw-r--r--  libavformat/oma.h | 3
-rw-r--r--  libavformat/omadec.c | 15
-rw-r--r--  libavformat/riffdec.c | 5
-rw-r--r--  libavformat/rtmpproto.c | 1
-rw-r--r--  libavformat/utils.c | 29
-rw-r--r--  libavformat/version.h | 4
-rw-r--r--  libavformat/vqf.c | 15
-rw-r--r--  libavformat/xwma.c | 6
-rw-r--r--  libswresample/swresample.h | 4
-rw-r--r--  libswscale/options.c | 2
-rw-r--r--  libswscale/utils.c | 4
-rwxr-xr-x  tests/fate-run.sh | 2
-rw-r--r--  tests/fate/filter-video.mak | 5
-rw-r--r--  tests/ref/fate/exif-image-jpg | 14
-rw-r--r--  tests/ref/fate/ffprobe_compact | 2
-rw-r--r--  tests/ref/fate/ffprobe_csv | 2
-rw-r--r--  tests/ref/fate/ffprobe_default | 1
-rw-r--r--  tests/ref/fate/ffprobe_flat | 1
-rw-r--r--  tests/ref/fate/ffprobe_ini | 1
-rw-r--r--  tests/ref/fate/filter-pixfmts-fieldorder | 89
-rw-r--r--  tests/ref/lavf/gif | 4
90 files changed, 1053 insertions, 648 deletions
diff --git a/cmdutils.c b/cmdutils.c
index 79870cedde..aea02c0e5f 100644
--- a/cmdutils.c
+++ b/cmdutils.c
@@ -1481,8 +1481,9 @@ int show_filters(void *optctx, const char *opt, const char *arg)
const AVFilterPad *pad;
printf("Filters:\n"
- " T. = Timeline support\n"
- " .S = Slice threading\n"
+ " T.. = Timeline support\n"
+ " .S. = Slice threading\n"
+ " ..C = Commmand support\n"
" A = Audio input/output\n"
" V = Video input/output\n"
" N = Dynamic number and/or type of input/output\n"
@@ -1506,9 +1507,10 @@ int show_filters(void *optctx, const char *opt, const char *arg)
( i && (filter->flags & AVFILTER_FLAG_DYNAMIC_OUTPUTS))) ? 'N' : '|';
}
*descr_cur = 0;
- printf(" %c%c %-16s %-10s %s\n",
+ printf(" %c%c%c %-16s %-10s %s\n",
filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE ? 'T' : '.',
filter->flags & AVFILTER_FLAG_SLICE_THREADS ? 'S' : '.',
+ filter->process_command ? 'C' : '.',
filter->name, descr, filter->description);
}
#endif
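The new third column maps to AVFilter.process_command: filters that implement it can be reconfigured at runtime through avfilter_graph_send_command(). A minimal, hypothetical sketch of issuing such a command; the target, command name and argument below are placeholders and are not taken from this patch:

#include <libavfilter/avfilter.h>

/* Send a runtime command to every filter in an already configured graph.
 * Filters without command support (shown as '.' in the new column of
 * "ffmpeg -filters") simply decline it. The command/argument pair here is
 * a stand-in for whatever a given filter documents. */
static int send_example_command(AVFilterGraph *graph)
{
    char response[256] = { 0 };

    return avfilter_graph_send_command(graph, "all", "some_command", "some_arg",
                                       response, sizeof(response), 0);
}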
diff --git a/configure b/configure
index 10ee3a18bb..7b8cc813c2 100755
--- a/configure
+++ b/configure
@@ -1776,29 +1776,28 @@ dnxhd_decoder_select="dsputil"
dnxhd_encoder_select="aandcttables dsputil mpegvideoenc"
dvvideo_decoder_select="dsputil"
dvvideo_encoder_select="dsputil"
-dxa_decoder_deps="zlib"
+dxa_decoder_select="zlib"
eac3_decoder_select="ac3_decoder"
eac3_encoder_select="ac3_encoder"
eamad_decoder_select="aandcttables dsputil mpegvideo"
eatgq_decoder_select="aandcttables"
eatqi_decoder_select="aandcttables error_resilience mpegvideo"
-exr_decoder_deps="zlib"
+exr_decoder_select="zlib"
ffv1_decoder_select="dsputil golomb rangecoder"
ffv1_encoder_select="dsputil rangecoder"
ffvhuff_decoder_select="dsputil"
ffvhuff_encoder_select="dsputil huffman"
flac_decoder_select="golomb"
flac_encoder_select="dsputil golomb lpc"
-flashsv_decoder_deps="zlib"
-flashsv_encoder_deps="zlib"
-flashsv2_encoder_deps="zlib"
-flashsv2_decoder_deps="zlib"
+flashsv_decoder_select="zlib"
+flashsv_encoder_select="zlib"
+flashsv2_encoder_select="zlib"
+flashsv2_decoder_select="zlib"
flv_decoder_select="h263_decoder"
flv_encoder_select="h263_encoder"
fourxm_decoder_select="dsputil"
fraps_decoder_select="dsputil huffman"
-g2m_decoder_deps="zlib"
-g2m_decoder_select="dsputil"
+g2m_decoder_select="dsputil zlib"
g729_decoder_select="dsputil"
h261_decoder_select="error_resilience mpegvideo"
h261_encoder_select="aandcttables mpegvideoenc"
@@ -1858,9 +1857,8 @@ mxpeg_decoder_select="dsputil hpeldsp exif"
nellymoser_decoder_select="mdct sinewin"
nellymoser_encoder_select="audio_frame_queue mdct sinewin"
nuv_decoder_select="dsputil lzo"
-png_decoder_deps="zlib"
-png_encoder_deps="zlib"
-png_encoder_select="dsputil"
+png_decoder_select="zlib"
+png_encoder_select="dsputil zlib"
prores_decoder_select="dsputil"
prores_encoder_select="dsputil"
qcelp_decoder_select="lsp"
@@ -1894,7 +1892,7 @@ thp_decoder_select="dsputil hpeldsp exif"
truehd_decoder_select="mlp_parser"
truemotion2_decoder_select="dsputil"
truespeech_decoder_select="dsputil"
-tscc_decoder_deps="zlib"
+tscc_decoder_select="zlib"
twinvq_decoder_select="mdct lsp sinewin"
utvideo_decoder_select="dsputil"
utvideo_encoder_select="dsputil huffman"
@@ -1923,11 +1921,11 @@ wmv2_decoder_select="h263_decoder videodsp"
wmv2_encoder_select="h263_encoder"
wmv3_decoder_select="vc1_decoder"
wmv3image_decoder_select="wmv3_decoder"
-zerocodec_decoder_deps="zlib"
-zlib_decoder_deps="zlib"
-zlib_encoder_deps="zlib"
-zmbv_decoder_deps="zlib"
-zmbv_encoder_deps="zlib"
+zerocodec_decoder_select="zlib"
+zlib_decoder_select="zlib"
+zlib_encoder_select="zlib"
+zmbv_decoder_select="zlib"
+zmbv_encoder_select="zlib"
# hardware accelerators
crystalhd_deps="libcrystalhd_libcrystalhd_if_h"
@@ -4287,7 +4285,7 @@ enabled openal && { { for al_libs in "${OPENAL_LIBS}" "-lopenal" "-lO
enabled opencl && { check_lib2 OpenCL/cl.h clEnqueueNDRangeKernel -Wl,-framework,OpenCL ||
check_lib2 CL/cl.h clEnqueueNDRangeKernel -lOpenCL ||
die "ERROR: opencl not found"; } &&
- { enabled_any w32threads os2threads &&
+ { ! enabled_any w32threads os2threads ||
die "opencl currently needs --enable-pthreads or --disable-w32threads"; } &&
{ check_cpp_condition "OpenCL/cl.h" "defined(CL_VERSION_1_2)" ||
check_cpp_condition "CL/cl.h" "defined(CL_VERSION_1_2)" ||
@@ -4429,6 +4427,7 @@ enabled xmm_clobber_test &&
-Wl,--wrap,avcodec_decode_video2 \
-Wl,--wrap,avcodec_decode_subtitle2 \
-Wl,--wrap,avcodec_encode_audio2 \
+ -Wl,--wrap,avcodec_encode_video \
-Wl,--wrap,avcodec_encode_video2 \
-Wl,--wrap,avcodec_encode_subtitle \
-Wl,--wrap,sws_scale ||
diff --git a/doc/APIchanges b/doc/APIchanges
index ab932c34d0..743b5b0a2b 100644
--- a/doc/APIchanges
+++ b/doc/APIchanges
@@ -15,6 +15,10 @@ libavutil: 2012-10-22
API changes, most recent first:
+2013-10-02 - xxxxxxx - lavf 55.19.100 - avformat.h
+ Add audio/video/subtitle AVCodec fields to AVFormatContext to force specific
+ decoders
+
2013-08-xx - xxxxxxx - lavfi 3.11.0 - avfilter.h
Add AVFilterGraph.execute and AVFilterGraph.opaque for custom slice threading
implementations.
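A minimal sketch of how a library user could exercise the new avformat.h accessors mentioned above to force a particular decoder before opening an input; the decoder name and the surrounding error handling are illustrative, not part of the patch:

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

/* Force the "h264" decoder for the video stream of an input file.
 * Assumes av_register_all() has already been called; error handling is
 * kept to the bare minimum. */
static int open_with_forced_video_decoder(const char *filename,
                                          AVFormatContext **out)
{
    AVFormatContext *ic  = avformat_alloc_context();
    AVCodec         *dec = avcodec_find_decoder_by_name("h264");
    int ret;

    if (!ic || !dec)
        return AVERROR(EINVAL);

    /* New in lavf 55.19.100: request a specific decoder instead of the one
     * picked automatically from the stream's codec id. */
    av_format_set_video_codec(ic, dec);

    if ((ret = avformat_open_input(&ic, filename, NULL, NULL)) < 0)
        return ret;

    *out = ic;
    return 0;
}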
diff --git a/doc/demuxers.texi b/doc/demuxers.texi
index 44db21f697..bfc0bdc6b1 100644
--- a/doc/demuxers.texi
+++ b/doc/demuxers.texi
@@ -262,20 +262,20 @@ Use @command{ffmpeg} for creating a video from the images in the file
sequence @file{img-001.jpeg}, @file{img-002.jpeg}, ..., assuming an
input frame rate of 10 frames per second:
@example
-ffmpeg -i 'img-%03d.jpeg' -r 10 out.mkv
+ffmpeg -framerate 10 -i 'img-%03d.jpeg' out.mkv
@end example
@item
As above, but start by reading from a file with index 100 in the sequence:
@example
-ffmpeg -start_number 100 -i 'img-%03d.jpeg' -r 10 out.mkv
+ffmpeg -framerate 10 -start_number 100 -i 'img-%03d.jpeg' out.mkv
@end example
@item
Read images matching the "*.png" glob pattern, that is all the files
terminating with the ".png" suffix:
@example
-ffmpeg -pattern_type glob -i "*.png" -r 10 out.mkv
+ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv
@end example
@end itemize
diff --git a/doc/faq.texi b/doc/faq.texi
index a0ae537444..c47d9d969e 100644
--- a/doc/faq.texi
+++ b/doc/faq.texi
@@ -105,7 +105,7 @@ For example, img1.jpg, img2.jpg, img3.jpg,...
Then you may run:
@example
- ffmpeg -f image2 -i img%d.jpg /tmp/a.mpg
+ffmpeg -f image2 -i img%d.jpg /tmp/a.mpg
@end example
Notice that @samp{%d} is replaced by the image number.
@@ -118,7 +118,7 @@ the sequence. This is useful if your sequence does not start with
example will start with @file{img100.jpg}:
@example
- ffmpeg -f image2 -start_number 100 -i img%d.jpg /tmp/a.mpg
+ffmpeg -f image2 -start_number 100 -i img%d.jpg /tmp/a.mpg
@end example
If you have a large number of pictures to rename, you can use the
@@ -128,7 +128,7 @@ that match @code{*jpg} to the @file{/tmp} directory in the sequence of
@file{img001.jpg}, @file{img002.jpg} and so on.
@example
- x=1; for i in *jpg; do counter=$(printf %03d $x); ln -s "$i" /tmp/img"$counter".jpg; x=$(($x+1)); done
+x=1; for i in *jpg; do counter=$(printf %03d $x); ln -s "$i" /tmp/img"$counter".jpg; x=$(($x+1)); done
@end example
If you want to sequence them by oldest modified first, substitute
@@ -137,7 +137,7 @@ If you want to sequence them by oldest modified first, substitute
Then run:
@example
- ffmpeg -f image2 -i /tmp/img%03d.jpg /tmp/a.mpg
+ffmpeg -f image2 -i /tmp/img%03d.jpg /tmp/a.mpg
@end example
The same logic is used for any image format that ffmpeg reads.
@@ -145,7 +145,7 @@ The same logic is used for any image format that ffmpeg reads.
You can also use @command{cat} to pipe images to ffmpeg:
@example
- cat *.jpg | ffmpeg -f image2pipe -c:v mjpeg -i - output.mpg
+cat *.jpg | ffmpeg -f image2pipe -c:v mjpeg -i - output.mpg
@end example
@section How do I encode a movie to single pictures?
@@ -153,7 +153,7 @@ You can also use @command{cat} to pipe images to ffmpeg:
Use:
@example
- ffmpeg -i movie.mpg movie%d.jpg
+ffmpeg -i movie.mpg movie%d.jpg
@end example
The @file{movie.mpg} used as input will be converted to
@@ -169,7 +169,7 @@ to force the encoding.
Applying that to the previous example:
@example
- ffmpeg -i movie.mpg -f image2 -c:v mjpeg menu%d.jpg
+ffmpeg -i movie.mpg -f image2 -c:v mjpeg menu%d.jpg
@end example
Beware that there is no "jpeg" codec. Use "mjpeg" instead.
@@ -227,11 +227,11 @@ then you may use any file that DirectShow can read as input.
Just create an "input.avs" text file with this single line ...
@example
- DirectShowSource("C:\path to your file\yourfile.asf")
+DirectShowSource("C:\path to your file\yourfile.asf")
@end example
... and then feed that text file to ffmpeg:
@example
- ffmpeg -i input.avs
+ffmpeg -i input.avs
@end example
For ANY other help on AviSynth, please visit the
@@ -475,9 +475,10 @@ read @uref{http://www.tux.org/lkml/#s15, "Programming Religion"}.
@section Why are the ffmpeg programs devoid of debugging symbols?
-The build process creates ffmpeg_g, ffplay_g, etc. which contain full debug
-information. Those binaries are stripped to create ffmpeg, ffplay, etc. If
-you need the debug information, use the *_g versions.
+The build process creates @command{ffmpeg_g}, @command{ffplay_g}, etc. which
+contain full debug information. Those binaries are stripped to create
+@command{ffmpeg}, @command{ffplay}, etc. If you need the debug information, use
+the *_g versions.
@section I do not like the LGPL, can I contribute code under the GPL instead?
@@ -497,7 +498,7 @@ An easy way to get the full list of required libraries in dependency order
is to use @code{pkg-config}.
@example
- c99 -o program program.c $(pkg-config --cflags --libs libavformat libavcodec)
+c99 -o program program.c $(pkg-config --cflags --libs libavformat libavcodec)
@end example
See @file{doc/example/Makefile} and @file{doc/example/pc-uninstalled} for
@@ -521,10 +522,6 @@ to use them you have to append -D__STDC_CONSTANT_MACROS to your CXXFLAGS
You have to create a custom AVIOContext using @code{avio_alloc_context},
see @file{libavformat/aviobuf.c} in FFmpeg and @file{libmpdemux/demux_lavf.c} in MPlayer or MPlayer2 sources.
-@section Where can I find libav* headers for Pascal/Delphi?
-
-see @url{http://www.iversenit.dk/dev/ffmpeg-headers/}
-
@section Where is the documentation about ffv1, msmpeg4, asv1, 4xm?
see @url{http://www.ffmpeg.org/~michael/}
@@ -537,11 +534,12 @@ In this specific case please look at RFC 4629 to see how it should be done.
@section AVStream.r_frame_rate is wrong, it is much larger than the frame rate.
-r_frame_rate is NOT the average frame rate, it is the smallest frame rate
+@code{r_frame_rate} is NOT the average frame rate, it is the smallest frame rate
that can accurately represent all timestamps. So no, it is not
wrong if it is larger than the average!
-For example, if you have mixed 25 and 30 fps content, then r_frame_rate
-will be 150.
+For example, if you have mixed 25 and 30 fps content, then @code{r_frame_rate}
+will be 150 (it is the least common multiple).
+If you are looking for the average frame rate, see @code{AVStream.avg_frame_rate}.
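A small, self-contained sketch showing how an application can inspect both fields (the input path is a placeholder):

#include <stdio.h>
#include <libavformat/avformat.h>

/* Print r_frame_rate and avg_frame_rate for every stream of a file, so the
 * difference described above is visible directly. */
static void print_frame_rates(const char *filename)
{
    AVFormatContext *ic = NULL;
    unsigned i;

    av_register_all();
    if (avformat_open_input(&ic, filename, NULL, NULL) < 0)
        return;
    if (avformat_find_stream_info(ic, NULL) < 0) {
        avformat_close_input(&ic);
        return;
    }

    for (i = 0; i < ic->nb_streams; i++) {
        AVStream *st = ic->streams[i];
        printf("stream %u: r_frame_rate=%d/%d avg_frame_rate=%d/%d\n", i,
               st->r_frame_rate.num,   st->r_frame_rate.den,
               st->avg_frame_rate.num, st->avg_frame_rate.den);
    }
    avformat_close_input(&ic);
}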
@section Why is @code{make fate} not running all tests?
diff --git a/doc/ffprobe.xsd b/doc/ffprobe.xsd
index dae2b3fcf2..6a48ff432b 100644
--- a/doc/ffprobe.xsd
+++ b/doc/ffprobe.xsd
@@ -138,6 +138,7 @@
<xsd:attribute name="sample_fmt" type="xsd:string"/>
<xsd:attribute name="sample_rate" type="xsd:int"/>
<xsd:attribute name="channels" type="xsd:int"/>
+ <xsd:attribute name="channel_layout" type="xsd:string"/>
<xsd:attribute name="bits_per_sample" type="xsd:int"/>
<xsd:attribute name="id" type="xsd:string"/>
diff --git a/doc/filters.texi b/doc/filters.texi
index bd394951e9..efadaf19dc 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -4698,7 +4698,7 @@ It accepts the following values:
@item levels
standard histogram that displays the color component distribution in an image.
Displays color graph for each color component. Shows distribution
-of the Y, U, V, A or G, B, R components, depending on input format,
+of the Y, U, V, A or R, G, B components, depending on input format,
in the current frame. Below each graph is a color component scale meter.
@item color
@@ -6559,7 +6559,11 @@ the next filter, the scale filter will convert the input to the
requested format.
@subsection Options
-The filter accepts the following options:
+The filter accepts the following options, or any of the options
+supported by the libswscale scaler.
+
+See @ref{scaler_options,,the ffmpeg-scaler manual,ffmpeg-scaler} for
+the complete list of scaler options.
@table @option
@item width, w
@@ -6597,6 +6601,8 @@ Default value is @samp{0}.
@item flags
Set libswscale scaling flags. If not explicitly specified, the filter
applies a bilinear scaling algorithm.
+See @ref{sws_flags,,the ffmpeg-scaler manual,ffmpeg-scaler} for
+the complete list of values.
@item size, s
Set the video size, the value must be a valid abbreviation or in the
@@ -6663,23 +6669,6 @@ Set full range (0-255 in case of 8-bit luma).
Set "MPEG" range (16-235 in case of 8-bit luma).
@end table
-@item sws_dither
-Set the dithering algorithm
-
-@table @samp
-@item auto
-Choose automatically.
-
-@item none
-No dithering
-
-@item bayer
-bayer dither
-
-@item ed
-error diffusion dither
-@end table
-
@item force_original_aspect_ratio
Enable decreasing or increasing output video width or height if necessary to
keep the original aspect ratio. Possible values:
diff --git a/doc/scaler.texi b/doc/scaler.texi
index c33b6d93ad..08d90bcc81 100644
--- a/doc/scaler.texi
+++ b/doc/scaler.texi
@@ -1,3 +1,4 @@
+@anchor{scaler_options}
@chapter Scaler Options
@c man begin SCALER OPTIONS
@@ -9,6 +10,7 @@ FFmpeg tools. For programmatic use, they can be set explicitly in the
@table @option
+@anchor{sws_flags}
@item sws_flags
Set the scaler flags. This is also used to set the scaling
algorithm. Only a single algorithm should be selected.
@@ -94,6 +96,24 @@ Set scaling algorithm parameters. The specified values are specific of
some scaling algorithms and ignored by others. The specified values
are floating point number values.
+@item sws_dither
+Set the dithering algorithm. Accepts one of the following
+values. Default value is @samp{auto}.
+
+@table @samp
+@item auto
+automatic choice
+
+@item none
+no dithering
+
+@item bayer
+bayer dither
+
+@item ed
+error diffusion dither
+@end table
+
@end table
@c man end SCALER OPTIONS
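Since the scaler options are exposed as AVOptions on the SwsContext, a hedged sketch of selecting the dithering algorithm programmatically could look as follows; it assumes the option is registered under the name sws_dither with the values documented above, and the sizes and pixel formats are only examples:

#include <libswscale/swscale.h>
#include <libavutil/opt.h>
#include <libavutil/pixfmt.h>

/* Sketch: build a scaler converting 640x480 YUV420P to 320x240 RGB24 and
 * request error-diffusion dithering through the AVOption API. */
static struct SwsContext *make_dithering_scaler(void)
{
    struct SwsContext *sws = sws_alloc_context();
    if (!sws)
        return NULL;

    av_opt_set_int(sws, "srcw",       640,               0);
    av_opt_set_int(sws, "srch",       480,               0);
    av_opt_set_int(sws, "src_format", AV_PIX_FMT_YUV420P, 0);
    av_opt_set_int(sws, "dstw",       320,               0);
    av_opt_set_int(sws, "dsth",       240,               0);
    av_opt_set_int(sws, "dst_format", AV_PIX_FMT_RGB24,   0);
    av_opt_set    (sws, "sws_flags",  "bilinear",        0);
    av_opt_set    (sws, "sws_dither", "ed",              0); /* assumed name/value */

    if (sws_init_context(sws, NULL, NULL) < 0) {
        sws_freeContext(sws);
        return NULL;
    }
    return sws;
}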
diff --git a/doc/t2h.init b/doc/t2h.init
index 2aab488eed..0f08fd9847 100644
--- a/doc/t2h.init
+++ b/doc/t2h.init
@@ -92,8 +92,6 @@ $Texi2HTML::THISDOC{program_authors}
$description
<meta name="keywords" content="$longtitle">
-<meta name="resource-type" content="document">
-<meta name="distribution" content="global">
<meta name="Generator" content="$Texi2HTML::THISDOC{program}">
$encoding
$CSS_LINES
diff --git a/ffmpeg.c b/ffmpeg.c
index 2e084a49a0..d1c841fb8b 100644
--- a/ffmpeg.c
+++ b/ffmpeg.c
@@ -3421,7 +3421,7 @@ int main(int argc, char **argv)
av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
decode_error_stat[0], decode_error_stat[1]);
if (2*decode_error_stat[0] < decode_error_stat[1])
- exit_program(254);
+ exit_program(69);
exit_program(received_nb_signals ? 255 : 0);
return 0;
diff --git a/ffmpeg_opt.c b/ffmpeg_opt.c
index 8ff5bdfbda..a542b8d67e 100644
--- a/ffmpeg_opt.c
+++ b/ffmpeg_opt.c
@@ -785,6 +785,14 @@ static int open_input_file(OptionsContext *o, const char *filename)
find_codec_or_die(audio_codec_name , AVMEDIA_TYPE_AUDIO , 0)->id : AV_CODEC_ID_NONE;
ic->subtitle_codec_id= subtitle_codec_name ?
find_codec_or_die(subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, 0)->id : AV_CODEC_ID_NONE;
+
+ if (video_codec_name)
+ av_format_set_video_codec (ic, find_codec_or_die(video_codec_name , AVMEDIA_TYPE_VIDEO , 0));
+ if (audio_codec_name)
+ av_format_set_audio_codec (ic, find_codec_or_die(audio_codec_name , AVMEDIA_TYPE_AUDIO , 0));
+ if (subtitle_codec_name)
+ av_format_set_subtitle_codec(ic, find_codec_or_die(subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, 0));
+
ic->flags |= AVFMT_FLAG_NONBLOCK;
ic->interrupt_callback = int_cb;
diff --git a/ffprobe.c b/ffprobe.c
index 8b1a584ab3..b7f287c7fb 100644
--- a/ffprobe.c
+++ b/ffprobe.c
@@ -1823,6 +1823,15 @@ static void show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_i
else print_str_opt("sample_fmt", "unknown");
print_val("sample_rate", dec_ctx->sample_rate, unit_hertz_str);
print_int("channels", dec_ctx->channels);
+
+ if (dec_ctx->channel_layout) {
+ av_bprint_clear(&pbuf);
+ av_bprint_channel_layout(&pbuf, dec_ctx->channels, dec_ctx->channel_layout);
+ print_str ("channel_layout", pbuf.str);
+ } else {
+ print_str_opt("channel_layout", "unknown");
+ }
+
print_int("bits_per_sample", av_get_bits_per_sample(dec_ctx->codec_id));
break;
diff --git a/libavcodec/aac_adtstoasc_bsf.c b/libavcodec/aac_adtstoasc_bsf.c
index c7d7b3a016..c8f9e0ae69 100644
--- a/libavcodec/aac_adtstoasc_bsf.c
+++ b/libavcodec/aac_adtstoasc_bsf.c
@@ -112,7 +112,7 @@ static int aac_adtstoasc_filter(AVBitStreamFilterContext *bsfc,
}
AVBitStreamFilter ff_aac_adtstoasc_bsf = {
- "aac_adtstoasc",
- sizeof(AACBSFContext),
- aac_adtstoasc_filter,
+ .name = "aac_adtstoasc",
+ .priv_data_size = sizeof(AACBSFContext),
+ .filter = aac_adtstoasc_filter,
};
diff --git a/libavcodec/alac.c b/libavcodec/alac.c
index 69384dc432..cf19484fe6 100644
--- a/libavcodec/alac.c
+++ b/libavcodec/alac.c
@@ -320,6 +320,9 @@ static int decode_element(AVCodecContext *avctx, AVFrame *frame, int ch_index,
rice_history_mult[ch] = get_bits(&alac->gb, 3);
lpc_order[ch] = get_bits(&alac->gb, 5);
+ if (lpc_order[ch] >= alac->max_samples_per_frame)
+ return AVERROR_INVALIDDATA;
+
/* read the predictor table */
for (i = lpc_order[ch] - 1; i >= 0; i--)
lpc_coefs[ch][i] = get_sbits(&alac->gb, 16);
diff --git a/libavcodec/atrac.c b/libavcodec/atrac.c
index 3d37b02fd1..141248f54d 100644
--- a/libavcodec/atrac.c
+++ b/libavcodec/atrac.c
@@ -1,6 +1,7 @@
/*
- * ATRAC common functions
- * Copyright (c) 2006-2008 Maxim Poliakovski
+ * Common functions for the ATRAC family of decoders
+ *
+ * Copyright (c) 2006-2013 Maxim Poliakovski
* Copyright (c) 2006-2008 Benjamin Larsson
*
* This file is part of FFmpeg.
@@ -44,11 +45,7 @@ static const float qmf_48tap_half[24] = {
-0.043596379, -0.099384367, 0.13207909, 0.46424159
};
-/**
- * Generate common tables
- */
-
-void ff_atrac_generate_tables(void)
+av_cold void ff_atrac_generate_tables(void)
{
int i;
float s;
@@ -66,20 +63,69 @@ void ff_atrac_generate_tables(void)
}
}
+av_cold void ff_atrac_init_gain_compensation(AtracGCContext *gctx, int id2exp_offset,
+ int loc_scale)
+{
+ int i;
-/**
- * Quadrature mirror synthesis filter.
- *
- * @param inlo lower part of spectrum
- * @param inhi higher part of spectrum
- * @param nIn size of spectrum buffer
- * @param pOut out buffer
- * @param delayBuf delayBuf buffer
- * @param temp temp buffer
- */
+ gctx->loc_scale = loc_scale;
+ gctx->loc_size = 1 << loc_scale;
+ gctx->id2exp_offset = id2exp_offset;
+ /* Generate gain level table. */
+ for (i = 0; i < 16; i++)
+ gctx->gain_tab1[i] = powf(2.0, id2exp_offset - i);
+
+ /* Generate gain interpolation table. */
+ for (i = -15; i < 16; i++)
+ gctx->gain_tab2[i + 15] = powf(2.0, -1.0f / gctx->loc_size * i);
+}
+
+void ff_atrac_gain_compensation(AtracGCContext *gctx, float *in, float *prev,
+ AtracGainInfo *gc_now, AtracGainInfo *gc_next,
+ int num_samples, float *out)
+{
+ float lev, gc_scale, gain_inc;
+ int i, pos, lastpos;
+
+ gc_scale = gc_next->num_points ? gctx->gain_tab1[gc_next->levcode[0]] : 1.0f;
+
+ if (!gc_now->num_points) {
+ for (pos = 0; pos < num_samples; pos++)
+ out[pos] = in[pos] * gc_scale + prev[pos];
+ } else {
+ pos = 0;
+
+ for (i = 0; i < gc_now->num_points; i++) {
+ lastpos = gc_now->loccode[i] << gctx->loc_scale;
+
+ lev = gctx->gain_tab1[gc_now->levcode[i]];
+ gain_inc = gctx->gain_tab2[(i + 1 < gc_now->num_points
+ ? gc_now->levcode[i + 1]
+ : gctx->id2exp_offset)
+ - gc_now->levcode[i] + 15];
+
+ /* apply constant gain level and overlap */
+ for (; pos < lastpos; pos++)
+ out[pos] = (in[pos] * gc_scale + prev[pos]) * lev;
+
+ /* interpolate between two different gain levels */
+ for (; pos < lastpos + gctx->loc_size; pos++) {
+ out[pos] = (in[pos] * gc_scale + prev[pos]) * lev;
+ lev *= gain_inc;
+ }
+ }
+
+ for (; pos < num_samples; pos++)
+ out[pos] = in[pos] * gc_scale + prev[pos];
+ }
+
+ /* copy the overlapping part into the delay buffer */
+ memcpy(prev, &in[num_samples], num_samples * sizeof(float));
+}
-void ff_atrac_iqmf (float *inlo, float *inhi, unsigned int nIn, float *pOut, float *delayBuf, float *temp)
+void ff_atrac_iqmf(float *inlo, float *inhi, unsigned int nIn, float *pOut,
+ float *delayBuf, float *temp)
{
int i, j;
float *p1, *p3;
diff --git a/libavcodec/atrac.h b/libavcodec/atrac.h
index 9041d85481..e47d4ca18d 100644
--- a/libavcodec/atrac.h
+++ b/libavcodec/atrac.h
@@ -1,6 +1,7 @@
/*
- * ATRAC common data
- * Copyright (c) 2009 Maxim Poliakovski
+ * Common functions for the ATRAC family of decoders
+ *
+ * Copyright (c) 2009-2013 Maxim Poliakovski
* Copyright (c) 2009 Benjamin Larsson
*
* This file is part of FFmpeg.
@@ -28,9 +29,69 @@
#ifndef AVCODEC_ATRAC_H
#define AVCODEC_ATRAC_H
+/**
+ * Gain control parameters for one subband.
+ */
+typedef struct AtracGainInfo {
+ int num_points; ///< number of gain control points
+ int levcode[7]; ///< level at corresponding control point
+ int loccode[7]; ///< location of gain control points
+} AtracGainInfo;
+
+/**
+ * Gain compensation context structure.
+ */
+typedef struct AtracGCContext {
+ float gain_tab1[16]; ///< gain compensation level table
+ float gain_tab2[31]; ///< gain compensation interpolation table
+ int id2exp_offset; ///< offset for converting level index into level exponent
+ int loc_scale; ///< scale of location code = 2^loc_scale samples
+ int loc_size; ///< size of location code in samples
+} AtracGCContext;
+
extern float ff_atrac_sf_table[64];
+/**
+ * Generate common tables.
+ */
void ff_atrac_generate_tables(void);
-void ff_atrac_iqmf (float *inlo, float *inhi, unsigned int nIn, float *pOut, float *delayBuf, float *temp);
+
+/**
+ * Initialize gain compensation context.
+ *
+ * @param gctx pointer to gain compensation context to initialize
+ * @param id2exp_offset offset for converting level index into level exponent
+ * @param loc_scale location size factor
+ */
+void ff_atrac_init_gain_compensation(AtracGCContext *gctx, int id2exp_offset,
+ int loc_scale);
+
+/**
+ * Apply gain compensation and perform the MDCT overlapping part.
+ *
+ * @param gctx pointer to gain compensation context
+ * @param in input buffer
+ * @param prev previous buffer to perform overlap against
+ * @param gc_now gain control information for current frame
+ * @param gc_next gain control information for next frame
+ * @param num_samples number of samples to process
+ * @param out output data goes here
+ */
+void ff_atrac_gain_compensation(AtracGCContext *gctx, float *in, float *prev,
+ AtracGainInfo *gc_now, AtracGainInfo *gc_next,
+ int num_samples, float *out);
+
+/**
+ * Quadrature mirror synthesis filter.
+ *
+ * @param inlo lower part of spectrum
+ * @param inhi higher part of spectrum
+ * @param nIn size of spectrum buffer
+ * @param pOut out buffer
+ * @param delayBuf delayBuf buffer
+ * @param temp temp buffer
+ */
+void ff_atrac_iqmf(float *inlo, float *inhi, unsigned int nIn, float *pOut,
+ float *delayBuf, float *temp);
#endif /* AVCODEC_ATRAC_H */
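A short sketch of how a decoder in the ATRAC family is expected to drive the shared gain compensation code, mirroring the atrac3 changes below; the per-channel structure is hypothetical, while the 256-sample band size and the id2exp_offset/loc_scale values of 4 and 3 come from the atrac3 hunk and may differ for other decoders:

#include "atrac.h"

/* Per-channel state a hypothetical ATRAC-family decoder would keep. */
typedef struct ChannelUnit {
    float         imdct_buf[512];   /* output of the inverse MDCT        */
    float         prev_frame[1024]; /* delay buffer, 256 samples per band */
    AtracGainInfo gain_now[4];      /* gain data parsed for this frame   */
    AtracGainInfo gain_next[4];     /* gain data parsed for the next one */
} ChannelUnit;

/* One-time setup, using the same constants as the atrac3 decoder below. */
static void init_gain(AtracGCContext *gctx)
{
    ff_atrac_init_gain_compensation(gctx, 4, 3);
}

/* Per-band processing: gain compensation plus MDCT overlap into 'output'. */
static void process_band(AtracGCContext *gctx, ChannelUnit *ch,
                         int band, float *output)
{
    ff_atrac_gain_compensation(gctx, ch->imdct_buf,
                               &ch->prev_frame[band * 256],
                               &ch->gain_now[band], &ch->gain_next[band],
                               256, &output[band * 256]);
}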
diff --git a/libavcodec/atrac3.c b/libavcodec/atrac3.c
index c2da6e2411..245805c8ef 100644
--- a/libavcodec/atrac3.c
+++ b/libavcodec/atrac3.c
@@ -55,14 +55,8 @@
#define SAMPLES_PER_FRAME 1024
#define MDCT_SIZE 512
-typedef struct GainInfo {
- int num_gain_data;
- int lev_code[8];
- int loc_code[8];
-} GainInfo;
-
typedef struct GainBlock {
- GainInfo g_block[4];
+ AtracGainInfo g_block[4];
} GainBlock;
typedef struct TonalComponent {
@@ -112,7 +106,8 @@ typedef struct ATRAC3Context {
int scrambled_stream;
//@}
- FFTContext mdct_ctx;
+ AtracGCContext gainc_ctx;
+ FFTContext mdct_ctx;
FmtConvertContext fmt_conv;
AVFloatDSPContext fdsp;
} ATRAC3Context;
@@ -418,90 +413,32 @@ static int decode_tonal_components(GetBitContext *gb,
static int decode_gain_control(GetBitContext *gb, GainBlock *block,
int num_bands)
{
- int i, cf, num_data;
+ int i, b;
int *level, *loc;
- GainInfo *gain = block->g_block;
+ AtracGainInfo *gain = block->g_block;
- for (i = 0; i <= num_bands; i++) {
- num_data = get_bits(gb, 3);
- gain[i].num_gain_data = num_data;
- level = gain[i].lev_code;
- loc = gain[i].loc_code;
+ for (b = 0; b <= num_bands; b++) {
+ gain[b].num_points = get_bits(gb, 3);
+ level = gain[b].levcode;
+ loc = gain[b].loccode;
- for (cf = 0; cf < gain[i].num_gain_data; cf++) {
- level[cf] = get_bits(gb, 4);
- loc [cf] = get_bits(gb, 5);
- if (cf && loc[cf] <= loc[cf - 1])
+ for (i = 0; i < gain[b].num_points; i++) {
+ level[i] = get_bits(gb, 4);
+ loc [i] = get_bits(gb, 5);
+ if (i && loc[i] <= loc[i-1])
return AVERROR_INVALIDDATA;
}
}
- /* Clear the unused blocks. */
- for (; i < 4 ; i++)
- gain[i].num_gain_data = 0;
+ /* Clear unused blocks. */
+ for (; b < 4 ; b++)
+ gain[b].num_points = 0;
return 0;
}
/**
- * Apply gain parameters and perform the MDCT overlapping part
- *
- * @param input input buffer
- * @param prev previous buffer to perform overlap against
- * @param output output buffer
- * @param gain1 current band gain info
- * @param gain2 next band gain info
- */
-static void gain_compensate_and_overlap(float *input, float *prev,
- float *output, GainInfo *gain1,
- GainInfo *gain2)
-{
- float g1, g2, gain_inc;
- int i, j, num_data, start_loc, end_loc;
-
-
- if (gain2->num_gain_data == 0)
- g1 = 1.0;
- else
- g1 = gain_tab1[gain2->lev_code[0]];
-
- if (gain1->num_gain_data == 0) {
- for (i = 0; i < 256; i++)
- output[i] = input[i] * g1 + prev[i];
- } else {
- num_data = gain1->num_gain_data;
- gain1->loc_code[num_data] = 32;
- gain1->lev_code[num_data] = 4;
-
- for (i = 0, j = 0; i < num_data; i++) {
- start_loc = gain1->loc_code[i] * 8;
- end_loc = start_loc + 8;
-
- g2 = gain_tab1[gain1->lev_code[i]];
- gain_inc = gain_tab2[gain1->lev_code[i + 1] -
- gain1->lev_code[i ] + 15];
-
- /* interpolate */
- for (; j < start_loc; j++)
- output[j] = (input[j] * g1 + prev[j]) * g2;
-
- /* interpolation is done over eight samples */
- for (; j < end_loc; j++) {
- output[j] = (input[j] * g1 + prev[j]) * g2;
- g2 *= gain_inc;
- }
- }
-
- for (; j < 256; j++)
- output[j] = input[j] * g1 + prev[j];
- }
-
- /* Delay for the overlapping part. */
- memcpy(prev, &input[256], 256 * sizeof(*prev));
-}
-
-/**
* Combine the tonal band spectrum and regular band spectrum
*
* @param spectrum output spectrum buffer
@@ -691,11 +628,10 @@ static int decode_channel_sound_unit(ATRAC3Context *q, GetBitContext *gb,
memset(snd->imdct_buf, 0, 512 * sizeof(*snd->imdct_buf));
/* gain compensation and overlapping */
- gain_compensate_and_overlap(snd->imdct_buf,
- &snd->prev_frame[band * 256],
- &output[band * 256],
- &gain1->g_block[band],
- &gain2->g_block[band]);
+ ff_atrac_gain_compensation(&q->gainc_ctx, snd->imdct_buf,
+ &snd->prev_frame[band * 256],
+ &gain1->g_block[band], &gain2->g_block[band],
+ 256, &output[band * 256]);
}
/* Swap the gain control buffers for the next frame. */
@@ -988,6 +924,7 @@ static av_cold int atrac3_decode_init(AVCodecContext *avctx)
q->matrix_coeff_index_next[i] = 3;
}
+ ff_atrac_init_gain_compensation(&q->gainc_ctx, 4, 3);
avpriv_float_dsp_init(&q->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
ff_fmt_convert_init(&q->fmt_conv, avctx);
diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h
index 1ae89d5aed..2cf30dd92a 100644
--- a/libavcodec/avcodec.h
+++ b/libavcodec/avcodec.h
@@ -2932,7 +2932,9 @@ typedef struct AVCodec {
const int *supported_samplerates; ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0
const enum AVSampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1
const uint64_t *channel_layouts; ///< array of support channel layouts, or NULL if unknown. array is terminated by 0
+#if FF_API_LOWRES
uint8_t max_lowres; ///< maximum value for lowres supported by the decoder
+#endif
const AVClass *priv_class; ///< AVClass for the private context
const AVProfile *profiles; ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN}
diff --git a/libavcodec/chomp_bsf.c b/libavcodec/chomp_bsf.c
index eaefaaa539..2b93fa999e 100644
--- a/libavcodec/chomp_bsf.c
+++ b/libavcodec/chomp_bsf.c
@@ -41,7 +41,6 @@ static int chomp_filter(AVBitStreamFilterContext *bsfc,
* This filter removes a string of NULL bytes from the end of a packet.
*/
AVBitStreamFilter ff_chomp_bsf = {
- "chomp",
- 0,
- chomp_filter,
+ .name = "chomp",
+ .filter = chomp_filter,
};
diff --git a/libavcodec/dump_extradata_bsf.c b/libavcodec/dump_extradata_bsf.c
index 94b7b428bf..2dcbf8fdad 100644
--- a/libavcodec/dump_extradata_bsf.c
+++ b/libavcodec/dump_extradata_bsf.c
@@ -47,7 +47,6 @@ static int dump_extradata(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx,
}
AVBitStreamFilter ff_dump_extradata_bsf={
- "dump_extra",
- 0,
- dump_extradata,
+ .name = "dump_extra",
+ .filter = dump_extradata,
};
diff --git a/libavcodec/exif.c b/libavcodec/exif.c
index ec524a5943..96464261c1 100644
--- a/libavcodec/exif.c
+++ b/libavcodec/exif.c
@@ -50,7 +50,7 @@ static int exif_add_metadata(AVCodecContext *avctx, int count, int type,
case TIFF_DOUBLE : return ff_tadd_doubles_metadata(count, name, sep, gb, le, metadata);
case TIFF_SHORT : return ff_tadd_shorts_metadata(count, name, sep, gb, le, metadata);
case TIFF_BYTE :
- case TIFF_UNDEFINED:
+ case TIFF_UNDEFINED: return ff_tadd_bytes_metadata(count, name, sep, gb, le, metadata);
case TIFF_STRING : return ff_tadd_string_metadata(count, name, gb, le, metadata);
case TIFF_SRATIONAL:
case TIFF_RATIONAL : return ff_tadd_rational_metadata(count, name, sep, gb, le, metadata);
diff --git a/libavcodec/fraps.c b/libavcodec/fraps.c
index 8334afcce2..60c579db2b 100644
--- a/libavcodec/fraps.c
+++ b/libavcodec/fraps.c
@@ -40,6 +40,7 @@
#include "thread.h"
#define FPS_TAG MKTAG('F', 'P', 'S', 'x')
+#define VLC_BITS 11
/**
* local variable storage
@@ -94,7 +95,7 @@ static int fraps2_decode_plane(FrapsContext *s, uint8_t *dst, int stride, int w,
for (i = 0; i < 256; i++)
nodes[i].count = bytestream_get_le32(&src);
size -= 1024;
- if ((ret = ff_huff_build_tree(s->avctx, &vlc, 256, FF_HUFFMAN_BITS,
+ if ((ret = ff_huff_build_tree(s->avctx, &vlc, 256, VLC_BITS,
nodes, huff_cmp,
FF_HUFFMAN_FLAG_ZERO_COUNT)) < 0)
return ret;
@@ -106,7 +107,7 @@ static int fraps2_decode_plane(FrapsContext *s, uint8_t *dst, int stride, int w,
init_get_bits(&gb, s->tmpbuf, size * 8);
for (j = 0; j < h; j++) {
for (i = 0; i < w*step; i += step) {
- dst[i] = get_vlc2(&gb, vlc.table, FF_HUFFMAN_BITS, 3);
+ dst[i] = get_vlc2(&gb, vlc.table, VLC_BITS, 3);
/* lines are stored as deltas between previous lines
* and we need to add 0x80 to the first lines of chroma planes
*/
diff --git a/libavcodec/get_bits.h b/libavcodec/get_bits.h
index 5da90b6b42..4ddb08817b 100644
--- a/libavcodec/get_bits.h
+++ b/libavcodec/get_bits.h
@@ -292,7 +292,6 @@ static inline unsigned int show_bits(GetBitContext *s, int n)
static inline void skip_bits(GetBitContext *s, int n)
{
OPEN_READER(re, s);
- UPDATE_CACHE(re, s);
LAST_SKIP_BITS(re, s, n);
CLOSE_READER(re, s);
}
diff --git a/libavcodec/h264.c b/libavcodec/h264.c
index f2ee328124..cda26d6e6b 100644
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -3473,7 +3473,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
h->avctx->pix_fmt = ret;
av_log(h->avctx, AV_LOG_INFO, "Reinit context to %dx%d, "
- "pix_fmt: %d\n", h->width, h->height, h->avctx->pix_fmt);
+ "pix_fmt: %s\n", h->width, h->height, av_get_pix_fmt_name(h->avctx->pix_fmt));
if ((ret = h264_slice_header_init(h, 1)) < 0) {
av_log(h->avctx, AV_LOG_ERROR,
diff --git a/libavcodec/h264_mp4toannexb_bsf.c b/libavcodec/h264_mp4toannexb_bsf.c
index 6ca01007c2..58568a7a5a 100644
--- a/libavcodec/h264_mp4toannexb_bsf.c
+++ b/libavcodec/h264_mp4toannexb_bsf.c
@@ -205,7 +205,7 @@ fail:
}
AVBitStreamFilter ff_h264_mp4toannexb_bsf = {
- "h264_mp4toannexb",
- sizeof(H264BSFContext),
- h264_mp4toannexb_filter,
+ .name = "h264_mp4toannexb",
+ .priv_data_size = sizeof(H264BSFContext),
+ .filter = h264_mp4toannexb_filter,
};
diff --git a/libavcodec/imgconvert.c b/libavcodec/imgconvert.c
index d9dc55d325..ee58cfb49a 100644
--- a/libavcodec/imgconvert.c
+++ b/libavcodec/imgconvert.c
@@ -71,6 +71,9 @@ void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int
}
static int get_color_type(const AVPixFmtDescriptor *desc) {
+ if (desc->flags & AV_PIX_FMT_FLAG_PAL)
+ return FF_COLOR_RGB;
+
if(desc->nb_components == 1 || desc->nb_components == 2)
return FF_COLOR_GRAY;
diff --git a/libavcodec/imx_dump_header_bsf.c b/libavcodec/imx_dump_header_bsf.c
index 9f276bc76e..be43fbc159 100644
--- a/libavcodec/imx_dump_header_bsf.c
+++ b/libavcodec/imx_dump_header_bsf.c
@@ -53,7 +53,6 @@ static int imx_dump_header(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx
}
AVBitStreamFilter ff_imx_dump_header_bsf = {
- "imxdump",
- 0,
- imx_dump_header,
+ .name = "imxdump",
+ .filter = imx_dump_header,
};
diff --git a/libavcodec/mjpega_dump_header_bsf.c b/libavcodec/mjpega_dump_header_bsf.c
index 9de6ac3f8a..3947c821a8 100644
--- a/libavcodec/mjpega_dump_header_bsf.c
+++ b/libavcodec/mjpega_dump_header_bsf.c
@@ -88,7 +88,6 @@ static int mjpega_dump_header(AVBitStreamFilterContext *bsfc, AVCodecContext *av
}
AVBitStreamFilter ff_mjpega_dump_header_bsf = {
- "mjpegadump",
- 0,
- mjpega_dump_header,
+ .name = "mjpegadump",
+ .filter = mjpega_dump_header,
};
diff --git a/libavcodec/movsub_bsf.c b/libavcodec/movsub_bsf.c
index a745190d36..123c7a547d 100644
--- a/libavcodec/movsub_bsf.c
+++ b/libavcodec/movsub_bsf.c
@@ -35,9 +35,8 @@ static int text2movsub(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, co
}
AVBitStreamFilter ff_text2movsub_bsf={
- "text2movsub",
- 0,
- text2movsub,
+ .name = "text2movsub",
+ .filter = text2movsub,
};
static int mov2textsub(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, const char *args,
@@ -51,7 +50,6 @@ static int mov2textsub(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, co
}
AVBitStreamFilter ff_mov2textsub_bsf={
- "mov2textsub",
- 0,
- mov2textsub,
+ .name = "mov2textsub",
+ .filter = mov2textsub,
};
diff --git a/libavcodec/mp3_header_compress_bsf.c b/libavcodec/mp3_header_compress_bsf.c
index 3c5e2fb3bb..e479f6b986 100644
--- a/libavcodec/mp3_header_compress_bsf.c
+++ b/libavcodec/mp3_header_compress_bsf.c
@@ -82,7 +82,6 @@ output_unchanged:
}
AVBitStreamFilter ff_mp3_header_compress_bsf={
- "mp3comp",
- 0,
- mp3_header_compress,
+ .name = "mp3comp",
+ .filter = mp3_header_compress,
};
diff --git a/libavcodec/mp3_header_decompress_bsf.c b/libavcodec/mp3_header_decompress_bsf.c
index adf5a7f426..df455322df 100644
--- a/libavcodec/mp3_header_decompress_bsf.c
+++ b/libavcodec/mp3_header_decompress_bsf.c
@@ -92,7 +92,6 @@ static int mp3_header_decompress(AVBitStreamFilterContext *bsfc, AVCodecContext
}
AVBitStreamFilter ff_mp3_header_decompress_bsf={
- "mp3decomp",
- 0,
- mp3_header_decompress,
+ .name = "mp3decomp",
+ .filter = mp3_header_decompress,
};
diff --git a/libavcodec/noise_bsf.c b/libavcodec/noise_bsf.c
index c91e85bc83..4f609de7bf 100644
--- a/libavcodec/noise_bsf.c
+++ b/libavcodec/noise_bsf.c
@@ -49,7 +49,7 @@ static int noise(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, const ch
}
AVBitStreamFilter ff_noise_bsf={
- "noise",
- sizeof(int),
- noise,
+ .name = "noise",
+ .priv_data_size = sizeof(int),
+ .filter = noise,
};
diff --git a/libavcodec/pcx.c b/libavcodec/pcx.c
index 67bc839efb..835dcfeb0a 100644
--- a/libavcodec/pcx.c
+++ b/libavcodec/pcx.c
@@ -163,6 +163,13 @@ static int pcx_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
} else if (nplanes == 1 && bits_per_pixel == 8) {
int palstart = avpkt->size - 769;
+ if (avpkt->size < 769) {
+ av_log(avctx, AV_LOG_ERROR, "File is too short\n");
+ ret = avctx->err_recognition & AV_EF_EXPLODE ?
+ AVERROR_INVALIDDATA : avpkt->size;
+ goto end;
+ }
+
for (y = 0; y < h; y++, ptr += stride) {
pcx_rle_decode(&gb, scanline, bytes_per_scanline, compressed);
memcpy(ptr, scanline, w);
@@ -174,7 +181,8 @@ static int pcx_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
}
if (bytestream2_get_byte(&gb) != 12) {
av_log(avctx, AV_LOG_ERROR, "expected palette after image data\n");
- ret = AVERROR_INVALIDDATA;
+ ret = avctx->err_recognition & AV_EF_EXPLODE ?
+ AVERROR_INVALIDDATA : avpkt->size;
goto end;
}
} else if (nplanes == 1) { /* all packed formats, max. 16 colors */
diff --git a/libavcodec/pngdec.c b/libavcodec/pngdec.c
index 18295c550e..c37f73d776 100644
--- a/libavcodec/pngdec.c
+++ b/libavcodec/pngdec.c
@@ -381,8 +381,10 @@ static int png_decode_idat(PNGDecContext *s, int length)
s->zstream.avail_out = s->crow_size;
s->zstream.next_out = s->crow_buf;
}
- if (ret == Z_STREAM_END)
- break;
+ if (ret == Z_STREAM_END && s->zstream.avail_in > 0) {
+ av_log(NULL, AV_LOG_WARNING, "%d undecompressed bytes left in buffer\n", s->zstream.avail_in);
+ return 0;
+ }
}
return 0;
}
diff --git a/libavcodec/remove_extradata_bsf.c b/libavcodec/remove_extradata_bsf.c
index f0d9b4513a..e880b95809 100644
--- a/libavcodec/remove_extradata_bsf.c
+++ b/libavcodec/remove_extradata_bsf.c
@@ -49,7 +49,6 @@ static int remove_extradata(AVBitStreamFilterContext *bsfc, AVCodecContext *avct
}
AVBitStreamFilter ff_remove_extradata_bsf={
- "remove_extra",
- 0,
- remove_extradata,
+ .name = "remove_extra",
+ .filter = remove_extradata,
};
diff --git a/libavcodec/rpza.c b/libavcodec/rpza.c
index 416f8b6722..0bcec35ac4 100644
--- a/libavcodec/rpza.c
+++ b/libavcodec/rpza.c
@@ -204,7 +204,7 @@ static void rpza_decode_stream(RpzaContext *s)
/* Fill block with 16 colors */
case 0x00:
- if (s->size - stream_ptr < 16)
+ if (s->size - stream_ptr < 30)
return;
ADVANCE_BLOCK();
block_ptr = row_ptr + pixel_ptr;
diff --git a/libavcodec/tiff_common.c b/libavcodec/tiff_common.c
index b7bd587bbd..f051022952 100644
--- a/libavcodec/tiff_common.c
+++ b/libavcodec/tiff_common.c
@@ -90,7 +90,7 @@ int ff_tadd_rational_metadata(int count, const char *name, const char *sep,
return AVERROR_INVALIDDATA;
if (!sep) sep = ", ";
- av_bprint_init(&bp, 10 * count, AV_BPRINT_SIZE_AUTOMATIC);
+ av_bprint_init(&bp, 10 * count, AV_BPRINT_SIZE_UNLIMITED);
for (i = 0; i < count; i++) {
nom = ff_tget_long(gb, le);
@@ -124,7 +124,7 @@ int ff_tadd_long_metadata(int count, const char *name, const char *sep,
return AVERROR_INVALIDDATA;
if (!sep) sep = ", ";
- av_bprint_init(&bp, 10 * count, AV_BPRINT_SIZE_AUTOMATIC);
+ av_bprint_init(&bp, 10 * count, AV_BPRINT_SIZE_UNLIMITED);
for (i = 0; i < count; i++) {
av_bprintf(&bp, "%s%i", (i ? sep : ""), ff_tget_long(gb, le));
@@ -156,7 +156,7 @@ int ff_tadd_doubles_metadata(int count, const char *name, const char *sep,
return AVERROR_INVALIDDATA;
if (!sep) sep = ", ";
- av_bprint_init(&bp, 10 * count, AV_BPRINT_SIZE_AUTOMATIC);
+ av_bprint_init(&bp, 10 * count, 100 * count);
for (i = 0; i < count; i++) {
av_bprintf(&bp, "%s%f", (i ? sep : ""), ff_tget_double(gb, le));
@@ -188,7 +188,7 @@ int ff_tadd_shorts_metadata(int count, const char *name, const char *sep,
return AVERROR_INVALIDDATA;
if (!sep) sep = ", ";
- av_bprint_init(&bp, 10 * count, AV_BPRINT_SIZE_AUTOMATIC);
+ av_bprint_init(&bp, 10 * count, AV_BPRINT_SIZE_UNLIMITED);
for (i = 0; i < count; i++) {
av_bprintf(&bp, "%s%i", (i ? sep : ""), ff_tget_short(gb, le));
@@ -207,6 +207,37 @@ int ff_tadd_shorts_metadata(int count, const char *name, const char *sep,
}
+int ff_tadd_bytes_metadata(int count, const char *name, const char *sep,
+ GetByteContext *gb, int le, AVDictionary **metadata)
+{
+ AVBPrint bp;
+ char *ap;
+ int i;
+
+ if (count >= INT_MAX / sizeof(int8_t) || count <= 0)
+ return AVERROR_INVALIDDATA;
+ if (bytestream2_get_bytes_left(gb) < count * sizeof(int8_t))
+ return AVERROR_INVALIDDATA;
+ if (!sep) sep = ", ";
+
+ av_bprint_init(&bp, 10 * count, AV_BPRINT_SIZE_UNLIMITED);
+
+ for (i = 0; i < count; i++) {
+ av_bprintf(&bp, "%s%i", (i ? sep : ""), bytestream2_get_byte(gb));
+ }
+
+ if ((i = av_bprint_finalize(&bp, &ap))) {
+ return i;
+ }
+ if (!ap) {
+ return AVERROR(ENOMEM);
+ }
+
+ av_dict_set(metadata, name, ap, AV_DICT_DONT_STRDUP_VAL);
+
+ return 0;
+}
+
int ff_tadd_string_metadata(int count, const char *name,
GetByteContext *gb, int le, AVDictionary **metadata)
{
diff --git a/libavcodec/tiff_common.h b/libavcodec/tiff_common.h
index 2a2cb3f8e6..01a7b0891b 100644
--- a/libavcodec/tiff_common.h
+++ b/libavcodec/tiff_common.h
@@ -123,6 +123,12 @@ int ff_tadd_doubles_metadata(int count, const char *name, const char *sep,
int ff_tadd_shorts_metadata(int count, const char *name, const char *sep,
GetByteContext *gb, int le, AVDictionary **metadata);
+/** Adds count bytes converted to a string
+ * into the metadata dictionary.
+ */
+int ff_tadd_bytes_metadata(int count, const char *name, const char *sep,
+ GetByteContext *gb, int le, AVDictionary **metadata);
+
/** Adds a string of count characters
* into the metadata dictionary.
*/
diff --git a/libavcodec/vdpau_vc1.c b/libavcodec/vdpau_vc1.c
index 272b2d9d94..c6e3343460 100644
--- a/libavcodec/vdpau_vc1.c
+++ b/libavcodec/vdpau_vc1.c
@@ -44,14 +44,18 @@ static int vdpau_vc1_start_frame(AVCodecContext *avctx,
switch (s->pict_type) {
case AV_PICTURE_TYPE_B:
+ if (s->next_picture_ptr) {
ref = ff_vdpau_get_surface_id(&s->next_picture);
assert(ref != VDP_INVALID_HANDLE);
info->backward_reference = ref;
+ }
/* fall-through */
case AV_PICTURE_TYPE_P:
+ if (s->last_picture_ptr) {
ref = ff_vdpau_get_surface_id(&s->last_picture);
assert(ref != VDP_INVALID_HANDLE);
info->forward_reference = ref;
+ }
}
info->slice_count = 0;
diff --git a/libavcodec/wmalosslessdec.c b/libavcodec/wmalosslessdec.c
index df44855e53..ab764ed8d8 100644
--- a/libavcodec/wmalosslessdec.c
+++ b/libavcodec/wmalosslessdec.c
@@ -178,6 +178,11 @@ static av_cold int decode_init(AVCodecContext *avctx)
unsigned int channel_mask;
int i, log2_max_num_subframes;
+ if (!avctx->block_align) {
+ av_log(avctx, AV_LOG_ERROR, "block_align is not set\n");
+ return AVERROR(EINVAL);
+ }
+
s->avctx = avctx;
init_put_bits(&s->pb, s->frame_data, MAX_FRAMESIZE);
diff --git a/libavcodec/xan.c b/libavcodec/xan.c
index 7710f743fa..50773810e1 100644
--- a/libavcodec/xan.c
+++ b/libavcodec/xan.c
@@ -110,6 +110,7 @@ static int xan_huffman_decode(uint8_t *dest, int dest_len,
int ptr_len = src_len - 1 - byte*2;
uint8_t val = ival;
uint8_t *dest_end = dest + dest_len;
+ uint8_t *dest_start = dest;
GetBitContext gb;
if (ptr_len < 0)
@@ -125,13 +126,13 @@ static int xan_huffman_decode(uint8_t *dest, int dest_len,
if (val < 0x16) {
if (dest >= dest_end)
- return 0;
+ return dest_len;
*dest++ = val;
val = ival;
}
}
- return 0;
+ return dest - dest_start;
}
/**
@@ -291,7 +292,7 @@ static int xan_wc3_decode_frame(XanContext *s, AVFrame *frame)
uint8_t flag = 0;
int size = 0;
int motion_x, motion_y;
- int x, y;
+ int x, y, ret;
uint8_t *opcode_buffer = s->buffer1;
uint8_t *opcode_buffer_end = s->buffer1 + s->buffer1_size;
@@ -300,10 +301,9 @@ static int xan_wc3_decode_frame(XanContext *s, AVFrame *frame)
/* pointers to segments inside the compressed chunk */
const uint8_t *huffman_segment;
- const uint8_t *size_segment;
- const uint8_t *vector_segment;
+ GetByteContext size_segment;
+ GetByteContext vector_segment;
const uint8_t *imagedata_segment;
- const uint8_t *buf_end = s->buf + s->size;
int huffman_offset, size_offset, vector_offset, imagedata_offset,
imagedata_size;
@@ -322,13 +322,14 @@ static int xan_wc3_decode_frame(XanContext *s, AVFrame *frame)
return AVERROR_INVALIDDATA;
huffman_segment = s->buf + huffman_offset;
- size_segment = s->buf + size_offset;
- vector_segment = s->buf + vector_offset;
+ bytestream2_init(&size_segment, s->buf + size_offset, s->size - size_offset);
+ bytestream2_init(&vector_segment, s->buf + vector_offset, s->size - vector_offset);
imagedata_segment = s->buf + imagedata_offset;
- if (xan_huffman_decode(opcode_buffer, opcode_buffer_size,
- huffman_segment, s->size - huffman_offset) < 0)
+ if ((ret = xan_huffman_decode(opcode_buffer, opcode_buffer_size,
+ huffman_segment, s->size - huffman_offset)) < 0)
return AVERROR_INVALIDDATA;
+ opcode_buffer_end = opcode_buffer + ret;
if (imagedata_segment[0] == 2) {
xan_unpack(s->buffer2, s->buffer2_size,
@@ -375,31 +376,29 @@ static int xan_wc3_decode_frame(XanContext *s, AVFrame *frame)
case 9:
case 19:
- if (buf_end - size_segment < 1) {
+ if (bytestream2_get_bytes_left(&size_segment) < 1) {
av_log(s->avctx, AV_LOG_ERROR, "size_segment overread\n");
return AVERROR_INVALIDDATA;
}
- size = *size_segment++;
+ size = bytestream2_get_byte(&size_segment);
break;
case 10:
case 20:
- if (buf_end - size_segment < 2) {
+ if (bytestream2_get_bytes_left(&size_segment) < 2) {
av_log(s->avctx, AV_LOG_ERROR, "size_segment overread\n");
return AVERROR_INVALIDDATA;
}
- size = AV_RB16(&size_segment[0]);
- size_segment += 2;
+ size = bytestream2_get_be16(&size_segment);
break;
case 11:
case 21:
- if (buf_end - size_segment < 3) {
+ if (bytestream2_get_bytes_left(&size_segment) < 3) {
av_log(s->avctx, AV_LOG_ERROR, "size_segment overread\n");
return AVERROR_INVALIDDATA;
}
- size = AV_RB24(size_segment);
- size_segment += 3;
+ size = bytestream2_get_be24(&size_segment);
break;
}
@@ -420,14 +419,15 @@ static int xan_wc3_decode_frame(XanContext *s, AVFrame *frame)
imagedata_size -= size;
}
} else {
- if (vector_segment >= buf_end) {
+ uint8_t vector;
+ if (bytestream2_get_bytes_left(&vector_segment) <= 0) {
av_log(s->avctx, AV_LOG_ERROR, "vector_segment overread\n");
return AVERROR_INVALIDDATA;
}
/* run-based motion compensation from last frame */
- motion_x = sign_extend(*vector_segment >> 4, 4);
- motion_y = sign_extend(*vector_segment & 0xF, 4);
- vector_segment++;
+ vector = bytestream2_get_byte(&vector_segment);
+ motion_x = sign_extend(vector >> 4, 4);
+ motion_y = sign_extend(vector & 0xF, 4);
/* copy a run of pixels from the previous frame */
xan_wc3_copy_pixel_run(s, frame, x, y, size, motion_x, motion_y);
diff --git a/libavcodec/xxan.c b/libavcodec/xxan.c
index cb913da53a..4030889bd2 100644
--- a/libavcodec/xxan.c
+++ b/libavcodec/xxan.c
@@ -52,6 +52,10 @@ static av_cold int xan_decode_init(AVCodecContext *avctx)
av_log(avctx, AV_LOG_ERROR, "Invalid frame height: %d.\n", avctx->height);
return AVERROR(EINVAL);
}
+ if (avctx->width & 1) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid frame width: %d.\n", avctx->width);
+ return AVERROR(EINVAL);
+ }
s->buffer_size = avctx->width * avctx->height;
s->y_buffer = av_malloc(s->buffer_size);
@@ -300,8 +304,7 @@ static int xan_decode_frame_type0(AVCodecContext *avctx)
ybuf[j+1] = cur << 1;
last = cur;
}
- if(j < avctx->width)
- ybuf[j] = last << 1;
+ ybuf[j] = last << 1;
prev_buf = ybuf;
ybuf += avctx->width;
@@ -314,8 +317,7 @@ static int xan_decode_frame_type0(AVCodecContext *avctx)
ybuf[j+1] = cur << 1;
last = cur;
}
- if(j < avctx->width)
- ybuf[j] = last << 1;
+ ybuf[j] = last << 1;
prev_buf = ybuf;
ybuf += avctx->width;
}
@@ -375,8 +377,7 @@ static int xan_decode_frame_type1(AVCodecContext *avctx)
ybuf[j+1] = cur;
last = cur;
}
- if(j < avctx->width)
- ybuf[j] = last;
+ ybuf[j] = last;
ybuf += avctx->width;
}
diff --git a/libavfilter/af_afade.c b/libavfilter/af_afade.c
index 4c9c196229..2ffb9624c0 100644
--- a/libavfilter/af_afade.c
+++ b/libavfilter/af_afade.c
@@ -79,9 +79,9 @@ AVFILTER_DEFINE_CLASS(afade);
static av_cold int init(AVFilterContext *ctx)
{
- AudioFadeContext *afade = ctx->priv;
+ AudioFadeContext *s = ctx->priv;
- if (INT64_MAX - afade->nb_samples < afade->start_sample)
+ if (INT64_MAX - s->nb_samples < s->start_sample)
return AVERROR(EINVAL);
return 0;
@@ -202,38 +202,38 @@ FADE(s32, int32_t)
static int config_input(AVFilterLink *inlink)
{
- AVFilterContext *ctx = inlink->dst;
- AudioFadeContext *afade = ctx->priv;
+ AVFilterContext *ctx = inlink->dst;
+ AudioFadeContext *s = ctx->priv;
switch (inlink->format) {
- case AV_SAMPLE_FMT_DBL: afade->fade_samples = fade_samples_dbl; break;
- case AV_SAMPLE_FMT_DBLP: afade->fade_samples = fade_samples_dblp; break;
- case AV_SAMPLE_FMT_FLT: afade->fade_samples = fade_samples_flt; break;
- case AV_SAMPLE_FMT_FLTP: afade->fade_samples = fade_samples_fltp; break;
- case AV_SAMPLE_FMT_S16: afade->fade_samples = fade_samples_s16; break;
- case AV_SAMPLE_FMT_S16P: afade->fade_samples = fade_samples_s16p; break;
- case AV_SAMPLE_FMT_S32: afade->fade_samples = fade_samples_s32; break;
- case AV_SAMPLE_FMT_S32P: afade->fade_samples = fade_samples_s32p; break;
+ case AV_SAMPLE_FMT_DBL: s->fade_samples = fade_samples_dbl; break;
+ case AV_SAMPLE_FMT_DBLP: s->fade_samples = fade_samples_dblp; break;
+ case AV_SAMPLE_FMT_FLT: s->fade_samples = fade_samples_flt; break;
+ case AV_SAMPLE_FMT_FLTP: s->fade_samples = fade_samples_fltp; break;
+ case AV_SAMPLE_FMT_S16: s->fade_samples = fade_samples_s16; break;
+ case AV_SAMPLE_FMT_S16P: s->fade_samples = fade_samples_s16p; break;
+ case AV_SAMPLE_FMT_S32: s->fade_samples = fade_samples_s32; break;
+ case AV_SAMPLE_FMT_S32P: s->fade_samples = fade_samples_s32p; break;
}
- if (afade->duration)
- afade->nb_samples = av_rescale(afade->duration, inlink->sample_rate, AV_TIME_BASE);
- if (afade->start_time)
- afade->start_sample = av_rescale(afade->start_time, inlink->sample_rate, AV_TIME_BASE);
+ if (s->duration)
+ s->nb_samples = av_rescale(s->duration, inlink->sample_rate, AV_TIME_BASE);
+ if (s->start_time)
+ s->start_sample = av_rescale(s->start_time, inlink->sample_rate, AV_TIME_BASE);
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
- AudioFadeContext *afade = inlink->dst->priv;
+ AudioFadeContext *s = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
int nb_samples = buf->nb_samples;
AVFrame *out_buf;
int64_t cur_sample = av_rescale_q(buf->pts, (AVRational){1, outlink->sample_rate}, outlink->time_base);
- if ((!afade->type && (afade->start_sample + afade->nb_samples < cur_sample)) ||
- ( afade->type && (cur_sample + afade->nb_samples < afade->start_sample)))
+ if ((!s->type && (s->start_sample + s->nb_samples < cur_sample)) ||
+ ( s->type && (cur_sample + s->nb_samples < s->start_sample)))
return ff_filter_frame(outlink, buf);
if (av_frame_is_writable(buf)) {
@@ -245,22 +245,22 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
av_frame_copy_props(out_buf, buf);
}
- if ((!afade->type && (cur_sample + nb_samples < afade->start_sample)) ||
- ( afade->type && (afade->start_sample + afade->nb_samples < cur_sample))) {
+ if ((!s->type && (cur_sample + nb_samples < s->start_sample)) ||
+ ( s->type && (s->start_sample + s->nb_samples < cur_sample))) {
av_samples_set_silence(out_buf->extended_data, 0, nb_samples,
av_frame_get_channels(out_buf), out_buf->format);
} else {
int64_t start;
- if (!afade->type)
- start = cur_sample - afade->start_sample;
+ if (!s->type)
+ start = cur_sample - s->start_sample;
else
- start = afade->start_sample + afade->nb_samples - cur_sample;
+ start = s->start_sample + s->nb_samples - cur_sample;
- afade->fade_samples(out_buf->extended_data, buf->extended_data,
- nb_samples, av_frame_get_channels(buf),
- afade->type ? -1 : 1, start,
- afade->nb_samples, afade->curve);
+ s->fade_samples(out_buf->extended_data, buf->extended_data,
+ nb_samples, av_frame_get_channels(buf),
+ s->type ? -1 : 1, start,
+ s->nb_samples, s->curve);
}
if (buf != out_buf)
diff --git a/libavfilter/af_silencedetect.c b/libavfilter/af_silencedetect.c
index b875ec4806..a9b9358f6b 100644
--- a/libavfilter/af_silencedetect.c
+++ b/libavfilter/af_silencedetect.c
@@ -62,17 +62,16 @@ static char *get_metadata_val(AVFrame *insamples, const char *key)
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
int i;
- SilenceDetectContext *silence = inlink->dst->priv;
+ SilenceDetectContext *s = inlink->dst->priv;
const int nb_channels = inlink->channels;
const int srate = inlink->sample_rate;
const int nb_samples = insamples->nb_samples * nb_channels;
- const int64_t nb_samples_notify = srate * silence->duration * nb_channels;
+ const int64_t nb_samples_notify = srate * s->duration * nb_channels;
// scale number of null samples to the new sample rate
- if (silence->last_sample_rate && silence->last_sample_rate != srate)
- silence->nb_null_samples =
- srate * silence->nb_null_samples / silence->last_sample_rate;
- silence->last_sample_rate = srate;
+ if (s->last_sample_rate && s->last_sample_rate != srate)
+ s->nb_null_samples = srate * s->nb_null_samples / s->last_sample_rate;
+ s->last_sample_rate = srate;
// TODO: support more sample formats
// TODO: document metadata
@@ -80,29 +79,29 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
double *p = (double *)insamples->data[0];
for (i = 0; i < nb_samples; i++, p++) {
- if (*p < silence->noise && *p > -silence->noise) {
- if (!silence->start) {
- silence->nb_null_samples++;
- if (silence->nb_null_samples >= nb_samples_notify) {
- silence->start = insamples->pts - (int64_t)(silence->duration / av_q2d(inlink->time_base) + .5);
+ if (*p < s->noise && *p > -s->noise) {
+ if (!s->start) {
+ s->nb_null_samples++;
+ if (s->nb_null_samples >= nb_samples_notify) {
+ s->start = insamples->pts - (int64_t)(s->duration / av_q2d(inlink->time_base) + .5);
av_dict_set(&insamples->metadata, "lavfi.silence_start",
- av_ts2timestr(silence->start, &inlink->time_base), 0);
- av_log(silence, AV_LOG_INFO, "silence_start: %s\n",
+ av_ts2timestr(s->start, &inlink->time_base), 0);
+ av_log(s, AV_LOG_INFO, "silence_start: %s\n",
get_metadata_val(insamples, "lavfi.silence_start"));
}
}
} else {
- if (silence->start) {
+ if (s->start) {
av_dict_set(&insamples->metadata, "lavfi.silence_end",
av_ts2timestr(insamples->pts, &inlink->time_base), 0);
av_dict_set(&insamples->metadata, "lavfi.silence_duration",
- av_ts2timestr(insamples->pts - silence->start, &inlink->time_base), 0);
- av_log(silence, AV_LOG_INFO,
+ av_ts2timestr(insamples->pts - s->start, &inlink->time_base), 0);
+ av_log(s, AV_LOG_INFO,
"silence_end: %s | silence_duration: %s\n",
get_metadata_val(insamples, "lavfi.silence_end"),
get_metadata_val(insamples, "lavfi.silence_duration"));
}
- silence->nb_null_samples = silence->start = 0;
+ s->nb_null_samples = s->start = 0;
}
}
}
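For context on the renamed fields: silencedetect counts consecutive samples below the noise floor and, once the count reaches sample_rate * duration * channels, back-dates lavfi.silence_start by the configured duration; silence_end is logged when the first loud sample arrives. A standalone sketch with hypothetical stream parameters (8 kHz mono, 0.5 s window, -30 dB floor):

#include <stdio.h>
#include <math.h>

int main(void)
{
    /* Hypothetical stream: 8 kHz mono, 0.5 s minimum silence, -30 dB floor. */
    const int       srate    = 8000;
    const double    duration = 0.5;
    const double    noise    = pow(10.0, -30.0 / 20.0);       /* amplitude threshold */
    const long long notify   = (long long)(srate * duration); /* null samples needed */

    long long nb_null  = 0;
    int       reported = 0;

    for (long long i = 0; i < 2 * srate; i++) {
        /* synthetic input: loud, silent from 0.25 s to 1.0 s, then loud again */
        double sample = (i >= srate / 4 && i < srate) ? 0.0001 : 0.5;

        if (sample < noise && sample > -noise) {
            if (++nb_null >= notify && !reported) {
                /* silence began "duration" seconds before this sample */
                printf("silence_start: %.3f s\n", (double)i / srate - duration);
                reported = 1;
            }
        } else {
            if (reported)
                printf("silence_end: %.3f s\n", (double)i / srate);
            nb_null  = 0;
            reported = 0;
        }
    }
    return 0;
}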
diff --git a/libavfilter/avf_showspectrum.c b/libavfilter/avf_showspectrum.c
index a0bb983427..27dd1f2820 100644
--- a/libavfilter/avf_showspectrum.c
+++ b/libavfilter/avf_showspectrum.c
@@ -99,16 +99,16 @@ static const struct {
static av_cold void uninit(AVFilterContext *ctx)
{
- ShowSpectrumContext *showspectrum = ctx->priv;
+ ShowSpectrumContext *s = ctx->priv;
int i;
- av_freep(&showspectrum->combine_buffer);
- av_rdft_end(showspectrum->rdft);
- for (i = 0; i < showspectrum->nb_display_channels; i++)
- av_freep(&showspectrum->rdft_data[i]);
- av_freep(&showspectrum->rdft_data);
- av_freep(&showspectrum->window_func_lut);
- av_frame_free(&showspectrum->outpicref);
+ av_freep(&s->combine_buffer);
+ av_rdft_end(s->rdft);
+ for (i = 0; i < s->nb_display_channels; i++)
+ av_freep(&s->rdft_data[i]);
+ av_freep(&s->rdft_data);
+ av_freep(&s->window_func_lut);
+ av_frame_free(&s->outpicref);
}
static int query_formats(AVFilterContext *ctx)
@@ -149,64 +149,64 @@ static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AVFilterLink *inlink = ctx->inputs[0];
- ShowSpectrumContext *showspectrum = ctx->priv;
+ ShowSpectrumContext *s = ctx->priv;
int i, rdft_bits, win_size, h;
- outlink->w = showspectrum->w;
- outlink->h = showspectrum->h;
+ outlink->w = s->w;
+ outlink->h = s->h;
- h = (showspectrum->mode == COMBINED) ? outlink->h : outlink->h / inlink->channels;
- showspectrum->channel_height = h;
+ h = (s->mode == COMBINED) ? outlink->h : outlink->h / inlink->channels;
+ s->channel_height = h;
/* RDFT window size (precision) according to the requested output frame height */
for (rdft_bits = 1; 1 << rdft_bits < 2 * h; rdft_bits++);
win_size = 1 << rdft_bits;
/* (re-)configuration if the video output changed (or first init) */
- if (rdft_bits != showspectrum->rdft_bits) {
+ if (rdft_bits != s->rdft_bits) {
size_t rdft_size, rdft_listsize;
AVFrame *outpicref;
- av_rdft_end(showspectrum->rdft);
- showspectrum->rdft = av_rdft_init(rdft_bits, DFT_R2C);
- showspectrum->rdft_bits = rdft_bits;
+ av_rdft_end(s->rdft);
+ s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
+ s->rdft_bits = rdft_bits;
/* RDFT buffers: x2 for each (display) channel buffer.
* Note: we use free and malloc instead of a realloc-like function to
* make sure the buffer is aligned in memory for the FFT functions. */
- for (i = 0; i < showspectrum->nb_display_channels; i++)
- av_freep(&showspectrum->rdft_data[i]);
- av_freep(&showspectrum->rdft_data);
- showspectrum->nb_display_channels = inlink->channels;
+ for (i = 0; i < s->nb_display_channels; i++)
+ av_freep(&s->rdft_data[i]);
+ av_freep(&s->rdft_data);
+ s->nb_display_channels = inlink->channels;
- if (av_size_mult(sizeof(*showspectrum->rdft_data),
- showspectrum->nb_display_channels, &rdft_listsize) < 0)
+ if (av_size_mult(sizeof(*s->rdft_data),
+ s->nb_display_channels, &rdft_listsize) < 0)
return AVERROR(EINVAL);
- if (av_size_mult(sizeof(**showspectrum->rdft_data),
+ if (av_size_mult(sizeof(**s->rdft_data),
win_size, &rdft_size) < 0)
return AVERROR(EINVAL);
- showspectrum->rdft_data = av_malloc(rdft_listsize);
- if (!showspectrum->rdft_data)
+ s->rdft_data = av_malloc(rdft_listsize);
+ if (!s->rdft_data)
return AVERROR(ENOMEM);
- for (i = 0; i < showspectrum->nb_display_channels; i++) {
- showspectrum->rdft_data[i] = av_malloc(rdft_size);
- if (!showspectrum->rdft_data[i])
+ for (i = 0; i < s->nb_display_channels; i++) {
+ s->rdft_data[i] = av_malloc(rdft_size);
+ if (!s->rdft_data[i])
return AVERROR(ENOMEM);
}
- showspectrum->filled = 0;
+ s->filled = 0;
/* pre-calc windowing function (hann here) */
- showspectrum->window_func_lut =
- av_realloc_f(showspectrum->window_func_lut, win_size,
- sizeof(*showspectrum->window_func_lut));
- if (!showspectrum->window_func_lut)
+ s->window_func_lut =
+ av_realloc_f(s->window_func_lut, win_size,
+ sizeof(*s->window_func_lut));
+ if (!s->window_func_lut)
return AVERROR(ENOMEM);
for (i = 0; i < win_size; i++)
- showspectrum->window_func_lut[i] = .5f * (1 - cos(2*M_PI*i / (win_size-1)));
+ s->window_func_lut[i] = .5f * (1 - cos(2*M_PI*i / (win_size-1)));
/* prepare the initial picref buffer (black frame) */
- av_frame_free(&showspectrum->outpicref);
- showspectrum->outpicref = outpicref =
+ av_frame_free(&s->outpicref);
+ s->outpicref = outpicref =
ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!outpicref)
return AVERROR(ENOMEM);
@@ -218,43 +218,43 @@ static int config_output(AVFilterLink *outlink)
}
}
- if (showspectrum->xpos >= outlink->w)
- showspectrum->xpos = 0;
+ if (s->xpos >= outlink->w)
+ s->xpos = 0;
- showspectrum->combine_buffer =
- av_realloc_f(showspectrum->combine_buffer, outlink->h * 3,
- sizeof(*showspectrum->combine_buffer));
+ s->combine_buffer =
+ av_realloc_f(s->combine_buffer, outlink->h * 3,
+ sizeof(*s->combine_buffer));
av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d RDFT window size:%d\n",
- showspectrum->w, showspectrum->h, win_size);
+ s->w, s->h, win_size);
return 0;
}
inline static int push_frame(AVFilterLink *outlink)
{
- ShowSpectrumContext *showspectrum = outlink->src->priv;
+ ShowSpectrumContext *s = outlink->src->priv;
- showspectrum->xpos++;
- if (showspectrum->xpos >= outlink->w)
- showspectrum->xpos = 0;
- showspectrum->filled = 0;
- showspectrum->req_fullfilled = 1;
+ s->xpos++;
+ if (s->xpos >= outlink->w)
+ s->xpos = 0;
+ s->filled = 0;
+ s->req_fullfilled = 1;
- return ff_filter_frame(outlink, av_frame_clone(showspectrum->outpicref));
+ return ff_filter_frame(outlink, av_frame_clone(s->outpicref));
}
static int request_frame(AVFilterLink *outlink)
{
- ShowSpectrumContext *showspectrum = outlink->src->priv;
+ ShowSpectrumContext *s = outlink->src->priv;
AVFilterLink *inlink = outlink->src->inputs[0];
int ret;
- showspectrum->req_fullfilled = 0;
+ s->req_fullfilled = 0;
do {
ret = ff_request_frame(inlink);
- } while (!showspectrum->req_fullfilled && ret >= 0);
+ } while (!s->req_fullfilled && ret >= 0);
- if (ret == AVERROR_EOF && showspectrum->outpicref)
+ if (ret == AVERROR_EOF && s->outpicref)
push_frame(outlink);
return ret;
}
@@ -264,60 +264,60 @@ static int plot_spectrum_column(AVFilterLink *inlink, AVFrame *insamples, int nb
int ret;
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
- ShowSpectrumContext *showspectrum = ctx->priv;
- AVFrame *outpicref = showspectrum->outpicref;
+ ShowSpectrumContext *s = ctx->priv;
+ AVFrame *outpicref = s->outpicref;
/* nb_freq contains the power of two superior or equal to the output image
* height (or half the RDFT window size) */
- const int nb_freq = 1 << (showspectrum->rdft_bits - 1);
+ const int nb_freq = 1 << (s->rdft_bits - 1);
const int win_size = nb_freq << 1;
const double w = 1. / (sqrt(nb_freq) * 32768.);
int ch, plane, n, y;
- const int start = showspectrum->filled;
+ const int start = s->filled;
const int add_samples = FFMIN(win_size - start, nb_samples);
/* fill RDFT input with the number of samples available */
- for (ch = 0; ch < showspectrum->nb_display_channels; ch++) {
+ for (ch = 0; ch < s->nb_display_channels; ch++) {
const int16_t *p = (int16_t *)insamples->extended_data[ch];
- p += showspectrum->consumed;
+ p += s->consumed;
for (n = 0; n < add_samples; n++)
- showspectrum->rdft_data[ch][start + n] = p[n] * showspectrum->window_func_lut[start + n];
+ s->rdft_data[ch][start + n] = p[n] * s->window_func_lut[start + n];
}
- showspectrum->filled += add_samples;
+ s->filled += add_samples;
/* complete RDFT window size? */
- if (showspectrum->filled == win_size) {
+ if (s->filled == win_size) {
/* channel height */
- int h = showspectrum->channel_height;
+ int h = s->channel_height;
/* run RDFT on each samples set */
- for (ch = 0; ch < showspectrum->nb_display_channels; ch++)
- av_rdft_calc(showspectrum->rdft, showspectrum->rdft_data[ch]);
+ for (ch = 0; ch < s->nb_display_channels; ch++)
+ av_rdft_calc(s->rdft, s->rdft_data[ch]);
/* fill a new spectrum column */
-#define RE(y, ch) showspectrum->rdft_data[ch][2 * y + 0]
-#define IM(y, ch) showspectrum->rdft_data[ch][2 * y + 1]
+#define RE(y, ch) s->rdft_data[ch][2 * y + 0]
+#define IM(y, ch) s->rdft_data[ch][2 * y + 1]
#define MAGNITUDE(y, ch) hypot(RE(y, ch), IM(y, ch))
/* initialize buffer for combining to black */
for (y = 0; y < outlink->h; y++) {
- showspectrum->combine_buffer[3 * y ] = 0;
- showspectrum->combine_buffer[3 * y + 1] = 127.5;
- showspectrum->combine_buffer[3 * y + 2] = 127.5;
+ s->combine_buffer[3 * y ] = 0;
+ s->combine_buffer[3 * y + 1] = 127.5;
+ s->combine_buffer[3 * y + 2] = 127.5;
}
- for (ch = 0; ch < showspectrum->nb_display_channels; ch++) {
+ for (ch = 0; ch < s->nb_display_channels; ch++) {
float yf, uf, vf;
/* decide color range */
- switch (showspectrum->mode) {
+ switch (s->mode) {
case COMBINED:
// reduce range by channel count
- yf = 256.0f / showspectrum->nb_display_channels;
- switch (showspectrum->color_mode) {
+ yf = 256.0f / s->nb_display_channels;
+ switch (s->color_mode) {
case INTENSITY:
uf = yf;
vf = yf;
@@ -342,28 +342,28 @@ static int plot_spectrum_column(AVFilterLink *inlink, AVFrame *insamples, int nb
av_assert0(0);
}
- if (showspectrum->color_mode == CHANNEL) {
- if (showspectrum->nb_display_channels > 1) {
- uf *= 0.5 * sin((2 * M_PI * ch) / showspectrum->nb_display_channels);
- vf *= 0.5 * cos((2 * M_PI * ch) / showspectrum->nb_display_channels);
+ if (s->color_mode == CHANNEL) {
+ if (s->nb_display_channels > 1) {
+ uf *= 0.5 * sin((2 * M_PI * ch) / s->nb_display_channels);
+ vf *= 0.5 * cos((2 * M_PI * ch) / s->nb_display_channels);
} else {
uf = 0.0f;
vf = 0.0f;
}
}
- uf *= showspectrum->saturation;
- vf *= showspectrum->saturation;
+ uf *= s->saturation;
+ vf *= s->saturation;
/* draw the channel */
for (y = 0; y < h; y++) {
- int row = (showspectrum->mode == COMBINED) ? y : ch * h + y;
- float *out = &showspectrum->combine_buffer[3 * row];
+ int row = (s->mode == COMBINED) ? y : ch * h + y;
+ float *out = &s->combine_buffer[3 * row];
/* get magnitude */
float a = w * MAGNITUDE(y, ch);
/* apply scale */
- switch (showspectrum->scale) {
+ switch (s->scale) {
case LINEAR:
break;
case SQRT:
@@ -379,7 +379,7 @@ static int plot_spectrum_column(AVFilterLink *inlink, AVFrame *insamples, int nb
av_assert0(0);
}
- if (showspectrum->color_mode == INTENSITY) {
+ if (s->color_mode == INTENSITY) {
float y, u, v;
int i;
@@ -420,7 +420,7 @@ static int plot_spectrum_column(AVFilterLink *inlink, AVFrame *insamples, int nb
}
/* copy to output */
- if (showspectrum->sliding) {
+ if (s->sliding) {
for (plane = 0; plane < 3; plane++) {
for (y = 0; y < outlink->h; y++) {
uint8_t *p = outpicref->data[plane] +
@@ -428,20 +428,20 @@ static int plot_spectrum_column(AVFilterLink *inlink, AVFrame *insamples, int nb
memmove(p, p + 1, outlink->w - 1);
}
}
- showspectrum->xpos = outlink->w - 1;
+ s->xpos = outlink->w - 1;
}
for (plane = 0; plane < 3; plane++) {
uint8_t *p = outpicref->data[plane] +
(outlink->h - 1) * outpicref->linesize[plane] +
- showspectrum->xpos;
+ s->xpos;
for (y = 0; y < outlink->h; y++) {
- *p = rint(FFMAX(0, FFMIN(showspectrum->combine_buffer[3 * y + plane], 255)));
+ *p = rint(FFMAX(0, FFMIN(s->combine_buffer[3 * y + plane], 255)));
p -= outpicref->linesize[plane];
}
}
outpicref->pts = insamples->pts +
- av_rescale_q(showspectrum->consumed,
+ av_rescale_q(s->consumed,
(AVRational){ 1, inlink->sample_rate },
outlink->time_base);
ret = push_frame(outlink);
@@ -455,15 +455,15 @@ static int plot_spectrum_column(AVFilterLink *inlink, AVFrame *insamples, int nb
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
AVFilterContext *ctx = inlink->dst;
- ShowSpectrumContext *showspectrum = ctx->priv;
+ ShowSpectrumContext *s = ctx->priv;
int ret = 0, left_samples = insamples->nb_samples;
- showspectrum->consumed = 0;
+ s->consumed = 0;
while (left_samples) {
int ret = plot_spectrum_column(inlink, insamples, left_samples);
if (ret < 0)
break;
- showspectrum->consumed += ret;
+ s->consumed += ret;
left_samples -= ret;
}
diff --git a/libavfilter/dualinput.c b/libavfilter/dualinput.c
index 4dc1708527..97e15cbe01 100644
--- a/libavfilter/dualinput.c
+++ b/libavfilter/dualinput.c
@@ -66,18 +66,12 @@ int ff_dualinput_init(AVFilterContext *ctx, FFDualInputContext *s)
return ff_framesync_configure(&s->fs);
}
-int ff_dualinput_filter_frame_main(FFDualInputContext *s,
+int ff_dualinput_filter_frame(FFDualInputContext *s,
AVFilterLink *inlink, AVFrame *in)
{
return ff_framesync_filter_frame(&s->fs, inlink, in);
}
-int ff_dualinput_filter_frame_second(FFDualInputContext *s,
- AVFilterLink *inlink, AVFrame *in)
-{
- return ff_framesync_filter_frame(&s->fs, inlink, in);
-}
-
int ff_dualinput_request_frame(FFDualInputContext *s, AVFilterLink *outlink)
{
return ff_framesync_request_frame(&s->fs, outlink);
diff --git a/libavfilter/dualinput.h b/libavfilter/dualinput.h
index c22066f36e..0ec0ea7350 100644
--- a/libavfilter/dualinput.h
+++ b/libavfilter/dualinput.h
@@ -39,8 +39,7 @@ typedef struct {
} FFDualInputContext;
int ff_dualinput_init(AVFilterContext *ctx, FFDualInputContext *s);
-int ff_dualinput_filter_frame_main(FFDualInputContext *s, AVFilterLink *inlink, AVFrame *in);
-int ff_dualinput_filter_frame_second(FFDualInputContext *s, AVFilterLink *inlink, AVFrame *in);
+int ff_dualinput_filter_frame(FFDualInputContext *s, AVFilterLink *inlink, AVFrame *in);
int ff_dualinput_request_frame(FFDualInputContext *s, AVFilterLink *outlink);
void ff_dualinput_uninit(FFDualInputContext *s);
diff --git a/libavfilter/vf_blend.c b/libavfilter/vf_blend.c
index 3bc8eec26e..a5c8e9fb28 100644
--- a/libavfilter/vf_blend.c
+++ b/libavfilter/vf_blend.c
@@ -422,27 +422,21 @@ static int request_frame(AVFilterLink *outlink)
return ff_dualinput_request_frame(&b->dinput, outlink);
}
-static int filter_frame_top(AVFilterLink *inlink, AVFrame *buf)
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
BlendContext *b = inlink->dst->priv;
- return ff_dualinput_filter_frame_main(&b->dinput, inlink, buf);
-}
-
-static int filter_frame_bottom(AVFilterLink *inlink, AVFrame *buf)
-{
- BlendContext *b = inlink->dst->priv;
- return ff_dualinput_filter_frame_second(&b->dinput, inlink, buf);
+ return ff_dualinput_filter_frame(&b->dinput, inlink, buf);
}
static const AVFilterPad blend_inputs[] = {
{
.name = "top",
.type = AVMEDIA_TYPE_VIDEO,
- .filter_frame = filter_frame_top,
+ .filter_frame = filter_frame,
},{
.name = "bottom",
.type = AVMEDIA_TYPE_VIDEO,
- .filter_frame = filter_frame_bottom,
+ .filter_frame = filter_frame,
},
{ NULL }
};
diff --git a/libavfilter/vf_hflip.c b/libavfilter/vf_hflip.c
index ec94e35063..561879f306 100644
--- a/libavfilter/vf_hflip.c
+++ b/libavfilter/vf_hflip.c
@@ -37,7 +37,8 @@
typedef struct {
int max_step[4]; ///< max pixel step for each plane, expressed as a number of bytes
- int hsub, vsub; ///< chroma subsampling
+ int planewidth[4]; ///< width of each plane
+ int planeheight[4]; ///< height of each plane
} FlipContext;
static int query_formats(AVFilterContext *ctx)
@@ -62,42 +63,42 @@ static int config_props(AVFilterLink *inlink)
{
FlipContext *s = inlink->dst->priv;
const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
+ const int hsub = pix_desc->log2_chroma_w;
+ const int vsub = pix_desc->log2_chroma_h;
av_image_fill_max_pixsteps(s->max_step, NULL, pix_desc);
- s->hsub = pix_desc->log2_chroma_w;
- s->vsub = pix_desc->log2_chroma_h;
+ s->planewidth[0] = s->planewidth[3] = inlink->w;
+ s->planewidth[1] = s->planewidth[2] = FF_CEIL_RSHIFT(inlink->w, hsub);
+ s->planeheight[0] = s->planeheight[3] = inlink->h;
+ s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, vsub);
return 0;
}
-static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+typedef struct ThreadData {
+ AVFrame *in, *out;
+} ThreadData;
+
+static int filter_slice(AVFilterContext *ctx, void *arg, int job, int nb_jobs)
{
- AVFilterContext *ctx = inlink->dst;
- FlipContext *s = ctx->priv;
- AVFilterLink *outlink = ctx->outputs[0];
- AVFrame *out;
+ FlipContext *s = ctx->priv;
+ ThreadData *td = arg;
+ AVFrame *in = td->in;
+ AVFrame *out = td->out;
uint8_t *inrow, *outrow;
int i, j, plane, step;
- out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
- if (!out) {
- av_frame_free(&in);
- return AVERROR(ENOMEM);
- }
- av_frame_copy_props(out, in);
-
- /* copy palette if required */
- if (av_pix_fmt_desc_get(inlink->format)->flags & AV_PIX_FMT_FLAG_PAL)
- memcpy(out->data[1], in->data[1], AVPALETTE_SIZE);
-
for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
- const int width = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(inlink->w, s->hsub) : inlink->w;
- const int height = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(inlink->h, s->vsub) : inlink->h;
+ const int width = s->planewidth[plane];
+ const int height = s->planeheight[plane];
+ const int start = (height * job ) / nb_jobs;
+ const int end = (height * (job+1)) / nb_jobs;
+
step = s->max_step[plane];
- outrow = out->data[plane];
- inrow = in ->data[plane] + (width - 1) * step;
- for (i = 0; i < height; i++) {
+ outrow = out->data[plane] + start * out->linesize[plane];
+ inrow = in ->data[plane] + start * in->linesize[plane] + (width - 1) * step;
+ for (i = start; i < end; i++) {
switch (step) {
case 1:
for (j = 0; j < width; j++)
@@ -143,6 +144,30 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
}
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ ThreadData td;
+ AVFrame *out;
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+
+ /* copy palette if required */
+ if (av_pix_fmt_desc_get(inlink->format)->flags & AV_PIX_FMT_FLAG_PAL)
+ memcpy(out->data[1], in->data[1], AVPALETTE_SIZE);
+
+ td.in = in, td.out = out;
+ ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outlink->h, ctx->graph->nb_threads));
+
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
@@ -172,4 +197,5 @@ AVFilter avfilter_vf_hflip = {
.query_formats = query_formats,
.inputs = avfilter_vf_hflip_inputs,
.outputs = avfilter_vf_hflip_outputs,
+ .flags = AVFILTER_FLAG_SLICE_THREADS,
};
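The new slice-threaded hflip splits each plane's rows across jobs with start = (height * job) / nb_jobs and end = (height * (job + 1)) / nb_jobs, which partitions the rows exactly even when the height is not divisible by the job count. A quick standalone check of that property:

#include <stdio.h>

int main(void)
{
    /* Hypothetical plane height and thread count. */
    const int height = 37, nb_jobs = 4;
    int covered = 0, job;

    for (job = 0; job < nb_jobs; job++) {
        int start = (height * job)       / nb_jobs;
        int end   = (height * (job + 1)) / nb_jobs;
        printf("job %d: rows [%d, %d)\n", job, start, end);
        covered += end - start;
    }
    printf("rows covered: %d of %d\n", covered, height);  /* 37 of 37 */
    return 0;
}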
diff --git a/libavfilter/vf_histogram.c b/libavfilter/vf_histogram.c
index db52199357..273d03b7e1 100644
--- a/libavfilter/vf_histogram.c
+++ b/libavfilter/vf_histogram.c
@@ -48,6 +48,7 @@ typedef struct HistogramContext {
int waveform_mode;
int display_mode;
int levels_mode;
+ const AVPixFmtDescriptor *desc;
} HistogramContext;
#define OFFSET(x) offsetof(HistogramContext, x)
@@ -86,6 +87,18 @@ static const enum AVPixelFormat levels_pix_fmts[] = {
AV_PIX_FMT_GRAY8, AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_NONE
};
+static const enum AVPixelFormat waveform_pix_fmts[] = {
+ AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
+ AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_NONE
+};
+
static int query_formats(AVFilterContext *ctx)
{
HistogramContext *h = ctx->priv;
@@ -93,6 +106,8 @@ static int query_formats(AVFilterContext *ctx)
switch (h->mode) {
case MODE_WAVEFORM:
+ pix_fmts = waveform_pix_fmts;
+ break;
case MODE_LEVELS:
pix_fmts = levels_pix_fmts;
break;
@@ -117,9 +132,9 @@ static const uint8_t white_gbrp_color[4] = { 255, 255, 255, 255 };
static int config_input(AVFilterLink *inlink)
{
HistogramContext *h = inlink->dst->priv;
- const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
- h->ncomp = desc->nb_components;
+ h->desc = av_pix_fmt_desc_get(inlink->format);
+ h->ncomp = h->desc->nb_components;
switch (inlink->format) {
case AV_PIX_FMT_GBRAP:
@@ -164,6 +179,44 @@ static int config_output(AVFilterLink *outlink)
return 0;
}
+static void gen_waveform(HistogramContext *h, AVFrame *inpicref, AVFrame *outpicref,
+ int component, int intensity, int offset, int col_mode)
+{
+ const int plane = h->desc->comp[component].plane;
+ const int src_linesize = inpicref->linesize[plane];
+ const int dst_linesize = outpicref->linesize[plane];
+ uint8_t *src_data = inpicref->data[plane];
+ uint8_t *dst_data = outpicref->data[plane] + (col_mode ? offset * dst_linesize : offset);
+ uint8_t * const dst_line = dst_data;
+ const uint8_t max = 255 - intensity;
+ const int is_chroma = (component == 1 || component == 2);
+ const int shift_w = (is_chroma ? h->desc->log2_chroma_w : 0);
+ const int shift_h = (is_chroma ? h->desc->log2_chroma_h : 0);
+ const int src_h = FF_CEIL_RSHIFT(inpicref->height, shift_h);
+ const int src_w = FF_CEIL_RSHIFT(inpicref->width, shift_w);
+ uint8_t *dst, *p;
+ int y;
+
+ for (y = 0; y < src_h; y++) {
+ const uint8_t *src_data_end = src_data + src_w;
+ dst = dst_line;
+ for (p = src_data; p < src_data_end; p++) {
+ uint8_t *target;
+ if (col_mode)
+ target = dst++ + dst_linesize * (*p >> shift_h);
+ else
+ target = dst_data + (*p >> shift_w);
+ if (*target <= max)
+ *target += intensity;
+ else
+ *target = 255;
+ }
+ src_data += src_linesize;
+ dst_data += dst_linesize;
+ }
+}
+
+
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
HistogramContext *h = inlink->dst->priv;
@@ -182,19 +235,26 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
out->pts = in->pts;
- for (k = 0; k < h->ncomp; k++)
- for (i = 0; i < outlink->h; i++)
- memset(out->data[k] + i * out->linesize[k], h->bg_color[k], outlink->w);
+ for (k = 0; k < h->ncomp; k++) {
+ int is_chroma = (k == 1 || k == 2);
+ int dst_h = FF_CEIL_RSHIFT(outlink->h, (is_chroma ? h->desc->log2_chroma_h : 0));
+ int dst_w = FF_CEIL_RSHIFT(outlink->w, (is_chroma ? h->desc->log2_chroma_w : 0));
+ for (i = 0; i < dst_h ; i++)
+ memset(out->data[h->desc->comp[k].plane] +
+ i * out->linesize[h->desc->comp[k].plane],
+ h->bg_color[k], dst_w);
+ }
switch (h->mode) {
case MODE_LEVELS:
for (k = 0; k < h->ncomp; k++) {
+ const int p = h->desc->comp[k].plane;
int start = k * (h->level_height + h->scale_height) * h->display_mode;
double max_hval_log;
unsigned max_hval = 0;
for (i = 0; i < in->height; i++) {
- src = in->data[k] + i * in->linesize[k];
+ src = in->data[p] + i * in->linesize[p];
for (j = 0; j < in->width; j++)
h->histogram[src[j]]++;
}
@@ -216,45 +276,20 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
for (l = 0; l < h->ncomp; l++)
out->data[l][(j + start) * out->linesize[l] + i] = h->fg_color[l];
} else {
- out->data[k][(j + start) * out->linesize[k] + i] = 255;
+ out->data[p][(j + start) * out->linesize[p] + i] = 255;
}
}
for (j = h->level_height + h->scale_height - 1; j >= h->level_height; j--)
- out->data[k][(j + start) * out->linesize[k] + i] = i;
+ out->data[p][(j + start) * out->linesize[p] + i] = i;
}
memset(h->histogram, 0, 256 * sizeof(unsigned));
}
break;
case MODE_WAVEFORM:
- if (h->waveform_mode) {
- for (k = 0; k < h->ncomp; k++) {
- int offset = k * 256 * h->display_mode;
- for (i = 0; i < inlink->w; i++) {
- for (j = 0; j < inlink->h; j++) {
- int pos = (offset +
- in->data[k][j * in->linesize[k] + i]) *
- out->linesize[k] + i;
- unsigned value = out->data[k][pos];
- value = FFMIN(value + h->step, 255);
- out->data[k][pos] = value;
- }
- }
- }
- } else {
- for (k = 0; k < h->ncomp; k++) {
- int offset = k * 256 * h->display_mode;
- for (i = 0; i < inlink->h; i++) {
- src = in ->data[k] + i * in ->linesize[k];
- dst = out->data[k] + i * out->linesize[k];
- for (j = 0; j < inlink->w; j++) {
- int pos = src[j] + offset;
- unsigned value = dst[pos];
- value = FFMIN(value + h->step, 255);
- dst[pos] = value;
- }
- }
- }
+ for (k = 0; k < h->ncomp; k++) {
+ int offset = k * 256 * h->display_mode;
+ gen_waveform(h, in, out, k, h->step, offset, h->waveform_mode);
}
break;
case MODE_COLOR:
diff --git a/libavfilter/vf_interlace.c b/libavfilter/vf_interlace.c
index bcb5419154..7b45595ff5 100644
--- a/libavfilter/vf_interlace.c
+++ b/libavfilter/vf_interlace.c
@@ -180,6 +180,15 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
if (!s->cur || !s->next)
return 0;
+ if (s->cur->interlaced_frame) {
+ av_log(ctx, AV_LOG_WARNING,
+ "video is already interlaced, adjusting framerate only\n");
+ out = av_frame_clone(s->cur);
+ out->pts /= 2; // adjust pts to new framerate
+ ret = ff_filter_frame(outlink, out);
+ return ret;
+ }
+
tff = (s->scan == MODE_TFF);
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out)
diff --git a/libavfilter/vf_lut3d.c b/libavfilter/vf_lut3d.c
index 3c1b482d4e..cc15f20c13 100644
--- a/libavfilter/vf_lut3d.c
+++ b/libavfilter/vf_lut3d.c
@@ -667,16 +667,10 @@ static int config_output(AVFilterLink *outlink)
return 0;
}
-static int filter_frame_main(AVFilterLink *inlink, AVFrame *inpicref)
+static int filter_frame_hald(AVFilterLink *inlink, AVFrame *inpicref)
{
LUT3DContext *s = inlink->dst->priv;
- return ff_dualinput_filter_frame_main(&s->dinput, inlink, inpicref);
-}
-
-static int filter_frame_clut(AVFilterLink *inlink, AVFrame *inpicref)
-{
- LUT3DContext *s = inlink->dst->priv;
- return ff_dualinput_filter_frame_second(&s->dinput, inlink, inpicref);
+ return ff_dualinput_filter_frame(&s->dinput, inlink, inpicref);
}
static int request_frame(AVFilterLink *outlink)
@@ -766,12 +760,12 @@ static const AVFilterPad haldclut_inputs[] = {
{
.name = "main",
.type = AVMEDIA_TYPE_VIDEO,
- .filter_frame = filter_frame_main,
+ .filter_frame = filter_frame_hald,
.config_props = config_input,
},{
.name = "clut",
.type = AVMEDIA_TYPE_VIDEO,
- .filter_frame = filter_frame_clut,
+ .filter_frame = filter_frame_hald,
.config_props = config_clut,
},
{ NULL }
diff --git a/libavfilter/vf_overlay.c b/libavfilter/vf_overlay.c
index c3f1c2edb2..b5ade85401 100644
--- a/libavfilter/vf_overlay.c
+++ b/libavfilter/vf_overlay.c
@@ -549,16 +549,10 @@ static AVFrame *do_blend(AVFilterContext *ctx, AVFrame *mainpic,
return mainpic;
}
-static int filter_frame_main(AVFilterLink *inlink, AVFrame *inpicref)
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
OverlayContext *s = inlink->dst->priv;
- return ff_dualinput_filter_frame_main(&s->dinput, inlink, inpicref);
-}
-
-static int filter_frame_over(AVFilterLink *inlink, AVFrame *inpicref)
-{
- OverlayContext *s = inlink->dst->priv;
- return ff_dualinput_filter_frame_second(&s->dinput, inlink, inpicref);
+ return ff_dualinput_filter_frame(&s->dinput, inlink, inpicref);
}
static int request_frame(AVFilterLink *outlink)
@@ -606,14 +600,14 @@ static const AVFilterPad avfilter_vf_overlay_inputs[] = {
.name = "main",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input_main,
- .filter_frame = filter_frame_main,
+ .filter_frame = filter_frame,
.needs_writable = 1,
},
{
.name = "overlay",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input_overlay,
- .filter_frame = filter_frame_over,
+ .filter_frame = filter_frame,
},
{ NULL }
};
diff --git a/libavfilter/vf_psnr.c b/libavfilter/vf_psnr.c
index c4af4ac7c6..353f5358d0 100644
--- a/libavfilter/vf_psnr.c
+++ b/libavfilter/vf_psnr.c
@@ -320,16 +320,10 @@ static int config_output(AVFilterLink *outlink)
return 0;
}
-static int filter_frame_main(AVFilterLink *inlink, AVFrame *inpicref)
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
PSNRContext *s = inlink->dst->priv;
- return ff_dualinput_filter_frame_main(&s->dinput, inlink, inpicref);
-}
-
-static int filter_frame_ref(AVFilterLink *inlink, AVFrame *inpicref)
-{
- PSNRContext *s = inlink->dst->priv;
- return ff_dualinput_filter_frame_second(&s->dinput, inlink, inpicref);
+ return ff_dualinput_filter_frame(&s->dinput, inlink, inpicref);
}
static int request_frame(AVFilterLink *outlink)
@@ -359,11 +353,11 @@ static const AVFilterPad psnr_inputs[] = {
{
.name = "main",
.type = AVMEDIA_TYPE_VIDEO,
- .filter_frame = filter_frame_main,
+ .filter_frame = filter_frame,
},{
.name = "reference",
.type = AVMEDIA_TYPE_VIDEO,
- .filter_frame = filter_frame_ref,
+ .filter_frame = filter_frame,
.config_props = config_input_ref,
},
{ NULL }
diff --git a/libavfilter/vf_removelogo.c b/libavfilter/vf_removelogo.c
index e586486ee1..2044739797 100644
--- a/libavfilter/vf_removelogo.c
+++ b/libavfilter/vf_removelogo.c
@@ -279,44 +279,44 @@ static void generate_half_size_image(const uint8_t *src_data, int src_linesize,
static av_cold int init(AVFilterContext *ctx)
{
- RemovelogoContext *removelogo = ctx->priv;
+ RemovelogoContext *s = ctx->priv;
int ***mask;
int ret = 0;
int a, b, c, w, h;
int full_max_mask_size, half_max_mask_size;
- if (!removelogo->filename) {
+ if (!s->filename) {
av_log(ctx, AV_LOG_ERROR, "The bitmap file name is mandatory\n");
return AVERROR(EINVAL);
}
/* Load our mask image. */
- if ((ret = load_mask(&removelogo->full_mask_data, &w, &h, removelogo->filename, ctx)) < 0)
+ if ((ret = load_mask(&s->full_mask_data, &w, &h, s->filename, ctx)) < 0)
return ret;
- removelogo->mask_w = w;
- removelogo->mask_h = h;
+ s->mask_w = w;
+ s->mask_h = h;
- convert_mask_to_strength_mask(removelogo->full_mask_data, w, w, h,
+ convert_mask_to_strength_mask(s->full_mask_data, w, w, h,
16, &full_max_mask_size);
/* Create the scaled down mask image for the chroma planes. */
- if (!(removelogo->half_mask_data = av_mallocz(w/2 * h/2)))
+ if (!(s->half_mask_data = av_mallocz(w/2 * h/2)))
return AVERROR(ENOMEM);
- generate_half_size_image(removelogo->full_mask_data, w,
- removelogo->half_mask_data, w/2,
+ generate_half_size_image(s->full_mask_data, w,
+ s->half_mask_data, w/2,
w, h, &half_max_mask_size);
- removelogo->max_mask_size = FFMAX(full_max_mask_size, half_max_mask_size);
+ s->max_mask_size = FFMAX(full_max_mask_size, half_max_mask_size);
/* Create a circular mask for each size up to max_mask_size. When
the filter is applied, the mask size is determined on a pixel
by pixel basis, with pixels nearer the edge of the logo getting
smaller mask sizes. */
- mask = (int ***)av_malloc(sizeof(int **) * (removelogo->max_mask_size + 1));
+ mask = (int ***)av_malloc(sizeof(int **) * (s->max_mask_size + 1));
if (!mask)
return AVERROR(ENOMEM);
- for (a = 0; a <= removelogo->max_mask_size; a++) {
+ for (a = 0; a <= s->max_mask_size; a++) {
mask[a] = (int **)av_malloc(sizeof(int *) * ((a * 2) + 1));
if (!mask[a])
return AVERROR(ENOMEM);
@@ -332,17 +332,17 @@ static av_cold int init(AVFilterContext *ctx)
}
}
}
- removelogo->mask = mask;
+ s->mask = mask;
/* Calculate our bounding rectangles, which determine in what
* region the logo resides for faster processing. */
- ff_calculate_bounding_box(&removelogo->full_mask_bbox, removelogo->full_mask_data, w, w, h, 0);
- ff_calculate_bounding_box(&removelogo->half_mask_bbox, removelogo->half_mask_data, w/2, w/2, h/2, 0);
+ ff_calculate_bounding_box(&s->full_mask_bbox, s->full_mask_data, w, w, h, 0);
+ ff_calculate_bounding_box(&s->half_mask_bbox, s->half_mask_data, w/2, w/2, h/2, 0);
#define SHOW_LOGO_INFO(mask_type) \
av_log(ctx, AV_LOG_VERBOSE, #mask_type " x1:%d x2:%d y1:%d y2:%d max_mask_size:%d\n", \
- removelogo->mask_type##_mask_bbox.x1, removelogo->mask_type##_mask_bbox.x2, \
- removelogo->mask_type##_mask_bbox.y1, removelogo->mask_type##_mask_bbox.y2, \
+ s->mask_type##_mask_bbox.x1, s->mask_type##_mask_bbox.x2, \
+ s->mask_type##_mask_bbox.y1, s->mask_type##_mask_bbox.y2, \
mask_type##_max_mask_size);
SHOW_LOGO_INFO(full);
SHOW_LOGO_INFO(half);
@@ -353,12 +353,12 @@ static av_cold int init(AVFilterContext *ctx)
static int config_props_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
- RemovelogoContext *removelogo = ctx->priv;
+ RemovelogoContext *s = ctx->priv;
- if (inlink->w != removelogo->mask_w || inlink->h != removelogo->mask_h) {
+ if (inlink->w != s->mask_w || inlink->h != s->mask_h) {
av_log(ctx, AV_LOG_INFO,
"Mask image size %dx%d does not match with the input video size %dx%d\n",
- removelogo->mask_w, removelogo->mask_h, inlink->w, inlink->h);
+ s->mask_w, s->mask_h, inlink->w, inlink->h);
return AVERROR(EINVAL);
}
@@ -488,7 +488,7 @@ static void blur_image(int ***mask,
static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
- RemovelogoContext *removelogo = inlink->dst->priv;
+ RemovelogoContext *s = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *outpicref;
int direct = 0;
@@ -505,21 +505,21 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
av_frame_copy_props(outpicref, inpicref);
}
- blur_image(removelogo->mask,
+ blur_image(s->mask,
inpicref ->data[0], inpicref ->linesize[0],
outpicref->data[0], outpicref->linesize[0],
- removelogo->full_mask_data, inlink->w,
- inlink->w, inlink->h, direct, &removelogo->full_mask_bbox);
- blur_image(removelogo->mask,
+ s->full_mask_data, inlink->w,
+ inlink->w, inlink->h, direct, &s->full_mask_bbox);
+ blur_image(s->mask,
inpicref ->data[1], inpicref ->linesize[1],
outpicref->data[1], outpicref->linesize[1],
- removelogo->half_mask_data, inlink->w/2,
- inlink->w/2, inlink->h/2, direct, &removelogo->half_mask_bbox);
- blur_image(removelogo->mask,
+ s->half_mask_data, inlink->w/2,
+ inlink->w/2, inlink->h/2, direct, &s->half_mask_bbox);
+ blur_image(s->mask,
inpicref ->data[2], inpicref ->linesize[2],
outpicref->data[2], outpicref->linesize[2],
- removelogo->half_mask_data, inlink->w/2,
- inlink->w/2, inlink->h/2, direct, &removelogo->half_mask_bbox);
+ s->half_mask_data, inlink->w/2,
+ inlink->w/2, inlink->h/2, direct, &s->half_mask_bbox);
if (!direct)
av_frame_free(&inpicref);
@@ -529,23 +529,23 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
static av_cold void uninit(AVFilterContext *ctx)
{
- RemovelogoContext *removelogo = ctx->priv;
+ RemovelogoContext *s = ctx->priv;
int a, b;
- av_freep(&removelogo->full_mask_data);
- av_freep(&removelogo->half_mask_data);
+ av_freep(&s->full_mask_data);
+ av_freep(&s->half_mask_data);
- if (removelogo->mask) {
+ if (s->mask) {
/* Loop through each mask. */
- for (a = 0; a <= removelogo->max_mask_size; a++) {
+ for (a = 0; a <= s->max_mask_size; a++) {
/* Loop through each scanline in a mask. */
for (b = -a; b <= a; b++) {
- av_free(removelogo->mask[a][b + a]); /* Free a scanline. */
+ av_free(s->mask[a][b + a]); /* Free a scanline. */
}
- av_free(removelogo->mask[a]);
+ av_free(s->mask[a]);
}
/* Free the array of pointers pointing to the masks. */
- av_freep(&removelogo->mask);
+ av_freep(&s->mask);
}
}
diff --git a/libavfilter/vf_rotate.c b/libavfilter/vf_rotate.c
index d13f678dc3..639100b763 100644
--- a/libavfilter/vf_rotate.c
+++ b/libavfilter/vf_rotate.c
@@ -77,6 +77,16 @@ typedef struct {
FFDrawColor color;
} RotContext;
+typedef struct ThreadData {
+ AVFrame *in, *out;
+ int inw, inh;
+ int outw, outh;
+ int plane;
+ int xi, yi;
+ int xprime, yprime;
+ int c, s;
+} ThreadData;
+
#define OFFSET(x) offsetof(RotContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
@@ -299,6 +309,76 @@ static uint8_t *interpolate_bilinear(uint8_t *dst_color,
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)*av_q2d(tb))
+static int filter_slice(AVFilterContext *ctx, void *arg, int job, int nb_jobs)
+{
+ ThreadData *td = arg;
+ AVFrame *in = td->in;
+ AVFrame *out = td->out;
+ RotContext *rot = ctx->priv;
+ const int outw = td->outw, outh = td->outh;
+ const int inw = td->inw, inh = td->inh;
+ const int plane = td->plane;
+ const int xi = td->xi, yi = td->yi;
+ const int c = td->c, s = td->s;
+ const int start = (outh * job ) / nb_jobs;
+ const int end = (outh * (job+1)) / nb_jobs;
+ int xprime = td->xprime + start * s;
+ int yprime = td->yprime + start * c;
+ int i, j, x, y;
+
+ for (j = start; j < end; j++) {
+ x = xprime + xi + FIXP*inw/2;
+ y = yprime + yi + FIXP*inh/2;
+
+ for (i = 0; i < outw; i++) {
+ int32_t v;
+ int x1, y1;
+ uint8_t *pin, *pout;
+ x += c;
+ y -= s;
+ x1 = x>>16;
+ y1 = y>>16;
+
+ /* the out-of-range values avoid border artifacts */
+ if (x1 >= -1 && x1 <= inw && y1 >= -1 && y1 <= inh) {
+ uint8_t inp_inv[4]; /* interpolated input value */
+ pout = out->data[plane] + j * out->linesize[plane] + i * rot->draw.pixelstep[plane];
+ if (rot->use_bilinear) {
+ pin = interpolate_bilinear(inp_inv,
+ in->data[plane], in->linesize[plane], rot->draw.pixelstep[plane],
+ x, y, inw-1, inh-1);
+ } else {
+ int x2 = av_clip(x1, 0, inw-1);
+ int y2 = av_clip(y1, 0, inh-1);
+ pin = in->data[plane] + y2 * in->linesize[plane] + x2 * rot->draw.pixelstep[plane];
+ }
+ switch (rot->draw.pixelstep[plane]) {
+ case 1:
+ *pout = *pin;
+ break;
+ case 2:
+ *((uint16_t *)pout) = *((uint16_t *)pin);
+ break;
+ case 3:
+ v = AV_RB24(pin);
+ AV_WB24(pout, v);
+ break;
+ case 4:
+ *((uint32_t *)pout) = *((uint32_t *)pin);
+ break;
+ default:
+ memcpy(pout, pin, rot->draw.pixelstep[plane]);
+ break;
+ }
+ }
+ }
+ xprime += s;
+ yprime += c;
+ }
+
+ return 0;
+}
+
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
@@ -334,66 +414,19 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
for (plane = 0; plane < rot->nb_planes; plane++) {
int hsub = plane == 1 || plane == 2 ? rot->hsub : 0;
int vsub = plane == 1 || plane == 2 ? rot->vsub : 0;
- int inw = FF_CEIL_RSHIFT(inlink->w, hsub);
- int inh = FF_CEIL_RSHIFT(inlink->h, vsub);
- int outw = FF_CEIL_RSHIFT(outlink->w, hsub);
- int outh = FF_CEIL_RSHIFT(outlink->h, hsub);
-
- const int xi = -outw/2 * c;
- const int yi = outw/2 * s;
- int xprime = -outh/2 * s;
- int yprime = -outh/2 * c;
- int i, j, x, y;
-
- for (j = 0; j < outh; j++) {
- x = xprime + xi + FIXP*inw/2;
- y = yprime + yi + FIXP*inh/2;
-
- for (i = 0; i < outw; i++) {
- int32_t v;
- int x1, y1;
- uint8_t *pin, *pout;
- x += c;
- y -= s;
- x1 = x>>16;
- y1 = y>>16;
-
- /* the out-of-range values avoid border artifacts */
- if (x1 >= -1 && x1 <= inw && y1 >= -1 && y1 <= inh) {
- uint8_t inp_inv[4]; /* interpolated input value */
- pout = out->data[plane] + j * out->linesize[plane] + i * rot->draw.pixelstep[plane];
- if (rot->use_bilinear) {
- pin = interpolate_bilinear(inp_inv,
- in->data[plane], in->linesize[plane], rot->draw.pixelstep[plane],
- x, y, inw-1, inh-1);
- } else {
- int x2 = av_clip(x1, 0, inw-1);
- int y2 = av_clip(y1, 0, inh-1);
- pin = in->data[plane] + y2 * in->linesize[plane] + x2 * rot->draw.pixelstep[plane];
- }
- switch (rot->draw.pixelstep[plane]) {
- case 1:
- *pout = *pin;
- break;
- case 2:
- *((uint16_t *)pout) = *((uint16_t *)pin);
- break;
- case 3:
- v = AV_RB24(pin);
- AV_WB24(pout, v);
- break;
- case 4:
- *((uint32_t *)pout) = *((uint32_t *)pin);
- break;
- default:
- memcpy(pout, pin, rot->draw.pixelstep[plane]);
- break;
- }
- }
- }
- xprime += s;
- yprime += c;
- }
+ const int outw = FF_CEIL_RSHIFT(outlink->w, hsub);
+ const int outh = FF_CEIL_RSHIFT(outlink->h, vsub);
+ ThreadData td = { .in = in, .out = out,
+ .inw = FF_CEIL_RSHIFT(inlink->w, hsub),
+ .inh = FF_CEIL_RSHIFT(inlink->h, vsub),
+ .outh = outh, .outw = outw,
+ .xi = -outw/2 * c, .yi = outw/2 * s,
+ .xprime = -outh/2 * s,
+ .yprime = -outh/2 * c,
+ .plane = plane, .c = c, .s = s };
+
+
+ ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outh, ctx->graph->nb_threads));
}
av_frame_free(&in);
@@ -452,5 +485,5 @@ AVFilter avfilter_vf_rotate = {
.inputs = rotate_inputs,
.outputs = rotate_outputs,
.priv_class = &rotate_class,
- .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
};
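Like hflip, rotate now fans rows out across slice jobs; the inner loop walks the output in 16.16 fixed point, stepping the source x by c and y by -s per output column, where c and s are the cosine and sine of the angle scaled by FIXP. A simplified sketch of that mapping for a single output row, assuming FIXP is 1 << 16 and ignoring the per-row xprime/yprime offsets used in the filter:

#include <stdio.h>
#include <math.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

#define FIXP (1 << 16)  /* 16.16 fixed point, assumed from vf_rotate.c */

int main(void)
{
    const double angle = M_PI / 6;  /* hypothetical 30 degree rotation */
    const int c = (int)lrint(cos(angle) * FIXP);
    const int s = (int)lrint(sin(angle) * FIXP);
    const int inw = 64, inh = 64, outw = 8;
    int i;

    /* starting source coordinates for output row 0 (per-row offsets omitted) */
    int x = -outw / 2 * c + FIXP * inw / 2;
    int y =  outw / 2 * s + FIXP * inh / 2;

    for (i = 0; i < outw; i++) {
        x += c;        /* one output column to the right ... */
        y -= s;        /* ... moves along the rotated source row */
        printf("out col %d -> src (%d, %d)\n", i, x >> 16, y >> 16);
    }
    return 0;
}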
diff --git a/libavformat/amr.c b/libavformat/amr.c
index 7201ac3688..db9bb4e59d 100644
--- a/libavformat/amr.c
+++ b/libavformat/amr.c
@@ -132,8 +132,6 @@ static int amr_read_packet(AVFormatContext *s, AVPacket *pkt)
};
size = packed_size[mode];
- } else {
- av_assert0(0);
}
if (!size || av_new_packet(pkt, size))
diff --git a/libavformat/asfdec.c b/libavformat/asfdec.c
index 72865c1441..528bcbd5b1 100644
--- a/libavformat/asfdec.c
+++ b/libavformat/asfdec.c
@@ -754,7 +754,9 @@ static int asf_read_header(AVFormatContext *s)
if (ret < 0)
return ret;
} else if (!ff_guidcmp(&g, &ff_asf_stream_header)) {
- asf_read_stream_properties(s, gsize);
+ int ret = asf_read_stream_properties(s, gsize);
+ if (ret < 0)
+ return ret;
} else if (!ff_guidcmp(&g, &ff_asf_comment_header)) {
asf_read_content_desc(s, gsize);
} else if (!ff_guidcmp(&g, &ff_asf_language_guid)) {
diff --git a/libavformat/avformat.h b/libavformat/avformat.h
index b18eb3f545..4e5683c66a 100644
--- a/libavformat/avformat.h
+++ b/libavformat/avformat.h
@@ -1315,9 +1315,39 @@ typedef struct AVFormatContext {
* Demuxers can use the flag to detect such changes.
*/
int io_repositioned;
+
+ /**
+ * Forced video codec.
+ * This allows forcing a specific decoder, even when there are multiple with
+ * the same codec_id.
+ * Demuxing: Set by user via av_format_set_video_codec (NO direct access).
+ */
+ AVCodec *video_codec;
+
+ /**
+ * Forced audio codec.
+ * This allows forcing a specific decoder, even when there are multiple with
+ * the same codec_id.
+ * Demuxing: Set by user via av_format_set_audio_codec (NO direct access).
+ */
+ AVCodec *audio_codec;
+
+ /**
+ * Forced subtitle codec.
+ * This allows forcing a specific decoder, even when there are multiple with
+ * the same codec_id.
+ * Demuxing: Set by user via av_format_set_subtitle_codec (NO direct access).
+ */
+ AVCodec *subtitle_codec;
} AVFormatContext;
int av_format_get_probe_score(const AVFormatContext *s);
+AVCodec * av_format_get_video_codec(const AVFormatContext *s);
+void av_format_set_video_codec(AVFormatContext *s, AVCodec *c);
+AVCodec * av_format_get_audio_codec(const AVFormatContext *s);
+void av_format_set_audio_codec(AVFormatContext *s, AVCodec *c);
+AVCodec * av_format_get_subtitle_codec(const AVFormatContext *s);
+void av_format_set_subtitle_codec(AVFormatContext *s, AVCodec *c);
/**
* Returns the method used to set ctx->duration.
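The new fields and accessors above let a caller force which decoder libavformat uses while probing streams (see the matching find_decoder() change in libavformat/utils.c further down). A hedged usage sketch; the decoder name "h264" is only an example, and av_register_all() is assumed to have been called elsewhere:

#include <libavformat/avformat.h>

/* Sketch only: force a particular video decoder before stream probing.
 * Error handling is minimal and the decoder name is an arbitrary example. */
static int open_with_forced_decoder(const char *url, AVFormatContext **out)
{
    int ret;
    AVFormatContext *fmt = avformat_alloc_context();
    if (!fmt)
        return AVERROR(ENOMEM);

    /* no direct access to fmt->video_codec: go through the new accessor */
    av_format_set_video_codec(fmt, avcodec_find_decoder_by_name("h264"));

    if ((ret = avformat_open_input(&fmt, url, NULL, NULL)) < 0)
        return ret;
    if ((ret = avformat_find_stream_info(fmt, NULL)) < 0) {
        avformat_close_input(&fmt);
        return ret;
    }
    *out = fmt;
    return 0;
}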
diff --git a/libavformat/avidec.c b/libavformat/avidec.c
index b04a46be33..c7210f026b 100644
--- a/libavformat/avidec.c
+++ b/libavformat/avidec.c
@@ -905,7 +905,8 @@ fail:
static int read_gab2_sub(AVStream *st, AVPacket *pkt)
{
- if (pkt->data && !strcmp(pkt->data, "GAB2") && AV_RL16(pkt->data + 5) == 2) {
+ if (pkt->size >= 7 &&
+ !strcmp(pkt->data, "GAB2") && AV_RL16(pkt->data + 5) == 2) {
uint8_t desc[256];
int score = AVPROBE_SCORE_EXTENSION, ret;
AVIStream *ast = st->priv_data;
diff --git a/libavformat/bfi.c b/libavformat/bfi.c
index 7a4f436bf3..b65a582e65 100644
--- a/libavformat/bfi.c
+++ b/libavformat/bfi.c
@@ -136,6 +136,10 @@ static int bfi_read_packet(AVFormatContext * s, AVPacket * pkt)
video_offset = avio_rl32(pb);
audio_size = video_offset - audio_offset;
bfi->video_size = chunk_size - video_offset;
+ if (audio_size < 0 || bfi->video_size < 0) {
+ av_log(s, AV_LOG_ERROR, "Invalid audio/video offsets or chunk size\n");
+ return AVERROR_INVALIDDATA;
+ }
//Tossing an audio packet at the audio decoder.
ret = av_get_packet(pb, pkt, audio_size);
@@ -144,9 +148,7 @@ static int bfi_read_packet(AVFormatContext * s, AVPacket * pkt)
pkt->pts = bfi->audio_frame;
bfi->audio_frame += ret;
- }
-
- else {
+ } else if (bfi->video_size > 0) {
//Tossing a video packet at the video decoder.
ret = av_get_packet(pb, pkt, bfi->video_size);
@@ -154,10 +156,13 @@ static int bfi_read_packet(AVFormatContext * s, AVPacket * pkt)
return ret;
pkt->pts = bfi->video_frame;
- bfi->video_frame += bfi->video_size ? ret / bfi->video_size : 1;
+ bfi->video_frame += ret / bfi->video_size;
/* One less frame to read. A cursory decrement. */
bfi->nframes--;
+ } else {
+ /* Empty video packet */
+ ret = AVERROR(EAGAIN);
}
bfi->avflag = !bfi->avflag;
diff --git a/libavformat/electronicarts.c b/libavformat/electronicarts.c
index b631393113..97c19c932a 100644
--- a/libavformat/electronicarts.c
+++ b/libavformat/electronicarts.c
@@ -485,7 +485,7 @@ static int ea_read_header(AVFormatContext *s)
}
if (ea->audio_codec) {
- if (ea->num_channels <= 0) {
+ if (ea->num_channels <= 0 || ea->num_channels > 2) {
av_log(s, AV_LOG_WARNING,
"Unsupported number of channels: %d\n", ea->num_channels);
ea->audio_codec = 0;
diff --git a/libavformat/mov.c b/libavformat/mov.c
index 895af18576..b06dcdb828 100644
--- a/libavformat/mov.c
+++ b/libavformat/mov.c
@@ -1922,11 +1922,16 @@ static int mov_read_stts(MOVContext *c, AVIOContext *pb, MOVAtom atom)
sample_count=avio_rb32(pb);
sample_duration = avio_rb32(pb);
+
/* sample_duration < 0 is invalid based on the spec */
if (sample_duration < 0) {
av_log(c->fc, AV_LOG_ERROR, "Invalid SampleDelta in STTS %d\n", sample_duration);
sample_duration = 1;
}
+ if (sample_count < 0) {
+ av_log(c->fc, AV_LOG_ERROR, "Invalid sample_count=%d\n", sample_count);
+ return AVERROR_INVALIDDATA;
+ }
sc->stts_data[i].count= sample_count;
sc->stts_data[i].duration= sample_duration;
@@ -2631,7 +2636,7 @@ static int mov_read_trun(MOVContext *c, AVIOContext *pb, MOVAtom atom)
return AVERROR_INVALIDDATA;
}
sc = st->priv_data;
- if (sc->pseudo_stream_id+1 != frag->stsd_id)
+ if (sc->pseudo_stream_id+1 != frag->stsd_id && sc->pseudo_stream_id != -1)
return 0;
avio_r8(pb); /* version */
flags = avio_rb24(pb);
diff --git a/libavformat/mpegenc.c b/libavformat/mpegenc.c
index ad07aa6b69..0a9d69b344 100644
--- a/libavformat/mpegenc.c
+++ b/libavformat/mpegenc.c
@@ -379,6 +379,10 @@ static av_cold int mpeg_mux_init(AVFormatContext *ctx)
av_log(ctx, AV_LOG_WARNING, "VBV buffer size not set, muxing may fail\n");
stream->max_buffer_size = 230*1024; //FIXME this is probably too small as default
}
+ if (stream->max_buffer_size > 1024 * 8191) {
+ av_log(ctx, AV_LOG_WARNING, "buffer size %d, too large\n", stream->max_buffer_size);
+ stream->max_buffer_size = 1024 * 8191;
+ }
s->video_bound++;
break;
case AVMEDIA_TYPE_SUBTITLE:
@@ -424,6 +428,10 @@ static av_cold int mpeg_mux_init(AVFormatContext *ctx)
bitrate += bitrate / 20;
bitrate += 10000;
s->mux_rate = (bitrate + (8 * 50) - 1) / (8 * 50);
+ if (s->mux_rate >= (1<<22)) {
+ av_log(ctx, AV_LOG_WARNING, "mux rate %d is too large\n", s->mux_rate);
+ s->mux_rate = (1<<22) - 1;
+ }
}
if (s->is_vcd) {
@@ -1146,7 +1154,7 @@ static int mpeg_mux_end(AVFormatContext *ctx)
#define OFFSET(x) offsetof(MpegMuxContext, x)
#define E AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
- { "muxrate", NULL, OFFSET(user_mux_rate), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, E },
+ { "muxrate", NULL, OFFSET(user_mux_rate), AV_OPT_TYPE_INT, {.i64 = 0}, 0, ((1<<22) - 1) * (8 * 50), E },
{ "preload", "Initial demux-decode delay in microseconds.", OFFSET(preload), AV_OPT_TYPE_INT, {.i64 = 500000}, 0, INT_MAX, E},
{ NULL },
};
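The two clamps above keep mux_rate inside what the MPEG-PS pack header can carry (program_mux_rate is a 22-bit field in units of 50 bytes per second), and the new muxrate option ceiling is that same limit converted back to bits per second. A quick check of the arithmetic:

#include <stdio.h>

int main(void)
{
    const int       max_mux_rate = (1 << 22) - 1;                 /* 4194303 */
    const long long max_bps      = (long long)max_mux_rate * 8 * 50;

    printf("max mux_rate    = %d (units of 50 bytes/s)\n", max_mux_rate);
    printf("max muxrate opt = %lld bit/s\n", max_bps);            /* 1677721200 */
    return 0;
}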
diff --git a/libavformat/mtv.c b/libavformat/mtv.c
index 29c26c6811..0517dd2d83 100644
--- a/libavformat/mtv.c
+++ b/libavformat/mtv.c
@@ -105,8 +105,8 @@ static int mtv_read_header(AVFormatContext *s)
mtv->img_height=mtv->img_segment_size / (mtv->img_bpp>>3)
/ mtv->img_width;
}
- if(!mtv->img_height || !mtv->img_width){
- av_log(s, AV_LOG_ERROR, "width or height is invalid and I cannot calculate them from other information\n");
+ if(!mtv->img_height || !mtv->img_width || !mtv->img_segment_size){
+ av_log(s, AV_LOG_ERROR, "width or height or segment_size is invalid and I cannot calculate them from other information\n");
return AVERROR(EINVAL);
}
diff --git a/libavformat/mvi.c b/libavformat/mvi.c
index 953c1824fa..bd1f3c66f9 100644
--- a/libavformat/mvi.c
+++ b/libavformat/mvi.c
@@ -96,10 +96,12 @@ static int read_header(AVFormatContext *s)
mvi->get_int = (vst->codec->width * vst->codec->height < (1 << 16)) ? avio_rl16 : avio_rl24;
mvi->audio_frame_size = ((uint64_t)mvi->audio_data_size << MVI_FRAC_BITS) / frames_count;
- if (!mvi->audio_frame_size) {
- av_log(s, AV_LOG_ERROR, "audio_frame_size is 0\n");
+ if (mvi->audio_frame_size <= 1 << MVI_FRAC_BITS - 1) {
+ av_log(s, AV_LOG_ERROR, "Invalid audio_data_size (%d) or frames_count (%d)\n",
+ mvi->audio_data_size, frames_count);
return AVERROR_INVALIDDATA;
}
+
mvi->audio_size_counter = (ast->codec->sample_rate * 830 / mvi->audio_frame_size - 1) * mvi->audio_frame_size;
mvi->audio_size_left = mvi->audio_data_size;
diff --git a/libavformat/mxfdec.c b/libavformat/mxfdec.c
index 92b69b48dc..dac98acaf3 100644
--- a/libavformat/mxfdec.c
+++ b/libavformat/mxfdec.c
@@ -1569,11 +1569,13 @@ static int mxf_parse_structural_metadata(MXFContext *mxf)
st->codec->bits_per_coded_sample = descriptor->bits_per_sample;
if (descriptor->sample_rate.den > 0) {
- avpriv_set_pts_info(st, 64, descriptor->sample_rate.den, descriptor->sample_rate.num);
st->codec->sample_rate = descriptor->sample_rate.num / descriptor->sample_rate.den;
+ avpriv_set_pts_info(st, 64, descriptor->sample_rate.den, descriptor->sample_rate.num);
} else {
- av_log(mxf->fc, AV_LOG_WARNING, "invalid sample rate (%d/%d) found for stream #%d, time base forced to 1/48000\n",
- descriptor->sample_rate.num, descriptor->sample_rate.den, st->index);
+ av_log(mxf->fc, AV_LOG_WARNING, "invalid sample rate (%d/%d) "
+ "found for stream #%d, time base forced to 1/48000\n",
+ descriptor->sample_rate.num, descriptor->sample_rate.den,
+ st->index);
avpriv_set_pts_info(st, 64, 1, 48000);
}
diff --git a/libavformat/mxfenc.c b/libavformat/mxfenc.c
index 51ed2122df..5e77a3f068 100644
--- a/libavformat/mxfenc.c
+++ b/libavformat/mxfenc.c
@@ -1660,7 +1660,7 @@ static void mxf_gen_umid(AVFormatContext *s)
AV_WB64(mxf->umid , umid);
AV_WB64(mxf->umid+8, umid>>8);
- mxf->instance_number = seed;
+ mxf->instance_number = seed & 0xFFFFFF;
}
static int mxf_write_header(AVFormatContext *s)
diff --git a/libavformat/oma.c b/libavformat/oma.c
index fc926bf8ba..27028674db 100644
--- a/libavformat/oma.c
+++ b/libavformat/oma.c
@@ -21,6 +21,7 @@
#include "internal.h"
#include "oma.h"
#include "libavcodec/avcodec.h"
+#include "libavutil/channel_layout.h"
const uint16_t ff_oma_srate_tab[8] = { 320, 441, 480, 882, 960, 0 };
@@ -32,3 +33,16 @@ const AVCodecTag ff_oma_codec_tags[] = {
{ 0 },
};
+/** map ATRAC-X channel id to internal channel layout */
+const uint64_t ff_oma_chid_to_native_layout[7] = {
+ AV_CH_LAYOUT_MONO,
+ AV_CH_LAYOUT_STEREO,
+ AV_CH_LAYOUT_SURROUND,
+ AV_CH_LAYOUT_4POINT0,
+ AV_CH_LAYOUT_5POINT1_BACK,
+ AV_CH_LAYOUT_6POINT1_BACK,
+ AV_CH_LAYOUT_7POINT1
+};
+
+/** map ATRAC-X channel id to total number of channels */
+const int ff_oma_chid_to_num_channels[7] = {1, 2, 3, 4, 6, 7, 8};
diff --git a/libavformat/oma.h b/libavformat/oma.h
index c3f998e761..e2a187bacc 100644
--- a/libavformat/oma.h
+++ b/libavformat/oma.h
@@ -41,4 +41,7 @@ extern const uint16_t ff_oma_srate_tab[8];
extern const AVCodecTag ff_oma_codec_tags[];
+extern const uint64_t ff_oma_chid_to_native_layout[7];
+extern const int ff_oma_chid_to_num_channels[7];
+
#endif /* AVFORMAT_OMA_H */
diff --git a/libavformat/omadec.c b/libavformat/omadec.c
index fe1f56ec15..e4291d9b55 100644
--- a/libavformat/omadec.c
+++ b/libavformat/omadec.c
@@ -1,7 +1,7 @@
/*
* Sony OpenMG (OMA) demuxer
*
- * Copyright (c) 2008 Maxim Poliakovski
+ * Copyright (c) 2008, 2013 Maxim Poliakovski
* 2008 Benjamin Larsson
* 2011 David Goldwich
*
@@ -285,7 +285,7 @@ static int decrypt_init(AVFormatContext *s, ID3v2ExtraMeta *em, uint8_t *header)
static int oma_read_header(AVFormatContext *s)
{
int ret, framesize, jsflag, samplerate;
- uint32_t codec_params;
+ uint32_t codec_params, channel_id;
int16_t eid;
uint8_t buf[EA3_HEADER_SIZE];
uint8_t *edata;
@@ -365,7 +365,14 @@ static int oma_read_header(AVFormatContext *s)
avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
break;
case OMA_CODECID_ATRAC3P:
- st->codec->channels = (codec_params >> 10) & 7;
+ channel_id = (codec_params >> 10) & 7;
+ if (!channel_id) {
+ av_log(s, AV_LOG_ERROR,
+ "Invalid ATRAC-X channel id: %d\n", channel_id);
+ return AVERROR_INVALIDDATA;
+ }
+ st->codec->channel_layout = ff_oma_chid_to_native_layout[channel_id - 1];
+ st->codec->channels = ff_oma_chid_to_num_channels[channel_id - 1];
framesize = ((codec_params & 0x3FF) * 8) + 8;
samplerate = ff_oma_srate_tab[(codec_params >> 13) & 7] * 100;
if (!samplerate) {
@@ -373,7 +380,7 @@ static int oma_read_header(AVFormatContext *s)
return AVERROR_INVALIDDATA;
}
st->codec->sample_rate = samplerate;
- st->codec->bit_rate = samplerate * framesize * 8 / 1024;
+ st->codec->bit_rate = samplerate * framesize * 8 / 2048;
avpriv_set_pts_info(st, 64, 1, samplerate);
av_log(s, AV_LOG_ERROR, "Unsupported codec ATRAC3+!\n");
break;
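The bit_rate change reflects the assumption that an ATRAC3+ frame carries 2048 samples per channel, so the rate is bytes-per-frame * 8 scaled by sample_rate / 2048 rather than / 1024. A worked example with hypothetical values (framesize = 352 corresponds to codec_params & 0x3FF == 43):

#include <stdio.h>

int main(void)
{
    /* Hypothetical ATRAC3+ stream: 352-byte frames at 44100 Hz. */
    const int framesize  = 352;
    const int samplerate = 44100;

    /* one frame is assumed to hold 2048 samples per channel, hence /2048 */
    const int bit_rate = samplerate * framesize * 8 / 2048;
    printf("bit_rate = %d bit/s\n", bit_rate);  /* 60637 */
    return 0;
}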
diff --git a/libavformat/riffdec.c b/libavformat/riffdec.c
index 973f3fada9..e7c52e09c3 100644
--- a/libavformat/riffdec.c
+++ b/libavformat/riffdec.c
@@ -130,6 +130,11 @@ int ff_get_wav_header(AVIOContext *pb, AVCodecContext *codec, int size)
if (size > 0)
avio_skip(pb, size);
}
+ if (codec->sample_rate <= 0) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Invalid sample rate: %d\n", codec->sample_rate);
+ return AVERROR_INVALIDDATA;
+ }
if (codec->codec_id == AV_CODEC_ID_AAC_LATM) {
/* Channels and sample_rate values are those prior to applying SBR
* and/or PS. */
diff --git a/libavformat/rtmpproto.c b/libavformat/rtmpproto.c
index 9bccd92088..6246ba6173 100644
--- a/libavformat/rtmpproto.c
+++ b/libavformat/rtmpproto.c
@@ -2769,6 +2769,7 @@ static const AVOption rtmp_options[] = {
{"rtmp_swfverify", "URL to player swf file, compute hash/size automatically.", OFFSET(swfverify), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC},
{"rtmp_tcurl", "URL of the target stream. Defaults to proto://host[:port]/app.", OFFSET(tcurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
{"rtmp_listen", "Listen for incoming rtmp connections", OFFSET(listen), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, DEC, "rtmp_listen" },
+ {"listen", "Listen for incoming rtmp connections", OFFSET(listen), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, DEC, "rtmp_listen" },
{"timeout", "Maximum timeout (in seconds) to wait for incoming connections. -1 is infinite. Implies -rtmp_listen 1", OFFSET(listen_timeout), AV_OPT_TYPE_INT, {.i64 = -1}, INT_MIN, INT_MAX, DEC, "rtmp_listen" },
{ NULL },
};
diff --git a/libavformat/utils.c b/libavformat/utils.c
index 1f118dfa8d..61405d7c97 100644
--- a/libavformat/utils.c
+++ b/libavformat/utils.c
@@ -98,12 +98,27 @@ static int64_t wrap_timestamp(AVStream *st, int64_t timestamp)
}
MAKE_ACCESSORS(AVStream, stream, AVRational, r_frame_rate)
+MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, video_codec)
+MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, audio_codec)
+MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, subtitle_codec)
-static AVCodec *find_decoder(AVStream *st, enum AVCodecID codec_id)
+static AVCodec *find_decoder(AVFormatContext *s, AVStream *st, enum AVCodecID codec_id)
{
if (st->codec->codec)
return st->codec->codec;
+ switch(st->codec->codec_type){
+ case AVMEDIA_TYPE_VIDEO:
+ if(s->video_codec) return s->video_codec;
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ if(s->audio_codec) return s->audio_codec;
+ break;
+ case AVMEDIA_TYPE_SUBTITLE:
+ if(s->subtitle_codec) return s->subtitle_codec;
+ break;
+ }
+
return avcodec_find_decoder(codec_id);
}
@@ -2438,7 +2453,7 @@ static int has_codec_parameters(AVStream *st, const char **errmsg_ptr)
}
/* returns 1 or 0 if or if not decoded data was returned, or a negative error */
-static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options)
+static int try_decode_frame(AVFormatContext *s, AVStream *st, AVPacket *avpkt, AVDictionary **options)
{
const AVCodec *codec;
int got_picture = 1, ret = 0;
@@ -2452,7 +2467,7 @@ static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **option
if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
AVDictionary *thread_opt = NULL;
- codec = find_decoder(st, st->codec->codec_id);
+ codec = find_decoder(s, st, st->codec->codec_id);
if (!codec) {
st->info->found_decoder = -1;
@@ -2703,7 +2718,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
avcodec_get_name(st->codec->codec_id));
}
}
- codec = find_decoder(st, st->codec->codec_id);
+ codec = find_decoder(ic, st, st->codec->codec_id);
/* force thread count to 1 since the h264 decoder will not extract SPS
* and PPS to extradata during multi-threaded decoding */
@@ -2933,7 +2948,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
least one frame of codec data, this makes sure the codec initializes
the channel configuration and does not only trust the values from the container.
*/
- try_decode_frame(st, pkt, (options && i < orig_nb_streams ) ? &options[i] : NULL);
+ try_decode_frame(ic, st, pkt, (options && i < orig_nb_streams ) ? &options[i] : NULL);
st->codec_info_nb_frames++;
count++;
@@ -2951,7 +2966,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
/* flush the decoders */
if (st->info->found_decoder == 1) {
do {
- err = try_decode_frame(st, &empty_pkt,
+ err = try_decode_frame(ic, st, &empty_pkt,
(options && i < orig_nb_streams) ?
&options[i] : NULL);
} while (err > 0 && !has_codec_parameters(st, NULL));
@@ -3154,7 +3169,7 @@ int av_find_best_stream(AVFormatContext *ic,
if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
continue;
if (decoder_ret) {
- decoder = find_decoder(st, st->codec->codec_id);
+ decoder = find_decoder(ic, st, st->codec->codec_id);
if (!decoder) {
if (ret < 0)
ret = AVERROR_DECODER_NOT_FOUND;
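
With find_decoder() now consulting the AVFormatContext video/audio/subtitle codec fields, and MAKE_ACCESSORS generating av_format_get/set_video_codec() and friends, a caller can pin a specific decoder before stream probing. A sketch under those assumptions; "h264" is only an example decoder name:

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

/* Force one decoder for all video streams before avformat_find_stream_info(). */
static int probe_with_forced_video_decoder(AVFormatContext *ic)
{
    AVCodec *dec = avcodec_find_decoder_by_name("h264"); /* illustrative */

    if (dec)
        av_format_set_video_codec(ic, dec); /* accessor generated above */
    return avformat_find_stream_info(ic, NULL);
}
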
diff --git a/libavformat/version.h b/libavformat/version.h
index 10b6a36b98..32e76c78c0 100644
--- a/libavformat/version.h
+++ b/libavformat/version.h
@@ -30,8 +30,8 @@
#include "libavutil/avutil.h"
#define LIBAVFORMAT_VERSION_MAJOR 55
-#define LIBAVFORMAT_VERSION_MINOR 18
-#define LIBAVFORMAT_VERSION_MICRO 104
+#define LIBAVFORMAT_VERSION_MINOR 19
+#define LIBAVFORMAT_VERSION_MICRO 100
#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \
LIBAVFORMAT_VERSION_MINOR, \
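
The minor bump to 55.19.100 accompanies the new format accessors; code that wants to use them conditionally can gate on the version macro. A small sketch:

#include <libavformat/version.h>

#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(55, 19, 100)
/* av_format_set_video_codec() and the other forced-decoder accessors exist */
#define HAVE_FORMAT_CODEC_ACCESSORS 1
#else
#define HAVE_FORMAT_CODEC_ACCESSORS 0
#endif
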
diff --git a/libavformat/vqf.c b/libavformat/vqf.c
index 83ec1be1a0..be7d8a0a80 100644
--- a/libavformat/vqf.c
+++ b/libavformat/vqf.c
@@ -179,14 +179,21 @@ static int vqf_read_header(AVFormatContext *s)
st->codec->sample_rate = 11025;
break;
default:
- st->codec->sample_rate = rate_flag*1000;
- if (st->codec->sample_rate <= 0) {
- av_log(s, AV_LOG_ERROR, "sample rate %d is invalid\n", st->codec->sample_rate);
- return -1;
+ if (rate_flag < 8 || rate_flag > 44) {
+ av_log(s, AV_LOG_ERROR, "Invalid rate flag %d\n", rate_flag);
+ return AVERROR_INVALIDDATA;
}
+ st->codec->sample_rate = rate_flag*1000;
break;
}
+ if (read_bitrate / st->codec->channels < 8 ||
+ read_bitrate / st->codec->channels > 48) {
+ av_log(s, AV_LOG_ERROR, "Invalid bitrate per channel %d\n",
+ read_bitrate / st->codec->channels);
+ return AVERROR_INVALIDDATA;
+ }
+
switch (((st->codec->sample_rate/1000) << 8) +
read_bitrate/st->codec->channels) {
case (11<<8) + 8 :
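
The added checks bound the header fields before they feed the ((sample_rate/1000) << 8) + bitrate-per-channel switch that follows, so an out-of-range key can no longer be formed. A toy sketch of the same pattern, with illustrative return values:

/* Validate the TwinVQ header fields and build the switch key used above. */
static int vqf_mode_key(int rate_khz, int bitrate_per_channel)
{
    if (rate_khz < 8 || rate_khz > 44)
        return -1;                        /* demuxer returns AVERROR_INVALIDDATA */
    if (bitrate_per_channel < 8 || bitrate_per_channel > 48)
        return -1;
    return (rate_khz << 8) + bitrate_per_channel; /* e.g. (44 << 8) + 48 = 11312 */
}
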
diff --git a/libavformat/xwma.c b/libavformat/xwma.c
index 0d40bd76c8..135faf2f7a 100644
--- a/libavformat/xwma.c
+++ b/libavformat/xwma.c
@@ -199,8 +199,10 @@ static int xwma_read_header(AVFormatContext *s)
/* Estimate the duration from the total number of output bytes. */
const uint64_t total_decoded_bytes = dpds_table[dpds_table_size - 1];
- if(!bytes_per_sample) {
- av_log(s, AV_LOG_ERROR, "bytes_per_sample is 0\n");
+ if (!bytes_per_sample) {
+ av_log(s, AV_LOG_ERROR,
+ "Invalid bits_per_coded_sample %d for %d channels\n",
+ st->codec->bits_per_coded_sample, st->codec->channels);
return AVERROR_INVALIDDATA;
}
diff --git a/libswresample/swresample.h b/libswresample/swresample.h
index 95e8a5a093..23ceb982f4 100644
--- a/libswresample/swresample.h
+++ b/libswresample/swresample.h
@@ -84,8 +84,8 @@
* input, in_samples);
* handle_output(output, out_samples);
* av_freep(&output);
- * }
- * @endcode
+ * }
+ * @endcode
*
* When the conversion is finished, the conversion
* context and everything associated with it must be freed with swr_free().
diff --git a/libswscale/options.c b/libswscale/options.c
index 8985e6b5d6..2b3147bb96 100644
--- a/libswscale/options.c
+++ b/libswscale/options.c
@@ -34,7 +34,7 @@ static const char *sws_context_to_name(void *ptr)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption swscale_options[] = {
- { "sws_flags", "scaler flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, { .i64 = DEFAULT }, 0, UINT_MAX, VE, "sws_flags" },
+ { "sws_flags", "scaler flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, { .i64 = SWS_BILINEAR }, 0, UINT_MAX, VE, "sws_flags" },
{ "fast_bilinear", "fast bilinear", 0, AV_OPT_TYPE_CONST, { .i64 = SWS_FAST_BILINEAR }, INT_MIN, INT_MAX, VE, "sws_flags" },
{ "bilinear", "bilinear", 0, AV_OPT_TYPE_CONST, { .i64 = SWS_BILINEAR }, INT_MIN, INT_MAX, VE, "sws_flags" },
{ "bicubic", "bicubic", 0, AV_OPT_TYPE_CONST, { .i64 = SWS_BICUBIC }, INT_MIN, INT_MAX, VE, "sws_flags" },
diff --git a/libswscale/utils.c b/libswscale/utils.c
index 05ab0c6f86..a2e3ce1723 100644
--- a/libswscale/utils.c
+++ b/libswscale/utils.c
@@ -622,6 +622,10 @@ static av_cold int initFilter(int16_t **outFilter, int32_t **filterPos,
sum += filter[i * filterSize + j];
}
sum = (sum + one / 2) / one;
+ if (!sum) {
+ av_log(NULL, AV_LOG_WARNING, "SwScaler: zero vector in scaling\n");
+ sum = 1;
+ }
for (j = 0; j < *outFilterSize; j++) {
int64_t v = filter[i * filterSize + j] + error;
int intV = ROUNDED_DIV(v, sum);
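
The guard above protects the per-coefficient division that follows (ROUNDED_DIV(v, sum)) from a zero divisor when a scaling vector sums to zero after rounding. A toy model of that normalisation step, applying the same clamp:

/* Toy normalisation of one filter vector, mirroring the clamp added above. */
static void normalize_filter(int64_t *filter, int n, int64_t one)
{
    int64_t sum = 0;
    int j;

    for (j = 0; j < n; j++)
        sum += filter[j];
    sum = (sum + one / 2) / one;
    if (!sum)        /* degenerate vector: avoid dividing by zero below */
        sum = 1;
    for (j = 0; j < n; j++)
        filter[j] = (filter[j] + sum / 2) / sum; /* rounded division */
}
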
diff --git a/tests/fate-run.sh b/tests/fate-run.sh
index 71eff804e7..52ddc22300 100755
--- a/tests/fate-run.sh
+++ b/tests/fate-run.sh
@@ -225,7 +225,7 @@ fi
if test -e "$ref" || test $cmp = "oneline" ; then
case $cmp in
- diff) diff -u -a -b "$ref" "$outfile" >$cmpfile ;;
+ diff) diff -u -b "$ref" "$outfile" >$cmpfile ;;
oneoff) oneoff "$ref" "$outfile" >$cmpfile ;;
stddev) stddev "$ref" "$outfile" >$cmpfile ;;
oneline)oneline "$ref" "$outfile" >$cmpfile ;;
diff --git a/tests/fate/filter-video.mak b/tests/fate/filter-video.mak
index 9b9acf7b28..6d99ef2c55 100644
--- a/tests/fate/filter-video.mak
+++ b/tests/fate/filter-video.mak
@@ -57,7 +57,7 @@ FATE_FILTER_VSYNTH-$(CONFIG_HISTOGRAM_FILTER) += fate-filter-histogram-levels
fate-filter-histogram-levels: CMD = framecrc -c:v pgmyuv -i $(SRC) -vf histogram -flags +bitexact -sws_flags +accurate_rnd+bitexact
FATE_FILTER_VSYNTH-$(CONFIG_HISTOGRAM_FILTER) += fate-filter-histogram-waveform
-fate-filter-histogram-waveform: CMD = framecrc -c:v pgmyuv -i $(SRC) -vf histogram=mode=waveform -flags +bitexact -sws_flags +accurate_rnd+bitexact
+fate-filter-histogram-waveform: CMD = framecrc -c:v pgmyuv -i $(SRC) -vf format=yuv444p,histogram=mode=waveform -flags +bitexact -sws_flags +accurate_rnd+bitexact
FATE_FILTER_VSYNTH-$(CONFIG_OVERLAY_FILTER) += fate-filter-overlay
fate-filter-overlay: tests/data/filtergraphs/overlay
@@ -244,6 +244,9 @@ fate-filter-pixfmts-crop: CMD = pixfmts "100:100:100:100"
FATE_FILTER_PIXFMTS-$(CONFIG_FIELD_FILTER) += fate-filter-pixfmts-field
fate-filter-pixfmts-field: CMD = pixfmts "bottom"
+FATE_FILTER_PIXFMTS-$(CONFIG_FIELDORDER_FILTER) += fate-filter-pixfmts-fieldorder
+fate-filter-pixfmts-fieldorder: CMD = pixfmts "tff" "setfield=bff,"
+
FATE_FILTER_PIXFMTS-$(CONFIG_HFLIP_FILTER) += fate-filter-pixfmts-hflip
fate-filter-pixfmts-hflip: CMD = pixfmts
diff --git a/tests/ref/fate/exif-image-jpg b/tests/ref/fate/exif-image-jpg
index a634a87c55..ebe3fbf48e 100644
--- a/tests/ref/fate/exif-image-jpg
+++ b/tests/ref/fate/exif-image-jpg
@@ -31,10 +31,10 @@ TAG:YCbCrPositioning=2
TAG:ExposureTime=1:1250
TAG:FNumber=40:10
TAG:ISOSpeedRatings=160
-TAG:ExifVersion=0221
+TAG:ExifVersion=48, 50, 50, 49
TAG:DateTimeOriginal=2013:07:18 13:12:03
TAG:DateTimeDigitized=2013:07:18 13:12:03
-TAG:ComponentsConfiguration=
+TAG:ComponentsConfiguration=1, 2, 3, 0
TAG:CompressedBitsPerPixel=3:1
TAG:ShutterSpeedValue=329:32
TAG:ApertureValue=128:32
@@ -43,21 +43,21 @@ TAG:MaxApertureValue=113:32
TAG:MeteringMode=5
TAG:Flash=16
TAG:FocalLength=5000:1000
-TAG:MakerNote=
-TAG:UserComment=
-TAG:FlashpixVersion=0100
+TAG:MakerNote=25, 0, 1, 0, 3, 0, 48, 0, 0, 0, 28, 4, 0, 0, 2, 0, 3, 0, 4, 0, 0, 0, 124, 4, 0, 0, 3, 0, 3, 0, 4, 0, 0, 0, 132, 4, 0, 0, 4, 0, 3, 0, 34, 0, 0, 0, 140, 4, 0, 0, 0, 0, 3, 0, 6, 0, 0, 0, 208, 4, 0, 0, 6, 0, 2, 0, 28, 0, 0, 0, 220, 4, 0, 0, 7, 0, 2, 0, 22, 0, 0, 0, 252, 4, 0, 0, 8, 0, 4, 0, 1, 0, 0, 0, 17, 166, 15, 0, 9, 0, 2, 0, 32, 0, 0, 0, 20, 5, 0, 0, 13, 0, 4, 0, 167, 0, 0, 0, 52, 5, 0, 0, 16, 0, 4, 0, 1, 0, 0, 0, 0, 0, 96, 2, 38, 0, 3, 0, 48, 0, 0, 0, 208, 7, 0, 0, 19, 0, 3, 0, 4, 0, 0, 0, 48, 8, 0, 0, 24, 0, 1, 0, 0, 1, 0, 0, 56, 8, 0, 0, 25, 0, 3, 0, 1, 0, 0, 0, 1, 0, 0, 0, 28, 0, 3, 0, 1, 0, 0, 0, 0, 0, 0, 0, 29, 0, 3, 0, 16, 0, 0, 0, 56, 9, 0, 0, 30, 0, 4, 0, 1, 0, 0, 0, 0, 4, 0, 1, 31, 0, 3, 0, 69, 0, 0, 0, 88, 9, 0, 0, 34, 0, 3, 0, 208, 0, 0, 0, 226, 9, 0, 0, 35, 0, 4, 0, 2, 0, 0, 0, 130, 11, 0, 0, 39, 0, 3, 0, 5, 0, 0, 0, 138, 11, 0, 0, 40, 0, 1, 0, 16, 0, 0, 0, 148, 11, 0, 0, 208, 0, 4, 0, 1, 0, 0, 0, 0, 0, 0, 0, 45, 0, 4, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 96, 0, 2, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 255, 255, 1, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 0, 3, 0, 1, 0, 6, 64, 0, 0, 255, 127, 255, 255, 96, 234, 136, 19, 232, 3, 113, 0, 221, 0, 255, 255, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 160, 15, 160, 15, 0, 0, 0, 0, 255, 255, 0, 0, 255, 127, 255, 127, 0, 0, 0, 0, 255, 255, 90, 0, 2, 0, 136, 19, 250, 0, 187, 0, 0, 0, 0, 0, 0, 0, 0, 0, 68, 0, 18, 0, 160, 0, 68, 1, 128, 0, 73, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 119, 0, 0, 0, 128, 0, 73, 1, 0, 0, 0, 0, 23, 0, 250, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 73, 77, 71, 58, 80, 111, 119, 101, 114, 83, 104, 111, 116, 32, 83, 88, 50, 48, 48, 32, 73, 83, 32, 74, 80, 69, 71, 0, 0, 0, 0, 0, 70, 105, 114, 109, 119, 97, 114, 101, 32, 86, 101, 114, 115, 105, 111, 110, 32, 49, 46, 48, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 211, 1, 0, 0, 155, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 1, 0, 0, 221, 3, 0, 0, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 64, 2, 0, 0, 123, 3, 0, 0, 165, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 66, 0, 0, 0, 10, 0, 0, 0, 17, 0, 0, 0, 70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 81, 0, 0, 0, 0, 0, 0, 0, 204, 3, 0, 0, 138, 3, 0, 0, 138, 3, 0, 0, 128, 1, 0, 0, 66, 4, 0, 0, 165, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 138, 3, 0, 0, 138, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 0, 0, 0, 120, 0, 0, 0, 120, 0, 0, 0, 102, 255, 255, 255, 208, 0, 0, 0, 114, 255, 255, 255, 208, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 114, 255, 255, 255, 208, 0, 0, 0, 12, 0, 0, 0, 204, 0, 0, 0, 239, 255, 255, 255, 201, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 8, 0, 0, 0, 239, 255, 255, 255, 201, 0, 0, 0, 24, 0, 0, 0, 143, 3, 0, 0, 125, 6, 0, 0, 97, 6, 0, 0, 143, 3, 0, 0, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 91, 1, 0, 0, 113, 4, 0, 0, 204, 3, 0, 0, 147, 2, 0, 0, 165, 255, 255, 255, 10, 0, 0, 0, 128, 0, 0, 0, 251, 1, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 74, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 97, 2, 0, 0, 150, 2, 0, 0, 204, 2, 0, 0, 241, 2, 0, 0, 0, 0, 0, 0, 128, 0, 0, 0, 0, 0, 0, 0, 8, 162, 255, 255, 70, 2, 0, 0, 69, 2, 0, 0, 69, 2, 0, 0, 65, 2, 0, 0, 66, 2, 0, 0, 68, 2, 0, 
0, 66, 2, 0, 0, 67, 2, 0, 0, 67, 2, 0, 0, 68, 2, 0, 0, 18, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 56, 5, 0, 0, 250, 0, 0, 0, 53, 1, 0, 0, 58, 0, 0, 0, 5, 4, 0, 0, 193, 0, 0, 0, 240, 0, 0, 0, 45, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 3, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 107, 0, 0, 0, 6, 255, 255, 255, 0, 0, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 0, 244, 255, 255, 133, 0, 0, 0, 102, 2, 0, 0, 243, 1, 0, 0, 0, 0, 0, 0, 99, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 192, 0, 0, 0, 4, 1, 0, 0, 0, 1, 0, 0, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 33, 0, 0, 0, 13, 0, 0, 0, 237, 157, 54, 41, 96, 0, 4, 0, 9, 0, 9, 0, 160, 15, 200, 8, 100, 0, 100, 0, 18, 0, 18, 0, 18, 0, 18, 0, 18, 0, 18, 0, 18, 0, 18, 0, 18, 0, 18, 0, 18, 0, 18, 0, 18, 0, 18, 0, 18, 0, 18, 0, 18, 0, 18, 0, 238, 255, 0, 0, 18, 0, 238, 255, 0, 0, 18, 0, 238, 255, 0, 0, 18, 0, 238, 255, 238, 255, 238, 255, 0, 0, 0, 0, 0, 0, 18, 0, 18, 0, 18, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 159, 0, 15, 0, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 1, 0, 0, 0, 2, 0, 2, 0, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 138, 0, 1, 0, 0, 0, 4, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 160, 1, 0, 0, 0, 0, 16, 0, 8, 0, 1, 0, 1, 0, 128, 2, 224, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 128, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 255, 255, 0, 0, 0, 0, 239, 154, 237, 228, 191, 235, 20, 171, 30, 6, 2, 129, 88, 251, 56, 49, 73, 73, 42, 0, 222, 2, 0, 0
+TAG:UserComment=0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+TAG:FlashpixVersion=48, 49, 48, 48
TAG:ColorSpace=1
TAG:PixelXDimension=4000
TAG:PixelYDimension=2248
TAG:GPSLatitudeRef=R98
-TAG:GPSLatitude=0100
+TAG:GPSLatitude=48, 49, 48, 48
TAG:0x1001=4000
TAG:0x1002=2248
TAG:FocalPlaneXResolution=4000000:244
TAG:FocalPlaneYResolution=2248000:183
TAG:FocalPlaneResolutionUnit=2
TAG:SensingMethod=2
-TAG:FileSource=
+TAG:FileSource=3
TAG:CustomRendered=0
TAG:ExposureMode=0
TAG:WhiteBalance=0
diff --git a/tests/ref/fate/ffprobe_compact b/tests/ref/fate/ffprobe_compact
index 249df5750d..eb82edb473 100644
--- a/tests/ref/fate/ffprobe_compact
+++ b/tests/ref/fate/ffprobe_compact
@@ -26,7 +26,7 @@ packet|codec_type=video|stream_index=1|pts=6144|pts_time=0.120000|dts=6144|dts_t
frame|media_type=video|key_frame=1|pkt_pts=6144|pkt_pts_time=0.120000|pkt_dts=6144|pkt_dts_time=0.120000|pkt_duration=2048|pkt_duration_time=0.040000|pkt_pos=794307|pkt_size=N/A|width=320|height=240|pix_fmt=rgb24|sample_aspect_ratio=1:1|pict_type=I|coded_picture_number=0|display_picture_number=0|interlaced_frame=0|top_field_first=0|repeat_pict=0
packet|codec_type=video|stream_index=2|pts=6144|pts_time=0.120000|dts=6144|dts_time=0.120000|duration=2048|duration_time=0.040000|convergence_duration=N/A|convergence_duration_time=N/A|size=30000|pos=1024731|flags=K
frame|media_type=video|key_frame=1|pkt_pts=6144|pkt_pts_time=0.120000|pkt_dts=6144|pkt_dts_time=0.120000|pkt_duration=2048|pkt_duration_time=0.040000|pkt_pos=1024731|pkt_size=N/A|width=100|height=100|pix_fmt=rgb24|sample_aspect_ratio=1:1|pict_type=I|coded_picture_number=0|display_picture_number=0|interlaced_frame=0|top_field_first=0|repeat_pict=0
-stream|index=0|codec_name=pcm_s16le|profile=unknown|codec_type=audio|codec_time_base=1/44100|codec_tag_string=PSD[16]|codec_tag=0x10445350|sample_fmt=s16|sample_rate=44100|channels=1|bits_per_sample=16|id=N/A|r_frame_rate=0/0|avg_frame_rate=0/0|time_base=1/44100|start_pts=0|start_time=0.000000|duration_ts=N/A|duration=N/A|bit_rate=705600|nb_frames=N/A|nb_read_frames=6|nb_read_packets=6|disposition:default=0|disposition:dub=0|disposition:original=0|disposition:comment=0|disposition:lyrics=0|disposition:karaoke=0|disposition:forced=0|disposition:hearing_impaired=0|disposition:visual_impaired=0|disposition:clean_effects=0|disposition:attached_pic=0|tag:E=mc²
+stream|index=0|codec_name=pcm_s16le|profile=unknown|codec_type=audio|codec_time_base=1/44100|codec_tag_string=PSD[16]|codec_tag=0x10445350|sample_fmt=s16|sample_rate=44100|channels=1|channel_layout=unknown|bits_per_sample=16|id=N/A|r_frame_rate=0/0|avg_frame_rate=0/0|time_base=1/44100|start_pts=0|start_time=0.000000|duration_ts=N/A|duration=N/A|bit_rate=705600|nb_frames=N/A|nb_read_frames=6|nb_read_packets=6|disposition:default=0|disposition:dub=0|disposition:original=0|disposition:comment=0|disposition:lyrics=0|disposition:karaoke=0|disposition:forced=0|disposition:hearing_impaired=0|disposition:visual_impaired=0|disposition:clean_effects=0|disposition:attached_pic=0|tag:E=mc²
stream|index=1|codec_name=rawvideo|profile=unknown|codec_type=video|codec_time_base=1/51200|codec_tag_string=RGB[24]|codec_tag=0x18424752|width=320|height=240|has_b_frames=0|sample_aspect_ratio=1:1|display_aspect_ratio=4:3|pix_fmt=rgb24|level=-99|timecode=N/A|id=N/A|r_frame_rate=25/1|avg_frame_rate=25/1|time_base=1/51200|start_pts=0|start_time=0.000000|duration_ts=N/A|duration=N/A|bit_rate=N/A|nb_frames=N/A|nb_read_frames=4|nb_read_packets=4|disposition:default=0|disposition:dub=0|disposition:original=0|disposition:comment=0|disposition:lyrics=0|disposition:karaoke=0|disposition:forced=0|disposition:hearing_impaired=0|disposition:visual_impaired=0|disposition:clean_effects=0|disposition:attached_pic=0|tag:title=foobar|tag:duration_ts=field-and-tags-conflict-attempt
stream|index=2|codec_name=rawvideo|profile=unknown|codec_type=video|codec_time_base=1/51200|codec_tag_string=RGB[24]|codec_tag=0x18424752|width=100|height=100|has_b_frames=0|sample_aspect_ratio=1:1|display_aspect_ratio=1:1|pix_fmt=rgb24|level=-99|timecode=N/A|id=N/A|r_frame_rate=25/1|avg_frame_rate=25/1|time_base=1/51200|start_pts=0|start_time=0.000000|duration_ts=N/A|duration=N/A|bit_rate=N/A|nb_frames=N/A|nb_read_frames=4|nb_read_packets=4|disposition:default=0|disposition:dub=0|disposition:original=0|disposition:comment=0|disposition:lyrics=0|disposition:karaoke=0|disposition:forced=0|disposition:hearing_impaired=0|disposition:visual_impaired=0|disposition:clean_effects=0|disposition:attached_pic=0
format|filename=tests/data/ffprobe-test.nut|nb_streams=3|nb_programs=0|format_name=nut|start_time=0.000000|duration=0.120000|size=1054812|bit_rate=70320800|probe_score=100|tag:title=ffprobe test file|tag:comment='A comment with CSV, XML & JSON special chars': <tag value="x">|tag:comment2=I ♥ Üñîçød€
diff --git a/tests/ref/fate/ffprobe_csv b/tests/ref/fate/ffprobe_csv
index 4d950c343e..10d1bf1adc 100644
--- a/tests/ref/fate/ffprobe_csv
+++ b/tests/ref/fate/ffprobe_csv
@@ -26,7 +26,7 @@ packet,video,1,6144,0.120000,6144,0.120000,2048,0.040000,N/A,N/A,230400,794307,K
frame,video,1,6144,0.120000,6144,0.120000,2048,0.040000,794307,N/A,320,240,rgb24,1:1,I,0,0,0,0,0
packet,video,2,6144,0.120000,6144,0.120000,2048,0.040000,N/A,N/A,30000,1024731,K
frame,video,1,6144,0.120000,6144,0.120000,2048,0.040000,1024731,N/A,100,100,rgb24,1:1,I,0,0,0,0,0
-stream,0,pcm_s16le,unknown,audio,1/44100,PSD[16],0x10445350,s16,44100,1,16,N/A,0/0,0/0,1/44100,0,0.000000,N/A,N/A,705600,N/A,6,6,0,0,0,0,0,0,0,0,0,0,0,mc²
+stream,0,pcm_s16le,unknown,audio,1/44100,PSD[16],0x10445350,s16,44100,1,unknown,16,N/A,0/0,0/0,1/44100,0,0.000000,N/A,N/A,705600,N/A,6,6,0,0,0,0,0,0,0,0,0,0,0,mc²
stream,1,rawvideo,unknown,video,1/51200,RGB[24],0x18424752,320,240,0,1:1,4:3,rgb24,-99,N/A,N/A,25/1,25/1,1/51200,0,0.000000,N/A,N/A,N/A,N/A,4,4,0,0,0,0,0,0,0,0,0,0,0,foobar,field-and-tags-conflict-attempt
stream,2,rawvideo,unknown,video,1/51200,RGB[24],0x18424752,100,100,0,1:1,1:1,rgb24,-99,N/A,N/A,25/1,25/1,1/51200,0,0.000000,N/A,N/A,N/A,N/A,4,4,0,0,0,0,0,0,0,0,0,0,0
format,tests/data/ffprobe-test.nut,3,0,nut,0.000000,0.120000,1054812,70320800,100,ffprobe test file,"'A comment with CSV, XML & JSON special chars': <tag value=""x"">",I ♥ Üñîçød€
diff --git a/tests/ref/fate/ffprobe_default b/tests/ref/fate/ffprobe_default
index 690a419b0d..c6d9b28f16 100644
--- a/tests/ref/fate/ffprobe_default
+++ b/tests/ref/fate/ffprobe_default
@@ -491,6 +491,7 @@ codec_tag=0x10445350
sample_fmt=s16
sample_rate=44100
channels=1
+channel_layout=unknown
bits_per_sample=16
id=N/A
r_frame_rate=0/0
diff --git a/tests/ref/fate/ffprobe_flat b/tests/ref/fate/ffprobe_flat
index e54510fa5b..ae280b79c4 100644
--- a/tests/ref/fate/ffprobe_flat
+++ b/tests/ref/fate/ffprobe_flat
@@ -434,6 +434,7 @@ streams.stream.0.codec_tag="0x10445350"
streams.stream.0.sample_fmt="s16"
streams.stream.0.sample_rate="44100"
streams.stream.0.channels=1
+streams.stream.0.channel_layout="unknown"
streams.stream.0.bits_per_sample=16
streams.stream.0.id="N/A"
streams.stream.0.r_frame_rate="0/0"
diff --git a/tests/ref/fate/ffprobe_ini b/tests/ref/fate/ffprobe_ini
index 8f4de86090..c47a0c4e90 100644
--- a/tests/ref/fate/ffprobe_ini
+++ b/tests/ref/fate/ffprobe_ini
@@ -493,6 +493,7 @@ codec_tag=0x10445350
sample_fmt=s16
sample_rate=44100
channels=1
+channel_layout=unknown
bits_per_sample=16
id=N/A
r_frame_rate=0/0
diff --git a/tests/ref/fate/filter-pixfmts-fieldorder b/tests/ref/fate/filter-pixfmts-fieldorder
new file mode 100644
index 0000000000..d743caad9e
--- /dev/null
+++ b/tests/ref/fate/filter-pixfmts-fieldorder
@@ -0,0 +1,89 @@
+0bgr a8559fc1395908cda27a27bf79819d61
+0rgb 704924e65927896b84bd645ca59fb7f2
+abgr f479df06a4b73d348b3af004923d7eb9
+argb c16c9af56802c3b821b30c613c041622
+bgr0 b7ee10fec20508b64dfca22d61533a88
+bgr24 a1c59acf60652578b802e7f6639d1ac5
+bgr444be d41639928d05bdee67fa2a76c7f74533
+bgr444le 45f616b1ec9014f8d618878c4db850c1
+bgr48be 7015ba40d142742e5c6a39113fee58af
+bgr48le fd33d4a60a789d170b871784d0859089
+bgr4_byte d1ff07684bc1be194d26cdf598385b8e
+bgr555be 2295d642e02f9dcb49c20142bfdd9c1a
+bgr555le a95883301ab991a3cf8bb063b7912410
+bgr565be c37ced8fa4951da20831edf9bd46b35c
+bgr565le da0ee7f773efa07fdacc62b7deac452b
+bgr8 94a043f6d5e01de077a92a9be6f77582
+bgra 02db5e046ced11d3f09d901cae205e2f
+gbrap 5317a66b677c943203b2a5e93607dc68
+gbrp b1ffeba90a60e9a23c9f35466c135069
+gbrp10be 07b2842fdf9b92f6cd8e8cf3a8abdfac
+gbrp10le 7fa5429bb4bc6f0e1f3c190bc8064b38
+gbrp12be 5ea6cb457c88b4b3d48f6f63b6208828
+gbrp12le f969a3b017a0f46008f81453174f4ecd
+gbrp14be b2879b691564f66b88e63efd268de8c5
+gbrp14le 7baa94cd296e6ec8e41446bca95151e4
+gbrp9be a6eb7cde03f19a25bf13627d731a2c9a
+gbrp9le 1b6d228b97a370ec76707ed2dcc15a66
+gray 3258910ef8c6f0c4a331368e7af87507
+gray16be 50cc8a29e8e81e174676542347804a4f
+gray16le fb93e8aa2deed734dfd1ca6a5c48cf18
+rgb0 5f8f8076e2b9a2422cac25a4f0459d79
+rgb24 66ca89ced3ade4c239ad6c5a79a6b8cd
+rgb444be 7197a09137ab5630a26226396bb7e313
+rgb444le 2d10d7d2ef4527152a9ba8b2020c8d41
+rgb48be 7aee07b85b137365eb04a7ea4c10dcfe
+rgb48le debaa00dfed0234efd1441aea8195026
+rgb4_byte 12d6a5a7ca40c531a4d480546ed766eb
+rgb555be 5d36ead35aa8820daab7d84e1ffbb4ac
+rgb555le 10f43b4c9b06742abc114f0ff1864111
+rgb565be 998ca430a1173f194d78a92695e097f9
+rgb565le 99be7760861aa9c1d3d743a1ea311689
+rgb8 6303c75883c1e2e7e13644fb5dcd11a7
+rgba af33be882959a2fc963d6e5984d24b6d
+rgba64be 9ae73ceaa9d95e7981b4df559b0d3b6d
+rgba64le 86997e62f62d2a5351b5ca9c9c93c2d1
+uyvy422 a07829fe630b59855c80d87d0e8f0dbb
+xyz12be 5785613045b051cee508625dde3357df
+xyz12le 3645601dc8bfbdff68cc823a0fc85b27
+yuv411p 3d3d427999863945aa5ec03edf98c19a
+yuv422p 961d8a22bfe8868eb3c743b4241a3c99
+yuv422p10be 845e80406c880f33a384b5ebdd21796d
+yuv422p10le 0dbf5d1eb94f10cc4413976fa175fa90
+yuv422p12be 9e96f9324d0a4eac4b45161b02c14209
+yuv422p12le db17368e8cd5409826a088ee26ad2a1b
+yuv422p14be e20b379fb17e50f279a5ecd9c895537f
+yuv422p14le 9440b0c46dadddc19154054978fce20c
+yuv422p16be 09d09b64aa4b6ae8479a833622bf27e8
+yuv422p16le 03dbbf3394ba66cda54ef53772258dae
+yuv422p9be 7c59e15cb5e6eac9cd090c6ecab4c6b8
+yuv422p9le 98b6b1fbacdfd4c25dbda89b675b660b
+yuv444p 709ffd0e95a9438b14d9ba59c0237a61
+yuv444p10be 5ae7571651faebac6dca2ee4fd24610c
+yuv444p10le eb3acb92448d8b0e6f145e9d8af43378
+yuv444p12be df1e9bb90364d874dbac41a8237d6068
+yuv444p12le 7e21d00cb42404df0780317dc135f03d
+yuv444p14be 6abf70b4b7ff55552327230e08e3dd52
+yuv444p14le 85597559a748aa5aeed2d7dc49d7f66e
+yuv444p16be 2a19a8ca44198d0e90c02fd4db24d0c6
+yuv444p16le 5cb12916afef8c82d1796fdc39b97174
+yuv444p9be fc731b5b0afe0a9de6bd46179c692efa
+yuv444p9le 7449c543e528067afed5d3c96f7b8da2
+yuva422p d07e0ed0cb20eee67007b154f02cf37c
+yuva422p10be 56bfd0af50b9450f72061c605f6537dd
+yuva422p10le bfe1df3aae3e02f0a2ddbd404199e6ac
+yuva422p16be db80a2e1ddc93126206ad4864f67b50c
+yuva422p16le 847e541568b4bce64f916d42173c93c7
+yuva422p9be 5487d986b52f5744a8adac77e46c08f1
+yuva422p9le 6faa61b431b2e0bd4064e5fcecb7130a
+yuva444p 377f65cb10dd8013011e185b6aa66697
+yuva444p10be 5e42b4e4b6b1bfd12aa5869750cd2cce
+yuva444p10le c1fddd5f488fd4f8d1e07288f8a49182
+yuva444p16be ba2b267e0cc0c36041f3db4f4072ee9e
+yuva444p16le c2cfda36aa173be3a4a5c9a8fd66d8db
+yuva444p9be e047805a8f9552a6e0b6d4f772126808
+yuva444p9le 28336905569a55e935d708527a3c9d9d
+yuvj411p 09f79c56109a13eefb68ee729d9a624b
+yuvj422p 942043a34ac7d0f65edced1f6361259c
+yuvj444p 7e4758df8eb9b18ad60e1b69a913f8c8
+yuyv422 6b0c70d5ebf1685857b65456c547ea1c
diff --git a/tests/ref/lavf/gif b/tests/ref/lavf/gif
index 531cd1eda7..4d90abe38f 100644
--- a/tests/ref/lavf/gif
+++ b/tests/ref/lavf/gif
@@ -1,3 +1,3 @@
-e35f5ea283bbcb249818e0078ec72664 *./tests/data/lavf/lavf.gif
-2011766 ./tests/data/lavf/lavf.gif
+8aef8081e8afa445f63f320f4a1c5edb *./tests/data/lavf/lavf.gif
+2030198 ./tests/data/lavf/lavf.gif
./tests/data/lavf/lavf.gif CRC=0x0dc5477c