From d0fd6fc20130ef514df294727bf21d01ccbf588c Mon Sep 17 00:00:00 2001 From: Nathan Adil Maxson Date: Wed, 30 Nov 2011 21:37:33 -0800 Subject: Cleaned up alacenc.c Signed-off-by: Ronald S. Bultje --- libavcodec/alacenc.c | 101 +++++++++++++++++++++++++++------------------ 1 file changed, 54 insertions(+), 47 deletions(-) diff --git a/libavcodec/alacenc.c b/libavcodec/alacenc.c index fe03bb7dad..7e29a24c6f 100644 --- a/libavcodec/alacenc.c +++ b/libavcodec/alacenc.c @@ -75,20 +75,22 @@ typedef struct AlacEncodeContext { } AlacEncodeContext; -static void init_sample_buffers(AlacEncodeContext *s, const int16_t *input_samples) +static void init_sample_buffers(AlacEncodeContext *s, + const int16_t *input_samples) { int ch, i; - for(ch=0;ch<s->avctx->channels;ch++) { + for (ch = 0; ch < s->avctx->channels; ch++) { const int16_t *sptr = input_samples + ch; - for(i=0;i<s->avctx->frame_size;i++) { + for (i = 0; i < s->avctx->frame_size; i++) { s->sample_buf[ch][i] = *sptr; sptr += s->avctx->channels; } } } -static void encode_scalar(AlacEncodeContext *s, int x, int k, int write_sample_size) +static void encode_scalar(AlacEncodeContext *s, int x, + int k, int write_sample_size) { int divisor, q, r; @@ -97,17 +99,17 @@ static void encode_scalar(AlacEncodeContext *s, int x, int k, int write_sample_s q = x / divisor; r = x % divisor; - if(q > 8) { + if (q > 8) { // write escape code and sample value directly put_bits(&s->pbctx, 9, ALAC_ESCAPE_CODE); put_bits(&s->pbctx, write_sample_size, x); } else { - if(q) + if (q) put_bits(&s->pbctx, q, (1<<q)-1); put_bits(&s->pbctx, 1, 0); - if(k != 1) { - if(r > 0) + if (k != 1) { + if (r > 0) put_bits(&s->pbctx, k, r+1); else put_bits(&s->pbctx, k-1, 0); @@ -164,7 +166,7 @@ static int estimate_stereo_mode(int32_t *left_ch, int32_t *right_ch, int n) /* calculate sum of 2nd order residual for each channel */ sum[0] = sum[1] = sum[2] = sum[3] = 0; - for(i=2; i<n; i++) { + for (i = 2; i < n; i++) { lt = left_ch[i] - 2*left_ch[i-1] + left_ch[i-2]; rt = right_ch[i] - 2*right_ch[i-1] + right_ch[i-2]; sum[2] += FFABS((lt + rt) >> 1); @@ -181,8 +183,8 @@ static int estimate_stereo_mode(int32_t *left_ch, int32_t *right_ch, int n) /* return mode with lowest score */ best = 0; - for(i=1; i<4; i++) { - if(score[i] < score[best]) { + for (i = 1; i < 4; i++) { + if (score[i] < score[best]) { best = i; } } @@ -205,7 +207,7 @@ static void alac_stereo_decorrelation(AlacEncodeContext *s) break; case ALAC_CHMODE_LEFT_SIDE: - for(i=0; i<n; i++) + for (i = 0; i < n; i++) right[i] = left[i] - right[i]; s->interlacing_leftweight = 1; @@ -213,7 +215,7 @@ static void alac_stereo_decorrelation(AlacEncodeContext *s) break; case ALAC_CHMODE_RIGHT_SIDE: - for(i=0; i<n; i++) { + for (i = 0; i < n; i++) { tmp = right[i]; right[i] = left[i] - right[i]; left[i] = tmp + (right[i] >> 31); @@ -223,7 +225,7 @@ static void alac_stereo_decorrelation(AlacEncodeContext *s) default: - for(i=0; i<n; i++) { + for (i = 0; i < n; i++) { tmp = left[i]; left[i] = (tmp + right[i]) >> 1; right[i] = tmp - right[i]; @@ -239,10 +241,10 @@ static void alac_linear_predictor(AlacEncodeContext *s, int ch) int i; AlacLPCContext lpc = s->lpc[ch]; - if(lpc.lpc_order == 31) { + if (lpc.lpc_order == 31) { s->predictor_buf[0] = s->sample_buf[ch][0]; - for(i=1; i<s->avctx->frame_size; i++) + for (i = 1; i < s->avctx->frame_size; i++) s->predictor_buf[i] = s->sample_buf[ch][i] - s->sample_buf[ch][i-1]; return; @@ -250,17 +252,17 @@ static void alac_linear_predictor(AlacEncodeContext *s, int ch) // generalised linear predictor - if(lpc.lpc_order > 0) { + if (lpc.lpc_order > 0) { int32_t *samples = s->sample_buf[ch]; int32_t *residual = s->predictor_buf; // generate warm-up samples residual[0] = samples[0]; - for(i=1;i<=lpc.lpc_order;i++) + for (i = 1; i <= lpc.lpc_order; i++) residual[i] = samples[i] - samples[i-1]; // perform lpc on remaining samples - for(i = lpc.lpc_order + 1; i < s->avctx->frame_size; 
i++) { int sum = 1 << (lpc.lpc_quant - 1), res_val, j; for (j = 0; j < lpc.lpc_order; j++) { @@ -303,7 +305,7 @@ static void alac_entropy_coder(AlacEncodeContext *s) int sign_modifier = 0, i, k; int32_t *samples = s->predictor_buf; - for(i=0;i < s->avctx->frame_size;) { + for (i = 0; i < s->avctx->frame_size;) { int x; k = av_log2((history >> 9) + 3); @@ -320,15 +322,15 @@ - ((history * s->rc.history_mult) >> 9); sign_modifier = 0; - if(x > 0xFFFF) + if (x > 0xFFFF) history = 0xFFFF; - if((history < 128) && (i < s->avctx->frame_size)) { + if (history < 128 && i < s->avctx->frame_size) { unsigned int block_size = 0; k = 7 - av_log2(history) + ((history + 16) >> 6); - while((*samples == 0) && (i < s->avctx->frame_size)) { + while (*samples == 0 && i < s->avctx->frame_size) { samples++; i++; block_size++; @@ -347,12 +349,12 @@ static void write_compressed_frame(AlacEncodeContext *s) { int i, j; - if(s->avctx->channels == 2) + if (s->avctx->channels == 2) alac_stereo_decorrelation(s); put_bits(&s->pbctx, 8, s->interlacing_shift); put_bits(&s->pbctx, 8, s->interlacing_leftweight); - for(i=0;i<s->avctx->channels;i++) { + for (i = 0; i < s->avctx->channels; i++) { calc_predictor_params(s, i); @@ -362,14 +364,14 @@ static void write_compressed_frame(AlacEncodeContext *s) put_bits(&s->pbctx, 3, s->rc.rice_modifier); put_bits(&s->pbctx, 5, s->lpc[i].lpc_order); // predictor coeff. table - for(j=0;j<s->lpc[i].lpc_order;j++) { + for (j = 0; j < s->lpc[i].lpc_order; j++) { put_sbits(&s->pbctx, 16, s->lpc[i].lpc_coeff[j]); } } // apply lpc and entropy coding to audio samples - for(i=0;i<s->avctx->channels;i++) { + for (i = 0; i < s->avctx->channels; i++) { alac_linear_predictor(s, i); alac_entropy_coder(s); } @@ -384,13 +386,13 @@ static av_cold int alac_encode_init(AVCodecContext *avctx) avctx->frame_size = DEFAULT_FRAME_SIZE; avctx->bits_per_coded_sample = DEFAULT_SAMPLE_SIZE; - if(avctx->sample_fmt != AV_SAMPLE_FMT_S16) { + if (avctx->sample_fmt != AV_SAMPLE_FMT_S16) { av_log(avctx, AV_LOG_ERROR, "only pcm_s16 input samples are supported\n"); return -1; } // Set default compression level - if(avctx->compression_level == FF_COMPRESSION_DEFAULT) + if (avctx->compression_level == FF_COMPRESSION_DEFAULT) s->compression_level = 2; else s->compression_level = av_clip(avctx->compression_level, 0, 2); @@ -411,21 +413,23 @@ static av_cold int alac_encode_init(AVCodecContext *avctx) AV_WB8 (alac_extradata+17, avctx->bits_per_coded_sample); AV_WB8 (alac_extradata+21, avctx->channels); AV_WB32(alac_extradata+24, s->max_coded_frame_size); - AV_WB32(alac_extradata+28, avctx->sample_rate*avctx->channels*avctx->bits_per_coded_sample); // average bitrate + AV_WB32(alac_extradata+28, + avctx->sample_rate * avctx->channels * avctx->bits_per_coded_sample); // average bitrate AV_WB32(alac_extradata+32, avctx->sample_rate); // Set relevant extradata fields - if(s->compression_level > 0) { + if (s->compression_level > 0) { AV_WB8(alac_extradata+18, s->rc.history_mult); AV_WB8(alac_extradata+19, s->rc.initial_history); AV_WB8(alac_extradata+20, s->rc.k_modifier); } s->min_prediction_order = DEFAULT_MIN_PRED_ORDER; - if(avctx->min_prediction_order >= 0) { - if(avctx->min_prediction_order < MIN_LPC_ORDER || + if (avctx->min_prediction_order >= 0) { + if (avctx->min_prediction_order < MIN_LPC_ORDER || avctx->min_prediction_order > ALAC_MAX_LPC_ORDER) { - av_log(avctx, AV_LOG_ERROR, "invalid min prediction order: %d\n", avctx->min_prediction_order); + av_log(avctx, AV_LOG_ERROR, "invalid 
min prediction order: %d\n", + avctx->min_prediction_order); return -1; } @@ -433,18 +437,20 @@ static av_cold int alac_encode_init(AVCodecContext *avctx) } s->max_prediction_order = DEFAULT_MAX_PRED_ORDER; - if(avctx->max_prediction_order >= 0) { - if(avctx->max_prediction_order < MIN_LPC_ORDER || - avctx->max_prediction_order > ALAC_MAX_LPC_ORDER) { - av_log(avctx, AV_LOG_ERROR, "invalid max prediction order: %d\n", avctx->max_prediction_order); + if (avctx->max_prediction_order >= 0) { + if (avctx->max_prediction_order < MIN_LPC_ORDER || + avctx->max_prediction_order > ALAC_MAX_LPC_ORDER) { + av_log(avctx, AV_LOG_ERROR, "invalid max prediction order: %d\n", + avctx->max_prediction_order); return -1; } s->max_prediction_order = avctx->max_prediction_order; } - if(s->max_prediction_order < s->min_prediction_order) { - av_log(avctx, AV_LOG_ERROR, "invalid prediction orders: min=%d max=%d\n", + if (s->max_prediction_order < s->min_prediction_order) { + av_log(avctx, AV_LOG_ERROR, + "invalid prediction orders: min=%d max=%d\n", s->min_prediction_order, s->max_prediction_order); return -1; } @@ -469,12 +475,12 @@ static int alac_encode_frame(AVCodecContext *avctx, uint8_t *frame, PutBitContext *pb = &s->pbctx; int i, out_bytes, verbatim_flag = 0; - if(avctx->frame_size > DEFAULT_FRAME_SIZE) { + if (avctx->frame_size > DEFAULT_FRAME_SIZE) { av_log(avctx, AV_LOG_ERROR, "input frame size exceeded\n"); return -1; } - if(buf_size < 2*s->max_coded_frame_size) { + if (buf_size < 2 * s->max_coded_frame_size) { av_log(avctx, AV_LOG_ERROR, "buffer size is too small\n"); return -1; } @@ -482,11 +488,11 @@ static int alac_encode_frame(AVCodecContext *avctx, uint8_t *frame, verbatim: init_put_bits(pb, frame, buf_size); - if((s->compression_level == 0) || verbatim_flag) { + if (s->compression_level == 0 || verbatim_flag) { // Verbatim mode const int16_t *samples = data; write_frame_header(s, 1); - for(i=0; i<avctx->frame_size*avctx->channels; i++) { + for (i = 0; i < avctx->frame_size * avctx->channels; i++) { put_sbits(pb, 16, *samples++); } } else { @@ -499,9 +505,9 @@ verbatim: flush_put_bits(pb); out_bytes = put_bits_count(pb) >> 3; - if(out_bytes > s->max_coded_frame_size) { + if (out_bytes > s->max_coded_frame_size) { /* frame too large. use verbatim mode */ - if(verbatim_flag || (s->compression_level == 0)) { + if (verbatim_flag || s->compression_level == 0) { /* still too large. must be an error. */ av_log(avctx, AV_LOG_ERROR, "error encoding frame\n"); return -1; @@ -532,6 +538,7 @@ AVCodec ff_alac_encoder = { .encode = alac_encode_frame, .close = alac_encode_close, .capabilities = CODEC_CAP_SMALL_LAST_FRAME, - .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE}, + .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16, + AV_SAMPLE_FMT_NONE }, .long_name = NULL_IF_CONFIG_SMALL("ALAC (Apple Lossless Audio Codec)"), }; -- cgit v1.2.3 From 30266038bdfdecd244edbd91f19e19cb159a74ab Mon Sep 17 00:00:00 2001 From: Martin Storsjö Date: Fri, 2 Dec 2011 11:06:18 +0200 Subject: rtsp: Initialize the media_type_mask in the rtp guessing demuxer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The media_type_mask is initialized via AVOptions for the rtsp and sdp demuxers, but it isn't available as an option for the rtp guessing demuxer (since it doesn't really make sense there). Therefore, it must be manually initialized instead, since a zero value means no media types at all are accepted. 
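As a rough illustration of what this mask encodes (a sketch only, not part of the patch; the helper below is hypothetical and merely mirrors how such a bitfield behaves): the mask has one bit per enum AVMediaType value, so a zero mask admits nothing, while (1 << (AVMEDIA_TYPE_DATA+1)) - 1 sets the bits for AVMEDIA_TYPE_VIDEO, AVMEDIA_TYPE_AUDIO and AVMEDIA_TYPE_DATA.

    #include <libavutil/avutil.h>

    /* Hypothetical helper, for illustration only: a stream type is accepted
     * when its bit is set in the mask.  AVMEDIA_TYPE_VIDEO/AUDIO/DATA are
     * 0, 1 and 2, so (1 << (AVMEDIA_TYPE_DATA + 1)) - 1 == 0x7 covers all
     * three, while a mask of 0 rejects every stream. */
    static int media_type_allowed(int media_type_mask, enum AVMediaType type)
    {
        return type >= 0 && (media_type_mask & (1 << type));
    }
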
Signed-off-by: Martin Storsjö --- libavformat/rtsp.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/libavformat/rtsp.c b/libavformat/rtsp.c index 7548a418b2..b63f9f68cf 100644 --- a/libavformat/rtsp.c +++ b/libavformat/rtsp.c @@ -1934,6 +1934,7 @@ static int rtp_read_header(AVFormatContext *s, struct sockaddr_storage addr; AVIOContext pb; socklen_t addrlen = sizeof(addr); + RTSPState *rt = s->priv_data; if (!ff_network_init()) return AVERROR(EIO); @@ -1997,6 +1998,8 @@ static int rtp_read_header(AVFormatContext *s, /* sdp_read_header initializes this again */ ff_network_close(); + rt->media_type_mask = (1 << (AVMEDIA_TYPE_DATA+1)) - 1; + ret = sdp_read_header(s, ap); s->pb = NULL; return ret; -- cgit v1.2.3 From 73f027c17b0063233ec80e86a6b74ea4df6705c2 Mon Sep 17 00:00:00 2001 From: Luca Barbato Date: Mon, 14 Nov 2011 15:18:49 +0100 Subject: asf: do not call av_read_frame The asf_read_pts should read the bitstream directly. --- libavformat/asfdec.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libavformat/asfdec.c b/libavformat/asfdec.c index 4c9c59d606..49a9b1d539 100644 --- a/libavformat/asfdec.c +++ b/libavformat/asfdec.c @@ -1168,7 +1168,7 @@ static int64_t asf_read_pts(AVFormatContext *s, int stream_index, int64_t *ppos, //printf("asf_read_pts\n"); asf_reset_header(s); for(;;){ - if (av_read_frame(s, pkt) < 0){ + if (asf_read_packet(s, pkt) < 0){ av_log(s, AV_LOG_INFO, "asf_read_pts failed\n"); return AV_NOPTS_VALUE; } -- cgit v1.2.3 From b88eb87630c3384517437a1ee640bca9fe92852f Mon Sep 17 00:00:00 2001 From: John Stebbins Date: Tue, 15 Nov 2011 03:56:37 +0100 Subject: asf: use packet dts as approximation of pts Having a somehow off seeking is better than having none at all. Signed-off-by: Luca Barbato --- libavformat/asfdec.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libavformat/asfdec.c b/libavformat/asfdec.c index 49a9b1d539..1246cc1b65 100644 --- a/libavformat/asfdec.c +++ b/libavformat/asfdec.c @@ -1173,7 +1173,7 @@ static int64_t asf_read_pts(AVFormatContext *s, int stream_index, int64_t *ppos, return AV_NOPTS_VALUE; } - pts= pkt->pts; + pts = pkt->dts; av_free_packet(pkt); if(pkt->flags&AV_PKT_FLAG_KEY){ -- cgit v1.2.3 From 7d68f592f6b3b434b4a4c96a3e28c4149ef17060 Mon Sep 17 00:00:00 2001 From: Luca Barbato Date: Fri, 2 Dec 2011 10:26:00 +0100 Subject: doc: improve general documentation for MacOSX Signed-off-by: Luca Barbato --- doc/general.texi | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/doc/general.texi b/doc/general.texi index dc7e8bb77b..ca9731ef55 100644 --- a/doc/general.texi +++ b/doc/general.texi @@ -809,13 +809,22 @@ bash directly to work around this: bash ./configure @end example -@subsection Darwin (MacOS X, iPhone) +@anchor{Darwin} +@subsection Darwin (OSX, iPhone) -MacOS X on PowerPC or ARM (iPhone) requires a preprocessor from +The toolchain provided with Xcode is sufficient to build the basic +unacelerated code. + +OSX on PowerPC or ARM (iPhone) requires a preprocessor from @url{http://github.com/yuvi/gas-preprocessor} to build the optimized assembler functions. Just download the Perl script and put it somewhere in your PATH, Libav's configure will pick it up automatically. +OSX on amd64 and x86 requires @command{yasm} to build most of the +optimized assembler functions @url{http://mxcl.github.com/homebrew/, Homebrew}, +@url{http://www.gentoo.org/proj/en/gentoo-alt/prefix/bootstrap-macos.xml, Gentoo Prefix} +or @url{http://www.macports.org, MacPorts} can easily provide it. 
+ @section Windows @subsection Native Windows compilation -- cgit v1.2.3 From e02dec25aba7fc8b48b235725a311af906818eee Mon Sep 17 00:00:00 2001 From: Aaron Colwell Date: Mon, 28 Nov 2011 07:21:47 -0800 Subject: vp8: flush buffers on size changes. --- libavcodec/vp8.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/libavcodec/vp8.c b/libavcodec/vp8.c index d9be734a9b..833819890d 100644 --- a/libavcodec/vp8.c +++ b/libavcodec/vp8.c @@ -51,8 +51,7 @@ static int vp8_alloc_frame(VP8Context *s, AVFrame *f) int ret; if ((ret = ff_thread_get_buffer(s->avctx, f)) < 0) return ret; - if (s->num_maps_to_be_freed) { - assert(!s->maps_are_invalid); + if (s->num_maps_to_be_freed && !s->maps_are_invalid) { f->ref_index[0] = s->segmentation_maps[--s->num_maps_to_be_freed]; } else if (!(f->ref_index[0] = av_mallocz(s->mb_width * s->mb_height))) { ff_thread_release_buffer(s->avctx, f); @@ -1567,13 +1566,15 @@ static int vp8_decode_frame(AVCodecContext *avctx, void *data, int *data_size, VP8Context *s = avctx->priv_data; int ret, mb_x, mb_y, i, y, referenced; enum AVDiscard skip_thresh; - AVFrame *av_uninit(curframe), *prev_frame = s->framep[VP56_FRAME_CURRENT]; + AVFrame *av_uninit(curframe), *prev_frame; release_queued_segmaps(s, 0); if ((ret = decode_frame_header(s, avpkt->data, avpkt->size)) < 0) return ret; + prev_frame = s->framep[VP56_FRAME_CURRENT]; + referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT || s->update_altref == VP56_FRAME_CURRENT; @@ -1814,6 +1815,7 @@ static int vp8_decode_update_thread_context(AVCodecContext *dst, const AVCodecCo if (s->macroblocks_base && (s_src->mb_width != s->mb_width || s_src->mb_height != s->mb_height)) { free_buffers(s); + s->maps_are_invalid = 1; } s->prob[0] = s_src->prob[!s_src->update_probabilities]; -- cgit v1.2.3 From 5a2e2516456e383575b44545d17c7f5859ca67b9 Mon Sep 17 00:00:00 2001 From: Luca Barbato Date: Fri, 2 Dec 2011 14:07:45 +0100 Subject: fate: update asf seektest --- tests/ref/seek/lavf_asf | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/ref/seek/lavf_asf b/tests/ref/seek/lavf_asf index 757fd0e0bb..72c0f5698b 100644 --- a/tests/ref/seek/lavf_asf +++ b/tests/ref/seek/lavf_asf @@ -2,9 +2,9 @@ ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 575 size: 28487 ret: 0 st:-1 flags:0 ts:-1.000000 ret: 0 st: 1 flags:1 dts: 0.444000 pts: 0.444000 pos: 147775 size: 209 ret: 0 st:-1 flags:1 ts: 1.894167 -ret: 0 st: 1 flags:1 dts: 0.960000 pts: 0.960000 pos: -1 size: 209 +ret: 0 st: 1 flags:1 dts: 0.940000 pts: 0.940000 pos: 301375 size: 209 ret: 0 st: 0 flags:0 ts: 0.788000 -ret: 0 st: 1 flags:1 dts: 0.960000 pts: 0.960000 pos: -1 size: 209 +ret: 0 st: 1 flags:1 dts: 0.940000 pts: 0.940000 pos: 301375 size: 209 ret: 0 st: 0 flags:1 ts:-0.317000 ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 575 size: 28487 ret:-1 st: 1 flags:0 ts: 2.577000 @@ -14,29 +14,29 @@ ret: 0 st: 1 flags:1 dts: 0.444000 pts: 0.444000 pos: 147775 size: 209 ret: 0 st:-1 flags:1 ts:-0.740831 ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 575 size: 28487 ret: 0 st: 0 flags:0 ts: 2.153000 -ret: 0 st: 1 flags:1 dts: 0.960000 pts: 0.960000 pos: -1 size: 209 +ret: 0 st: 1 flags:1 dts: 0.940000 pts: 0.940000 pos: 301375 size: 209 ret: 0 st: 0 flags:1 ts: 1.048000 -ret: 0 st: 1 flags:1 dts: 0.960000 pts: 0.960000 pos: -1 size: 209 +ret: 0 st: 1 flags:1 dts: 0.940000 pts: 0.940000 pos: 301375 size: 209 ret: 0 st: 1 flags:0 ts:-0.058000 ret: 0 st: 1 flags:1 dts: 0.000000 pts: 0.000000 pos: 29375 
size: 208 ret:-1 st: 1 flags:1 ts: 2.836000 ret: 0 st:-1 flags:0 ts: 1.730004 -ret: 0 st: 1 flags:1 dts: 0.960000 pts: 0.960000 pos: -1 size: 209 +ret: 0 st: 1 flags:1 dts: 0.940000 pts: 0.940000 pos: 301375 size: 209 ret: 0 st:-1 flags:1 ts: 0.624171 ret: 0 st: 1 flags:1 dts: 0.444000 pts: 0.444000 pos: 147775 size: 209 ret: 0 st: 0 flags:0 ts:-0.482000 ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 575 size: 28487 ret: 0 st: 0 flags:1 ts: 2.413000 -ret: 0 st: 1 flags:1 dts: 0.960000 pts: 0.960000 pos: -1 size: 209 +ret: 0 st: 1 flags:1 dts: 0.940000 pts: 0.940000 pos: 301375 size: 209 ret:-1 st: 1 flags:0 ts: 1.307000 ret: 0 st: 1 flags:1 ts: 0.201000 ret: 0 st: 1 flags:1 dts: 0.183000 pts: 0.183000 pos: 70975 size: 209 ret: 0 st:-1 flags:0 ts:-0.904994 ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 575 size: 28487 ret: 0 st:-1 flags:1 ts: 1.989173 -ret: 0 st: 1 flags:1 dts: 0.960000 pts: 0.960000 pos: -1 size: 209 +ret: 0 st: 1 flags:1 dts: 0.940000 pts: 0.940000 pos: 301375 size: 209 ret: 0 st: 0 flags:0 ts: 0.883000 -ret: 0 st: 1 flags:1 dts: 0.960000 pts: 0.960000 pos: -1 size: 209 +ret: 0 st: 1 flags:1 dts: 0.940000 pts: 0.940000 pos: 301375 size: 209 ret: 0 st: 0 flags:1 ts:-0.222000 ret: 0 st: 0 flags:1 dts: 0.000000 pts: 0.000000 pos: 575 size: 28487 ret:-1 st: 1 flags:0 ts: 2.672000 -- cgit v1.2.3 From 150ddbc1482c65b9aac803f011d7fcd734f776ec Mon Sep 17 00:00:00 2001 From: Mans Rullgard Date: Tue, 29 Nov 2011 18:38:02 +0000 Subject: Do not memcpy raw video frames when using null muxer Commit 035af99 made avconv always call an encoder when using the null muxer. While useful for 2-pass encodes, it inadvertently caused an extra memcpy of raw frames when decoding only. This hack restores the old behaviour when only decoding while allowing use of the null muxer with encoded streams as well. Signed-off-by: Mans Rullgard --- avconv.c | 5 +++-- libavformat/nullenc.c | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/avconv.c b/avconv.c index d6045b7ef4..8842b24599 100644 --- a/avconv.c +++ b/avconv.c @@ -1207,7 +1207,8 @@ static void do_video_out(AVFormatContext *s, av_init_packet(&pkt); pkt.stream_index= ost->index; - if (s->oformat->flags & AVFMT_RAWPICTURE) { + if (s->oformat->flags & AVFMT_RAWPICTURE && + enc->codec->id == CODEC_ID_RAWVIDEO) { /* raw pictures are written as AVPicture structure to avoid any copies. We support temporarily the older method. 
*/ @@ -1459,7 +1460,7 @@ static void flush_encoders(OutputStream *ost_table, int nb_ostreams) if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <=1) continue; - if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE)) + if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == CODEC_ID_RAWVIDEO) continue; for(;;) { diff --git a/libavformat/nullenc.c b/libavformat/nullenc.c index 680b29c828..9849f460f0 100644 --- a/libavformat/nullenc.c +++ b/libavformat/nullenc.c @@ -32,5 +32,5 @@ AVOutputFormat ff_null_muxer = { .audio_codec = AV_NE(CODEC_ID_PCM_S16BE, CODEC_ID_PCM_S16LE), .video_codec = CODEC_ID_RAWVIDEO, .write_packet = null_write_packet, - .flags = AVFMT_NOFILE | AVFMT_NOTIMESTAMPS, + .flags = AVFMT_NOFILE | AVFMT_NOTIMESTAMPS | AVFMT_RAWPICTURE, }; -- cgit v1.2.3 From a760f530bba6d21484c611de67d072fdab56e08e Mon Sep 17 00:00:00 2001 From: Janne Grunau Date: Tue, 29 Nov 2011 13:38:10 +0000 Subject: ARM: make some NEON macros reusable Signed-off-by: Mans Rullgard --- libavcodec/arm/h264dsp_neon.S | 41 +----------------------------- libavcodec/arm/neon.S | 59 +++++++++++++++++++++++++++++++++++++++++++ libavcodec/arm/vp8dsp_neon.S | 26 ++++--------------- 3 files changed, 65 insertions(+), 61 deletions(-) create mode 100644 libavcodec/arm/neon.S diff --git a/libavcodec/arm/h264dsp_neon.S b/libavcodec/arm/h264dsp_neon.S index 5156538ed2..1e97908db2 100644 --- a/libavcodec/arm/h264dsp_neon.S +++ b/libavcodec/arm/h264dsp_neon.S @@ -19,46 +19,7 @@ */ #include "asm.S" - - .macro transpose_8x8 r0 r1 r2 r3 r4 r5 r6 r7 - vtrn.32 \r0, \r4 - vtrn.32 \r1, \r5 - vtrn.32 \r2, \r6 - vtrn.32 \r3, \r7 - vtrn.16 \r0, \r2 - vtrn.16 \r1, \r3 - vtrn.16 \r4, \r6 - vtrn.16 \r5, \r7 - vtrn.8 \r0, \r1 - vtrn.8 \r2, \r3 - vtrn.8 \r4, \r5 - vtrn.8 \r6, \r7 - .endm - - .macro transpose_4x4 r0 r1 r2 r3 - vtrn.16 \r0, \r2 - vtrn.16 \r1, \r3 - vtrn.8 \r0, \r1 - vtrn.8 \r2, \r3 - .endm - - .macro swap4 r0 r1 r2 r3 r4 r5 r6 r7 - vswp \r0, \r4 - vswp \r1, \r5 - vswp \r2, \r6 - vswp \r3, \r7 - .endm - - .macro transpose16_4x4 r0 r1 r2 r3 r4 r5 r6 r7 - vtrn.32 \r0, \r2 - vtrn.32 \r1, \r3 - vtrn.32 \r4, \r6 - vtrn.32 \r5, \r7 - vtrn.16 \r0, \r1 - vtrn.16 \r2, \r3 - vtrn.16 \r4, \r5 - vtrn.16 \r6, \r7 - .endm +#include "neon.S" /* chroma_mc8(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */ .macro h264_chroma_mc8 type diff --git a/libavcodec/arm/neon.S b/libavcodec/arm/neon.S new file mode 100644 index 0000000000..716a607af7 --- /dev/null +++ b/libavcodec/arm/neon.S @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2008 Mans Rullgard + * + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +.macro transpose_8x8 r0, r1, r2, r3, r4, r5, r6, r7 + vtrn.32 \r0, \r4 + vtrn.32 \r1, \r5 + vtrn.32 \r2, \r6 + vtrn.32 \r3, \r7 + vtrn.16 \r0, \r2 + vtrn.16 \r1, \r3 + vtrn.16 \r4, \r6 + vtrn.16 \r5, \r7 + vtrn.8 \r0, \r1 + vtrn.8 \r2, \r3 + vtrn.8 \r4, \r5 + vtrn.8 \r6, \r7 +.endm + +.macro transpose_4x4 r0, r1, r2, r3 + vtrn.16 \r0, \r2 + vtrn.16 \r1, \r3 + vtrn.8 \r0, \r1 + vtrn.8 \r2, \r3 +.endm + +.macro swap4 r0, r1, r2, r3, r4, r5, r6, r7 + vswp \r0, \r4 + vswp \r1, \r5 + vswp \r2, \r6 + vswp \r3, \r7 +.endm + +.macro transpose16_4x4 r0, r1, r2, r3, r4, r5, r6, r7 + vtrn.32 \r0, \r2 + vtrn.32 \r1, \r3 + vtrn.32 \r4, \r6 + vtrn.32 \r5, \r7 + vtrn.16 \r0, \r1 + vtrn.16 \r2, \r3 + vtrn.16 \r4, \r5 + vtrn.16 \r6, \r7 +.endm diff --git a/libavcodec/arm/vp8dsp_neon.S b/libavcodec/arm/vp8dsp_neon.S index 1b9f24eef2..4ff53ad70f 100644 --- a/libavcodec/arm/vp8dsp_neon.S +++ b/libavcodec/arm/vp8dsp_neon.S @@ -22,6 +22,7 @@ */ #include "asm.S" +#include "neon.S" function ff_vp8_luma_dc_wht_neon, export=1 vld1.16 {q0-q1}, [r1,:128] @@ -454,23 +455,6 @@ endfunc .endif .endm -.macro transpose8x16matrix - vtrn.32 q0, q4 - vtrn.32 q1, q5 - vtrn.32 q2, q6 - vtrn.32 q3, q7 - - vtrn.16 q0, q2 - vtrn.16 q1, q3 - vtrn.16 q4, q6 - vtrn.16 q5, q7 - - vtrn.8 q0, q1 - vtrn.8 q2, q3 - vtrn.8 q4, q5 - vtrn.8 q6, q7 -.endm - .macro vp8_v_loop_filter16 name, inner=0, simple=0 function ff_vp8_v_loop_filter16\name\()_neon, export=1 vpush {q4-q7} @@ -605,7 +589,7 @@ function ff_vp8_h_loop_filter16\name\()_neon, export=1 vld1.8 {d13}, [r0], r1 vld1.8 {d15}, [r0], r1 - transpose8x16matrix + transpose_8x8 q0, q1, q2, q3, q4, q5, q6, q7 vdup.8 q14, r2 @ flim_E .if !\simple @@ -616,7 +600,7 @@ function ff_vp8_h_loop_filter16\name\()_neon, export=1 sub r0, r0, r1, lsl #4 @ backup 16 rows - transpose8x16matrix + transpose_8x8 q0, q1, q2, q3, q4, q5, q6, q7 @ Store pixels: vst1.8 {d0}, [r0], r1 @@ -670,7 +654,7 @@ function ff_vp8_h_loop_filter8uv\name\()_neon, export=1 vld1.8 {d14}, [r0], r2 vld1.8 {d15}, [r1], r2 - transpose8x16matrix + transpose_8x8 q0, q1, q2, q3, q4, q5, q6, q7 vdup.8 q14, r3 @ flim_E vdup.8 q15, r12 @ flim_I @@ -681,7 +665,7 @@ function ff_vp8_h_loop_filter8uv\name\()_neon, export=1 sub r0, r0, r2, lsl #3 @ backup u 8 rows sub r1, r1, r2, lsl #3 @ backup v 8 rows - transpose8x16matrix + transpose_8x8 q0, q1, q2, q3, q4, q5, q6, q7 @ Store pixels: vst1.8 {d0}, [r0], r2 -- cgit v1.2.3 From 59807fee6dfca909d966da3d2203d6c3cbf356c8 Mon Sep 17 00:00:00 2001 From: Mans Rullgard Date: Fri, 2 Dec 2011 16:45:00 +0000 Subject: ARM: h264dsp_neon cosmetics - Replace 'ip' with 'r12'. - Use correct size designators for vld1/vst1. - Whitespace fixes. 
Signed-off-by: Mans Rullgard --- libavcodec/arm/h264dsp_neon.S | 626 +++++++++++++++++++++--------------------- 1 file changed, 313 insertions(+), 313 deletions(-) diff --git a/libavcodec/arm/h264dsp_neon.S b/libavcodec/arm/h264dsp_neon.S index 1e97908db2..e23f7b149a 100644 --- a/libavcodec/arm/h264dsp_neon.S +++ b/libavcodec/arm/h264dsp_neon.S @@ -22,13 +22,13 @@ #include "neon.S" /* chroma_mc8(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */ - .macro h264_chroma_mc8 type +.macro h264_chroma_mc8 type function ff_\type\()_h264_chroma_mc8_neon, export=1 push {r4-r7, lr} ldrd r4, [sp, #20] -.ifc \type,avg + .ifc \type,avg mov lr, r0 -.endif + .endif pld [r1] pld [r1, r2] @@ -36,7 +36,7 @@ A muls r7, r4, r5 T mul r7, r4, r5 T cmp r7, #0 rsb r6, r7, r5, lsl #3 - rsb ip, r7, r4, lsl #3 + rsb r12, r7, r4, lsl #3 sub r4, r7, r4, lsl #3 sub r4, r4, r5, lsl #3 add r4, r4, #64 @@ -47,10 +47,10 @@ T cmp r7, #0 vdup.8 d0, r4 lsl r4, r2, #1 - vdup.8 d1, ip - vld1.64 {d4, d5}, [r1], r4 + vdup.8 d1, r12 + vld1.8 {d4, d5}, [r1], r4 vdup.8 d2, r6 - vld1.64 {d6, d7}, [r5], r4 + vld1.8 {d6, d7}, [r5], r4 vdup.8 d3, r7 vext.8 d5, d4, d5, #1 @@ -59,7 +59,7 @@ T cmp r7, #0 1: pld [r5] vmull.u8 q8, d4, d0 vmlal.u8 q8, d5, d1 - vld1.64 {d4, d5}, [r1], r4 + vld1.8 {d4, d5}, [r1], r4 vmlal.u8 q8, d6, d2 vext.8 d5, d4, d5, #1 vmlal.u8 q8, d7, d3 @@ -69,57 +69,57 @@ T cmp r7, #0 vmlal.u8 q9, d4, d2 vmlal.u8 q9, d5, d3 vrshrn.u16 d16, q8, #6 - vld1.64 {d6, d7}, [r5], r4 + vld1.8 {d6, d7}, [r5], r4 pld [r1] vrshrn.u16 d17, q9, #6 -.ifc \type,avg - vld1.64 {d20}, [lr,:64], r2 - vld1.64 {d21}, [lr,:64], r2 + .ifc \type,avg + vld1.8 {d20}, [lr,:64], r2 + vld1.8 {d21}, [lr,:64], r2 vrhadd.u8 q8, q8, q10 -.endif + .endif vext.8 d7, d6, d7, #1 - vst1.64 {d16}, [r0,:64], r2 - vst1.64 {d17}, [r0,:64], r2 + vst1.8 {d16}, [r0,:64], r2 + vst1.8 {d17}, [r0,:64], r2 bgt 1b pop {r4-r7, pc} 2: tst r6, r6 - add ip, ip, r6 + add r12, r12, r6 vdup.8 d0, r4 - vdup.8 d1, ip + vdup.8 d1, r12 beq 4f add r5, r1, r2 lsl r4, r2, #1 - vld1.64 {d4}, [r1], r4 - vld1.64 {d6}, [r5], r4 + vld1.8 {d4}, [r1], r4 + vld1.8 {d6}, [r5], r4 3: pld [r5] vmull.u8 q8, d4, d0 vmlal.u8 q8, d6, d1 - vld1.64 {d4}, [r1], r4 + vld1.8 {d4}, [r1], r4 vmull.u8 q9, d6, d0 vmlal.u8 q9, d4, d1 - vld1.64 {d6}, [r5], r4 + vld1.8 {d6}, [r5], r4 vrshrn.u16 d16, q8, #6 vrshrn.u16 d17, q9, #6 -.ifc \type,avg - vld1.64 {d20}, [lr,:64], r2 - vld1.64 {d21}, [lr,:64], r2 + .ifc \type,avg + vld1.8 {d20}, [lr,:64], r2 + vld1.8 {d21}, [lr,:64], r2 vrhadd.u8 q8, q8, q10 -.endif + .endif subs r3, r3, #2 pld [r1] - vst1.64 {d16}, [r0,:64], r2 - vst1.64 {d17}, [r0,:64], r2 + vst1.8 {d16}, [r0,:64], r2 + vst1.8 {d17}, [r0,:64], r2 bgt 3b pop {r4-r7, pc} -4: vld1.64 {d4, d5}, [r1], r2 - vld1.64 {d6, d7}, [r1], r2 +4: vld1.8 {d4, d5}, [r1], r2 + vld1.8 {d6, d7}, [r1], r2 vext.8 d5, d4, d5, #1 vext.8 d7, d6, d7, #1 @@ -127,36 +127,36 @@ T cmp r7, #0 subs r3, r3, #2 vmull.u8 q8, d4, d0 vmlal.u8 q8, d5, d1 - vld1.64 {d4, d5}, [r1], r2 + vld1.8 {d4, d5}, [r1], r2 vmull.u8 q9, d6, d0 vmlal.u8 q9, d7, d1 pld [r1] vext.8 d5, d4, d5, #1 vrshrn.u16 d16, q8, #6 vrshrn.u16 d17, q9, #6 -.ifc \type,avg - vld1.64 {d20}, [lr,:64], r2 - vld1.64 {d21}, [lr,:64], r2 + .ifc \type,avg + vld1.8 {d20}, [lr,:64], r2 + vld1.8 {d21}, [lr,:64], r2 vrhadd.u8 q8, q8, q10 -.endif - vld1.64 {d6, d7}, [r1], r2 + .endif + vld1.8 {d6, d7}, [r1], r2 vext.8 d7, d6, d7, #1 - vst1.64 {d16}, [r0,:64], r2 - vst1.64 {d17}, [r0,:64], r2 + vst1.8 {d16}, [r0,:64], r2 + vst1.8 {d17}, [r0,:64], r2 bgt 5b pop {r4-r7, pc} 
endfunc - .endm +.endm /* chroma_mc4(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */ - .macro h264_chroma_mc4 type +.macro h264_chroma_mc4 type function ff_\type\()_h264_chroma_mc4_neon, export=1 push {r4-r7, lr} ldrd r4, [sp, #20] -.ifc \type,avg + .ifc \type,avg mov lr, r0 -.endif + .endif pld [r1] pld [r1, r2] @@ -164,7 +164,7 @@ A muls r7, r4, r5 T mul r7, r4, r5 T cmp r7, #0 rsb r6, r7, r5, lsl #3 - rsb ip, r7, r4, lsl #3 + rsb r12, r7, r4, lsl #3 sub r4, r7, r4, lsl #3 sub r4, r4, r5, lsl #3 add r4, r4, #64 @@ -175,10 +175,10 @@ T cmp r7, #0 vdup.8 d0, r4 lsl r4, r2, #1 - vdup.8 d1, ip - vld1.64 {d4}, [r1], r4 + vdup.8 d1, r12 + vld1.8 {d4}, [r1], r4 vdup.8 d2, r6 - vld1.64 {d6}, [r5], r4 + vld1.8 {d6}, [r5], r4 vdup.8 d3, r7 vext.8 d5, d4, d5, #1 @@ -192,22 +192,22 @@ T cmp r7, #0 1: pld [r5] vmull.u8 q8, d4, d0 vmlal.u8 q8, d6, d2 - vld1.64 {d4}, [r1], r4 + vld1.8 {d4}, [r1], r4 vext.8 d5, d4, d5, #1 vtrn.32 d4, d5 vmull.u8 q9, d6, d0 vmlal.u8 q9, d4, d2 - vld1.64 {d6}, [r5], r4 + vld1.8 {d6}, [r5], r4 vadd.i16 d16, d16, d17 vadd.i16 d17, d18, d19 vrshrn.u16 d16, q8, #6 subs r3, r3, #2 pld [r1] -.ifc \type,avg + .ifc \type,avg vld1.32 {d20[0]}, [lr,:32], r2 vld1.32 {d20[1]}, [lr,:32], r2 vrhadd.u8 d16, d16, d20 -.endif + .endif vext.8 d7, d6, d7, #1 vtrn.32 d6, d7 vst1.32 {d16[0]}, [r0,:32], r2 @@ -217,9 +217,9 @@ T cmp r7, #0 pop {r4-r7, pc} 2: tst r6, r6 - add ip, ip, r6 + add r12, r12, r6 vdup.8 d0, r4 - vdup.8 d1, ip + vdup.8 d1, r12 vtrn.32 d0, d1 beq 4f @@ -238,11 +238,11 @@ T cmp r7, #0 vadd.i16 d16, d16, d17 vadd.i16 d17, d18, d19 vrshrn.u16 d16, q8, #6 -.ifc \type,avg + .ifc \type,avg vld1.32 {d20[0]}, [lr,:32], r2 vld1.32 {d20[1]}, [lr,:32], r2 vrhadd.u8 d16, d16, d20 -.endif + .endif subs r3, r3, #2 pld [r1] vst1.32 {d16[0]}, [r0,:32], r2 @@ -251,8 +251,8 @@ T cmp r7, #0 pop {r4-r7, pc} -4: vld1.64 {d4}, [r1], r2 - vld1.64 {d6}, [r1], r2 +4: vld1.8 {d4}, [r1], r2 + vld1.8 {d6}, [r1], r2 vext.8 d5, d4, d5, #1 vext.8 d7, d6, d7, #1 vtrn.32 d4, d5 @@ -261,19 +261,19 @@ T cmp r7, #0 5: vmull.u8 q8, d4, d0 vmull.u8 q9, d6, d0 subs r3, r3, #2 - vld1.64 {d4}, [r1], r2 + vld1.8 {d4}, [r1], r2 vext.8 d5, d4, d5, #1 vtrn.32 d4, d5 vadd.i16 d16, d16, d17 vadd.i16 d17, d18, d19 pld [r1] vrshrn.u16 d16, q8, #6 -.ifc \type,avg + .ifc \type,avg vld1.32 {d20[0]}, [lr,:32], r2 vld1.32 {d20[1]}, [lr,:32], r2 vrhadd.u8 d16, d16, d20 -.endif - vld1.64 {d6}, [r1], r2 + .endif + vld1.8 {d6}, [r1], r2 vext.8 d7, d6, d7, #1 vtrn.32 d6, d7 pld [r1] @@ -283,9 +283,9 @@ T cmp r7, #0 pop {r4-r7, pc} endfunc - .endm +.endm - .macro h264_chroma_mc2 type +.macro h264_chroma_mc2 type function ff_\type\()_h264_chroma_mc2_neon, export=1 push {r4-r6, lr} ldr r4, [sp, #16] @@ -315,29 +315,29 @@ function ff_\type\()_h264_chroma_mc2_neon, export=1 vtrn.16 q2, q3 vmull.u8 q8, d4, d0 vmlal.u8 q8, d5, d1 -.ifc \type,avg + .ifc \type,avg vld1.16 {d18[0]}, [r0,:16], r2 vld1.16 {d18[1]}, [r0,:16] sub r0, r0, r2 -.endif + .endif vtrn.32 d16, d17 vadd.i16 d16, d16, d17 vrshrn.u16 d16, q8, #6 -.ifc \type,avg + .ifc \type,avg vrhadd.u8 d16, d16, d18 -.endif + .endif vst1.16 {d16[0]}, [r0,:16], r2 vst1.16 {d16[1]}, [r0,:16], r2 subs r3, r3, #2 bgt 1b pop {r4-r6, pc} 2: -.ifc \type,put + .ifc \type,put ldrh_post r5, r1, r2 strh_post r5, r0, r2 ldrh_post r6, r1, r2 strh_post r6, r0, r2 -.else + .else vld1.16 {d16[0]}, [r1], r2 vld1.16 {d16[1]}, [r1], r2 vld1.16 {d18[0]}, [r0,:16], r2 @@ -346,7 +346,7 @@ function ff_\type\()_h264_chroma_mc2_neon, export=1 vrhadd.u8 d16, d16, d18 vst1.16 {d16[0]}, [r0,:16], 
r2 vst1.16 {d16[1]}, [r0,:16], r2 -.endif + .endif subs r3, r3, #2 bgt 2b pop {r4-r6, pc} @@ -362,22 +362,22 @@ endfunc /* H.264 loop filter */ - .macro h264_loop_filter_start - ldr ip, [sp] +.macro h264_loop_filter_start + ldr r12, [sp] tst r2, r2 - ldr ip, [ip] + ldr r12, [r12] it ne tstne r3, r3 - vmov.32 d24[0], ip - and ip, ip, ip, lsl #16 + vmov.32 d24[0], r12 + and r12, r12, r12, lsl #16 it eq bxeq lr - ands ip, ip, ip, lsl #8 + ands r12, r12, r12, lsl #8 it lt bxlt lr - .endm +.endm - .macro h264_loop_filter_luma +.macro h264_loop_filter_luma vdup.8 q11, r2 @ alpha vmovl.u8 q12, d24 vabd.u8 q6, q8, q0 @ abs(p0 - q0) @@ -443,29 +443,29 @@ endfunc vqmovun.s16 d17, q6 vqmovun.s16 d0, q11 vqmovun.s16 d1, q12 - .endm +.endm function ff_h264_v_loop_filter_luma_neon, export=1 h264_loop_filter_start - vld1.64 {d0, d1}, [r0,:128], r1 - vld1.64 {d2, d3}, [r0,:128], r1 - vld1.64 {d4, d5}, [r0,:128], r1 + vld1.8 {d0, d1}, [r0,:128], r1 + vld1.8 {d2, d3}, [r0,:128], r1 + vld1.8 {d4, d5}, [r0,:128], r1 sub r0, r0, r1, lsl #2 sub r0, r0, r1, lsl #1 - vld1.64 {d20,d21}, [r0,:128], r1 - vld1.64 {d18,d19}, [r0,:128], r1 - vld1.64 {d16,d17}, [r0,:128], r1 + vld1.8 {d20,d21}, [r0,:128], r1 + vld1.8 {d18,d19}, [r0,:128], r1 + vld1.8 {d16,d17}, [r0,:128], r1 vpush {d8-d15} h264_loop_filter_luma sub r0, r0, r1, lsl #1 - vst1.64 {d8, d9}, [r0,:128], r1 - vst1.64 {d16,d17}, [r0,:128], r1 - vst1.64 {d0, d1}, [r0,:128], r1 - vst1.64 {d10,d11}, [r0,:128] + vst1.8 {d8, d9}, [r0,:128], r1 + vst1.8 {d16,d17}, [r0,:128], r1 + vst1.8 {d0, d1}, [r0,:128], r1 + vst1.8 {d10,d11}, [r0,:128] vpop {d8-d15} bx lr @@ -475,22 +475,22 @@ function ff_h264_h_loop_filter_luma_neon, export=1 h264_loop_filter_start sub r0, r0, #4 - vld1.64 {d6}, [r0], r1 - vld1.64 {d20}, [r0], r1 - vld1.64 {d18}, [r0], r1 - vld1.64 {d16}, [r0], r1 - vld1.64 {d0}, [r0], r1 - vld1.64 {d2}, [r0], r1 - vld1.64 {d4}, [r0], r1 - vld1.64 {d26}, [r0], r1 - vld1.64 {d7}, [r0], r1 - vld1.64 {d21}, [r0], r1 - vld1.64 {d19}, [r0], r1 - vld1.64 {d17}, [r0], r1 - vld1.64 {d1}, [r0], r1 - vld1.64 {d3}, [r0], r1 - vld1.64 {d5}, [r0], r1 - vld1.64 {d27}, [r0], r1 + vld1.8 {d6}, [r0], r1 + vld1.8 {d20}, [r0], r1 + vld1.8 {d18}, [r0], r1 + vld1.8 {d16}, [r0], r1 + vld1.8 {d0}, [r0], r1 + vld1.8 {d2}, [r0], r1 + vld1.8 {d4}, [r0], r1 + vld1.8 {d26}, [r0], r1 + vld1.8 {d7}, [r0], r1 + vld1.8 {d21}, [r0], r1 + vld1.8 {d19}, [r0], r1 + vld1.8 {d17}, [r0], r1 + vld1.8 {d1}, [r0], r1 + vld1.8 {d3}, [r0], r1 + vld1.8 {d5}, [r0], r1 + vld1.8 {d27}, [r0], r1 transpose_8x8 q3, q10, q9, q8, q0, q1, q2, q13 @@ -523,7 +523,7 @@ function ff_h264_h_loop_filter_luma_neon, export=1 bx lr endfunc - .macro h264_loop_filter_chroma +.macro h264_loop_filter_chroma vdup.8 d22, r2 @ alpha vmovl.u8 q12, d24 vabd.u8 d26, d16, d0 @ abs(p0 - q0) @@ -552,22 +552,22 @@ endfunc vsubw.s8 q11, q11, d4 vqmovun.s16 d16, q14 vqmovun.s16 d0, q11 - .endm +.endm function ff_h264_v_loop_filter_chroma_neon, export=1 h264_loop_filter_start sub r0, r0, r1, lsl #1 - vld1.64 {d18}, [r0,:64], r1 - vld1.64 {d16}, [r0,:64], r1 - vld1.64 {d0}, [r0,:64], r1 - vld1.64 {d2}, [r0,:64] + vld1.8 {d18}, [r0,:64], r1 + vld1.8 {d16}, [r0,:64], r1 + vld1.8 {d0}, [r0,:64], r1 + vld1.8 {d2}, [r0,:64] h264_loop_filter_chroma sub r0, r0, r1, lsl #1 - vst1.64 {d16}, [r0,:64], r1 - vst1.64 {d0}, [r0,:64], r1 + vst1.8 {d16}, [r0,:64], r1 + vst1.8 {d0}, [r0,:64], r1 bx lr endfunc @@ -612,20 +612,20 @@ endfunc /* H.264 qpel MC */ - .macro lowpass_const r +.macro lowpass_const r movw \r, #5 movt \r, #20 vmov.32 d6[0], \r - .endm 
+.endm - .macro lowpass_8 r0, r1, r2, r3, d0, d1, narrow=1 -.if \narrow +.macro lowpass_8 r0, r1, r2, r3, d0, d1, narrow=1 + .if \narrow t0 .req q0 t1 .req q8 -.else + .else t0 .req \d0 t1 .req \d1 -.endif + .endif vext.8 d2, \r0, \r1, #2 vext.8 d3, \r0, \r1, #3 vaddl.u8 q1, d2, d3 @@ -646,20 +646,20 @@ endfunc vaddl.u8 t1, \r2, d31 vmla.i16 t1, q9, d6[1] vmls.i16 t1, q10, d6[0] -.if \narrow + .if \narrow vqrshrun.s16 \d0, t0, #5 vqrshrun.s16 \d1, t1, #5 -.endif + .endif .unreq t0 .unreq t1 - .endm +.endm - .macro lowpass_8_1 r0, r1, d0, narrow=1 -.if \narrow +.macro lowpass_8_1 r0, r1, d0, narrow=1 + .if \narrow t0 .req q0 -.else + .else t0 .req \d0 -.endif + .endif vext.8 d2, \r0, \r1, #2 vext.8 d3, \r0, \r1, #3 vaddl.u8 q1, d2, d3 @@ -670,13 +670,13 @@ endfunc vaddl.u8 t0, \r0, d30 vmla.i16 t0, q1, d6[1] vmls.i16 t0, q2, d6[0] -.if \narrow + .if \narrow vqrshrun.s16 \d0, t0, #5 -.endif + .endif .unreq t0 - .endm +.endm - .macro lowpass_8.16 r0, r1, l0, h0, l1, h1, d +.macro lowpass_8.16 r0, r1, l0, h0, l1, h1, d vext.16 q1, \r0, \r1, #2 vext.16 q0, \r0, \r1, #3 vaddl.s16 q9, d2, d0 @@ -711,59 +711,59 @@ endfunc vrshrn.s32 d19, q1, #10 vqmovun.s16 \d, q9 - .endm +.endm function put_h264_qpel16_h_lowpass_neon_packed mov r4, lr - mov ip, #16 + mov r12, #16 mov r3, #8 bl put_h264_qpel8_h_lowpass_neon sub r1, r1, r2, lsl #4 add r1, r1, #8 - mov ip, #16 + mov r12, #16 mov lr, r4 b put_h264_qpel8_h_lowpass_neon endfunc - .macro h264_qpel_h_lowpass type +.macro h264_qpel_h_lowpass type function \type\()_h264_qpel16_h_lowpass_neon push {lr} - mov ip, #16 + mov r12, #16 bl \type\()_h264_qpel8_h_lowpass_neon sub r0, r0, r3, lsl #4 sub r1, r1, r2, lsl #4 add r0, r0, #8 add r1, r1, #8 - mov ip, #16 + mov r12, #16 pop {lr} endfunc function \type\()_h264_qpel8_h_lowpass_neon -1: vld1.64 {d0, d1}, [r1], r2 - vld1.64 {d16,d17}, [r1], r2 - subs ip, ip, #2 +1: vld1.8 {d0, d1}, [r1], r2 + vld1.8 {d16,d17}, [r1], r2 + subs r12, r12, #2 lowpass_8 d0, d1, d16, d17, d0, d16 -.ifc \type,avg + .ifc \type,avg vld1.8 {d2}, [r0,:64], r3 vrhadd.u8 d0, d0, d2 vld1.8 {d3}, [r0,:64] vrhadd.u8 d16, d16, d3 sub r0, r0, r3 -.endif - vst1.64 {d0}, [r0,:64], r3 - vst1.64 {d16}, [r0,:64], r3 + .endif + vst1.8 {d0}, [r0,:64], r3 + vst1.8 {d16}, [r0,:64], r3 bne 1b bx lr endfunc - .endm +.endm h264_qpel_h_lowpass put h264_qpel_h_lowpass avg - .macro h264_qpel_h_lowpass_l2 type +.macro h264_qpel_h_lowpass_l2 type function \type\()_h264_qpel16_h_lowpass_l2_neon push {lr} - mov ip, #16 + mov r12, #16 bl \type\()_h264_qpel8_h_lowpass_l2_neon sub r0, r0, r2, lsl #4 sub r1, r1, r2, lsl #4 @@ -771,31 +771,31 @@ function \type\()_h264_qpel16_h_lowpass_l2_neon add r0, r0, #8 add r1, r1, #8 add r3, r3, #8 - mov ip, #16 + mov r12, #16 pop {lr} endfunc function \type\()_h264_qpel8_h_lowpass_l2_neon -1: vld1.64 {d0, d1}, [r1], r2 - vld1.64 {d16,d17}, [r1], r2 - vld1.64 {d28}, [r3], r2 - vld1.64 {d29}, [r3], r2 - subs ip, ip, #2 +1: vld1.8 {d0, d1}, [r1], r2 + vld1.8 {d16,d17}, [r1], r2 + vld1.8 {d28}, [r3], r2 + vld1.8 {d29}, [r3], r2 + subs r12, r12, #2 lowpass_8 d0, d1, d16, d17, d0, d1 vrhadd.u8 q0, q0, q14 -.ifc \type,avg + .ifc \type,avg vld1.8 {d2}, [r0,:64], r2 vrhadd.u8 d0, d0, d2 vld1.8 {d3}, [r0,:64] vrhadd.u8 d1, d1, d3 sub r0, r0, r2 -.endif - vst1.64 {d0}, [r0,:64], r2 - vst1.64 {d1}, [r0,:64], r2 + .endif + vst1.8 {d0}, [r0,:64], r2 + vst1.8 {d1}, [r0,:64], r2 bne 1b bx lr endfunc - .endm +.endm h264_qpel_h_lowpass_l2 put h264_qpel_h_lowpass_l2 avg @@ -815,7 +815,7 @@ function put_h264_qpel16_v_lowpass_neon_packed b 
put_h264_qpel8_v_lowpass_neon endfunc - .macro h264_qpel_v_lowpass type +.macro h264_qpel_v_lowpass type function \type\()_h264_qpel16_v_lowpass_neon mov r4, lr bl \type\()_h264_qpel8_v_lowpass_neon @@ -832,19 +832,19 @@ function \type\()_h264_qpel16_v_lowpass_neon endfunc function \type\()_h264_qpel8_v_lowpass_neon - vld1.64 {d8}, [r1], r3 - vld1.64 {d10}, [r1], r3 - vld1.64 {d12}, [r1], r3 - vld1.64 {d14}, [r1], r3 - vld1.64 {d22}, [r1], r3 - vld1.64 {d24}, [r1], r3 - vld1.64 {d26}, [r1], r3 - vld1.64 {d28}, [r1], r3 - vld1.64 {d9}, [r1], r3 - vld1.64 {d11}, [r1], r3 - vld1.64 {d13}, [r1], r3 - vld1.64 {d15}, [r1], r3 - vld1.64 {d23}, [r1] + vld1.8 {d8}, [r1], r3 + vld1.8 {d10}, [r1], r3 + vld1.8 {d12}, [r1], r3 + vld1.8 {d14}, [r1], r3 + vld1.8 {d22}, [r1], r3 + vld1.8 {d24}, [r1], r3 + vld1.8 {d26}, [r1], r3 + vld1.8 {d28}, [r1], r3 + vld1.8 {d9}, [r1], r3 + vld1.8 {d11}, [r1], r3 + vld1.8 {d13}, [r1], r3 + vld1.8 {d15}, [r1], r3 + vld1.8 {d23}, [r1] transpose_8x8 q4, q5, q6, q7, q11, q12, q13, q14 lowpass_8 d8, d9, d10, d11, d8, d10 @@ -853,7 +853,7 @@ function \type\()_h264_qpel8_v_lowpass_neon lowpass_8 d26, d27, d28, d29, d26, d28 transpose_8x8 d8, d10, d12, d14, d22, d24, d26, d28 -.ifc \type,avg + .ifc \type,avg vld1.8 {d9}, [r0,:64], r2 vrhadd.u8 d8, d8, d9 vld1.8 {d11}, [r0,:64], r2 @@ -871,34 +871,34 @@ function \type\()_h264_qpel8_v_lowpass_neon vld1.8 {d29}, [r0,:64], r2 vrhadd.u8 d28, d28, d29 sub r0, r0, r2, lsl #3 -.endif + .endif - vst1.64 {d8}, [r0,:64], r2 - vst1.64 {d10}, [r0,:64], r2 - vst1.64 {d12}, [r0,:64], r2 - vst1.64 {d14}, [r0,:64], r2 - vst1.64 {d22}, [r0,:64], r2 - vst1.64 {d24}, [r0,:64], r2 - vst1.64 {d26}, [r0,:64], r2 - vst1.64 {d28}, [r0,:64], r2 + vst1.8 {d8}, [r0,:64], r2 + vst1.8 {d10}, [r0,:64], r2 + vst1.8 {d12}, [r0,:64], r2 + vst1.8 {d14}, [r0,:64], r2 + vst1.8 {d22}, [r0,:64], r2 + vst1.8 {d24}, [r0,:64], r2 + vst1.8 {d26}, [r0,:64], r2 + vst1.8 {d28}, [r0,:64], r2 bx lr endfunc - .endm +.endm h264_qpel_v_lowpass put h264_qpel_v_lowpass avg - .macro h264_qpel_v_lowpass_l2 type +.macro h264_qpel_v_lowpass_l2 type function \type\()_h264_qpel16_v_lowpass_l2_neon mov r4, lr bl \type\()_h264_qpel8_v_lowpass_l2_neon sub r1, r1, r3, lsl #2 bl \type\()_h264_qpel8_v_lowpass_l2_neon sub r0, r0, r3, lsl #4 - sub ip, ip, r2, lsl #4 + sub r12, r12, r2, lsl #4 add r0, r0, #8 - add ip, ip, #8 + add r12, r12, #8 sub r1, r1, r3, lsl #4 sub r1, r1, r3, lsl #2 add r1, r1, #8 @@ -908,19 +908,19 @@ function \type\()_h264_qpel16_v_lowpass_l2_neon endfunc function \type\()_h264_qpel8_v_lowpass_l2_neon - vld1.64 {d8}, [r1], r3 - vld1.64 {d10}, [r1], r3 - vld1.64 {d12}, [r1], r3 - vld1.64 {d14}, [r1], r3 - vld1.64 {d22}, [r1], r3 - vld1.64 {d24}, [r1], r3 - vld1.64 {d26}, [r1], r3 - vld1.64 {d28}, [r1], r3 - vld1.64 {d9}, [r1], r3 - vld1.64 {d11}, [r1], r3 - vld1.64 {d13}, [r1], r3 - vld1.64 {d15}, [r1], r3 - vld1.64 {d23}, [r1] + vld1.8 {d8}, [r1], r3 + vld1.8 {d10}, [r1], r3 + vld1.8 {d12}, [r1], r3 + vld1.8 {d14}, [r1], r3 + vld1.8 {d22}, [r1], r3 + vld1.8 {d24}, [r1], r3 + vld1.8 {d26}, [r1], r3 + vld1.8 {d28}, [r1], r3 + vld1.8 {d9}, [r1], r3 + vld1.8 {d11}, [r1], r3 + vld1.8 {d13}, [r1], r3 + vld1.8 {d15}, [r1], r3 + vld1.8 {d23}, [r1] transpose_8x8 q4, q5, q6, q7, q11, q12, q13, q14 lowpass_8 d8, d9, d10, d11, d8, d9 @@ -929,20 +929,20 @@ function \type\()_h264_qpel8_v_lowpass_l2_neon lowpass_8 d26, d27, d28, d29, d26, d27 transpose_8x8 d8, d9, d12, d13, d22, d23, d26, d27 - vld1.64 {d0}, [ip], r2 - vld1.64 {d1}, [ip], r2 - vld1.64 {d2}, [ip], r2 - vld1.64 {d3}, 
[ip], r2 - vld1.64 {d4}, [ip], r2 + vld1.8 {d0}, [r12], r2 + vld1.8 {d1}, [r12], r2 + vld1.8 {d2}, [r12], r2 + vld1.8 {d3}, [r12], r2 + vld1.8 {d4}, [r12], r2 vrhadd.u8 q0, q0, q4 - vld1.64 {d5}, [ip], r2 + vld1.8 {d5}, [r12], r2 vrhadd.u8 q1, q1, q6 - vld1.64 {d10}, [ip], r2 + vld1.8 {d10}, [r12], r2 vrhadd.u8 q2, q2, q11 - vld1.64 {d11}, [ip], r2 + vld1.8 {d11}, [r12], r2 vrhadd.u8 q5, q5, q13 -.ifc \type,avg + .ifc \type,avg vld1.8 {d16}, [r0,:64], r3 vrhadd.u8 d0, d0, d16 vld1.8 {d17}, [r0,:64], r3 @@ -960,51 +960,51 @@ function \type\()_h264_qpel8_v_lowpass_l2_neon vld1.8 {d17}, [r0,:64], r3 vrhadd.u8 d11, d11, d17 sub r0, r0, r3, lsl #3 -.endif + .endif - vst1.64 {d0}, [r0,:64], r3 - vst1.64 {d1}, [r0,:64], r3 - vst1.64 {d2}, [r0,:64], r3 - vst1.64 {d3}, [r0,:64], r3 - vst1.64 {d4}, [r0,:64], r3 - vst1.64 {d5}, [r0,:64], r3 - vst1.64 {d10}, [r0,:64], r3 - vst1.64 {d11}, [r0,:64], r3 + vst1.8 {d0}, [r0,:64], r3 + vst1.8 {d1}, [r0,:64], r3 + vst1.8 {d2}, [r0,:64], r3 + vst1.8 {d3}, [r0,:64], r3 + vst1.8 {d4}, [r0,:64], r3 + vst1.8 {d5}, [r0,:64], r3 + vst1.8 {d10}, [r0,:64], r3 + vst1.8 {d11}, [r0,:64], r3 bx lr endfunc - .endm +.endm h264_qpel_v_lowpass_l2 put h264_qpel_v_lowpass_l2 avg function put_h264_qpel8_hv_lowpass_neon_top - lowpass_const ip - mov ip, #12 -1: vld1.64 {d0, d1}, [r1], r3 - vld1.64 {d16,d17}, [r1], r3 - subs ip, ip, #2 + lowpass_const r12 + mov r12, #12 +1: vld1.8 {d0, d1}, [r1], r3 + vld1.8 {d16,d17}, [r1], r3 + subs r12, r12, #2 lowpass_8 d0, d1, d16, d17, q11, q12, narrow=0 - vst1.64 {d22-d25}, [r4,:128]! + vst1.8 {d22-d25}, [r4,:128]! bne 1b - vld1.64 {d0, d1}, [r1] + vld1.8 {d0, d1}, [r1] lowpass_8_1 d0, d1, q12, narrow=0 - mov ip, #-16 - add r4, r4, ip - vld1.64 {d30,d31}, [r4,:128], ip - vld1.64 {d20,d21}, [r4,:128], ip - vld1.64 {d18,d19}, [r4,:128], ip - vld1.64 {d16,d17}, [r4,:128], ip - vld1.64 {d14,d15}, [r4,:128], ip - vld1.64 {d12,d13}, [r4,:128], ip - vld1.64 {d10,d11}, [r4,:128], ip - vld1.64 {d8, d9}, [r4,:128], ip - vld1.64 {d6, d7}, [r4,:128], ip - vld1.64 {d4, d5}, [r4,:128], ip - vld1.64 {d2, d3}, [r4,:128], ip - vld1.64 {d0, d1}, [r4,:128] + mov r12, #-16 + add r4, r4, r12 + vld1.8 {d30,d31}, [r4,:128], r12 + vld1.8 {d20,d21}, [r4,:128], r12 + vld1.8 {d18,d19}, [r4,:128], r12 + vld1.8 {d16,d17}, [r4,:128], r12 + vld1.8 {d14,d15}, [r4,:128], r12 + vld1.8 {d12,d13}, [r4,:128], r12 + vld1.8 {d10,d11}, [r4,:128], r12 + vld1.8 {d8, d9}, [r4,:128], r12 + vld1.8 {d6, d7}, [r4,:128], r12 + vld1.8 {d4, d5}, [r4,:128], r12 + vld1.8 {d2, d3}, [r4,:128], r12 + vld1.8 {d0, d1}, [r4,:128] swap4 d1, d3, d5, d7, d8, d10, d12, d14 transpose16_4x4 q0, q1, q2, q3, q4, q5, q6, q7 @@ -1012,31 +1012,31 @@ function put_h264_qpel8_hv_lowpass_neon_top swap4 d17, d19, d21, d31, d24, d26, d28, d22 transpose16_4x4 q8, q9, q10, q15, q12, q13, q14, q11 - vst1.64 {d30,d31}, [r4,:128]! - vst1.64 {d6, d7}, [r4,:128]! - vst1.64 {d20,d21}, [r4,:128]! - vst1.64 {d4, d5}, [r4,:128]! - vst1.64 {d18,d19}, [r4,:128]! - vst1.64 {d2, d3}, [r4,:128]! - vst1.64 {d16,d17}, [r4,:128]! - vst1.64 {d0, d1}, [r4,:128] + vst1.8 {d30,d31}, [r4,:128]! + vst1.8 {d6, d7}, [r4,:128]! + vst1.8 {d20,d21}, [r4,:128]! + vst1.8 {d4, d5}, [r4,:128]! + vst1.8 {d18,d19}, [r4,:128]! + vst1.8 {d2, d3}, [r4,:128]! + vst1.8 {d16,d17}, [r4,:128]! 
+ vst1.8 {d0, d1}, [r4,:128] lowpass_8.16 q4, q12, d8, d9, d24, d25, d8 lowpass_8.16 q5, q13, d10, d11, d26, d27, d9 lowpass_8.16 q6, q14, d12, d13, d28, d29, d10 lowpass_8.16 q7, q11, d14, d15, d22, d23, d11 - vld1.64 {d16,d17}, [r4,:128], ip - vld1.64 {d30,d31}, [r4,:128], ip + vld1.8 {d16,d17}, [r4,:128], r12 + vld1.8 {d30,d31}, [r4,:128], r12 lowpass_8.16 q8, q15, d16, d17, d30, d31, d12 - vld1.64 {d16,d17}, [r4,:128], ip - vld1.64 {d30,d31}, [r4,:128], ip + vld1.8 {d16,d17}, [r4,:128], r12 + vld1.8 {d30,d31}, [r4,:128], r12 lowpass_8.16 q8, q15, d16, d17, d30, d31, d13 - vld1.64 {d16,d17}, [r4,:128], ip - vld1.64 {d30,d31}, [r4,:128], ip + vld1.8 {d16,d17}, [r4,:128], r12 + vld1.8 {d30,d31}, [r4,:128], r12 lowpass_8.16 q8, q15, d16, d17, d30, d31, d14 - vld1.64 {d16,d17}, [r4,:128], ip - vld1.64 {d30,d31}, [r4,:128] + vld1.8 {d16,d17}, [r4,:128], r12 + vld1.8 {d30,d31}, [r4,:128] lowpass_8.16 q8, q15, d16, d17, d30, d31, d15 transpose_8x8 d12, d13, d14, d15, d8, d9, d10, d11 @@ -1044,11 +1044,11 @@ function put_h264_qpel8_hv_lowpass_neon_top bx lr endfunc - .macro h264_qpel8_hv_lowpass type +.macro h264_qpel8_hv_lowpass type function \type\()_h264_qpel8_hv_lowpass_neon mov r10, lr bl put_h264_qpel8_hv_lowpass_neon_top -.ifc \type,avg + .ifc \type,avg vld1.8 {d0}, [r0,:64], r2 vrhadd.u8 d12, d12, d0 vld1.8 {d1}, [r0,:64], r2 @@ -1066,39 +1066,39 @@ function \type\()_h264_qpel8_hv_lowpass_neon vld1.8 {d7}, [r0,:64], r2 vrhadd.u8 d11, d11, d7 sub r0, r0, r2, lsl #3 -.endif + .endif - vst1.64 {d12}, [r0,:64], r2 - vst1.64 {d13}, [r0,:64], r2 - vst1.64 {d14}, [r0,:64], r2 - vst1.64 {d15}, [r0,:64], r2 - vst1.64 {d8}, [r0,:64], r2 - vst1.64 {d9}, [r0,:64], r2 - vst1.64 {d10}, [r0,:64], r2 - vst1.64 {d11}, [r0,:64], r2 + vst1.8 {d12}, [r0,:64], r2 + vst1.8 {d13}, [r0,:64], r2 + vst1.8 {d14}, [r0,:64], r2 + vst1.8 {d15}, [r0,:64], r2 + vst1.8 {d8}, [r0,:64], r2 + vst1.8 {d9}, [r0,:64], r2 + vst1.8 {d10}, [r0,:64], r2 + vst1.8 {d11}, [r0,:64], r2 mov lr, r10 bx lr endfunc - .endm +.endm h264_qpel8_hv_lowpass put h264_qpel8_hv_lowpass avg - .macro h264_qpel8_hv_lowpass_l2 type +.macro h264_qpel8_hv_lowpass_l2 type function \type\()_h264_qpel8_hv_lowpass_l2_neon mov r10, lr bl put_h264_qpel8_hv_lowpass_neon_top - vld1.64 {d0, d1}, [r2,:128]! - vld1.64 {d2, d3}, [r2,:128]! + vld1.8 {d0, d1}, [r2,:128]! + vld1.8 {d2, d3}, [r2,:128]! vrhadd.u8 q0, q0, q6 - vld1.64 {d4, d5}, [r2,:128]! + vld1.8 {d4, d5}, [r2,:128]! vrhadd.u8 q1, q1, q7 - vld1.64 {d6, d7}, [r2,:128]! + vld1.8 {d6, d7}, [r2,:128]! 
vrhadd.u8 q2, q2, q4 vrhadd.u8 q3, q3, q5 -.ifc \type,avg + .ifc \type,avg vld1.8 {d16}, [r0,:64], r3 vrhadd.u8 d0, d0, d16 vld1.8 {d17}, [r0,:64], r3 @@ -1116,25 +1116,25 @@ function \type\()_h264_qpel8_hv_lowpass_l2_neon vld1.8 {d23}, [r0,:64], r3 vrhadd.u8 d7, d7, d23 sub r0, r0, r3, lsl #3 -.endif - vst1.64 {d0}, [r0,:64], r3 - vst1.64 {d1}, [r0,:64], r3 - vst1.64 {d2}, [r0,:64], r3 - vst1.64 {d3}, [r0,:64], r3 - vst1.64 {d4}, [r0,:64], r3 - vst1.64 {d5}, [r0,:64], r3 - vst1.64 {d6}, [r0,:64], r3 - vst1.64 {d7}, [r0,:64], r3 + .endif + vst1.8 {d0}, [r0,:64], r3 + vst1.8 {d1}, [r0,:64], r3 + vst1.8 {d2}, [r0,:64], r3 + vst1.8 {d3}, [r0,:64], r3 + vst1.8 {d4}, [r0,:64], r3 + vst1.8 {d5}, [r0,:64], r3 + vst1.8 {d6}, [r0,:64], r3 + vst1.8 {d7}, [r0,:64], r3 mov lr, r10 bx lr endfunc - .endm +.endm h264_qpel8_hv_lowpass_l2 put h264_qpel8_hv_lowpass_l2 avg - .macro h264_qpel16_hv type +.macro h264_qpel16_hv type function \type\()_h264_qpel16_hv_lowpass_neon mov r9, lr bl \type\()_h264_qpel8_hv_lowpass_neon @@ -1167,17 +1167,17 @@ function \type\()_h264_qpel16_hv_lowpass_l2_neon mov lr, r9 b \type\()_h264_qpel8_hv_lowpass_l2_neon endfunc - .endm +.endm h264_qpel16_hv put h264_qpel16_hv avg - .macro h264_qpel8 type +.macro h264_qpel8 type function ff_\type\()_h264_qpel8_mc10_neon, export=1 lowpass_const r3 mov r3, r1 sub r1, r1, #2 - mov ip, #8 + mov r12, #8 b \type\()_h264_qpel8_h_lowpass_l2_neon endfunc @@ -1185,7 +1185,7 @@ function ff_\type\()_h264_qpel8_mc20_neon, export=1 lowpass_const r3 sub r1, r1, #2 mov r3, r2 - mov ip, #8 + mov r12, #8 b \type\()_h264_qpel8_h_lowpass_neon endfunc @@ -1193,13 +1193,13 @@ function ff_\type\()_h264_qpel8_mc30_neon, export=1 lowpass_const r3 add r3, r1, #1 sub r1, r1, #2 - mov ip, #8 + mov r12, #8 b \type\()_h264_qpel8_h_lowpass_l2_neon endfunc function ff_\type\()_h264_qpel8_mc01_neon, export=1 push {lr} - mov ip, r1 + mov r12, r1 \type\()_h264_qpel8_mc01: lowpass_const r3 mov r3, r2 @@ -1222,12 +1222,12 @@ T mov sp, r0 mov r0, sp sub r1, r1, #2 mov r3, #8 - mov ip, #8 + mov r12, #8 vpush {d8-d15} bl put_h264_qpel8_h_lowpass_neon ldrd r0, [r11], #8 mov r3, r2 - add ip, sp, #64 + add r12, sp, #64 sub r1, r1, r2, lsl #1 mov r2, #8 bl \type\()_h264_qpel8_v_lowpass_l2_neon @@ -1248,7 +1248,7 @@ T mov sp, r0 sub r1, r1, #2 mov r3, #8 mov r0, sp - mov ip, #8 + mov r12, #8 vpush {d8-d15} bl put_h264_qpel8_h_lowpass_neon mov r4, r0 @@ -1333,7 +1333,7 @@ endfunc function ff_\type\()_h264_qpel8_mc03_neon, export=1 push {lr} - add ip, r1, r2 + add r12, r1, r2 b \type\()_h264_qpel8_mc01 endfunc @@ -1356,12 +1356,12 @@ function ff_\type\()_h264_qpel8_mc33_neon, export=1 sub r1, r1, #1 b \type\()_h264_qpel8_mc11 endfunc - .endm +.endm h264_qpel8 put h264_qpel8 avg - .macro h264_qpel16 type +.macro h264_qpel16 type function ff_\type\()_h264_qpel16_mc10_neon, export=1 lowpass_const r3 mov r3, r1 @@ -1385,7 +1385,7 @@ endfunc function ff_\type\()_h264_qpel16_mc01_neon, export=1 push {r4, lr} - mov ip, r1 + mov r12, r1 \type\()_h264_qpel16_mc01: lowpass_const r3 mov r3, r2 @@ -1412,7 +1412,7 @@ T mov sp, r0 bl put_h264_qpel16_h_lowpass_neon ldrd r0, [r11], #8 mov r3, r2 - add ip, sp, #64 + add r12, sp, #64 sub r1, r1, r2, lsl #1 mov r2, #16 bl \type\()_h264_qpel16_v_lowpass_l2_neon @@ -1515,7 +1515,7 @@ endfunc function ff_\type\()_h264_qpel16_mc03_neon, export=1 push {r4, lr} - add ip, r1, r2 + add r12, r1, r2 b \type\()_h264_qpel16_mc01 endfunc @@ -1538,14 +1538,14 @@ function ff_\type\()_h264_qpel16_mc33_neon, export=1 sub r1, r1, #1 b \type\()_h264_qpel16_mc11 
endfunc - .endm +.endm h264_qpel16 put h264_qpel16 avg @ Biweighted prediction - .macro biweight_16 macs, macd +.macro biweight_16 macs, macd vdup.8 d0, r4 vdup.8 d1, r5 vmov q2, q8 @@ -1583,9 +1583,9 @@ endfunc vst1.8 {d24-d25},[r6,:128], r2 bne 1b pop {r4-r6, pc} - .endm +.endm - .macro biweight_8 macs, macd +.macro biweight_8 macs, macd vdup.8 d0, r4 vdup.8 d1, r5 vmov q1, q8 @@ -1613,9 +1613,9 @@ endfunc vst1.8 {d4},[r6,:64], r2 bne 1b pop {r4-r6, pc} - .endm +.endm - .macro biweight_4 macs, macd +.macro biweight_4 macs, macd vdup.8 d0, r4 vdup.8 d1, r5 vmov q1, q8 @@ -1655,9 +1655,9 @@ endfunc vst1.32 {d2[0]},[r6,:32], r2 vst1.32 {d2[1]},[r6,:32], r2 pop {r4-r6, pc} - .endm +.endm - .macro biweight_func w +.macro biweight_func w function ff_biweight_h264_pixels_\w\()_neon, export=1 push {r4-r6, lr} ldr r12, [sp, #16] @@ -1687,7 +1687,7 @@ function ff_biweight_h264_pixels_\w\()_neon, export=1 40: rsb r5, r5, #0 biweight_\w vmlsl.u8, vmlal.u8 endfunc - .endm +.endm biweight_func 16 biweight_func 8 @@ -1695,7 +1695,7 @@ endfunc @ Weighted prediction - .macro weight_16 add +.macro weight_16 add vdup.8 d0, r12 1: subs r2, r2, #2 vld1.8 {d20-d21},[r0,:128], r1 @@ -1722,9 +1722,9 @@ endfunc vst1.8 {d24-d25},[r4,:128], r1 bne 1b pop {r4, pc} - .endm +.endm - .macro weight_8 add +.macro weight_8 add vdup.8 d0, r12 1: subs r2, r2, #2 vld1.8 {d4},[r0,:64], r1 @@ -1743,9 +1743,9 @@ endfunc vst1.8 {d4},[r4,:64], r1 bne 1b pop {r4, pc} - .endm +.endm - .macro weight_4 add +.macro weight_4 add vdup.8 d0, r12 vmov q1, q8 vmov q10, q8 @@ -1779,9 +1779,9 @@ endfunc vst1.32 {d2[0]},[r4,:32], r1 vst1.32 {d2[1]},[r4,:32], r1 pop {r4, pc} - .endm +.endm - .macro weight_func w +.macro weight_func w function ff_weight_h264_pixels_\w\()_neon, export=1 push {r4, lr} ldr r12, [sp, #8] @@ -1806,7 +1806,7 @@ function ff_weight_h264_pixels_\w\()_neon, export=1 10: rsb r12, r12, #0 weight_\w vsub.s16 endfunc - .endm +.endm weight_func 16 weight_func 8 -- cgit v1.2.3 From 51a16077da2a58d85a10dcb1259756ad6099b5d5 Mon Sep 17 00:00:00 2001 From: Mans Rullgard Date: Sat, 26 Nov 2011 15:30:34 +0000 Subject: svq1dec: avoid undefined get_bits(0) call Signed-off-by: Mans Rullgard --- libavcodec/svq1dec.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/libavcodec/svq1dec.c b/libavcodec/svq1dec.c index 8569615aa9..7eb6e607d1 100644 --- a/libavcodec/svq1dec.c +++ b/libavcodec/svq1dec.c @@ -195,7 +195,8 @@ static const uint8_t string_table[256] = { #define SVQ1_CALC_CODEBOOK_ENTRIES(cbook)\ codebook = (const uint32_t *) cbook[level];\ - bit_cache = get_bits (bitbuf, 4*stages);\ + if (stages > 0)\ + bit_cache = get_bits (bitbuf, 4*stages);\ /* calculate codebook entries for this vector */\ for (j=0; j < stages; j++) {\ entries[j] = (((bit_cache >> (4*(stages - j - 1))) & 0xF) + 16*j) << (level + 1);\ -- cgit v1.2.3 From 9b9815eec4231d9efea7cd5c6a18c09c2ebff310 Mon Sep 17 00:00:00 2001 From: Victor Vasiliev Date: Thu, 1 Dec 2011 20:45:44 +0400 Subject: Update developers documentation with coding conventions. Signed-off-by: Luca Barbato --- doc/developer.texi | 123 ++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 97 insertions(+), 26 deletions(-) diff --git a/doc/developer.texi b/doc/developer.texi index 800009e89b..128b46e830 100644 --- a/doc/developer.texi +++ b/doc/developer.texi @@ -45,48 +45,61 @@ mailing list. 
@anchor{Coding Rules} @section Coding Rules -Libav is programmed in the ISO C90 language with a few additional -features from ISO C99, namely: +@subsection Code formatting conventions +The code is written in K&R C style. That means the following: @itemize @bullet @item -the @samp{inline} keyword; +The control statements are formatted by putting space betwen the statement and parenthesis +in the following way: +@example +for (i = 0; i < filter->input_count; i ++) @{ +@end example @item -@samp{//} comments; +The case statement is always located at the same level as the switch itself: +@example +switch (link->init_state) @{ +case AVLINK_INIT: + continue; +case AVLINK_STARTINIT: + av_log(filter, AV_LOG_INFO, "circular filter chain detected"); + return 0; +@end example @item -designated struct initializers (@samp{struct s x = @{ .i = 17 @};}) +Braces in function declarations are written on the new line: +@example +const char *avfilter_configuration(void) +@{ + return LIBAV_CONFIGURATION; +@} +@end example @item -compound literals (@samp{x = (struct s) @{ 17, 23 @};}) +In case of a single-statement if, no curly braces are required: +@example +if (!pic || !picref) + goto fail; +@end example +@item +Do not put spaces immediately inside parenthesis. @samp{if (ret)} is a valid style; @samp{if ( ret )} is not. @end itemize -These features are supported by all compilers we care about, so we will not -accept patches to remove their use unless they absolutely do not impair -clarity and performance. - -All code must compile with recent versions of GCC and a number of other -currently supported compilers. To ensure compatibility, please do not use -additional C99 features or GCC extensions. Especially watch out for: +There are the following guidelines regarding the indentation in files: @itemize @bullet @item -mixing statements and declarations; -@item -@samp{long long} (use @samp{int64_t} instead); -@item -@samp{__attribute__} not protected by @samp{#ifdef __GNUC__} or similar; -@item -GCC statement expressions (@samp{(x = (@{ int y = 4; y; @})}). -@end itemize - Indent size is 4. -The presentation is one inspired by 'indent -i4 -kr -nut'. +@item The TAB character is forbidden outside of Makefiles as is any form of trailing whitespace. Commits containing either will be rejected by the git repository. +@item +You should try to limit your code lines to 80 characters; however, do so if and only if this improves readability. +@end itemize +The presentation is one inspired by 'indent -i4 -kr -nut'. The main priority in Libav is simplicity and small code size in order to minimize the bug count. -Comments: Use the JavaDoc/Doxygen -format (see examples below) so that code documentation +@subsection Comments +Use the JavaDoc/Doxygen format (see examples below) so that code documentation can be generated automatically. All nontrivial functions should have a comment above them explaining what the function does, even if it is just one sentence. All structures and their member variables should be documented, too. @@ -120,11 +133,69 @@ int myfunc(int my_parameter) ... 
@end example +@subsection C language features + +Libav is programmed in the ISO C90 language with a few additional +features from ISO C99, namely: +@itemize @bullet +@item +the @samp{inline} keyword; +@item +@samp{//} comments; +@item +designated struct initializers (@samp{struct s x = @{ .i = 17 @};}) +@item +compound literals (@samp{x = (struct s) @{ 17, 23 @};}) +@end itemize + +These features are supported by all compilers we care about, so we will not +accept patches to remove their use unless they absolutely do not impair +clarity and performance. + +All code must compile with recent versions of GCC and a number of other +currently supported compilers. To ensure compatibility, please do not use +additional C99 features or GCC extensions. Especially watch out for: +@itemize @bullet +@item +mixing statements and declarations; +@item +@samp{long long} (use @samp{int64_t} instead); +@item +@samp{__attribute__} not protected by @samp{#ifdef __GNUC__} or similar; +@item +GCC statement expressions (@samp{(x = (@{ int y = 4; y; @})}). +@end itemize + +@subsection Naming conventions +All names are using underscores (_), not CamelCase. For example, @samp{avfilter_get_video_buffer} is +a valid function name and @samp{AVFilterGetVideo} is not. The only exception from this are structure names; +they should always be in the CamelCase + +There are following conventions for naming variables and functions: +@itemize @bullet +@item +For local variables no prefix is required. +@item +For variables and functions declared as @code{static} no prefixes are required. +@item +For variables and functions used internally by the library, @code{ff_} prefix should be used. +For example, @samp{ff_w64_demuxer}. +@item +For variables and functions used internally across multiple libraries, use @code{avpriv_}. For example, +@samp{avpriv_aac_parse_header}. +@item +For exported names, each library has its own prefixes. Just check the existing code and name accordingly. +@end itemize + +@subsection Miscellanous conventions +@itemize @bullet +@item fprintf and printf are forbidden in libavformat and libavcodec, please use av_log() instead. - +@item Casts should be used only when necessary. Unneeded parentheses should also be avoided if they don't make the code easier to understand. +@end itemize @section Development Policy -- cgit v1.2.3 From 560f773c7ddd17f66e2621222980c1359a9027be Mon Sep 17 00:00:00 2001 From: Justin Ruggles Date: Mon, 14 Nov 2011 16:07:03 -0500 Subject: avcodec: change number of plane pointers from 4 to 8 at next major bump. Add AV_NUM_DATA_POINTERS to simplify the bump transition. This will allow for supporting more planar audio channels without having to allocate separate pointer arrays. --- doc/APIchanges | 6 ++++++ libavcodec/avcodec.h | 23 ++++++++++++++--------- libavcodec/huffyuv.c | 7 ++++--- libavcodec/mpegvideo.c | 12 ++++++------ libavcodec/utils.c | 27 ++++++++++++++++----------- libavcodec/version.h | 6 ++++-- libavcodec/vp3.c | 7 ++++--- 7 files changed, 54 insertions(+), 34 deletions(-) diff --git a/doc/APIchanges b/doc/APIchanges index 6c00ee9a28..f664376d3a 100644 --- a/doc/APIchanges +++ b/doc/APIchanges @@ -13,6 +13,12 @@ libavutil: 2011-04-18 API changes, most recent first: +2011-xx-xx - xxxxxxx - lavc 53.24.0 + Change AVFrame.data[4]/base[4]/linesize[4]/error[4] to [8] at next major bump. + Change AVPicture.data[4]/linesize[4] to [8] at next major bump. + Change AVCodecContext.error[4] to [8] at next major bump. + Add AV_NUM_DATA_POINTERS to simplify the bump transition. 
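A rough sketch of the adjustment this entry describes, with an invented function name (the real changes are in the hunks below): code that hardcoded the old plane count of 4 now iterates over AV_NUM_DATA_POINTERS, so it keeps working once the arrays grow to 8.

#include "libavcodec/avcodec.h"

/* Sketch only: clear every plane pointer past the ones actually in use,
 * iterating up to AV_NUM_DATA_POINTERS rather than a hardcoded 4. */
static void clear_unused_planes(AVFrame *frame, int first_unused)
{
    int i;
    for (i = first_unused; i < AV_NUM_DATA_POINTERS; i++) {
        frame->data[i]     = NULL;
        frame->linesize[i] = 0;
    }
}
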
+ 2011-11-23 - bbb46f3 - lavu 51.18.0 Add av_samples_get_buffer_size(), av_samples_fill_arrays(), and av_samples_alloc(), to samplefmt.h. diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h index 43abcd82be..eeafce4c45 100644 --- a/libavcodec/avcodec.h +++ b/libavcodec/avcodec.h @@ -927,21 +927,26 @@ typedef struct AVPacket { * sizeof(AVFrame) must not be used outside libav*. */ typedef struct AVFrame { +#if FF_API_DATA_POINTERS +#define AV_NUM_DATA_POINTERS 4 +#else +#define AV_NUM_DATA_POINTERS 8 +#endif /** * pointer to the picture planes. * This might be different from the first allocated byte * - encoding: * - decoding: */ - uint8_t *data[4]; - int linesize[4]; + uint8_t *data[AV_NUM_DATA_POINTERS]; + int linesize[AV_NUM_DATA_POINTERS]; /** * pointer to the first allocated byte of the picture. Can be used in get_buffer/release_buffer. * This isn't used by libavcodec unless the default get/release_buffer() is used. * - encoding: * - decoding: */ - uint8_t *base[4]; + uint8_t *base[AV_NUM_DATA_POINTERS]; /** * 1 -> keyframe, 0-> not * - encoding: Set by libavcodec. @@ -1065,7 +1070,7 @@ typedef struct AVFrame { * - encoding: Set by libavcodec. if flags&CODEC_FLAG_PSNR. * - decoding: unused */ - uint64_t error[4]; + uint64_t error[AV_NUM_DATA_POINTERS]; /** * type of the buffer (to keep track of who has to deallocate data[*]) @@ -1319,7 +1324,7 @@ typedef struct AVCodecContext { * @param offset offset into the AVFrame.data from which the slice should be read */ void (*draw_horiz_band)(struct AVCodecContext *s, - const AVFrame *src, int offset[4], + const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], int y, int type, int height); /* audio only */ @@ -1867,7 +1872,7 @@ typedef struct AVCodecContext { * - encoding: Set by libavcodec if flags&CODEC_FLAG_PSNR. * - decoding: unused */ - uint64_t error[4]; + uint64_t error[AV_NUM_DATA_POINTERS]; /** * motion estimation comparison function @@ -3175,8 +3180,8 @@ typedef struct AVHWAccel { * the last component is alpha */ typedef struct AVPicture { - uint8_t *data[4]; - int linesize[4]; ///< number of bytes per line + uint8_t *data[AV_NUM_DATA_POINTERS]; + int linesize[AV_NUM_DATA_POINTERS]; ///< number of bytes per line } AVPicture; #define AVPALETTE_SIZE 1024 @@ -3794,7 +3799,7 @@ void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height); * according to avcodec_get_edge_width() before. 
*/ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, - int linesize_align[4]); + int linesize_align[AV_NUM_DATA_POINTERS]); enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum PixelFormat * fmt); diff --git a/libavcodec/huffyuv.c b/libavcodec/huffyuv.c index 865bc6ab5c..57b5f32fc8 100644 --- a/libavcodec/huffyuv.c +++ b/libavcodec/huffyuv.c @@ -921,8 +921,8 @@ static int encode_bgr_bitstream(HYuvContext *s, int count){ #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER static void draw_slice(HYuvContext *s, int y){ - int h, cy; - int offset[4]; + int h, cy, i; + int offset[AV_NUM_DATA_POINTERS]; if(s->avctx->draw_horiz_band==NULL) return; @@ -939,7 +939,8 @@ static void draw_slice(HYuvContext *s, int y){ offset[0] = s->picture.linesize[0]*y; offset[1] = s->picture.linesize[1]*cy; offset[2] = s->picture.linesize[2]*cy; - offset[3] = 0; + for (i = 3; i < AV_NUM_DATA_POINTERS; i++) + offset[i] = 0; emms_c(); s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h); diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c index dcef706751..d673f79dd9 100644 --- a/libavcodec/mpegvideo.c +++ b/libavcodec/mpegvideo.c @@ -2329,7 +2329,8 @@ void ff_draw_horiz_band(MpegEncContext *s, int y, int h){ if (s->avctx->draw_horiz_band) { AVFrame *src; - int offset[4]; + int offset[AV_NUM_DATA_POINTERS]; + int i; if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER)) src= (AVFrame*)s->current_picture_ptr; @@ -2339,15 +2340,14 @@ void ff_draw_horiz_band(MpegEncContext *s, int y, int h){ return; if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){ - offset[0]= - offset[1]= - offset[2]= - offset[3]= 0; + for (i = 0; i < AV_NUM_DATA_POINTERS; i++) + offset[i] = 0; }else{ offset[0]= y * s->linesize; offset[1]= offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize; - offset[3]= 0; + for (i = 3; i < AV_NUM_DATA_POINTERS; i++) + offset[i] = 0; } emms_c(); diff --git a/libavcodec/utils.c b/libavcodec/utils.c index 53440e0f84..998a12c149 100644 --- a/libavcodec/utils.c +++ b/libavcodec/utils.c @@ -127,7 +127,10 @@ void avcodec_set_dimensions(AVCodecContext *s, int width, int height){ #define INTERNAL_BUFFER_SIZE (32+1) -void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int linesize_align[4]){ +void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, + int linesize_align[AV_NUM_DATA_POINTERS]) +{ + int i; int w_align= 1; int h_align= 1; @@ -209,10 +212,8 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int l *height+=2; // some of the optimized chroma MC reads one line too much // which is also done in mpeg decoders with lowres > 0 - linesize_align[0] = - linesize_align[1] = - linesize_align[2] = - linesize_align[3] = STRIDE_ALIGN; + for (i = 0; i < AV_NUM_DATA_POINTERS; i++) + linesize_align[i] = STRIDE_ALIGN; //STRIDE_ALIGN is 8 for SSE* but this does not work for SVQ1 chroma planes //we could change STRIDE_ALIGN to 16 for x86/sse but it would increase the //picture size unneccessarily in some cases. 
The solution here is not @@ -230,7 +231,7 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int l void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height){ int chroma_shift = av_pix_fmt_descriptors[s->pix_fmt].log2_chroma_w; - int linesize_align[4]; + int linesize_align[AV_NUM_DATA_POINTERS]; int align; avcodec_align_dimensions2(s, width, height, linesize_align); align = FFMAX(linesize_align[0], linesize_align[3]); @@ -275,7 +276,7 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){ return -1; } - for(i=0; i<4; i++){ + for (i = 0; i < AV_NUM_DATA_POINTERS; i++) { av_freep(&buf->base[i]); buf->data[i]= NULL; } @@ -290,7 +291,7 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){ int tmpsize; int unaligned; AVPicture picture; - int stride_align[4]; + int stride_align[AV_NUM_DATA_POINTERS]; const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1; avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift); @@ -343,6 +344,10 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){ else buf->data[i] = buf->base[i] + FFALIGN((buf->linesize[i]*EDGE_WIDTH>>v_shift) + (pixel_size*EDGE_WIDTH>>h_shift), stride_align[i]); } + for (; i < AV_NUM_DATA_POINTERS; i++) { + buf->base[i] = buf->data[i] = NULL; + buf->linesize[i] = 0; + } if(size[1] && !size[2]) ff_set_systematic_pal2((uint32_t*)buf->data[1], s->pix_fmt); buf->width = s->width; @@ -352,7 +357,7 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){ } pic->type= FF_BUFFER_TYPE_INTERNAL; - for(i=0; i<4; i++){ + for (i = 0; i < AV_NUM_DATA_POINTERS; i++) { pic->base[i]= buf->base[i]; pic->data[i]= buf->data[i]; pic->linesize[i]= buf->linesize[i]; @@ -392,7 +397,7 @@ void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic){ FFSWAP(InternalBuffer, *buf, *last); } - for(i=0; i<4; i++){ + for (i = 0; i < AV_NUM_DATA_POINTERS; i++) { pic->data[i]=NULL; // pic->base[i]=NULL; } @@ -426,7 +431,7 @@ int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic){ * Not internal type and reget_buffer not overridden, emulate cr buffer */ temp_pic = *pic; - for(i = 0; i < 4; i++) + for(i = 0; i < AV_NUM_DATA_POINTERS; i++) pic->data[i] = pic->base[i] = NULL; pic->opaque = NULL; /* Allocate new frame */ diff --git a/libavcodec/version.h b/libavcodec/version.h index 0bd17817ec..7262c81544 100644 --- a/libavcodec/version.h +++ b/libavcodec/version.h @@ -21,7 +21,7 @@ #define AVCODEC_VERSION_H #define LIBAVCODEC_VERSION_MAJOR 53 -#define LIBAVCODEC_VERSION_MINOR 23 +#define LIBAVCODEC_VERSION_MINOR 24 #define LIBAVCODEC_VERSION_MICRO 0 #define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \ @@ -110,6 +110,8 @@ #ifndef FF_API_TIFFENC_COMPLEVEL #define FF_API_TIFFENC_COMPLEVEL (LIBAVCODEC_VERSION_MAJOR < 54) #endif - +#ifndef FF_API_DATA_POINTERS +#define FF_API_DATA_POINTERS (LIBAVCODEC_VERSION_MAJOR < 54) +#endif #endif /* AVCODEC_VERSION_H */ diff --git a/libavcodec/vp3.c b/libavcodec/vp3.c index a31ad4e99f..6e04b7b01b 100644 --- a/libavcodec/vp3.c +++ b/libavcodec/vp3.c @@ -1331,8 +1331,8 @@ end: */ static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y) { - int h, cy; - int offset[4]; + int h, cy, i; + int offset[AV_NUM_DATA_POINTERS]; if (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) { int y_flipped = s->flipped_image ? 
s->avctx->height-y : y; @@ -1358,7 +1358,8 @@ static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y) offset[0] = s->current_frame.linesize[0]*y; offset[1] = s->current_frame.linesize[1]*cy; offset[2] = s->current_frame.linesize[2]*cy; - offset[3] = 0; + for (i = 3; i < AV_NUM_DATA_POINTERS; i++) + offset[i] = 0; emms_c(); s->avctx->draw_horiz_band(s->avctx, &s->current_frame, offset, y, 3, h); -- cgit v1.2.3 From 0eea212943544d40f99b05571aa7159d78667154 Mon Sep 17 00:00:00 2001 From: Justin Ruggles Date: Tue, 6 Sep 2011 12:17:45 -0400 Subject: Add avcodec_decode_audio4(). Deprecate avcodec_decode_audio3(). Implement audio support in avcodec_default_get_buffer(). Implement the new audio decoder API in all audio decoders. --- doc/APIchanges | 7 ++ libavcodec/8svx.c | 38 ++++--- libavcodec/aac.h | 1 + libavcodec/aacdec.c | 49 +++++---- libavcodec/ac3dec.c | 32 ++++-- libavcodec/ac3dec.h | 1 + libavcodec/adpcm.c | 42 +++++--- libavcodec/adx.h | 1 + libavcodec/adxdec.c | 41 ++++--- libavcodec/alac.c | 45 +++++--- libavcodec/alsdec.c | 43 ++++---- libavcodec/amrnbdec.c | 25 +++-- libavcodec/amrwbdec.c | 27 +++-- libavcodec/apedec.c | 37 ++++--- libavcodec/atrac1.c | 26 +++-- libavcodec/atrac3.c | 34 +++--- libavcodec/avcodec.h | 145 +++++++++++++++++++++++-- libavcodec/binkaudio.c | 34 +++--- libavcodec/cook.c | 43 +++++--- libavcodec/dca.c | 32 +++--- libavcodec/dpcm.c | 28 +++-- libavcodec/dsicinav.c | 30 +++--- libavcodec/flacdec.c | 37 ++++--- libavcodec/g722.h | 2 + libavcodec/g722dec.c | 25 +++-- libavcodec/g726.c | 29 +++-- libavcodec/gsmdec.c | 32 ++++-- libavcodec/gsmdec_data.h | 2 + libavcodec/imc.c | 29 +++-- libavcodec/internal.h | 9 +- libavcodec/libgsm.c | 58 ++++++---- libavcodec/libopencore-amr.c | 52 ++++++--- libavcodec/libspeexdec.c | 36 ++++--- libavcodec/mace.c | 33 +++--- libavcodec/mlpdec.c | 39 ++++--- libavcodec/mpc.h | 1 + libavcodec/mpc7.c | 29 +++-- libavcodec/mpc8.c | 27 +++-- libavcodec/mpegaudiodec.c | 86 +++++++++------ libavcodec/mpegaudiodec_float.c | 17 ++- libavcodec/nellymoserdec.c | 37 ++++--- libavcodec/pcm.c | 42 +++++--- libavcodec/qcelpdec.c | 26 +++-- libavcodec/qdm2.c | 32 +++--- libavcodec/ra144.h | 1 + libavcodec/ra144dec.c | 31 ++++-- libavcodec/ra288.c | 27 +++-- libavcodec/s302m.c | 46 ++++++-- libavcodec/shorten.c | 45 ++++---- libavcodec/sipr.c | 34 +++--- libavcodec/smacker.c | 41 +++++-- libavcodec/truespeech.c | 29 +++-- libavcodec/tta.c | 28 +++-- libavcodec/twinvq.c | 36 +++++-- libavcodec/utils.c | 230 ++++++++++++++++++++++++++++++++++++---- libavcodec/version.h | 5 +- libavcodec/vmdav.c | 35 +++--- libavcodec/vorbisdec.c | 33 +++--- libavcodec/wavpack.c | 116 ++++++++------------ libavcodec/wma.h | 1 + libavcodec/wmadec.c | 30 +++--- libavcodec/wmaprodec.c | 63 ++++++----- libavcodec/wmavoice.c | 46 ++++---- libavcodec/ws-snd1.c | 38 +++++-- 64 files changed, 1590 insertions(+), 766 deletions(-) diff --git a/doc/APIchanges b/doc/APIchanges index f664376d3a..2c43e75dba 100644 --- a/doc/APIchanges +++ b/doc/APIchanges @@ -13,6 +13,13 @@ libavutil: 2011-04-18 API changes, most recent first: +2011-xx-xx - xxxxxxx - lavc 53.25.0 + Add nb_samples and extended_data fields to AVFrame. + Deprecate AVCODEC_MAX_AUDIO_FRAME_SIZE. + Deprecate avcodec_decode_audio3() in favor of avcodec_decode_audio4(). + avcodec_decode_audio4() writes output samples to an AVFrame, which allows + audio decoders to use get_buffer(). + 2011-xx-xx - xxxxxxx - lavc 53.24.0 Change AVFrame.data[4]/base[4]/linesize[4]/error[4] to [8] at next major bump. 
Change AVPicture.data[4]/linesize[4] to [8] at next major bump. diff --git a/libavcodec/8svx.c b/libavcodec/8svx.c index 3e3eae6c87..4f11b8bec4 100644 --- a/libavcodec/8svx.c +++ b/libavcodec/8svx.c @@ -32,6 +32,7 @@ /** decoder context */ typedef struct EightSvxContext { + AVFrame frame; uint8_t fib_acc[2]; const int8_t *table; @@ -83,13 +84,13 @@ static void raw_decode(uint8_t *dst, const int8_t *src, int src_size, } /** decode a frame */ -static int eightsvx_decode_frame(AVCodecContext *avctx, void *data, int *data_size, - AVPacket *avpkt) +static int eightsvx_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { EightSvxContext *esc = avctx->priv_data; int buf_size; - uint8_t *out_data = data; - int out_data_size; + uint8_t *out_data; + int ret; int is_compr = (avctx->codec_id != CODEC_ID_PCM_S8_PLANAR); /* for the first packet, copy data to buffer */ @@ -134,15 +135,18 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data, int *data_si /* decode next piece of data from the buffer */ buf_size = FFMIN(MAX_FRAME_SIZE, esc->data_size - esc->data_idx); if (buf_size <= 0) { - *data_size = 0; + *got_frame_ptr = 0; return avpkt->size; } - out_data_size = buf_size * (is_compr + 1) * avctx->channels; - if (*data_size < out_data_size) { - av_log(avctx, AV_LOG_ERROR, "Provided buffer with size %d is too small.\n", - *data_size); - return AVERROR(EINVAL); + + /* get output buffer */ + esc->frame.nb_samples = buf_size * (is_compr + 1); + if ((ret = avctx->get_buffer(avctx, &esc->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } + out_data = esc->frame.data[0]; + if (is_compr) { delta_decode(out_data, &esc->data[0][esc->data_idx], buf_size, &esc->fib_acc[0], esc->table, avctx->channels); @@ -158,7 +162,9 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data, int *data_si } } esc->data_idx += buf_size; - *data_size = out_data_size; + + *got_frame_ptr = 1; + *(AVFrame *)data = esc->frame; return avpkt->size; } @@ -186,6 +192,10 @@ static av_cold int eightsvx_decode_init(AVCodecContext *avctx) return -1; } avctx->sample_fmt = AV_SAMPLE_FMT_U8; + + avcodec_get_frame_defaults(&esc->frame); + avctx->coded_frame = &esc->frame; + return 0; } @@ -207,7 +217,7 @@ AVCodec ff_eightsvx_fib_decoder = { .init = eightsvx_decode_init, .close = eightsvx_decode_close, .decode = eightsvx_decode_frame, - .capabilities = CODEC_CAP_DELAY, + .capabilities = CODEC_CAP_DELAY | CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("8SVX fibonacci"), }; @@ -219,7 +229,7 @@ AVCodec ff_eightsvx_exp_decoder = { .init = eightsvx_decode_init, .close = eightsvx_decode_close, .decode = eightsvx_decode_frame, - .capabilities = CODEC_CAP_DELAY, + .capabilities = CODEC_CAP_DELAY | CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("8SVX exponential"), }; @@ -231,6 +241,6 @@ AVCodec ff_pcm_s8_planar_decoder = { .init = eightsvx_decode_init, .close = eightsvx_decode_close, .decode = eightsvx_decode_frame, - .capabilities = CODEC_CAP_DELAY, + .capabilities = CODEC_CAP_DELAY | CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("PCM signed 8-bit planar"), }; diff --git a/libavcodec/aac.h b/libavcodec/aac.h index 0653f810fd..30491fe85a 100644 --- a/libavcodec/aac.h +++ b/libavcodec/aac.h @@ -251,6 +251,7 @@ typedef struct { */ typedef struct { AVCodecContext *avctx; + AVFrame frame; MPEG4AudioConfig m4ac; diff --git a/libavcodec/aacdec.c b/libavcodec/aacdec.c index 1015030b9a..672ba1c648 100644 --- a/libavcodec/aacdec.c +++ b/libavcodec/aacdec.c @@ 
-646,6 +646,9 @@ static av_cold int aac_decode_init(AVCodecContext *avctx) cbrt_tableinit(); + avcodec_get_frame_defaults(&ac->frame); + avctx->coded_frame = &ac->frame; + return 0; } @@ -2113,12 +2116,12 @@ static int parse_adts_frame_header(AACContext *ac, GetBitContext *gb) } static int aac_decode_frame_int(AVCodecContext *avctx, void *data, - int *data_size, GetBitContext *gb) + int *got_frame_ptr, GetBitContext *gb) { AACContext *ac = avctx->priv_data; ChannelElement *che = NULL, *che_prev = NULL; enum RawDataBlockType elem_type, elem_type_prev = TYPE_END; - int err, elem_id, data_size_tmp; + int err, elem_id; int samples = 0, multiplier, audio_found = 0; if (show_bits(gb, 12) == 0xfff) { @@ -2222,24 +2225,26 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data, avctx->frame_size = samples; } - data_size_tmp = samples * avctx->channels * - av_get_bytes_per_sample(avctx->sample_fmt); - if (*data_size < data_size_tmp) { - av_log(avctx, AV_LOG_ERROR, - "Output buffer too small (%d) or trying to output too many samples (%d) for this frame.\n", - *data_size, data_size_tmp); - return -1; - } - *data_size = data_size_tmp; - if (samples) { + /* get output buffer */ + ac->frame.nb_samples = samples; + if ((err = avctx->get_buffer(avctx, &ac->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return err; + } + if (avctx->sample_fmt == AV_SAMPLE_FMT_FLT) - ac->fmt_conv.float_interleave(data, (const float **)ac->output_data, + ac->fmt_conv.float_interleave((float *)ac->frame.data[0], + (const float **)ac->output_data, samples, avctx->channels); else - ac->fmt_conv.float_to_int16_interleave(data, (const float **)ac->output_data, + ac->fmt_conv.float_to_int16_interleave((int16_t *)ac->frame.data[0], + (const float **)ac->output_data, samples, avctx->channels); + + *(AVFrame *)data = ac->frame; } + *got_frame_ptr = !!samples; if (ac->output_configured && audio_found) ac->output_configured = OC_LOCKED; @@ -2248,7 +2253,7 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data, } static int aac_decode_frame(AVCodecContext *avctx, void *data, - int *data_size, AVPacket *avpkt) + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; @@ -2259,7 +2264,7 @@ static int aac_decode_frame(AVCodecContext *avctx, void *data, init_get_bits(&gb, buf, buf_size * 8); - if ((err = aac_decode_frame_int(avctx, data, data_size, &gb)) < 0) + if ((err = aac_decode_frame_int(avctx, data, got_frame_ptr, &gb)) < 0) return err; buf_consumed = (get_bits_count(&gb) + 7) >> 3; @@ -2481,8 +2486,8 @@ static int read_audio_mux_element(struct LATMContext *latmctx, } -static int latm_decode_frame(AVCodecContext *avctx, void *out, int *out_size, - AVPacket *avpkt) +static int latm_decode_frame(AVCodecContext *avctx, void *out, + int *got_frame_ptr, AVPacket *avpkt) { struct LATMContext *latmctx = avctx->priv_data; int muxlength, err; @@ -2504,7 +2509,7 @@ static int latm_decode_frame(AVCodecContext *avctx, void *out, int *out_size, if (!latmctx->initialized) { if (!avctx->extradata) { - *out_size = 0; + *got_frame_ptr = 0; return avpkt->size; } else { if ((err = decode_audio_specific_config( @@ -2522,7 +2527,7 @@ static int latm_decode_frame(AVCodecContext *avctx, void *out, int *out_size, return AVERROR_INVALIDDATA; } - if ((err = aac_decode_frame_int(avctx, out, out_size, &gb)) < 0) + if ((err = aac_decode_frame_int(avctx, out, got_frame_ptr, &gb)) < 0) return err; return muxlength; @@ -2552,7 +2557,7 @@ AVCodec ff_aac_decoder = { 
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE }, - .capabilities = CODEC_CAP_CHANNEL_CONF, + .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1, .channel_layouts = aac_channel_layout, }; @@ -2573,6 +2578,6 @@ AVCodec ff_aac_latm_decoder = { .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE }, - .capabilities = CODEC_CAP_CHANNEL_CONF, + .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1, .channel_layouts = aac_channel_layout, }; diff --git a/libavcodec/ac3dec.c b/libavcodec/ac3dec.c index 8e216c039b..7e11cf49ce 100644 --- a/libavcodec/ac3dec.c +++ b/libavcodec/ac3dec.c @@ -208,6 +208,9 @@ static av_cold int ac3_decode_init(AVCodecContext *avctx) } s->downmixed = 1; + avcodec_get_frame_defaults(&s->frame); + avctx->coded_frame = &s->frame; + return 0; } @@ -1296,15 +1299,15 @@ static int decode_audio_block(AC3DecodeContext *s, int blk) /** * Decode a single AC-3 frame. */ -static int ac3_decode_frame(AVCodecContext * avctx, void *data, int *data_size, - AVPacket *avpkt) +static int ac3_decode_frame(AVCodecContext * avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; AC3DecodeContext *s = avctx->priv_data; - float *out_samples_flt = data; - int16_t *out_samples_s16 = data; - int blk, ch, err; + float *out_samples_flt; + int16_t *out_samples_s16; + int blk, ch, err, ret; const uint8_t *channel_map; const float *output[AC3_MAX_CHANNELS]; @@ -1321,7 +1324,6 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data, int *data_size, init_get_bits(&s->gbc, buf, buf_size * 8); /* parse the syncinfo */ - *data_size = 0; err = parse_frame_header(s); if (err) { @@ -1343,6 +1345,7 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data, int *data_size, /* TODO: add support for substreams and dependent frames */ if(s->frame_type == EAC3_FRAME_TYPE_DEPENDENT || s->substreamid) { av_log(avctx, AV_LOG_ERROR, "unsupported frame type : skipping frame\n"); + *got_frame_ptr = 0; return s->frame_size; } else { av_log(avctx, AV_LOG_ERROR, "invalid frame type\n"); @@ -1400,6 +1403,15 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data, int *data_size, if (s->bitstream_mode == 0x7 && s->channels > 1) avctx->audio_service_type = AV_AUDIO_SERVICE_TYPE_KARAOKE; + /* get output buffer */ + s->frame.nb_samples = s->num_blocks * 256; + if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; + } + out_samples_flt = (float *)s->frame.data[0]; + out_samples_s16 = (int16_t *)s->frame.data[0]; + /* decode the audio blocks */ channel_map = ff_ac3_dec_channel_map[s->output_mode & ~AC3_OUTPUT_LFEON][s->lfe_on]; for (ch = 0; ch < s->out_channels; ch++) @@ -1419,8 +1431,10 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data, int *data_size, out_samples_s16 += 256 * s->out_channels; } } - *data_size = s->num_blocks * 256 * avctx->channels * - av_get_bytes_per_sample(avctx->sample_fmt); + + *got_frame_ptr = 1; + *(AVFrame *)data = s->frame; + return FFMIN(buf_size, s->frame_size); } @@ -1458,6 +1472,7 @@ AVCodec ff_ac3_decoder = { .init = ac3_decode_init, .close = ac3_decode_end, .decode = ac3_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"), .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE @@ -1480,6 +1495,7 @@ AVCodec 
ff_eac3_decoder = { .init = ac3_decode_init, .close = ac3_decode_end, .decode = ac3_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52B (AC-3, E-AC-3)"), .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE diff --git a/libavcodec/ac3dec.h b/libavcodec/ac3dec.h index 38262514b6..56c6553477 100644 --- a/libavcodec/ac3dec.h +++ b/libavcodec/ac3dec.h @@ -68,6 +68,7 @@ typedef struct { AVClass *class; ///< class for AVOptions AVCodecContext *avctx; ///< parent context + AVFrame frame; ///< AVFrame for decoded output GetBitContext gbc; ///< bitstream reader ///@name Bit stream information diff --git a/libavcodec/adpcm.c b/libavcodec/adpcm.c index 4a818575cf..3ada328df3 100644 --- a/libavcodec/adpcm.c +++ b/libavcodec/adpcm.c @@ -84,6 +84,7 @@ static const int swf_index_tables[4][16] = { /* end of tables */ typedef struct ADPCMDecodeContext { + AVFrame frame; ADPCMChannelStatus status[6]; } ADPCMDecodeContext; @@ -124,6 +125,10 @@ static av_cold int adpcm_decode_init(AVCodecContext * avctx) break; } avctx->sample_fmt = AV_SAMPLE_FMT_S16; + + avcodec_get_frame_defaults(&c->frame); + avctx->coded_frame = &c->frame; + return 0; } @@ -501,9 +506,8 @@ static int get_nb_samples(AVCodecContext *avctx, const uint8_t *buf, decode_top_nibble_next = 1; \ } -static int adpcm_decode_frame(AVCodecContext *avctx, - void *data, int *data_size, - AVPacket *avpkt) +static int adpcm_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; @@ -514,7 +518,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, const uint8_t *src; int st; /* stereo */ int count1, count2; - int nb_samples, coded_samples, out_bps, out_size; + int nb_samples, coded_samples, ret; nb_samples = get_nb_samples(avctx, buf, buf_size, &coded_samples); if (nb_samples <= 0) { @@ -522,22 +526,22 @@ static int adpcm_decode_frame(AVCodecContext *avctx, return AVERROR_INVALIDDATA; } - out_bps = av_get_bytes_per_sample(avctx->sample_fmt); - out_size = nb_samples * avctx->channels * out_bps; - if (*data_size < out_size) { - av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n"); - return AVERROR(EINVAL); + /* get output buffer */ + c->frame.nb_samples = nb_samples; + if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } + samples = (short *)c->frame.data[0]; + /* use coded_samples when applicable */ /* it is always <= nb_samples, so the output buffer will be large enough */ if (coded_samples) { if (coded_samples != nb_samples) av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n"); - nb_samples = coded_samples; - out_size = nb_samples * avctx->channels * out_bps; + c->frame.nb_samples = nb_samples = coded_samples; } - samples = data; src = buf; st = avctx->channels == 2 ? 
1 : 0; @@ -576,7 +580,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, cs->step_index = 88; } - samples = (short*)data + channel; + samples = (short *)c->frame.data[0] + channel; for (m = 0; m < 32; m++) { *samples = adpcm_ima_qt_expand_nibble(cs, src[0] & 0x0F, 3); @@ -628,7 +632,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, } for (i = 0; i < avctx->channels; i++) { - samples = (short*)data + i; + samples = (short *)c->frame.data[0] + i; cs = &c->status[i]; for (n = nb_samples >> 1; n > 0; n--, src++) { uint8_t v = *src; @@ -965,7 +969,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, } } - out_size = count * 28 * avctx->channels * out_bps; + c->frame.nb_samples = count * 28; src = src_end; break; } @@ -1144,7 +1148,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, prev[0][i] = (int16_t)bytestream_get_be16(&src); for (ch = 0; ch <= st; ch++) { - samples = (unsigned short *) data + ch; + samples = (short *)c->frame.data[0] + ch; /* Read in every sample for this channel. */ for (i = 0; i < nb_samples / 14; i++) { @@ -1177,7 +1181,10 @@ static int adpcm_decode_frame(AVCodecContext *avctx, default: return -1; } - *data_size = out_size; + + *got_frame_ptr = 1; + *(AVFrame *)data = c->frame; + return src - buf; } @@ -1190,6 +1197,7 @@ AVCodec ff_ ## name_ ## _decoder = { \ .priv_data_size = sizeof(ADPCMDecodeContext), \ .init = adpcm_decode_init, \ .decode = adpcm_decode_frame, \ + .capabilities = CODEC_CAP_DR1, \ .long_name = NULL_IF_CONFIG_SMALL(long_name_), \ } diff --git a/libavcodec/adx.h b/libavcodec/adx.h index da40eec929..92abe5f163 100644 --- a/libavcodec/adx.h +++ b/libavcodec/adx.h @@ -40,6 +40,7 @@ typedef struct { } ADXChannelState; typedef struct { + AVFrame frame; int channels; ADXChannelState prev[2]; int header_parsed; diff --git a/libavcodec/adxdec.c b/libavcodec/adxdec.c index 4558060781..e9104133fa 100644 --- a/libavcodec/adxdec.c +++ b/libavcodec/adxdec.c @@ -50,6 +50,10 @@ static av_cold int adx_decode_init(AVCodecContext *avctx) c->channels = avctx->channels; avctx->sample_fmt = AV_SAMPLE_FMT_S16; + + avcodec_get_frame_defaults(&c->frame); + avctx->coded_frame = &c->frame; + return 0; } @@ -89,36 +93,42 @@ static int adx_decode(ADXContext *c, int16_t *out, const uint8_t *in, int ch) return 0; } -static int adx_decode_frame(AVCodecContext *avctx, void *data, int *data_size, - AVPacket *avpkt) +static int adx_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { int buf_size = avpkt->size; ADXContext *c = avctx->priv_data; - int16_t *samples = data; + int16_t *samples; const uint8_t *buf = avpkt->data; - int num_blocks, ch; + int num_blocks, ch, ret; if (c->eof) { - *data_size = 0; + *got_frame_ptr = 0; return buf_size; } - /* 18 bytes of data are expanded into 32*2 bytes of audio, - so guard against buffer overflows */ + /* calculate number of blocks in the packet */ num_blocks = buf_size / (BLOCK_SIZE * c->channels); - if (num_blocks > *data_size / (BLOCK_SAMPLES * c->channels)) { - buf_size = (*data_size / (BLOCK_SAMPLES * c->channels)) * BLOCK_SIZE; - num_blocks = buf_size / (BLOCK_SIZE * c->channels); - } - if (!buf_size || buf_size % (BLOCK_SIZE * avctx->channels)) { + + /* if the packet is not an even multiple of BLOCK_SIZE, check for an EOF + packet */ + if (!num_blocks || buf_size % (BLOCK_SIZE * avctx->channels)) { if (buf_size >= 4 && (AV_RB16(buf) & 0x8000)) { c->eof = 1; - *data_size = 0; + *got_frame_ptr = 0; return avpkt->size; } return AVERROR_INVALIDDATA; } + /* get output buffer */ + 
c->frame.nb_samples = num_blocks * BLOCK_SAMPLES; + if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; + } + samples = (int16_t *)c->frame.data[0]; + while (num_blocks--) { for (ch = 0; ch < c->channels; ch++) { if (adx_decode(c, samples + ch, buf, ch)) { @@ -132,7 +142,9 @@ static int adx_decode_frame(AVCodecContext *avctx, void *data, int *data_size, samples += BLOCK_SAMPLES * c->channels; } - *data_size = (uint8_t*)samples - (uint8_t*)data; + *got_frame_ptr = 1; + *(AVFrame *)data = c->frame; + return buf - avpkt->data; } @@ -143,5 +155,6 @@ AVCodec ff_adpcm_adx_decoder = { .priv_data_size = sizeof(ADXContext), .init = adx_decode_init, .decode = adx_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("SEGA CRI ADX ADPCM"), }; diff --git a/libavcodec/alac.c b/libavcodec/alac.c index 1056e6c8f4..47234ecf13 100644 --- a/libavcodec/alac.c +++ b/libavcodec/alac.c @@ -62,10 +62,10 @@ typedef struct { AVCodecContext *avctx; + AVFrame frame; GetBitContext gb; int numchannels; - int bytespersample; /* buffers */ int32_t *predicterror_buffer[MAX_CHANNELS]; @@ -351,9 +351,8 @@ static void interleave_stereo_24(int32_t *buffer[MAX_CHANNELS], } } -static int alac_decode_frame(AVCodecContext *avctx, - void *outbuffer, int *outputsize, - AVPacket *avpkt) +static int alac_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *inbuffer = avpkt->data; int input_buffer_size = avpkt->size; @@ -366,7 +365,7 @@ static int alac_decode_frame(AVCodecContext *avctx, int isnotcompressed; uint8_t interlacing_shift; uint8_t interlacing_leftweight; - int i, ch; + int i, ch, ret; init_get_bits(&alac->gb, inbuffer, input_buffer_size * 8); @@ -401,14 +400,17 @@ static int alac_decode_frame(AVCodecContext *avctx, } else outputsamples = alac->setinfo_max_samples_per_frame; - alac->bytespersample = channels * av_get_bytes_per_sample(avctx->sample_fmt); - - if(outputsamples > *outputsize / alac->bytespersample){ - av_log(avctx, AV_LOG_ERROR, "sample buffer too small\n"); - return -1; + /* get output buffer */ + if (outputsamples > INT32_MAX) { + av_log(avctx, AV_LOG_ERROR, "unsupported block size: %u\n", outputsamples); + return AVERROR_INVALIDDATA; + } + alac->frame.nb_samples = outputsamples; + if ((ret = avctx->get_buffer(avctx, &alac->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } - *outputsize = outputsamples * alac->bytespersample; readsamplesize = alac->setinfo_sample_size - alac->extra_bits + channels - 1; if (readsamplesize > MIN_CACHE_BITS) { av_log(avctx, AV_LOG_ERROR, "readsamplesize too big (%d)\n", readsamplesize); @@ -501,21 +503,23 @@ static int alac_decode_frame(AVCodecContext *avctx, switch(alac->setinfo_sample_size) { case 16: if (channels == 2) { - interleave_stereo_16(alac->outputsamples_buffer, outbuffer, - outputsamples); + interleave_stereo_16(alac->outputsamples_buffer, + (int16_t *)alac->frame.data[0], outputsamples); } else { + int16_t *outbuffer = (int16_t *)alac->frame.data[0]; for (i = 0; i < outputsamples; i++) { - ((int16_t*)outbuffer)[i] = alac->outputsamples_buffer[0][i]; + outbuffer[i] = alac->outputsamples_buffer[0][i]; } } break; case 24: if (channels == 2) { - interleave_stereo_24(alac->outputsamples_buffer, outbuffer, - outputsamples); + interleave_stereo_24(alac->outputsamples_buffer, + (int32_t *)alac->frame.data[0], outputsamples); } else { + int32_t *outbuffer = (int32_t *)alac->frame.data[0]; 
for (i = 0; i < outputsamples; i++) - ((int32_t *)outbuffer)[i] = alac->outputsamples_buffer[0][i] << 8; + outbuffer[i] = alac->outputsamples_buffer[0][i] << 8; } break; } @@ -523,6 +527,9 @@ static int alac_decode_frame(AVCodecContext *avctx, if (input_buffer_size * 8 - get_bits_count(&alac->gb) > 8) av_log(avctx, AV_LOG_ERROR, "Error : %d bits left\n", input_buffer_size * 8 - get_bits_count(&alac->gb)); + *got_frame_ptr = 1; + *(AVFrame *)data = alac->frame; + return input_buffer_size; } @@ -637,6 +644,9 @@ static av_cold int alac_decode_init(AVCodecContext * avctx) return ret; } + avcodec_get_frame_defaults(&alac->frame); + avctx->coded_frame = &alac->frame; + return 0; } @@ -648,5 +658,6 @@ AVCodec ff_alac_decoder = { .init = alac_decode_init, .close = alac_decode_close, .decode = alac_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("ALAC (Apple Lossless Audio Codec)"), }; diff --git a/libavcodec/alsdec.c b/libavcodec/alsdec.c index e7a0de24b1..71495803a3 100644 --- a/libavcodec/alsdec.c +++ b/libavcodec/alsdec.c @@ -191,6 +191,7 @@ typedef struct { typedef struct { AVCodecContext *avctx; + AVFrame frame; ALSSpecificConfig sconf; GetBitContext gb; DSPContext dsp; @@ -1415,15 +1416,14 @@ static int read_frame_data(ALSDecContext *ctx, unsigned int ra_frame) /** Decode an ALS frame. */ -static int decode_frame(AVCodecContext *avctx, - void *data, int *data_size, +static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt) { ALSDecContext *ctx = avctx->priv_data; ALSSpecificConfig *sconf = &ctx->sconf; const uint8_t *buffer = avpkt->data; int buffer_size = avpkt->size; - int invalid_frame, size; + int invalid_frame, ret; unsigned int c, sample, ra_frame, bytes_read, shift; init_get_bits(&ctx->gb, buffer, buffer_size * 8); @@ -1448,21 +1448,17 @@ static int decode_frame(AVCodecContext *avctx, ctx->frame_id++; - // check for size of decoded data - size = ctx->cur_frame_length * avctx->channels * - av_get_bytes_per_sample(avctx->sample_fmt); - - if (size > *data_size) { - av_log(avctx, AV_LOG_ERROR, "Decoded data exceeds buffer size.\n"); - return -1; + /* get output buffer */ + ctx->frame.nb_samples = ctx->cur_frame_length; + if ((ret = avctx->get_buffer(avctx, &ctx->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } - *data_size = size; - // transform decoded frame into output format #define INTERLEAVE_OUTPUT(bps) \ { \ - int##bps##_t *dest = (int##bps##_t*) data; \ + int##bps##_t *dest = (int##bps##_t*)ctx->frame.data[0]; \ shift = bps - ctx->avctx->bits_per_raw_sample; \ for (sample = 0; sample < ctx->cur_frame_length; sample++) \ for (c = 0; c < avctx->channels; c++) \ @@ -1480,7 +1476,7 @@ static int decode_frame(AVCodecContext *avctx, int swap = HAVE_BIGENDIAN != sconf->msb_first; if (ctx->avctx->bits_per_raw_sample == 24) { - int32_t *src = data; + int32_t *src = (int32_t *)ctx->frame.data[0]; for (sample = 0; sample < ctx->cur_frame_length * avctx->channels; @@ -1501,22 +1497,25 @@ static int decode_frame(AVCodecContext *avctx, if (swap) { if (ctx->avctx->bits_per_raw_sample <= 16) { - int16_t *src = (int16_t*) data; + int16_t *src = (int16_t*) ctx->frame.data[0]; int16_t *dest = (int16_t*) ctx->crc_buffer; for (sample = 0; sample < ctx->cur_frame_length * avctx->channels; sample++) *dest++ = av_bswap16(src[sample]); } else { - ctx->dsp.bswap_buf((uint32_t*)ctx->crc_buffer, data, + ctx->dsp.bswap_buf((uint32_t*)ctx->crc_buffer, + (uint32_t *)ctx->frame.data[0], 
ctx->cur_frame_length * avctx->channels); } crc_source = ctx->crc_buffer; } else { - crc_source = data; + crc_source = ctx->frame.data[0]; } - ctx->crc = av_crc(ctx->crc_table, ctx->crc, crc_source, size); + ctx->crc = av_crc(ctx->crc_table, ctx->crc, crc_source, + ctx->cur_frame_length * avctx->channels * + av_get_bytes_per_sample(avctx->sample_fmt)); } @@ -1527,6 +1526,9 @@ static int decode_frame(AVCodecContext *avctx, } } + *got_frame_ptr = 1; + *(AVFrame *)data = ctx->frame; + bytes_read = invalid_frame ? buffer_size : (get_bits_count(&ctx->gb) + 7) >> 3; @@ -1724,6 +1726,9 @@ static av_cold int decode_init(AVCodecContext *avctx) dsputil_init(&ctx->dsp, avctx); + avcodec_get_frame_defaults(&ctx->frame); + avctx->coded_frame = &ctx->frame; + return 0; } @@ -1747,7 +1752,7 @@ AVCodec ff_als_decoder = { .close = decode_end, .decode = decode_frame, .flush = flush, - .capabilities = CODEC_CAP_SUBFRAMES, + .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 Audio Lossless Coding (ALS)"), }; diff --git a/libavcodec/amrnbdec.c b/libavcodec/amrnbdec.c index 501b137780..b594af760a 100644 --- a/libavcodec/amrnbdec.c +++ b/libavcodec/amrnbdec.c @@ -95,6 +95,7 @@ #define AMR_AGC_ALPHA 0.9 typedef struct AMRContext { + AVFrame avframe; ///< AVFrame for decoded samples AMRNBFrame frame; ///< decoded AMR parameters (lsf coefficients, codebook indexes, etc) uint8_t bad_frame_indicator; ///< bad frame ? 1 : 0 enum Mode cur_frame_mode; @@ -167,6 +168,9 @@ static av_cold int amrnb_decode_init(AVCodecContext *avctx) for (i = 0; i < 4; i++) p->prediction_error[i] = MIN_ENERGY; + avcodec_get_frame_defaults(&p->avframe); + avctx->coded_frame = &p->avframe; + return 0; } @@ -919,21 +923,29 @@ static void postfilter(AMRContext *p, float *lpc, float *buf_out) /// @} -static int amrnb_decode_frame(AVCodecContext *avctx, void *data, int *data_size, - AVPacket *avpkt) +static int amrnb_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { AMRContext *p = avctx->priv_data; // pointer to private data const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; - float *buf_out = data; // pointer to the output data buffer - int i, subframe; + float *buf_out; // pointer to the output data buffer + int i, subframe, ret; float fixed_gain_factor; AMRFixed fixed_sparse = {0}; // fixed vector up to anti-sparseness processing float spare_vector[AMR_SUBFRAME_SIZE]; // extra stack space to hold result from anti-sparseness processing float synth_fixed_gain; // the fixed gain that synthesis should use const float *synth_fixed_vector; // pointer to the fixed vector that synthesis should use + /* get output buffer */ + p->avframe.nb_samples = AMR_BLOCK_SIZE; + if ((ret = avctx->get_buffer(avctx, &p->avframe)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; + } + buf_out = (float *)p->avframe.data[0]; + p->cur_frame_mode = unpack_bitstream(p, buf, buf_size); if (p->cur_frame_mode == MODE_DTX) { av_log_missing_feature(avctx, "dtx mode", 1); @@ -1028,8 +1040,8 @@ static int amrnb_decode_frame(AVCodecContext *avctx, void *data, int *data_size, ff_weighted_vector_sumf(p->lsf_avg, p->lsf_avg, p->lsf_q[3], 0.84, 0.16, LP_FILTER_ORDER); - /* report how many samples we got */ - *data_size = AMR_BLOCK_SIZE * sizeof(float); + *got_frame_ptr = 1; + *(AVFrame *)data = p->avframe; /* return the amount of bytes consumed if everything was OK */ return frame_sizes_nb[p->cur_frame_mode] + 1; // +7 for rounding and +8 for TOC @@ -1043,6 
+1055,7 @@ AVCodec ff_amrnb_decoder = { .priv_data_size = sizeof(AMRContext), .init = amrnb_decode_init, .decode = amrnb_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Adaptive Multi-Rate NarrowBand"), .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_FLT,AV_SAMPLE_FMT_NONE}, }; diff --git a/libavcodec/amrwbdec.c b/libavcodec/amrwbdec.c index d4bb7760ef..d4aa557d07 100644 --- a/libavcodec/amrwbdec.c +++ b/libavcodec/amrwbdec.c @@ -41,6 +41,7 @@ #include "amrwbdata.h" typedef struct { + AVFrame avframe; ///< AVFrame for decoded samples AMRWBFrame frame; ///< AMRWB parameters decoded from bitstream enum Mode fr_cur_mode; ///< mode index of current frame uint8_t fr_quality; ///< frame quality index (FQI) @@ -102,6 +103,9 @@ static av_cold int amrwb_decode_init(AVCodecContext *avctx) for (i = 0; i < 4; i++) ctx->prediction_error[i] = MIN_ENERGY; + avcodec_get_frame_defaults(&ctx->avframe); + avctx->coded_frame = &ctx->avframe; + return 0; } @@ -1062,15 +1066,15 @@ static void update_sub_state(AMRWBContext *ctx) LP_ORDER_16k * sizeof(float)); } -static int amrwb_decode_frame(AVCodecContext *avctx, void *data, int *data_size, - AVPacket *avpkt) +static int amrwb_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { AMRWBContext *ctx = avctx->priv_data; AMRWBFrame *cf = &ctx->frame; const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; int expected_fr_size, header_size; - float *buf_out = data; + float *buf_out; float spare_vector[AMRWB_SFR_SIZE]; // extra stack space to hold result from anti-sparseness processing float fixed_gain_factor; // fixed gain correction factor (gamma) float *synth_fixed_vector; // pointer to the fixed vector that synthesis should use @@ -1080,7 +1084,15 @@ static int amrwb_decode_frame(AVCodecContext *avctx, void *data, int *data_size, float hb_exc[AMRWB_SFR_SIZE_16k]; // excitation for the high frequency band float hb_samples[AMRWB_SFR_SIZE_16k]; // filtered high-band samples from synthesis float hb_gain; - int sub, i; + int sub, i, ret; + + /* get output buffer */ + ctx->avframe.nb_samples = 4 * AMRWB_SFR_SIZE_16k; + if ((ret = avctx->get_buffer(avctx, &ctx->avframe)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; + } + buf_out = (float *)ctx->avframe.data[0]; header_size = decode_mime_header(ctx, buf); expected_fr_size = ((cf_sizes_wb[ctx->fr_cur_mode] + 7) >> 3) + 1; @@ -1088,7 +1100,7 @@ static int amrwb_decode_frame(AVCodecContext *avctx, void *data, int *data_size, if (buf_size < expected_fr_size) { av_log(avctx, AV_LOG_ERROR, "Frame too small (%d bytes). 
Truncated file?\n", buf_size); - *data_size = 0; + *got_frame_ptr = 0; return buf_size; } @@ -1219,8 +1231,8 @@ static int amrwb_decode_frame(AVCodecContext *avctx, void *data, int *data_size, memcpy(ctx->isp_sub4_past, ctx->isp[3], LP_ORDER * sizeof(ctx->isp[3][0])); memcpy(ctx->isf_past_final, ctx->isf_cur, LP_ORDER * sizeof(float)); - /* report how many samples we got */ - *data_size = 4 * AMRWB_SFR_SIZE_16k * sizeof(float); + *got_frame_ptr = 1; + *(AVFrame *)data = ctx->avframe; return expected_fr_size; } @@ -1232,6 +1244,7 @@ AVCodec ff_amrwb_decoder = { .priv_data_size = sizeof(AMRWBContext), .init = amrwb_decode_init, .decode = amrwb_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Adaptive Multi-Rate WideBand"), .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_FLT,AV_SAMPLE_FMT_NONE}, }; diff --git a/libavcodec/apedec.c b/libavcodec/apedec.c index 7702b291c8..2d03c554a6 100644 --- a/libavcodec/apedec.c +++ b/libavcodec/apedec.c @@ -129,6 +129,7 @@ typedef struct APEPredictor { /** Decoder context */ typedef struct APEContext { AVCodecContext *avctx; + AVFrame frame; DSPContext dsp; int channels; int samples; ///< samples left to decode in current frame @@ -215,6 +216,10 @@ static av_cold int ape_decode_init(AVCodecContext *avctx) dsputil_init(&s->dsp, avctx); avctx->sample_fmt = AV_SAMPLE_FMT_S16; avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO; + + avcodec_get_frame_defaults(&s->frame); + avctx->coded_frame = &s->frame; + return 0; filter_alloc_fail: ape_decode_close(avctx); @@ -805,16 +810,15 @@ static void ape_unpack_stereo(APEContext *ctx, int count) } } -static int ape_decode_frame(AVCodecContext *avctx, - void *data, int *data_size, - AVPacket *avpkt) +static int ape_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; APEContext *s = avctx->priv_data; - int16_t *samples = data; - int i; - int blockstodecode, out_size; + int16_t *samples; + int i, ret; + int blockstodecode; int bytes_used = 0; /* this should never be negative, but bad things will happen if it is, so @@ -826,7 +830,7 @@ static int ape_decode_frame(AVCodecContext *avctx, void *tmp_data; if (!buf_size) { - *data_size = 0; + *got_frame_ptr = 0; return 0; } if (buf_size < 8) { @@ -874,18 +878,19 @@ static int ape_decode_frame(AVCodecContext *avctx, } if (!s->data) { - *data_size = 0; + *got_frame_ptr = 0; return buf_size; } blockstodecode = FFMIN(BLOCKS_PER_LOOP, s->samples); - out_size = blockstodecode * avctx->channels * - av_get_bytes_per_sample(avctx->sample_fmt); - if (*data_size < out_size) { - av_log(avctx, AV_LOG_ERROR, "Output buffer is too small.\n"); - return AVERROR(EINVAL); + /* get output buffer */ + s->frame.nb_samples = blockstodecode; + if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } + samples = (int16_t *)s->frame.data[0]; s->error=0; @@ -909,7 +914,9 @@ static int ape_decode_frame(AVCodecContext *avctx, s->samples -= blockstodecode; - *data_size = out_size; + *got_frame_ptr = 1; + *(AVFrame *)data = s->frame; + return bytes_used; } @@ -927,7 +934,7 @@ AVCodec ff_ape_decoder = { .init = ape_decode_init, .close = ape_decode_close, .decode = ape_decode_frame, - .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DELAY, + .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DELAY | CODEC_CAP_DR1, .flush = ape_flush, .long_name = 
NULL_IF_CONFIG_SMALL("Monkey's Audio"), }; diff --git a/libavcodec/atrac1.c b/libavcodec/atrac1.c index 770b1bf90e..9ead80d5c8 100644 --- a/libavcodec/atrac1.c +++ b/libavcodec/atrac1.c @@ -72,6 +72,7 @@ typedef struct { * The atrac1 context, holds all needed parameters for decoding */ typedef struct { + AVFrame frame; AT1SUCtx SUs[AT1_MAX_CHANNELS]; ///< channel sound unit DECLARE_ALIGNED(32, float, spec)[AT1_SU_SAMPLES]; ///< the mdct spectrum buffer @@ -273,14 +274,14 @@ static void at1_subband_synthesis(AT1Ctx *q, AT1SUCtx* su, float *pOut) static int atrac1_decode_frame(AVCodecContext *avctx, void *data, - int *data_size, AVPacket *avpkt) + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; AT1Ctx *q = avctx->priv_data; - int ch, ret, out_size; + int ch, ret; GetBitContext gb; - float* samples = data; + float *samples; if (buf_size < 212 * q->channels) { @@ -288,12 +289,13 @@ static int atrac1_decode_frame(AVCodecContext *avctx, void *data, return AVERROR_INVALIDDATA; } - out_size = q->channels * AT1_SU_SAMPLES * - av_get_bytes_per_sample(avctx->sample_fmt); - if (*data_size < out_size) { - av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); - return AVERROR(EINVAL); + /* get output buffer */ + q->frame.nb_samples = AT1_SU_SAMPLES; + if ((ret = avctx->get_buffer(avctx, &q->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } + samples = (float *)q->frame.data[0]; for (ch = 0; ch < q->channels; ch++) { AT1SUCtx* su = &q->SUs[ch]; @@ -321,7 +323,9 @@ static int atrac1_decode_frame(AVCodecContext *avctx, void *data, AT1_SU_SAMPLES, 2); } - *data_size = out_size; + *got_frame_ptr = 1; + *(AVFrame *)data = q->frame; + return avctx->block_align; } @@ -389,6 +393,9 @@ static av_cold int atrac1_decode_init(AVCodecContext *avctx) q->SUs[1].spectrum[0] = q->SUs[1].spec1; q->SUs[1].spectrum[1] = q->SUs[1].spec2; + avcodec_get_frame_defaults(&q->frame); + avctx->coded_frame = &q->frame; + return 0; } @@ -401,5 +408,6 @@ AVCodec ff_atrac1_decoder = { .init = atrac1_decode_init, .close = atrac1_decode_end, .decode = atrac1_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Atrac 1 (Adaptive TRansform Acoustic Coding)"), }; diff --git a/libavcodec/atrac3.c b/libavcodec/atrac3.c index 3a48a5a647..bdd03402da 100644 --- a/libavcodec/atrac3.c +++ b/libavcodec/atrac3.c @@ -86,6 +86,7 @@ typedef struct { } channel_unit; typedef struct { + AVFrame frame; GetBitContext gb; //@{ /** stream data */ @@ -823,16 +824,16 @@ static int decodeFrame(ATRAC3Context *q, const uint8_t* databuf, * @param avctx pointer to the AVCodecContext */ -static int atrac3_decode_frame(AVCodecContext *avctx, - void *data, int *data_size, - AVPacket *avpkt) { +static int atrac3_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) +{ const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; ATRAC3Context *q = avctx->priv_data; - int result = 0, out_size; + int result; const uint8_t* databuf; - float *samples_flt = data; - int16_t *samples_s16 = data; + float *samples_flt; + int16_t *samples_s16; if (buf_size < avctx->block_align) { av_log(avctx, AV_LOG_ERROR, @@ -840,12 +841,14 @@ static int atrac3_decode_frame(AVCodecContext *avctx, return AVERROR_INVALIDDATA; } - out_size = SAMPLES_PER_FRAME * q->channels * - av_get_bytes_per_sample(avctx->sample_fmt); - if (*data_size < out_size) { - av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); - return AVERROR(EINVAL); + 
/* get output buffer */ + q->frame.nb_samples = SAMPLES_PER_FRAME; + if ((result = avctx->get_buffer(avctx, &q->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return result; } + samples_flt = (float *)q->frame.data[0]; + samples_s16 = (int16_t *)q->frame.data[0]; /* Check if we need to descramble and what buffer to pass on. */ if (q->scrambled_stream) { @@ -875,7 +878,9 @@ static int atrac3_decode_frame(AVCodecContext *avctx, (const float **)q->outSamples, SAMPLES_PER_FRAME, q->channels); } - *data_size = out_size; + + *got_frame_ptr = 1; + *(AVFrame *)data = q->frame; return avctx->block_align; } @@ -1047,6 +1052,9 @@ static av_cold int atrac3_decode_init(AVCodecContext *avctx) } } + avcodec_get_frame_defaults(&q->frame); + avctx->coded_frame = &q->frame; + return 0; } @@ -1060,6 +1068,6 @@ AVCodec ff_atrac3_decoder = .init = atrac3_decode_init, .close = atrac3_decode_close, .decode = atrac3_decode_frame, - .capabilities = CODEC_CAP_SUBFRAMES, + .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Atrac 3 (Adaptive TRansform Acoustic Coding 3)"), }; diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h index eeafce4c45..83fb39b99e 100644 --- a/libavcodec/avcodec.h +++ b/libavcodec/avcodec.h @@ -480,8 +480,10 @@ enum CodecID { #define CH_LAYOUT_STEREO_DOWNMIX AV_CH_LAYOUT_STEREO_DOWNMIX #endif +#if FF_API_OLD_DECODE_AUDIO /* in bytes */ #define AVCODEC_MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio +#endif /** * Required number of additionally allocated bytes at the end of the input bitstream for decoding. @@ -933,13 +935,24 @@ typedef struct AVFrame { #define AV_NUM_DATA_POINTERS 8 #endif /** - * pointer to the picture planes. + * pointer to the picture/channel planes. * This might be different from the first allocated byte - * - encoding: - * - decoding: + * - encoding: Set by user + * - decoding: set by AVCodecContext.get_buffer() */ uint8_t *data[AV_NUM_DATA_POINTERS]; + + /** + * Size, in bytes, of the data for each picture/channel plane. + * + * For audio, only linesize[0] may be set. For planar audio, each channel + * plane must be the same size. + * + * - encoding: Set by user (video only) + * - decoding: set by AVCodecContext.get_buffer() + */ int linesize[AV_NUM_DATA_POINTERS]; + /** * pointer to the first allocated byte of the picture. Can be used in get_buffer/release_buffer. * This isn't used by libavcodec unless the default get/release_buffer() is used. @@ -993,7 +1006,7 @@ typedef struct AVFrame { * buffer age (1->was last buffer and dint change, 2->..., ...). * Set to INT_MAX if the buffer has not been used yet. * - encoding: unused - * - decoding: MUST be set by get_buffer(). + * - decoding: MUST be set by get_buffer() for video. */ int age; @@ -1190,6 +1203,33 @@ typedef struct AVFrame { * - decoding: Set by libavcodec. */ void *thread_opaque; + + /** + * number of audio samples (per channel) described by this frame + * - encoding: unused + * - decoding: Set by libavcodec + */ + int nb_samples; + + /** + * pointers to the data planes/channels. + * + * For video, this should simply point to data[]. + * + * For planar audio, each channel has a separate data pointer, and + * linesize[0] contains the size of each channel buffer. + * For packed audio, there is just one data pointer, and linesize[0] + * contains the total size of the buffer for all channels. 
+ * + * Note: Both data and extended_data will always be set by get_buffer(), + * but for planar audio with more channels that can fit in data, + * extended_data must be used by the decoder in order to access all + * channels. + * + * encoding: unused + * decoding: set by AVCodecContext.get_buffer() + */ + uint8_t **extended_data; } AVFrame; struct AVCodecInternal; @@ -1545,15 +1585,56 @@ typedef struct AVCodecContext { /** * Called at the beginning of each frame to get a buffer for it. - * If pic.reference is set then the frame will be read later by libavcodec. - * avcodec_align_dimensions2() should be used to find the required width and - * height, as they normally need to be rounded up to the next multiple of 16. + * + * The function will set AVFrame.data[], AVFrame.linesize[]. + * AVFrame.extended_data[] must also be set, but it should be the same as + * AVFrame.data[] except for planar audio with more channels than can fit + * in AVFrame.data[]. In that case, AVFrame.data[] shall still contain as + * many data pointers as it can hold. + * * if CODEC_CAP_DR1 is not set then get_buffer() must call * avcodec_default_get_buffer() instead of providing buffers allocated by * some other means. + * + * AVFrame.data[] should be 32- or 16-byte-aligned unless the CPU doesn't + * need it. avcodec_default_get_buffer() aligns the output buffer properly, + * but if get_buffer() is overridden then alignment considerations should + * be taken into account. + * + * @see avcodec_default_get_buffer() + * + * Video: + * + * If pic.reference is set then the frame will be read later by libavcodec. + * avcodec_align_dimensions2() should be used to find the required width and + * height, as they normally need to be rounded up to the next multiple of 16. + * * If frame multithreading is used and thread_safe_callbacks is set, - * it may be called from a different thread, but not from more than one at once. - * Does not need to be reentrant. + * it may be called from a different thread, but not from more than one at + * once. Does not need to be reentrant. + * + * @see release_buffer(), reget_buffer() + * @see avcodec_align_dimensions2() + * + * Audio: + * + * Decoders request a buffer of a particular size by setting + * AVFrame.nb_samples prior to calling get_buffer(). The decoder may, + * however, utilize only part of the buffer by setting AVFrame.nb_samples + * to a smaller value in the output frame. + * + * Decoders cannot use the buffer after returning from + * avcodec_decode_audio4(), so they will not call release_buffer(), as it + * is assumed to be released immediately upon return. + * + * As a convenience, av_samples_get_buffer_size() and + * av_samples_fill_arrays() in libavutil may be used by custom get_buffer() + * functions to find the required data size and to fill data pointers and + * linesize. In AVFrame.linesize, only linesize[0] may be set for audio + * since all planes must be the same size. + * + * @see av_samples_get_buffer_size(), av_samples_fill_arrays() + * * - encoding: unused * - decoding: Set by libavcodec, user can override. */ @@ -3882,7 +3963,12 @@ int avcodec_open(AVCodecContext *avctx, AVCodec *codec); */ int avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVDictionary **options); +#if FF_API_OLD_DECODE_AUDIO /** + * Wrapper function which calls avcodec_decode_audio4. + * + * @deprecated Use avcodec_decode_audio4 instead. + * * Decode the audio frame of size avpkt->size from avpkt->data into samples. 
* Some decoders may support multiple frames in a single AVPacket, such * decoders would then just decode the first frame. In this case, @@ -3917,6 +4003,8 @@ int avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVDictionary **options) * * @param avctx the codec context * @param[out] samples the output buffer, sample type in avctx->sample_fmt + * If the sample format is planar, each channel plane will + * be the same size, with no padding between channels. * @param[in,out] frame_size_ptr the output buffer size in bytes * @param[in] avpkt The input AVPacket containing the input buffer. * You can create such packet with av_init_packet() and by then setting @@ -3925,9 +4013,46 @@ int avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVDictionary **options) * @return On error a negative value is returned, otherwise the number of bytes * used or zero if no frame data was decompressed (used) from the input AVPacket. */ -int avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples, +attribute_deprecated int avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples, int *frame_size_ptr, AVPacket *avpkt); +#endif + +/** + * Decode the audio frame of size avpkt->size from avpkt->data into frame. + * + * Some decoders may support multiple frames in a single AVPacket. Such + * decoders would then just decode the first frame. In this case, + * avcodec_decode_audio4 has to be called again with an AVPacket containing + * the remaining data in order to decode the second frame, etc... + * Even if no frames are returned, the packet needs to be fed to the decoder + * with remaining data until it is completely consumed or an error occurs. + * + * @warning The input buffer, avpkt->data must be FF_INPUT_BUFFER_PADDING_SIZE + * larger than the actual read bytes because some optimized bitstream + * readers read 32 or 64 bits at once and could read over the end. + * + * @note You might have to align the input buffer. The alignment requirements + * depend on the CPU and the decoder. + * + * @param avctx the codec context + * @param[out] frame The AVFrame in which to store decoded audio samples. + * Decoders request a buffer of a particular size by setting + * AVFrame.nb_samples prior to calling get_buffer(). The + * decoder may, however, only utilize part of the buffer by + * setting AVFrame.nb_samples to a smaller value in the + * output frame. + * @param[out] got_frame_ptr Zero if no frame could be decoded, otherwise it is + * non-zero. + * @param[in] avpkt The input AVPacket containing the input buffer. + * At least avpkt->data and avpkt->size should be set. Some + * decoders might also require additional fields to be set. + * @return A negative error code is returned if an error occurred during + * decoding, otherwise the number of bytes consumed from the input + * AVPacket is returned. + */ +int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, + int *got_frame_ptr, AVPacket *avpkt); /** * Decode the video frame of size avpkt->size from avpkt->data into picture. 
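The avcodec.h hunks above define the caller-visible side of the new audio API: the decoder requests its own buffer through get_buffer(), describes the result with AVFrame.nb_samples and the data/extended_data planes, and reports success through got_frame_ptr. A minimal caller-side sketch of that contract, assuming an already opened decoder context (dec_ctx) and a demuxed packet (pkt) — these names, like decode_audio_packet itself, are illustrative and not part of the patch series:

#include <libavcodec/avcodec.h>
#include <libavutil/samplefmt.h>

/* Decode one packet with avcodec_decode_audio4() and return the number
 * of bytes consumed from pkt, or a negative error code. */
static int decode_audio_packet(AVCodecContext *dec_ctx, AVPacket *pkt)
{
    AVFrame frame;
    int got_frame = 0;
    int len, data_size;

    avcodec_get_frame_defaults(&frame);

    /* The decoder allocates the output through avctx->get_buffer() and
     * fills frame.data[]/frame.extended_data[], frame.linesize[0] and
     * frame.nb_samples before returning. */
    len = avcodec_decode_audio4(dec_ctx, &frame, &got_frame, pkt);
    if (len < 0)
        return len;

    if (got_frame) {
        /* Total size in bytes of the decoded audio: for packed sample
         * formats it is all in frame.data[0]; for planar formats each
         * channel occupies its own frame.extended_data[ch] plane. */
        data_size = av_samples_get_buffer_size(NULL, dec_ctx->channels,
                                               frame.nb_samples,
                                               dec_ctx->sample_fmt, 1);
        /* ... pass data_size bytes to the output stage here ... */
    }

    return len;
}

The decoder-side half of the same contract — set frame.nb_samples, call avctx->get_buffer(), write the samples, set *got_frame_ptr and copy the frame into the output argument — is exactly the pattern each of the decoder conversions below repeats.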
diff --git a/libavcodec/binkaudio.c b/libavcodec/binkaudio.c index d917e7a12c..1dceeb74c3 100644 --- a/libavcodec/binkaudio.c +++ b/libavcodec/binkaudio.c @@ -45,6 +45,7 @@ static float quant_table[96]; #define BINK_BLOCK_MAX_SIZE (MAX_CHANNELS << 11) typedef struct { + AVFrame frame; GetBitContext gb; DSPContext dsp; FmtConvertContext fmt_conv; @@ -147,6 +148,9 @@ static av_cold int decode_init(AVCodecContext *avctx) else return -1; + avcodec_get_frame_defaults(&s->frame); + avctx->coded_frame = &s->frame; + return 0; } @@ -293,6 +297,7 @@ static av_cold int decode_end(AVCodecContext *avctx) ff_rdft_end(&s->trans.rdft); else if (CONFIG_BINKAUDIO_DCT_DECODER) ff_dct_end(&s->trans.dct); + return 0; } @@ -302,20 +307,19 @@ static void get_bits_align32(GetBitContext *s) if (n) skip_bits(s, n); } -static int decode_frame(AVCodecContext *avctx, - void *data, int *data_size, - AVPacket *avpkt) +static int decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { BinkAudioContext *s = avctx->priv_data; - int16_t *samples = data; + int16_t *samples; GetBitContext *gb = &s->gb; - int out_size, consumed = 0; + int ret, consumed = 0; if (!get_bits_left(gb)) { uint8_t *buf; /* handle end-of-stream */ if (!avpkt->size) { - *data_size = 0; + *got_frame_ptr = 0; return 0; } if (avpkt->size < 4) { @@ -334,11 +338,13 @@ static int decode_frame(AVCodecContext *avctx, skip_bits_long(gb, 32); } - out_size = s->block_size * av_get_bytes_per_sample(avctx->sample_fmt); - if (*data_size < out_size) { - av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); - return AVERROR(EINVAL); + /* get output buffer */ + s->frame.nb_samples = s->block_size / avctx->channels; + if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } + samples = (int16_t *)s->frame.data[0]; if (decode_block(s, samples, avctx->codec->id == CODEC_ID_BINKAUDIO_DCT)) { av_log(avctx, AV_LOG_ERROR, "Incomplete packet\n"); @@ -346,7 +352,9 @@ static int decode_frame(AVCodecContext *avctx, } get_bits_align32(gb); - *data_size = out_size; + *got_frame_ptr = 1; + *(AVFrame *)data = s->frame; + return consumed; } @@ -358,7 +366,7 @@ AVCodec ff_binkaudio_rdft_decoder = { .init = decode_init, .close = decode_end, .decode = decode_frame, - .capabilities = CODEC_CAP_DELAY, + .capabilities = CODEC_CAP_DELAY | CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Bink Audio (RDFT)") }; @@ -370,6 +378,6 @@ AVCodec ff_binkaudio_dct_decoder = { .init = decode_init, .close = decode_end, .decode = decode_frame, - .capabilities = CODEC_CAP_DELAY, + .capabilities = CODEC_CAP_DELAY | CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Bink Audio (DCT)") }; diff --git a/libavcodec/cook.c b/libavcodec/cook.c index 8b0a351495..81a1aae9d1 100644 --- a/libavcodec/cook.c +++ b/libavcodec/cook.c @@ -122,6 +122,7 @@ typedef struct cook { void (* saturate_output) (struct cook *q, int chan, float *out); AVCodecContext* avctx; + AVFrame frame; GetBitContext gb; /* stream data */ int nb_channels; @@ -131,6 +132,7 @@ typedef struct cook { int samples_per_channel; /* states */ AVLFG random_state; + int discarded_packets; /* transform data */ FFTContext mdct_ctx; @@ -896,7 +898,8 @@ mlt_compensate_output(COOKContext *q, float *decode_buffer, float *out, int chan) { imlt_gain(q, decode_buffer, gains_ptr, previous_buffer); - q->saturate_output (q, chan, out); + if (out) + q->saturate_output(q, chan, out); } @@ -953,24 +956,28 @@ static void decode_subpacket(COOKContext *q, 
COOKSubpacket *p, * @param avctx pointer to the AVCodecContext */ -static int cook_decode_frame(AVCodecContext *avctx, - void *data, int *data_size, - AVPacket *avpkt) { +static int cook_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) +{ const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; COOKContext *q = avctx->priv_data; - int i, out_size; + float *samples = NULL; + int i, ret; int offset = 0; int chidx = 0; if (buf_size < avctx->block_align) return buf_size; - out_size = q->nb_channels * q->samples_per_channel * - av_get_bytes_per_sample(avctx->sample_fmt); - if (*data_size < out_size) { - av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); - return AVERROR(EINVAL); + /* get output buffer */ + if (q->discarded_packets >= 2) { + q->frame.nb_samples = q->samples_per_channel; + if ((ret = avctx->get_buffer(avctx, &q->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; + } + samples = (float *)q->frame.data[0]; } /* estimate subpacket sizes */ @@ -990,15 +997,21 @@ static int cook_decode_frame(AVCodecContext *avctx, q->subpacket[i].bits_per_subpacket = (q->subpacket[i].size*8)>>q->subpacket[i].bits_per_subpdiv; q->subpacket[i].ch_idx = chidx; av_log(avctx,AV_LOG_DEBUG,"subpacket[%i] size %i js %i %i block_align %i\n",i,q->subpacket[i].size,q->subpacket[i].joint_stereo,offset,avctx->block_align); - decode_subpacket(q, &q->subpacket[i], buf + offset, data); + decode_subpacket(q, &q->subpacket[i], buf + offset, samples); offset += q->subpacket[i].size; chidx += q->subpacket[i].num_channels; av_log(avctx,AV_LOG_DEBUG,"subpacket[%i] %i %i\n",i,q->subpacket[i].size * 8,get_bits_count(&q->gb)); } - *data_size = out_size; /* Discard the first two frames: no valid audio. */ - if (avctx->frame_number < 2) *data_size = 0; + if (q->discarded_packets < 2) { + q->discarded_packets++; + *got_frame_ptr = 0; + return avctx->block_align; + } + + *got_frame_ptr = 1; + *(AVFrame *)data = q->frame; return avctx->block_align; } @@ -1246,6 +1259,9 @@ static av_cold int cook_decode_init(AVCodecContext *avctx) else avctx->channel_layout = (avctx->channels==2) ? 
AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO; + avcodec_get_frame_defaults(&q->frame); + avctx->coded_frame = &q->frame; + #ifdef DEBUG dump_cook_context(q); #endif @@ -1262,5 +1278,6 @@ AVCodec ff_cook_decoder = .init = cook_decode_init, .close = cook_decode_close, .decode = cook_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("COOK"), }; diff --git a/libavcodec/dca.c b/libavcodec/dca.c index 21a245585d..e3f87b92eb 100644 --- a/libavcodec/dca.c +++ b/libavcodec/dca.c @@ -261,6 +261,7 @@ static av_always_inline int get_bitalloc(GetBitContext *gb, BitAlloc *ba, int id typedef struct { AVCodecContext *avctx; + AVFrame frame; /* Frame header */ int frame_type; ///< type of the current frame int samples_deficit; ///< deficit sample count @@ -1635,9 +1636,8 @@ static void dca_exss_parse_header(DCAContext *s) * Main frame decoding function * FIXME add arguments */ -static int dca_decode_frame(AVCodecContext * avctx, - void *data, int *data_size, - AVPacket *avpkt) +static int dca_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; @@ -1645,9 +1645,8 @@ static int dca_decode_frame(AVCodecContext * avctx, int lfe_samples; int num_core_channels = 0; int i, ret; - float *samples_flt = data; - int16_t *samples_s16 = data; - int out_size; + float *samples_flt; + int16_t *samples_s16; DCAContext *s = avctx->priv_data; int channels; int core_ss_end; @@ -1839,11 +1838,14 @@ static int dca_decode_frame(AVCodecContext * avctx, return AVERROR_PATCHWELCOME; } - out_size = 256 / 8 * s->sample_blocks * channels * - av_get_bytes_per_sample(avctx->sample_fmt); - if (*data_size < out_size) - return AVERROR(EINVAL); - *data_size = out_size; + /* get output buffer */ + s->frame.nb_samples = 256 * (s->sample_blocks / 8); + if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; + } + samples_flt = (float *)s->frame.data[0]; + samples_s16 = (int16_t *)s->frame.data[0]; /* filter to get final output */ for (i = 0; i < (s->sample_blocks / 8); i++) { @@ -1877,6 +1879,9 @@ static int dca_decode_frame(AVCodecContext * avctx, s->lfe_data[i] = s->lfe_data[i + lfe_samples]; } + *got_frame_ptr = 1; + *(AVFrame *)data = s->frame; + return buf_size; } @@ -1919,6 +1924,9 @@ static av_cold int dca_decode_init(AVCodecContext * avctx) avctx->channels = avctx->request_channels; } + avcodec_get_frame_defaults(&s->frame); + avctx->coded_frame = &s->frame; + return 0; } @@ -1947,7 +1955,7 @@ AVCodec ff_dca_decoder = { .decode = dca_decode_frame, .close = dca_decode_end, .long_name = NULL_IF_CONFIG_SMALL("DCA (DTS Coherent Acoustics)"), - .capabilities = CODEC_CAP_CHANNEL_CONF, + .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1, .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE }, diff --git a/libavcodec/dpcm.c b/libavcodec/dpcm.c index abb2019306..935f67caca 100644 --- a/libavcodec/dpcm.c +++ b/libavcodec/dpcm.c @@ -42,6 +42,7 @@ #include "bytestream.h" typedef struct DPCMContext { + AVFrame frame; int channels; int16_t roq_square_array[256]; int sample[2]; ///< previous sample (for SOL_DPCM) @@ -162,22 +163,25 @@ static av_cold int dpcm_decode_init(AVCodecContext *avctx) else avctx->sample_fmt = AV_SAMPLE_FMT_S16; + avcodec_get_frame_defaults(&s->frame); + avctx->coded_frame = &s->frame; + return 0; } -static int dpcm_decode_frame(AVCodecContext *avctx, void *data, int *data_size, - 
AVPacket *avpkt) +static int dpcm_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; const uint8_t *buf_end = buf + buf_size; DPCMContext *s = avctx->priv_data; - int out = 0; + int out = 0, ret; int predictor[2]; int ch = 0; int stereo = s->channels - 1; - int16_t *output_samples = data; + int16_t *output_samples; /* calculate output size */ switch(avctx->codec->id) { @@ -197,15 +201,18 @@ static int dpcm_decode_frame(AVCodecContext *avctx, void *data, int *data_size, out = buf_size; break; } - out *= av_get_bytes_per_sample(avctx->sample_fmt); if (out <= 0) { av_log(avctx, AV_LOG_ERROR, "packet is too small\n"); return AVERROR(EINVAL); } - if (*data_size < out) { - av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n"); - return AVERROR(EINVAL); + + /* get output buffer */ + s->frame.nb_samples = out / s->channels; + if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } + output_samples = (int16_t *)s->frame.data[0]; switch(avctx->codec->id) { @@ -307,7 +314,9 @@ static int dpcm_decode_frame(AVCodecContext *avctx, void *data, int *data_size, break; } - *data_size = out; + *got_frame_ptr = 1; + *(AVFrame *)data = s->frame; + return buf_size; } @@ -319,6 +328,7 @@ AVCodec ff_ ## name_ ## _decoder = { \ .priv_data_size = sizeof(DPCMContext), \ .init = dpcm_decode_init, \ .decode = dpcm_decode_frame, \ + .capabilities = CODEC_CAP_DR1, \ .long_name = NULL_IF_CONFIG_SMALL(long_name_), \ } diff --git a/libavcodec/dsicinav.c b/libavcodec/dsicinav.c index cbf7c4a6f8..37d39f5405 100644 --- a/libavcodec/dsicinav.c +++ b/libavcodec/dsicinav.c @@ -44,6 +44,7 @@ typedef struct CinVideoContext { } CinVideoContext; typedef struct CinAudioContext { + AVFrame frame; int initial_decode_frame; int delta; } CinAudioContext; @@ -317,25 +318,28 @@ static av_cold int cinaudio_decode_init(AVCodecContext *avctx) cin->delta = 0; avctx->sample_fmt = AV_SAMPLE_FMT_S16; + avcodec_get_frame_defaults(&cin->frame); + avctx->coded_frame = &cin->frame; + return 0; } -static int cinaudio_decode_frame(AVCodecContext *avctx, - void *data, int *data_size, - AVPacket *avpkt) +static int cinaudio_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; CinAudioContext *cin = avctx->priv_data; const uint8_t *buf_end = buf + avpkt->size; - int16_t *samples = data; - int delta, out_size; - - out_size = (avpkt->size - cin->initial_decode_frame) * - av_get_bytes_per_sample(avctx->sample_fmt); - if (*data_size < out_size) { - av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); - return AVERROR(EINVAL); + int16_t *samples; + int delta, ret; + + /* get output buffer */ + cin->frame.nb_samples = avpkt->size - cin->initial_decode_frame; + if ((ret = avctx->get_buffer(avctx, &cin->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } + samples = (int16_t *)cin->frame.data[0]; delta = cin->delta; if (cin->initial_decode_frame) { @@ -351,7 +355,8 @@ static int cinaudio_decode_frame(AVCodecContext *avctx, } cin->delta = delta; - *data_size = out_size; + *got_frame_ptr = 1; + *(AVFrame *)data = cin->frame; return avpkt->size; } @@ -376,5 +381,6 @@ AVCodec ff_dsicinaudio_decoder = { .priv_data_size = sizeof(CinAudioContext), .init = cinaudio_decode_init, .decode = cinaudio_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Delphine 
Software International CIN audio"), }; diff --git a/libavcodec/flacdec.c b/libavcodec/flacdec.c index 95cf2bccb4..58eb66def9 100644 --- a/libavcodec/flacdec.c +++ b/libavcodec/flacdec.c @@ -49,6 +49,7 @@ typedef struct FLACContext { FLACSTREAMINFO AVCodecContext *avctx; ///< parent AVCodecContext + AVFrame frame; GetBitContext gb; ///< GetBitContext initialized to start at the current frame int blocksize; ///< number of samples in the current frame @@ -116,6 +117,9 @@ static av_cold int flac_decode_init(AVCodecContext *avctx) allocate_buffers(s); s->got_streaminfo = 1; + avcodec_get_frame_defaults(&s->frame); + avctx->coded_frame = &s->frame; + return 0; } @@ -542,20 +546,18 @@ static int decode_frame(FLACContext *s) return 0; } -static int flac_decode_frame(AVCodecContext *avctx, - void *data, int *data_size, - AVPacket *avpkt) +static int flac_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; FLACContext *s = avctx->priv_data; int i, j = 0, bytes_read = 0; - int16_t *samples_16 = data; - int32_t *samples_32 = data; - int alloc_data_size= *data_size; - int output_size; + int16_t *samples_16; + int32_t *samples_32; + int ret; - *data_size=0; + *got_frame_ptr = 0; if (s->max_framesize == 0) { s->max_framesize = @@ -586,15 +588,14 @@ static int flac_decode_frame(AVCodecContext *avctx, } bytes_read = (get_bits_count(&s->gb)+7)/8; - /* check if allocated data size is large enough for output */ - output_size = s->blocksize * s->channels * - av_get_bytes_per_sample(avctx->sample_fmt); - if (output_size > alloc_data_size) { - av_log(s->avctx, AV_LOG_ERROR, "output data size is larger than " - "allocated data size\n"); - return -1; + /* get output buffer */ + s->frame.nb_samples = s->blocksize; + if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } - *data_size = output_size; + samples_16 = (int16_t *)s->frame.data[0]; + samples_32 = (int32_t *)s->frame.data[0]; #define DECORRELATE(left, right)\ assert(s->channels == 2);\ @@ -639,6 +640,9 @@ static int flac_decode_frame(AVCodecContext *avctx, buf_size - bytes_read, buf_size); } + *got_frame_ptr = 1; + *(AVFrame *)data = s->frame; + return bytes_read; } @@ -662,5 +666,6 @@ AVCodec ff_flac_decoder = { .init = flac_decode_init, .close = flac_decode_close, .decode = flac_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name= NULL_IF_CONFIG_SMALL("FLAC (Free Lossless Audio Codec)"), }; diff --git a/libavcodec/g722.h b/libavcodec/g722.h index 5edb6c8119..69e7a86e25 100644 --- a/libavcodec/g722.h +++ b/libavcodec/g722.h @@ -26,10 +26,12 @@ #define AVCODEC_G722_H #include +#include "avcodec.h" #define PREV_SAMPLES_BUF_SIZE 1024 typedef struct { + AVFrame frame; int16_t prev_samples[PREV_SAMPLES_BUF_SIZE]; ///< memory of past decoded samples int prev_samples_pos; ///< the number of values in prev_samples diff --git a/libavcodec/g722dec.c b/libavcodec/g722dec.c index 2be47159a4..652a1aa4ae 100644 --- a/libavcodec/g722dec.c +++ b/libavcodec/g722dec.c @@ -66,6 +66,9 @@ static av_cold int g722_decode_init(AVCodecContext * avctx) c->band[1].scale_factor = 2; c->prev_samples_pos = 22; + avcodec_get_frame_defaults(&c->frame); + avctx->coded_frame = &c->frame; + return 0; } @@ -81,20 +84,22 @@ static const int16_t *low_inv_quants[3] = { ff_g722_low_inv_quant6, ff_g722_low_inv_quant4 }; static int g722_decode_frame(AVCodecContext *avctx, void *data, - int *data_size, AVPacket *avpkt) + int 
*got_frame_ptr, AVPacket *avpkt) { G722Context *c = avctx->priv_data; - int16_t *out_buf = data; - int j, out_len; + int16_t *out_buf; + int j, ret; const int skip = 8 - avctx->bits_per_coded_sample; const int16_t *quantizer_table = low_inv_quants[skip]; GetBitContext gb; - out_len = avpkt->size * 2 * av_get_bytes_per_sample(avctx->sample_fmt); - if (*data_size < out_len) { - av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); - return AVERROR(EINVAL); + /* get output buffer */ + c->frame.nb_samples = avpkt->size * 2; + if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } + out_buf = (int16_t *)c->frame.data[0]; init_get_bits(&gb, avpkt->data, avpkt->size * 8); @@ -128,7 +133,10 @@ static int g722_decode_frame(AVCodecContext *avctx, void *data, c->prev_samples_pos = 22; } } - *data_size = out_len; + + *got_frame_ptr = 1; + *(AVFrame *)data = c->frame; + return avpkt->size; } @@ -139,5 +147,6 @@ AVCodec ff_adpcm_g722_decoder = { .priv_data_size = sizeof(G722Context), .init = g722_decode_init, .decode = g722_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("G.722 ADPCM"), }; diff --git a/libavcodec/g726.c b/libavcodec/g726.c index 37b0adf3b4..85711f854c 100644 --- a/libavcodec/g726.c +++ b/libavcodec/g726.c @@ -75,6 +75,7 @@ typedef struct G726Tables { typedef struct G726Context { AVClass *class; + AVFrame frame; G726Tables tbls; /**< static tables needed for computation */ Float11 sr[2]; /**< prev. reconstructed samples */ @@ -427,26 +428,31 @@ static av_cold int g726_decode_init(AVCodecContext *avctx) avctx->sample_fmt = AV_SAMPLE_FMT_S16; + avcodec_get_frame_defaults(&c->frame); + avctx->coded_frame = &c->frame; + return 0; } -static int g726_decode_frame(AVCodecContext *avctx, - void *data, int *data_size, - AVPacket *avpkt) +static int g726_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; G726Context *c = avctx->priv_data; - int16_t *samples = data; + int16_t *samples; GetBitContext gb; - int out_samples, out_size; + int out_samples, ret; out_samples = buf_size * 8 / c->code_size; - out_size = out_samples * av_get_bytes_per_sample(avctx->sample_fmt); - if (*data_size < out_size) { - av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); - return AVERROR(EINVAL); + + /* get output buffer */ + c->frame.nb_samples = out_samples; + if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } + samples = (int16_t *)c->frame.data[0]; init_get_bits(&gb, buf, buf_size * 8); @@ -456,7 +462,9 @@ static int g726_decode_frame(AVCodecContext *avctx, if (get_bits_left(&gb) > 0) av_log(avctx, AV_LOG_ERROR, "Frame invalidly split, missing parser?\n"); - *data_size = out_size; + *got_frame_ptr = 1; + *(AVFrame *)data = c->frame; + return buf_size; } @@ -474,6 +482,7 @@ AVCodec ff_adpcm_g726_decoder = { .init = g726_decode_init, .decode = g726_decode_frame, .flush = g726_decode_flush, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("G.726 ADPCM"), }; #endif diff --git a/libavcodec/gsmdec.c b/libavcodec/gsmdec.c index 1091745f4b..97b6fe8492 100644 --- a/libavcodec/gsmdec.c +++ b/libavcodec/gsmdec.c @@ -32,6 +32,8 @@ static av_cold int gsm_init(AVCodecContext *avctx) { + GSMContext *s = avctx->priv_data; + avctx->channels = 1; if (!avctx->sample_rate) avctx->sample_rate = 8000; @@ -47,30 +49,35 @@ static 
av_cold int gsm_init(AVCodecContext *avctx) avctx->block_align = GSM_MS_BLOCK_SIZE; } + avcodec_get_frame_defaults(&s->frame); + avctx->coded_frame = &s->frame; + return 0; } static int gsm_decode_frame(AVCodecContext *avctx, void *data, - int *data_size, AVPacket *avpkt) + int *got_frame_ptr, AVPacket *avpkt) { + GSMContext *s = avctx->priv_data; int res; GetBitContext gb; const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; - int16_t *samples = data; - int frame_bytes = avctx->frame_size * - av_get_bytes_per_sample(avctx->sample_fmt); - - if (*data_size < frame_bytes) { - av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); - return AVERROR(EINVAL); - } + int16_t *samples; if (buf_size < avctx->block_align) { av_log(avctx, AV_LOG_ERROR, "Packet is too small\n"); return AVERROR_INVALIDDATA; } + /* get output buffer */ + s->frame.nb_samples = avctx->frame_size; + if ((res = avctx->get_buffer(avctx, &s->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return res; + } + samples = (int16_t *)s->frame.data[0]; + switch (avctx->codec_id) { case CODEC_ID_GSM: init_get_bits(&gb, buf, buf_size * 8); @@ -85,7 +92,10 @@ static int gsm_decode_frame(AVCodecContext *avctx, void *data, if (res < 0) return res; } - *data_size = frame_bytes; + + *got_frame_ptr = 1; + *(AVFrame *)data = s->frame; + return avctx->block_align; } @@ -103,6 +113,7 @@ AVCodec ff_gsm_decoder = { .init = gsm_init, .decode = gsm_decode_frame, .flush = gsm_flush, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("GSM"), }; @@ -114,5 +125,6 @@ AVCodec ff_gsm_ms_decoder = { .init = gsm_init, .decode = gsm_decode_frame, .flush = gsm_flush, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("GSM Microsoft variant"), }; diff --git a/libavcodec/gsmdec_data.h b/libavcodec/gsmdec_data.h index b78daa7335..21789f725b 100644 --- a/libavcodec/gsmdec_data.h +++ b/libavcodec/gsmdec_data.h @@ -23,6 +23,7 @@ #define AVCODEC_GSMDEC_DATA #include +#include "avcodec.h" // input and output sizes in byte #define GSM_BLOCK_SIZE 33 @@ -30,6 +31,7 @@ #define GSM_FRAME_SIZE 160 typedef struct { + AVFrame frame; // Contains first 120 elements from the previous frame // (used by long_term_synth according to the "lag"), // then in the following 160 elements the current diff --git a/libavcodec/imc.c b/libavcodec/imc.c index 1f1db6c121..b55eee9b70 100644 --- a/libavcodec/imc.c +++ b/libavcodec/imc.c @@ -51,6 +51,8 @@ #define COEFFS 256 typedef struct { + AVFrame frame; + float old_floor[BANDS]; float flcoeffs1[BANDS]; float flcoeffs2[BANDS]; @@ -168,6 +170,10 @@ static av_cold int imc_decode_init(AVCodecContext * avctx) dsputil_init(&q->dsp, avctx); avctx->sample_fmt = AV_SAMPLE_FMT_FLT; avctx->channel_layout = AV_CH_LAYOUT_MONO; + + avcodec_get_frame_defaults(&q->frame); + avctx->coded_frame = &q->frame; + return 0; } @@ -649,9 +655,8 @@ static int imc_get_coeffs (IMCContext* q) { return 0; } -static int imc_decode_frame(AVCodecContext * avctx, - void *data, int *data_size, - AVPacket *avpkt) +static int imc_decode_frame(AVCodecContext * avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; @@ -659,7 +664,7 @@ static int imc_decode_frame(AVCodecContext * avctx, IMCContext *q = avctx->priv_data; int stream_format_code; - int imc_hdr, i, j, out_size, ret; + int imc_hdr, i, j, ret; int flag; int bits, summer; int counter, bitscount; @@ -670,15 +675,16 @@ static int imc_decode_frame(AVCodecContext * avctx, return 
AVERROR_INVALIDDATA; } - out_size = COEFFS * av_get_bytes_per_sample(avctx->sample_fmt); - if (*data_size < out_size) { - av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); - return AVERROR(EINVAL); + /* get output buffer */ + q->frame.nb_samples = COEFFS; + if ((ret = avctx->get_buffer(avctx, &q->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } + q->out_samples = (float *)q->frame.data[0]; q->dsp.bswap16_buf(buf16, (const uint16_t*)buf, IMC_BLOCK_SIZE / 2); - q->out_samples = data; init_get_bits(&q->gb, (const uint8_t*)buf16, IMC_BLOCK_SIZE * 8); /* Check the frame header */ @@ -823,7 +829,8 @@ static int imc_decode_frame(AVCodecContext * avctx, imc_imdct256(q); - *data_size = out_size; + *got_frame_ptr = 1; + *(AVFrame *)data = q->frame; return IMC_BLOCK_SIZE; } @@ -834,6 +841,7 @@ static av_cold int imc_decode_close(AVCodecContext * avctx) IMCContext *q = avctx->priv_data; ff_fft_end(&q->fft); + return 0; } @@ -846,5 +854,6 @@ AVCodec ff_imc_decoder = { .init = imc_decode_init, .close = imc_decode_close, .decode = imc_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("IMC (Intel Music Coder)"), }; diff --git a/libavcodec/internal.h b/libavcodec/internal.h index 18e851c48e..fb011c7a3a 100644 --- a/libavcodec/internal.h +++ b/libavcodec/internal.h @@ -31,12 +31,15 @@ typedef struct InternalBuffer { int last_pic_num; - uint8_t *base[4]; - uint8_t *data[4]; - int linesize[4]; + uint8_t *base[AV_NUM_DATA_POINTERS]; + uint8_t *data[AV_NUM_DATA_POINTERS]; + int linesize[AV_NUM_DATA_POINTERS]; int width; int height; enum PixelFormat pix_fmt; + uint8_t **extended_data; + int audio_data_size; + int nb_channels; } InternalBuffer; typedef struct AVCodecInternal { diff --git a/libavcodec/libgsm.c b/libavcodec/libgsm.c index c02594d0d6..22629c657c 100644 --- a/libavcodec/libgsm.c +++ b/libavcodec/libgsm.c @@ -124,7 +124,14 @@ AVCodec ff_libgsm_ms_encoder = { .long_name = NULL_IF_CONFIG_SMALL("libgsm GSM Microsoft variant"), }; +typedef struct LibGSMDecodeContext { + AVFrame frame; + struct gsm_state *state; +} LibGSMDecodeContext; + static av_cold int libgsm_decode_init(AVCodecContext *avctx) { + LibGSMDecodeContext *s = avctx->priv_data; + if (avctx->channels > 1) { av_log(avctx, AV_LOG_ERROR, "Mono required for GSM, got %d channels\n", avctx->channels); @@ -139,7 +146,7 @@ static av_cold int libgsm_decode_init(AVCodecContext *avctx) { avctx->sample_fmt = AV_SAMPLE_FMT_S16; - avctx->priv_data = gsm_create(); + s->state = gsm_create(); switch(avctx->codec_id) { case CODEC_ID_GSM: @@ -154,59 +161,72 @@ static av_cold int libgsm_decode_init(AVCodecContext *avctx) { } } + avcodec_get_frame_defaults(&s->frame); + avctx->coded_frame = &s->frame; + return 0; } static av_cold int libgsm_decode_close(AVCodecContext *avctx) { - gsm_destroy(avctx->priv_data); - avctx->priv_data = NULL; + LibGSMDecodeContext *s = avctx->priv_data; + + gsm_destroy(s->state); + s->state = NULL; return 0; } -static int libgsm_decode_frame(AVCodecContext *avctx, - void *data, int *data_size, - AVPacket *avpkt) { +static int libgsm_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) +{ int i, ret; - struct gsm_state *s = avctx->priv_data; + LibGSMDecodeContext *s = avctx->priv_data; uint8_t *buf = avpkt->data; int buf_size = avpkt->size; - int16_t *samples = data; - int out_size = avctx->frame_size * av_get_bytes_per_sample(avctx->sample_fmt); - - if (*data_size < out_size) { - av_log(avctx, AV_LOG_ERROR, "Output buffer 
is too small\n"); - return AVERROR(EINVAL); - } + int16_t *samples; if (buf_size < avctx->block_align) { av_log(avctx, AV_LOG_ERROR, "Packet is too small\n"); return AVERROR_INVALIDDATA; } + /* get output buffer */ + s->frame.nb_samples = avctx->frame_size; + if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; + } + samples = (int16_t *)s->frame.data[0]; + for (i = 0; i < avctx->frame_size / GSM_FRAME_SIZE; i++) { - if ((ret = gsm_decode(s, buf, samples)) < 0) + if ((ret = gsm_decode(s->state, buf, samples)) < 0) return -1; buf += GSM_BLOCK_SIZE; samples += GSM_FRAME_SIZE; } - *data_size = out_size; + *got_frame_ptr = 1; + *(AVFrame *)data = s->frame; + return avctx->block_align; } static void libgsm_flush(AVCodecContext *avctx) { - gsm_destroy(avctx->priv_data); - avctx->priv_data = gsm_create(); + LibGSMDecodeContext *s = avctx->priv_data; + + gsm_destroy(s->state); + s->state = gsm_create(); } AVCodec ff_libgsm_decoder = { .name = "libgsm", .type = AVMEDIA_TYPE_AUDIO, .id = CODEC_ID_GSM, + .priv_data_size = sizeof(LibGSMDecodeContext), .init = libgsm_decode_init, .close = libgsm_decode_close, .decode = libgsm_decode_frame, .flush = libgsm_flush, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("libgsm GSM"), }; @@ -214,9 +234,11 @@ AVCodec ff_libgsm_ms_decoder = { .name = "libgsm_ms", .type = AVMEDIA_TYPE_AUDIO, .id = CODEC_ID_GSM_MS, + .priv_data_size = sizeof(LibGSMDecodeContext), .init = libgsm_decode_init, .close = libgsm_decode_close, .decode = libgsm_decode_frame, .flush = libgsm_flush, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("libgsm GSM Microsoft variant"), }; diff --git a/libavcodec/libopencore-amr.c b/libavcodec/libopencore-amr.c index a705975aa9..ded92179d3 100644 --- a/libavcodec/libopencore-amr.c +++ b/libavcodec/libopencore-amr.c @@ -79,6 +79,7 @@ static int get_bitrate_mode(int bitrate, void *log_ctx) typedef struct AMRContext { AVClass *av_class; + AVFrame frame; void *dec_state; void *enc_state; int enc_bitrate; @@ -112,6 +113,9 @@ static av_cold int amr_nb_decode_init(AVCodecContext *avctx) return AVERROR(ENOSYS); } + avcodec_get_frame_defaults(&s->frame); + avctx->coded_frame = &s->frame; + return 0; } @@ -120,26 +124,28 @@ static av_cold int amr_nb_decode_close(AVCodecContext *avctx) AMRContext *s = avctx->priv_data; Decoder_Interface_exit(s->dec_state); + return 0; } static int amr_nb_decode_frame(AVCodecContext *avctx, void *data, - int *data_size, AVPacket *avpkt) + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; AMRContext *s = avctx->priv_data; static const uint8_t block_size[16] = { 12, 13, 15, 17, 19, 20, 26, 31, 5, 0, 0, 0, 0, 0, 0, 0 }; enum Mode dec_mode; - int packet_size, out_size; + int packet_size, ret; av_dlog(avctx, "amr_decode_frame buf=%p buf_size=%d frame_count=%d!!\n", buf, buf_size, avctx->frame_number); - out_size = 160 * av_get_bytes_per_sample(avctx->sample_fmt); - if (*data_size < out_size) { - av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n"); - return AVERROR(EINVAL); + /* get output buffer */ + s->frame.nb_samples = 160; + if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } dec_mode = (buf[0] >> 3) & 0x000F; @@ -154,8 +160,10 @@ static int amr_nb_decode_frame(AVCodecContext *avctx, void *data, av_dlog(avctx, "packet_size=%d buf= 0x%X %X %X %X\n", packet_size, buf[0], buf[1], buf[2], 
buf[3]); /* call decoder */ - Decoder_Interface_Decode(s->dec_state, buf, data, 0); - *data_size = out_size; + Decoder_Interface_Decode(s->dec_state, buf, (short *)s->frame.data[0], 0); + + *got_frame_ptr = 1; + *(AVFrame *)data = s->frame; return packet_size; } @@ -168,6 +176,7 @@ AVCodec ff_libopencore_amrnb_decoder = { .init = amr_nb_decode_init, .close = amr_nb_decode_close, .decode = amr_nb_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("OpenCORE Adaptive Multi-Rate (AMR) Narrow-Band"), }; @@ -251,6 +260,7 @@ AVCodec ff_libopencore_amrnb_encoder = { #include typedef struct AMRWBContext { + AVFrame frame; void *state; } AMRWBContext; @@ -267,23 +277,27 @@ static av_cold int amr_wb_decode_init(AVCodecContext *avctx) return AVERROR(ENOSYS); } + avcodec_get_frame_defaults(&s->frame); + avctx->coded_frame = &s->frame; + return 0; } static int amr_wb_decode_frame(AVCodecContext *avctx, void *data, - int *data_size, AVPacket *avpkt) + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; AMRWBContext *s = avctx->priv_data; - int mode; - int packet_size, out_size; + int mode, ret; + int packet_size; static const uint8_t block_size[16] = {18, 24, 33, 37, 41, 47, 51, 59, 61, 6, 6, 0, 0, 0, 1, 1}; - out_size = 320 * av_get_bytes_per_sample(avctx->sample_fmt); - if (*data_size < out_size) { - av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n"); - return AVERROR(EINVAL); + /* get output buffer */ + s->frame.nb_samples = 320; + if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } mode = (buf[0] >> 3) & 0x000F; @@ -295,8 +309,11 @@ static int amr_wb_decode_frame(AVCodecContext *avctx, void *data, return AVERROR_INVALIDDATA; } - D_IF_decode(s->state, buf, data, _good_frame); - *data_size = out_size; + D_IF_decode(s->state, buf, (short *)s->frame.data[0], _good_frame); + + *got_frame_ptr = 1; + *(AVFrame *)data = s->frame; + return packet_size; } @@ -316,6 +333,7 @@ AVCodec ff_libopencore_amrwb_decoder = { .init = amr_wb_decode_init, .close = amr_wb_decode_close, .decode = amr_wb_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("OpenCORE Adaptive Multi-Rate (AMR) Wide-Band"), }; diff --git a/libavcodec/libspeexdec.c b/libavcodec/libspeexdec.c index 8bbae6c4f3..eba2f16949 100644 --- a/libavcodec/libspeexdec.c +++ b/libavcodec/libspeexdec.c @@ -25,6 +25,7 @@ #include "avcodec.h" typedef struct { + AVFrame frame; SpeexBits bits; SpeexStereoState stereo; void *dec_state; @@ -89,26 +90,29 @@ static av_cold int libspeex_decode_init(AVCodecContext *avctx) s->stereo = (SpeexStereoState)SPEEX_STEREO_STATE_INIT; speex_decoder_ctl(s->dec_state, SPEEX_SET_HANDLER, &callback); } + + avcodec_get_frame_defaults(&s->frame); + avctx->coded_frame = &s->frame; + return 0; } -static int libspeex_decode_frame(AVCodecContext *avctx, - void *data, int *data_size, - AVPacket *avpkt) +static int libspeex_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { uint8_t *buf = avpkt->data; int buf_size = avpkt->size; LibSpeexContext *s = avctx->priv_data; - int16_t *output = data; - int out_size, ret, consumed = 0; - - /* check output buffer size */ - out_size = s->frame_size * avctx->channels * - av_get_bytes_per_sample(avctx->sample_fmt); - if (*data_size < out_size) { - av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); - return AVERROR(EINVAL); + int16_t *output; + int ret, consumed = 0; + + 
/* get output buffer */ + s->frame.nb_samples = s->frame_size; + if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } + output = (int16_t *)s->frame.data[0]; /* if there is not enough data left for the smallest possible frame, reset the libspeex buffer using the current packet, otherwise ignore @@ -116,7 +120,7 @@ static int libspeex_decode_frame(AVCodecContext *avctx, if (speex_bits_remaining(&s->bits) < 43) { /* check for flush packet */ if (!buf || !buf_size) { - *data_size = 0; + *got_frame_ptr = 0; return buf_size; } /* set new buffer */ @@ -133,7 +137,9 @@ static int libspeex_decode_frame(AVCodecContext *avctx, if (avctx->channels == 2) speex_decode_stereo_int(output, s->frame_size, &s->stereo); - *data_size = out_size; + *got_frame_ptr = 1; + *(AVFrame *)data = s->frame; + return consumed; } @@ -163,6 +169,6 @@ AVCodec ff_libspeex_decoder = { .close = libspeex_decode_close, .decode = libspeex_decode_frame, .flush = libspeex_decode_flush, - .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DELAY, + .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DELAY | CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("libspeex Speex"), }; diff --git a/libavcodec/mace.c b/libavcodec/mace.c index a55a041696..792d71d072 100644 --- a/libavcodec/mace.c +++ b/libavcodec/mace.c @@ -153,6 +153,7 @@ typedef struct ChannelData { } ChannelData; typedef struct MACEContext { + AVFrame frame; ChannelData chd[2]; } MACEContext; @@ -228,30 +229,35 @@ static void chomp6(ChannelData *chd, int16_t *output, uint8_t val, static av_cold int mace_decode_init(AVCodecContext * avctx) { + MACEContext *ctx = avctx->priv_data; + if (avctx->channels > 2) return -1; avctx->sample_fmt = AV_SAMPLE_FMT_S16; + + avcodec_get_frame_defaults(&ctx->frame); + avctx->coded_frame = &ctx->frame; + return 0; } -static int mace_decode_frame(AVCodecContext *avctx, - void *data, int *data_size, - AVPacket *avpkt) +static int mace_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; - int16_t *samples = data; + int16_t *samples; MACEContext *ctx = avctx->priv_data; - int i, j, k, l; - int out_size; + int i, j, k, l, ret; int is_mace3 = (avctx->codec_id == CODEC_ID_MACE3); - out_size = 3 * (buf_size << (1 - is_mace3)) * - av_get_bytes_per_sample(avctx->sample_fmt); - if (*data_size < out_size) { - av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); - return AVERROR(EINVAL); + /* get output buffer */ + ctx->frame.nb_samples = 3 * (buf_size << (1 - is_mace3)) / avctx->channels; + if ((ret = avctx->get_buffer(avctx, &ctx->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } + samples = (int16_t *)ctx->frame.data[0]; for(i = 0; i < avctx->channels; i++) { int16_t *output = samples + i; @@ -277,7 +283,8 @@ static int mace_decode_frame(AVCodecContext *avctx, } } - *data_size = out_size; + *got_frame_ptr = 1; + *(AVFrame *)data = ctx->frame; return buf_size; } @@ -289,6 +296,7 @@ AVCodec ff_mace3_decoder = { .priv_data_size = sizeof(MACEContext), .init = mace_decode_init, .decode = mace_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("MACE (Macintosh Audio Compression/Expansion) 3:1"), }; @@ -299,6 +307,7 @@ AVCodec ff_mace6_decoder = { .priv_data_size = sizeof(MACEContext), .init = mace_decode_init, .decode = mace_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("MACE (Macintosh 
Audio Compression/Expansion) 6:1"), }; diff --git a/libavcodec/mlpdec.c b/libavcodec/mlpdec.c index cefd0b5614..4dc2d9f3eb 100644 --- a/libavcodec/mlpdec.c +++ b/libavcodec/mlpdec.c @@ -120,6 +120,7 @@ typedef struct SubStream { typedef struct MLPDecodeContext { AVCodecContext *avctx; + AVFrame frame; //! Current access unit being read has a major sync. int is_major_sync_unit; @@ -239,6 +240,9 @@ static av_cold int mlp_decode_init(AVCodecContext *avctx) m->substream[substr].lossless_check_data = 0xffffffff; dsputil_init(&m->dsp, avctx); + avcodec_get_frame_defaults(&m->frame); + avctx->coded_frame = &m->frame; + return 0; } @@ -905,13 +909,14 @@ static void rematrix_channels(MLPDecodeContext *m, unsigned int substr) /** Write the audio data into the output buffer. */ static int output_data(MLPDecodeContext *m, unsigned int substr, - uint8_t *data, unsigned int *data_size) + void *data, int *got_frame_ptr) { + AVCodecContext *avctx = m->avctx; SubStream *s = &m->substream[substr]; unsigned int i, out_ch = 0; - int out_size; - int32_t *data_32 = (int32_t*) data; - int16_t *data_16 = (int16_t*) data; + int32_t *data_32; + int16_t *data_16; + int ret; int is32 = (m->avctx->sample_fmt == AV_SAMPLE_FMT_S32); if (m->avctx->channels != s->max_matrix_channel + 1) { @@ -919,11 +924,14 @@ static int output_data(MLPDecodeContext *m, unsigned int substr, return AVERROR_INVALIDDATA; } - out_size = s->blockpos * m->avctx->channels * - av_get_bytes_per_sample(m->avctx->sample_fmt); - - if (*data_size < out_size) - return AVERROR(EINVAL); + /* get output buffer */ + m->frame.nb_samples = s->blockpos; + if ((ret = avctx->get_buffer(avctx, &m->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; + } + data_32 = (int32_t *)m->frame.data[0]; + data_16 = (int16_t *)m->frame.data[0]; for (i = 0; i < s->blockpos; i++) { for (out_ch = 0; out_ch <= s->max_matrix_channel; out_ch++) { @@ -936,7 +944,8 @@ static int output_data(MLPDecodeContext *m, unsigned int substr, } } - *data_size = out_size; + *got_frame_ptr = 1; + *(AVFrame *)data = m->frame; return 0; } @@ -945,8 +954,8 @@ static int output_data(MLPDecodeContext *m, unsigned int substr, * @return negative on error, 0 if not enough data is present in the input stream, * otherwise the number of bytes consumed. 
*/ -static int read_access_unit(AVCodecContext *avctx, void* data, int *data_size, - AVPacket *avpkt) +static int read_access_unit(AVCodecContext *avctx, void* data, + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; @@ -982,7 +991,7 @@ static int read_access_unit(AVCodecContext *avctx, void* data, int *data_size, if (!m->params_valid) { av_log(m->avctx, AV_LOG_WARNING, "Stream parameters not seen; skipping frame.\n"); - *data_size = 0; + *got_frame_ptr = 0; return length; } @@ -1127,7 +1136,7 @@ next_substr: rematrix_channels(m, m->max_decoded_substream); - if ((ret = output_data(m, m->max_decoded_substream, data, data_size)) < 0) + if ((ret = output_data(m, m->max_decoded_substream, data, got_frame_ptr)) < 0) return ret; return length; @@ -1148,6 +1157,7 @@ AVCodec ff_mlp_decoder = { .priv_data_size = sizeof(MLPDecodeContext), .init = mlp_decode_init, .decode = read_access_unit, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("MLP (Meridian Lossless Packing)"), }; @@ -1159,6 +1169,7 @@ AVCodec ff_truehd_decoder = { .priv_data_size = sizeof(MLPDecodeContext), .init = mlp_decode_init, .decode = read_access_unit, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("TrueHD"), }; #endif /* CONFIG_TRUEHD_DECODER */ diff --git a/libavcodec/mpc.h b/libavcodec/mpc.h index 6d0f7b45bb..1a6e7943af 100644 --- a/libavcodec/mpc.h +++ b/libavcodec/mpc.h @@ -50,6 +50,7 @@ typedef struct { }Band; typedef struct { + AVFrame frame; DSPContext dsp; MPADSPContext mpadsp; GetBitContext gb; diff --git a/libavcodec/mpc7.c b/libavcodec/mpc7.c index 576400d720..290ecfb385 100644 --- a/libavcodec/mpc7.c +++ b/libavcodec/mpc7.c @@ -136,6 +136,10 @@ static av_cold int mpc7_decode_init(AVCodecContext * avctx) } } vlc_initialized = 1; + + avcodec_get_frame_defaults(&c->frame); + avctx->coded_frame = &c->frame; + return 0; } @@ -192,9 +196,8 @@ static int get_scale_idx(GetBitContext *gb, int ref) return ref + t; } -static int mpc7_decode_frame(AVCodecContext * avctx, - void *data, int *data_size, - AVPacket *avpkt) +static int mpc7_decode_frame(AVCodecContext * avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; @@ -204,7 +207,7 @@ static int mpc7_decode_frame(AVCodecContext * avctx, int i, ch; int mb = -1; Band *bands = c->bands; - int off, out_size; + int off, ret; int bits_used, bits_avail; memset(bands, 0, sizeof(*bands) * (c->maxbands + 1)); @@ -213,10 +216,11 @@ static int mpc7_decode_frame(AVCodecContext * avctx, return AVERROR(EINVAL); } - out_size = (buf[1] ? c->lastframelen : MPC_FRAME_SIZE) * 4; - if (*data_size < out_size) { - av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); - return AVERROR(EINVAL); + /* get output buffer */ + c->frame.nb_samples = buf[1] ? 
c->lastframelen : MPC_FRAME_SIZE; + if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } bits = av_malloc(((buf_size - 1) & ~3) + FF_INPUT_BUFFER_PADDING_SIZE); @@ -276,7 +280,7 @@ static int mpc7_decode_frame(AVCodecContext * avctx, for(ch = 0; ch < 2; ch++) idx_to_quant(c, &gb, bands[i].res[ch], c->Q[ch] + off); - ff_mpc_dequantize_and_synth(c, mb, data, 2); + ff_mpc_dequantize_and_synth(c, mb, c->frame.data[0], 2); av_free(bits); @@ -288,10 +292,12 @@ static int mpc7_decode_frame(AVCodecContext * avctx, } if(c->frames_to_skip){ c->frames_to_skip--; - *data_size = 0; + *got_frame_ptr = 0; return buf_size; } - *data_size = out_size; + + *got_frame_ptr = 1; + *(AVFrame *)data = c->frame; return buf_size; } @@ -312,5 +318,6 @@ AVCodec ff_mpc7_decoder = { .init = mpc7_decode_init, .decode = mpc7_decode_frame, .flush = mpc7_decode_flush, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Musepack SV7"), }; diff --git a/libavcodec/mpc8.c b/libavcodec/mpc8.c index b38664215b..b97f3ed62c 100644 --- a/libavcodec/mpc8.c +++ b/libavcodec/mpc8.c @@ -228,12 +228,15 @@ static av_cold int mpc8_decode_init(AVCodecContext * avctx) &mpc8_q8_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC); } vlc_initialized = 1; + + avcodec_get_frame_defaults(&c->frame); + avctx->coded_frame = &c->frame; + return 0; } -static int mpc8_decode_frame(AVCodecContext * avctx, - void *data, int *data_size, - AVPacket *avpkt) +static int mpc8_decode_frame(AVCodecContext * avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; @@ -241,14 +244,15 @@ static int mpc8_decode_frame(AVCodecContext * avctx, GetBitContext gb2, *gb = &gb2; int i, j, k, ch, cnt, res, t; Band *bands = c->bands; - int off, out_size; + int off; int maxband, keyframe; int last[2]; - out_size = MPC_FRAME_SIZE * 2 * avctx->channels; - if (*data_size < out_size) { - av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); - return AVERROR(EINVAL); + /* get output buffer */ + c->frame.nb_samples = MPC_FRAME_SIZE; + if ((res = avctx->get_buffer(avctx, &c->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return res; } keyframe = c->cur_frame == 0; @@ -401,14 +405,16 @@ static int mpc8_decode_frame(AVCodecContext * avctx, } } - ff_mpc_dequantize_and_synth(c, maxband, data, avctx->channels); + ff_mpc_dequantize_and_synth(c, maxband, c->frame.data[0], avctx->channels); c->cur_frame++; c->last_bits_used = get_bits_count(gb); if(c->cur_frame >= c->frames) c->cur_frame = 0; - *data_size = out_size; + + *got_frame_ptr = 1; + *(AVFrame *)data = c->frame; return c->cur_frame ? 
c->last_bits_used >> 3 : buf_size; } @@ -420,5 +426,6 @@ AVCodec ff_mpc8_decoder = { .priv_data_size = sizeof(MPCContext), .init = mpc8_decode_init, .decode = mpc8_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Musepack SV8"), }; diff --git a/libavcodec/mpegaudiodec.c b/libavcodec/mpegaudiodec.c index ffd369021c..c819bc546f 100644 --- a/libavcodec/mpegaudiodec.c +++ b/libavcodec/mpegaudiodec.c @@ -79,6 +79,7 @@ typedef struct MPADecodeContext { int err_recognition; AVCodecContext* avctx; MPADSPContext mpadsp; + AVFrame frame; } MPADecodeContext; #if CONFIG_FLOAT @@ -474,6 +475,10 @@ static av_cold int decode_init(AVCodecContext * avctx) if (avctx->codec_id == CODEC_ID_MP3ADU) s->adu_mode = 1; + + avcodec_get_frame_defaults(&s->frame); + avctx->coded_frame = &s->frame; + return 0; } @@ -1695,7 +1700,7 @@ static int mp_decode_layer3(MPADecodeContext *s) static int mp_decode_frame(MPADecodeContext *s, OUT_INT *samples, const uint8_t *buf, int buf_size) { - int i, nb_frames, ch; + int i, nb_frames, ch, ret; OUT_INT *samples_ptr; init_get_bits(&s->gb, buf + HEADER_SIZE, (buf_size - HEADER_SIZE) * 8); @@ -1743,8 +1748,16 @@ static int mp_decode_frame(MPADecodeContext *s, OUT_INT *samples, assert(i <= buf_size - HEADER_SIZE && i >= 0); memcpy(s->last_buf + s->last_buf_size, s->gb.buffer + buf_size - HEADER_SIZE - i, i); s->last_buf_size += i; + } - break; + /* get output buffer */ + if (!samples) { + s->frame.nb_samples = s->avctx->frame_size; + if ((ret = s->avctx->get_buffer(s->avctx, &s->frame)) < 0) { + av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; + } + samples = (OUT_INT *)s->frame.data[0]; } /* apply the synthesis filter */ @@ -1764,7 +1777,7 @@ static int mp_decode_frame(MPADecodeContext *s, OUT_INT *samples, return nb_frames * 32 * sizeof(OUT_INT) * s->nb_channels; } -static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, +static int decode_frame(AVCodecContext * avctx, void *data, int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; @@ -1772,7 +1785,6 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, MPADecodeContext *s = avctx->priv_data; uint32_t header; int out_size; - OUT_INT *out_samples = data; if (buf_size < HEADER_SIZE) return AVERROR_INVALIDDATA; @@ -1795,10 +1807,6 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, avctx->bit_rate = s->bit_rate; avctx->sub_id = s->layer; - if (*data_size < avctx->frame_size * avctx->channels * sizeof(OUT_INT)) - return AVERROR(EINVAL); - *data_size = 0; - if (s->frame_size <= 0 || s->frame_size > buf_size) { av_log(avctx, AV_LOG_ERROR, "incomplete frame\n"); return AVERROR_INVALIDDATA; @@ -1807,9 +1815,10 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, buf_size= s->frame_size; } - out_size = mp_decode_frame(s, out_samples, buf, buf_size); + out_size = mp_decode_frame(s, NULL, buf, buf_size); if (out_size >= 0) { - *data_size = out_size; + *got_frame_ptr = 1; + *(AVFrame *)data = s->frame; avctx->sample_rate = s->sample_rate; //FIXME maybe move the other codec info stuff from above here too } else { @@ -1818,6 +1827,7 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, If there is more data in the packet, just consume the bad frame instead of returning an error, which would discard the whole packet. 
*/ + *got_frame_ptr = 0; if (buf_size == avpkt->size) return out_size; } @@ -1833,15 +1843,14 @@ static void flush(AVCodecContext *avctx) } #if CONFIG_MP3ADU_DECODER || CONFIG_MP3ADUFLOAT_DECODER -static int decode_frame_adu(AVCodecContext *avctx, void *data, int *data_size, - AVPacket *avpkt) +static int decode_frame_adu(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; MPADecodeContext *s = avctx->priv_data; uint32_t header; int len, out_size; - OUT_INT *out_samples = data; len = buf_size; @@ -1871,9 +1880,6 @@ static int decode_frame_adu(AVCodecContext *avctx, void *data, int *data_size, avctx->bit_rate = s->bit_rate; avctx->sub_id = s->layer; - if (*data_size < avctx->frame_size * avctx->channels * sizeof(OUT_INT)) - return AVERROR(EINVAL); - s->frame_size = len; #if FF_API_PARSE_FRAME @@ -1881,9 +1887,11 @@ static int decode_frame_adu(AVCodecContext *avctx, void *data, int *data_size, out_size = buf_size; else #endif - out_size = mp_decode_frame(s, out_samples, buf, buf_size); + out_size = mp_decode_frame(s, NULL, buf, buf_size); + + *got_frame_ptr = 1; + *(AVFrame *)data = s->frame; - *data_size = out_size; return buf_size; } #endif /* CONFIG_MP3ADU_DECODER || CONFIG_MP3ADUFLOAT_DECODER */ @@ -1894,6 +1902,7 @@ static int decode_frame_adu(AVCodecContext *avctx, void *data, int *data_size, * Context for MP3On4 decoder */ typedef struct MP3On4DecodeContext { + AVFrame *frame; int frames; ///< number of mp3 frames per block (number of mp3 decoder instances) int syncword; ///< syncword patch const uint8_t *coff; ///< channel offsets in output buffer @@ -1984,6 +1993,7 @@ static int decode_init_mp3on4(AVCodecContext * avctx) // Put decoder context in place to make init_decode() happy avctx->priv_data = s->mp3decctx[0]; decode_init(avctx); + s->frame = avctx->coded_frame; // Restore mp3on4 context pointer avctx->priv_data = s; s->mp3decctx[0]->adu_mode = 1; // Set adu mode @@ -2028,9 +2038,8 @@ static void flush_mp3on4(AVCodecContext *avctx) } -static int decode_frame_mp3on4(AVCodecContext * avctx, - void *data, int *data_size, - AVPacket *avpkt) +static int decode_frame_mp3on4(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; @@ -2038,14 +2047,17 @@ static int decode_frame_mp3on4(AVCodecContext * avctx, MPADecodeContext *m; int fsize, len = buf_size, out_size = 0; uint32_t header; - OUT_INT *out_samples = data; + OUT_INT *out_samples; OUT_INT *outptr, *bp; - int fr, j, n, ch; + int fr, j, n, ch, ret; - if (*data_size < MPA_FRAME_SIZE * avctx->channels * sizeof(OUT_INT)) { - av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n"); - return AVERROR(EINVAL); + /* get output buffer */ + s->frame->nb_samples = MPA_FRAME_SIZE; + if ((ret = avctx->get_buffer(avctx, s->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } + out_samples = (OUT_INT *)s->frame->data[0]; // Discard too short frames if (buf_size < HEADER_SIZE) @@ -2104,7 +2116,10 @@ static int decode_frame_mp3on4(AVCodecContext * avctx, /* update codec info */ avctx->sample_rate = s->mp3decctx[0]->sample_rate; - *data_size = out_size; + s->frame->nb_samples = out_size / (avctx->channels * sizeof(OUT_INT)); + *got_frame_ptr = 1; + *(AVFrame *)data = *s->frame; + return buf_size; } #endif /* CONFIG_MP3ON4_DECODER || CONFIG_MP3ON4FLOAT_DECODER */ @@ -2119,7 +2134,9 @@ AVCodec ff_mp1_decoder = { .init = decode_init, .decode = 
decode_frame, #if FF_API_PARSE_FRAME - .capabilities = CODEC_CAP_PARSE_ONLY, + .capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1, +#else + .capabilities = CODEC_CAP_DR1, #endif .flush = flush, .long_name = NULL_IF_CONFIG_SMALL("MP1 (MPEG audio layer 1)"), @@ -2134,7 +2151,9 @@ AVCodec ff_mp2_decoder = { .init = decode_init, .decode = decode_frame, #if FF_API_PARSE_FRAME - .capabilities = CODEC_CAP_PARSE_ONLY, + .capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1, +#else + .capabilities = CODEC_CAP_DR1, #endif .flush = flush, .long_name = NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"), @@ -2149,7 +2168,9 @@ AVCodec ff_mp3_decoder = { .init = decode_init, .decode = decode_frame, #if FF_API_PARSE_FRAME - .capabilities = CODEC_CAP_PARSE_ONLY, + .capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1, +#else + .capabilities = CODEC_CAP_DR1, #endif .flush = flush, .long_name = NULL_IF_CONFIG_SMALL("MP3 (MPEG audio layer 3)"), @@ -2164,7 +2185,9 @@ AVCodec ff_mp3adu_decoder = { .init = decode_init, .decode = decode_frame_adu, #if FF_API_PARSE_FRAME - .capabilities = CODEC_CAP_PARSE_ONLY, + .capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1, +#else + .capabilities = CODEC_CAP_DR1, #endif .flush = flush, .long_name = NULL_IF_CONFIG_SMALL("ADU (Application Data Unit) MP3 (MPEG audio layer 3)"), @@ -2179,6 +2202,7 @@ AVCodec ff_mp3on4_decoder = { .init = decode_init_mp3on4, .close = decode_close_mp3on4, .decode = decode_frame_mp3on4, + .capabilities = CODEC_CAP_DR1, .flush = flush_mp3on4, .long_name = NULL_IF_CONFIG_SMALL("MP3onMP4"), }; diff --git a/libavcodec/mpegaudiodec_float.c b/libavcodec/mpegaudiodec_float.c index 9300de29b9..02c83afb4c 100644 --- a/libavcodec/mpegaudiodec_float.c +++ b/libavcodec/mpegaudiodec_float.c @@ -31,7 +31,9 @@ AVCodec ff_mp1float_decoder = { .init = decode_init, .decode = decode_frame, #if FF_API_PARSE_FRAME - .capabilities = CODEC_CAP_PARSE_ONLY, + .capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1, +#else + .capabilities = CODEC_CAP_DR1, #endif .flush = flush, .long_name = NULL_IF_CONFIG_SMALL("MP1 (MPEG audio layer 1)"), @@ -46,7 +48,9 @@ AVCodec ff_mp2float_decoder = { .init = decode_init, .decode = decode_frame, #if FF_API_PARSE_FRAME - .capabilities = CODEC_CAP_PARSE_ONLY, + .capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1, +#else + .capabilities = CODEC_CAP_DR1, #endif .flush = flush, .long_name = NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"), @@ -61,7 +65,9 @@ AVCodec ff_mp3float_decoder = { .init = decode_init, .decode = decode_frame, #if FF_API_PARSE_FRAME - .capabilities = CODEC_CAP_PARSE_ONLY, + .capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1, +#else + .capabilities = CODEC_CAP_DR1, #endif .flush = flush, .long_name = NULL_IF_CONFIG_SMALL("MP3 (MPEG audio layer 3)"), @@ -76,7 +82,9 @@ AVCodec ff_mp3adufloat_decoder = { .init = decode_init, .decode = decode_frame_adu, #if FF_API_PARSE_FRAME - .capabilities = CODEC_CAP_PARSE_ONLY, + .capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1, +#else + .capabilities = CODEC_CAP_DR1, #endif .flush = flush, .long_name = NULL_IF_CONFIG_SMALL("ADU (Application Data Unit) MP3 (MPEG audio layer 3)"), @@ -91,6 +99,7 @@ AVCodec ff_mp3on4float_decoder = { .init = decode_init_mp3on4, .close = decode_close_mp3on4, .decode = decode_frame_mp3on4, + .capabilities = CODEC_CAP_DR1, .flush = flush_mp3on4, .long_name = NULL_IF_CONFIG_SMALL("MP3onMP4"), }; diff --git a/libavcodec/nellymoserdec.c b/libavcodec/nellymoserdec.c index 278b6b3891..7723c5827b 100644 --- a/libavcodec/nellymoserdec.c +++ 
b/libavcodec/nellymoserdec.c @@ -47,6 +47,7 @@ typedef struct NellyMoserDecodeContext { AVCodecContext* avctx; + AVFrame frame; float *float_buf; DECLARE_ALIGNED(16, float, state)[NELLY_BUF_LEN]; AVLFG random_state; @@ -142,29 +143,28 @@ static av_cold int decode_init(AVCodecContext * avctx) { ff_init_ff_sine_windows(7); avctx->channel_layout = AV_CH_LAYOUT_MONO; + + avcodec_get_frame_defaults(&s->frame); + avctx->coded_frame = &s->frame; + return 0; } -static int decode_tag(AVCodecContext * avctx, - void *data, int *data_size, - AVPacket *avpkt) { +static int decode_tag(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) +{ const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; NellyMoserDecodeContext *s = avctx->priv_data; - int blocks, i, block_size; - int16_t *samples_s16 = data; - float *samples_flt = data; + int blocks, i, ret; + int16_t *samples_s16; + float *samples_flt; - block_size = NELLY_SAMPLES * av_get_bytes_per_sample(avctx->sample_fmt); blocks = buf_size / NELLY_BLOCK_LEN; if (blocks <= 0) { av_log(avctx, AV_LOG_ERROR, "Packet is too small\n"); return AVERROR_INVALIDDATA; } - if (*data_size < blocks * block_size) { - av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); - return AVERROR(EINVAL); - } if (buf_size % NELLY_BLOCK_LEN) { av_log(avctx, AV_LOG_WARNING, "Leftover bytes: %d.\n", buf_size % NELLY_BLOCK_LEN); @@ -177,6 +177,15 @@ static int decode_tag(AVCodecContext * avctx, * 44100 Hz - 8 */ + /* get output buffer */ + s->frame.nb_samples = NELLY_SAMPLES * blocks; + if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; + } + samples_s16 = (int16_t *)s->frame.data[0]; + samples_flt = (float *)s->frame.data[0]; + for (i=0 ; isample_fmt == SAMPLE_FMT_FLT) { nelly_decode_block(s, buf, samples_flt); @@ -188,7 +197,9 @@ static int decode_tag(AVCodecContext * avctx, } buf += NELLY_BLOCK_LEN; } - *data_size = blocks * block_size; + + *got_frame_ptr = 1; + *(AVFrame *)data = s->frame; return buf_size; } @@ -198,6 +209,7 @@ static av_cold int decode_end(AVCodecContext * avctx) { av_freep(&s->float_buf); ff_mdct_end(&s->imdct_ctx); + return 0; } @@ -209,6 +221,7 @@ AVCodec ff_nellymoser_decoder = { .init = decode_init, .close = decode_end, .decode = decode_tag, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Nellymoser Asao"), .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, diff --git a/libavcodec/pcm.c b/libavcodec/pcm.c index 0e9e685989..76d5c100bc 100644 --- a/libavcodec/pcm.c +++ b/libavcodec/pcm.c @@ -192,6 +192,7 @@ static int pcm_encode_frame(AVCodecContext *avctx, } typedef struct PCMDecode { + AVFrame frame; short table[256]; } PCMDecode; @@ -223,6 +224,9 @@ static av_cold int pcm_decode_init(AVCodecContext * avctx) if (avctx->sample_fmt == AV_SAMPLE_FMT_S32) avctx->bits_per_raw_sample = av_get_bits_per_sample(avctx->codec->id); + avcodec_get_frame_defaults(&s->frame); + avctx->coded_frame = &s->frame; + return 0; } @@ -243,22 +247,20 @@ static av_cold int pcm_decode_init(AVCodecContext * avctx) dst += size / 8; \ } -static int pcm_decode_frame(AVCodecContext *avctx, - void *data, int *data_size, - AVPacket *avpkt) +static int pcm_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *src = avpkt->data; int buf_size = avpkt->size; PCMDecode *s = avctx->priv_data; - int sample_size, c, n, out_size; + int sample_size, c, n, ret, samples_per_block; uint8_t 
*samples; int32_t *dst_int32_t; - samples = data; - sample_size = av_get_bits_per_sample(avctx->codec_id)/8; /* av_get_bits_per_sample returns 0 for CODEC_ID_PCM_DVD */ + samples_per_block = 1; if (CODEC_ID_PCM_DVD == avctx->codec_id) { if (avctx->bits_per_coded_sample != 20 && avctx->bits_per_coded_sample != 24) { @@ -266,10 +268,13 @@ static int pcm_decode_frame(AVCodecContext *avctx, return AVERROR(EINVAL); } /* 2 samples are interleaved per block in PCM_DVD */ + samples_per_block = 2; sample_size = avctx->bits_per_coded_sample * 2 / 8; - } else if (avctx->codec_id == CODEC_ID_PCM_LXF) + } else if (avctx->codec_id == CODEC_ID_PCM_LXF) { /* we process 40-bit blocks per channel for LXF */ + samples_per_block = 2; sample_size = 5; + } if (sample_size == 0) { av_log(avctx, AV_LOG_ERROR, "Invalid sample_size\n"); @@ -288,14 +293,13 @@ static int pcm_decode_frame(AVCodecContext *avctx, n = buf_size/sample_size; - out_size = n * av_get_bytes_per_sample(avctx->sample_fmt); - if (avctx->codec_id == CODEC_ID_PCM_DVD || - avctx->codec_id == CODEC_ID_PCM_LXF) - out_size *= 2; - if (*data_size < out_size) { - av_log(avctx, AV_LOG_ERROR, "output buffer too small\n"); - return AVERROR(EINVAL); + /* get output buffer */ + s->frame.nb_samples = n * samples_per_block / avctx->channels; + if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } + samples = s->frame.data[0]; switch(avctx->codec->id) { case CODEC_ID_PCM_U32LE: @@ -401,7 +405,7 @@ static int pcm_decode_frame(AVCodecContext *avctx, case CODEC_ID_PCM_DVD: { const uint8_t *src8; - dst_int32_t = data; + dst_int32_t = (int32_t *)s->frame.data[0]; n /= avctx->channels; switch (avctx->bits_per_coded_sample) { case 20: @@ -433,7 +437,7 @@ static int pcm_decode_frame(AVCodecContext *avctx, { int i; const uint8_t *src8; - dst_int32_t = data; + dst_int32_t = (int32_t *)s->frame.data[0]; n /= avctx->channels; //unpack and de-planerize for (i = 0; i < n; i++) { @@ -454,7 +458,10 @@ static int pcm_decode_frame(AVCodecContext *avctx, default: return -1; } - *data_size = out_size; + + *got_frame_ptr = 1; + *(AVFrame *)data = s->frame; + return buf_size; } @@ -483,6 +490,7 @@ AVCodec ff_ ## name_ ## _decoder = { \ .priv_data_size = sizeof(PCMDecode), \ .init = pcm_decode_init, \ .decode = pcm_decode_frame, \ + .capabilities = CODEC_CAP_DR1, \ .sample_fmts = (const enum AVSampleFormat[]){sample_fmt_,AV_SAMPLE_FMT_NONE}, \ .long_name = NULL_IF_CONFIG_SMALL(long_name_), \ } diff --git a/libavcodec/qcelpdec.c b/libavcodec/qcelpdec.c index 9e7e13118b..20a0484b42 100644 --- a/libavcodec/qcelpdec.c +++ b/libavcodec/qcelpdec.c @@ -56,6 +56,7 @@ typedef enum typedef struct { + AVFrame avframe; GetBitContext gb; qcelp_packet_rate bitrate; QCELPFrame frame; /**< unpacked data frame */ @@ -97,6 +98,9 @@ static av_cold int qcelp_decode_init(AVCodecContext *avctx) for(i=0; i<10; i++) q->prev_lspf[i] = (i+1)/11.; + avcodec_get_frame_defaults(&q->avframe); + avctx->coded_frame = &q->avframe; + return 0; } @@ -682,23 +686,25 @@ static void postfilter(QCELPContext *q, float *samples, float *lpc) 160, 0.9375, &q->postfilter_agc_mem); } -static int qcelp_decode_frame(AVCodecContext *avctx, void *data, int *data_size, - AVPacket *avpkt) +static int qcelp_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; QCELPContext *q = avctx->priv_data; - float *outbuffer = data; - int i, out_size; + float *outbuffer; + int 
i, ret; float quantized_lspf[10], lpc[10]; float gain[16]; float *formant_mem; - out_size = 160 * av_get_bytes_per_sample(avctx->sample_fmt); - if (*data_size < out_size) { - av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); - return AVERROR(EINVAL); + /* get output buffer */ + q->avframe.nb_samples = 160; + if ((ret = avctx->get_buffer(avctx, &q->avframe)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } + outbuffer = (float *)q->avframe.data[0]; if ((q->bitrate = determine_bitrate(avctx, buf_size, &buf)) == I_F_Q) { warn_insufficient_frame_quality(avctx, "bitrate cannot be determined."); @@ -783,7 +789,8 @@ erasure: memcpy(q->prev_lspf, quantized_lspf, sizeof(q->prev_lspf)); q->prev_bitrate = q->bitrate; - *data_size = out_size; + *got_frame_ptr = 1; + *(AVFrame *)data = q->avframe; return buf_size; } @@ -795,6 +802,7 @@ AVCodec ff_qcelp_decoder = .id = CODEC_ID_QCELP, .init = qcelp_decode_init, .decode = qcelp_decode_frame, + .capabilities = CODEC_CAP_DR1, .priv_data_size = sizeof(QCELPContext), .long_name = NULL_IF_CONFIG_SMALL("QCELP / PureVoice"), }; diff --git a/libavcodec/qdm2.c b/libavcodec/qdm2.c index 5068e675cb..9341c69281 100644 --- a/libavcodec/qdm2.c +++ b/libavcodec/qdm2.c @@ -130,6 +130,8 @@ typedef struct { * QDM2 decoder context */ typedef struct { + AVFrame frame; + /// Parameters from codec header, do not change during playback int nb_channels; ///< number of channels int channels; ///< number of channels @@ -1875,6 +1877,9 @@ static av_cold int qdm2_decode_init(AVCodecContext *avctx) avctx->sample_fmt = AV_SAMPLE_FMT_S16; + avcodec_get_frame_defaults(&s->frame); + avctx->coded_frame = &s->frame; + // dump_context(s); return 0; } @@ -1952,30 +1957,27 @@ static int qdm2_decode (QDM2Context *q, const uint8_t *in, int16_t *out) } -static int qdm2_decode_frame(AVCodecContext *avctx, - void *data, int *data_size, - AVPacket *avpkt) +static int qdm2_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; QDM2Context *s = avctx->priv_data; - int16_t *out = data; - int i, out_size; + int16_t *out; + int i, ret; if(!buf) return 0; if(buf_size < s->checksum_size) return -1; - out_size = 16 * s->channels * s->frame_size * - av_get_bytes_per_sample(avctx->sample_fmt); - if (*data_size < out_size) { - av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); - return AVERROR(EINVAL); + /* get output buffer */ + s->frame.nb_samples = 16 * s->frame_size; + if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } - - av_log(avctx, AV_LOG_DEBUG, "decode(%d): %p[%d] -> %p[%d]\n", - buf_size, buf, s->checksum_size, data, *data_size); + out = (int16_t *)s->frame.data[0]; for (i = 0; i < 16; i++) { if (qdm2_decode(s, buf, out) < 0) @@ -1983,7 +1985,8 @@ static int qdm2_decode_frame(AVCodecContext *avctx, out += s->channels * s->frame_size; } - *data_size = out_size; + *got_frame_ptr = 1; + *(AVFrame *)data = s->frame; return s->checksum_size; } @@ -1997,5 +2000,6 @@ AVCodec ff_qdm2_decoder = .init = qdm2_decode_init, .close = qdm2_decode_close, .decode = qdm2_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("QDesign Music Codec 2"), }; diff --git a/libavcodec/ra144.h b/libavcodec/ra144.h index dcdfbb8ccc..f6475d45ff 100644 --- a/libavcodec/ra144.h +++ b/libavcodec/ra144.h @@ -34,6 +34,7 @@ typedef struct { AVCodecContext *avctx; + AVFrame frame; 
LPCContext lpc_ctx; unsigned int old_energy; ///< previous frame energy diff --git a/libavcodec/ra144dec.c b/libavcodec/ra144dec.c index 5fff696d83..dd8838c417 100644 --- a/libavcodec/ra144dec.c +++ b/libavcodec/ra144dec.c @@ -38,6 +38,10 @@ static av_cold int ra144_decode_init(AVCodecContext * avctx) ractx->lpc_coef[1] = ractx->lpc_tables[1]; avctx->sample_fmt = AV_SAMPLE_FMT_S16; + + avcodec_get_frame_defaults(&ractx->frame); + avctx->coded_frame = &ractx->frame; + return 0; } @@ -54,8 +58,8 @@ static void do_output_subblock(RA144Context *ractx, const uint16_t *lpc_coefs, } /** Uncompress one block (20 bytes -> 160*2 bytes). */ -static int ra144_decode_frame(AVCodecContext * avctx, void *vdata, - int *data_size, AVPacket *avpkt) +static int ra144_decode_frame(AVCodecContext * avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; @@ -64,23 +68,25 @@ static int ra144_decode_frame(AVCodecContext * avctx, void *vdata, uint16_t block_coefs[NBLOCKS][LPC_ORDER]; // LPC coefficients of each sub-block unsigned int lpc_refl[LPC_ORDER]; // LPC reflection coefficients of the frame int i, j; - int out_size; - int16_t *data = vdata; + int ret; + int16_t *samples; unsigned int energy; RA144Context *ractx = avctx->priv_data; GetBitContext gb; - out_size = NBLOCKS * BLOCKSIZE * av_get_bytes_per_sample(avctx->sample_fmt); - if (*data_size < out_size) { - av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); - return AVERROR(EINVAL); + /* get output buffer */ + ractx->frame.nb_samples = NBLOCKS * BLOCKSIZE; + if ((ret = avctx->get_buffer(avctx, &ractx->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } + samples = (int16_t *)ractx->frame.data[0]; if(buf_size < FRAMESIZE) { av_log(avctx, AV_LOG_ERROR, "Frame too small (%d bytes). 
Truncated file?\n", buf_size); - *data_size = 0; + *got_frame_ptr = 0; return buf_size; } init_get_bits(&gb, buf, FRAMESIZE * 8); @@ -106,7 +112,7 @@ static int ra144_decode_frame(AVCodecContext * avctx, void *vdata, do_output_subblock(ractx, block_coefs[i], refl_rms[i], &gb); for (j=0; j < BLOCKSIZE; j++) - *data++ = av_clip_int16(ractx->curr_sblock[j + 10] << 2); + *samples++ = av_clip_int16(ractx->curr_sblock[j + 10] << 2); } ractx->old_energy = energy; @@ -114,7 +120,9 @@ static int ra144_decode_frame(AVCodecContext * avctx, void *vdata, FFSWAP(unsigned int *, ractx->lpc_coef[0], ractx->lpc_coef[1]); - *data_size = out_size; + *got_frame_ptr = 1; + *(AVFrame *)data = ractx->frame; + return FRAMESIZE; } @@ -125,5 +133,6 @@ AVCodec ff_ra_144_decoder = { .priv_data_size = sizeof(RA144Context), .init = ra144_decode_init, .decode = ra144_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("RealAudio 1.0 (14.4K)"), }; diff --git a/libavcodec/ra288.c b/libavcodec/ra288.c index eac2e2e3cd..062d9fac94 100644 --- a/libavcodec/ra288.c +++ b/libavcodec/ra288.c @@ -36,6 +36,7 @@ #define RA288_BLOCKS_PER_FRAME 32 typedef struct { + AVFrame frame; DSPContext dsp; DECLARE_ALIGNED(16, float, sp_lpc)[FFALIGN(36, 8)]; ///< LPC coefficients for speech data (spec: A) DECLARE_ALIGNED(16, float, gain_lpc)[FFALIGN(10, 8)]; ///< LPC coefficients for gain (spec: GB) @@ -62,6 +63,10 @@ static av_cold int ra288_decode_init(AVCodecContext *avctx) RA288Context *ractx = avctx->priv_data; avctx->sample_fmt = AV_SAMPLE_FMT_FLT; dsputil_init(&ractx->dsp, avctx); + + avcodec_get_frame_defaults(&ractx->frame); + avctx->coded_frame = &ractx->frame; + return 0; } @@ -165,12 +170,12 @@ static void backward_filter(RA288Context *ractx, } static int ra288_decode_frame(AVCodecContext * avctx, void *data, - int *data_size, AVPacket *avpkt) + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; - float *out = data; - int i, out_size; + float *out; + int i, ret; RA288Context *ractx = avctx->priv_data; GetBitContext gb; @@ -181,12 +186,13 @@ static int ra288_decode_frame(AVCodecContext * avctx, void *data, return AVERROR_INVALIDDATA; } - out_size = RA288_BLOCK_SIZE * RA288_BLOCKS_PER_FRAME * - av_get_bytes_per_sample(avctx->sample_fmt); - if (*data_size < out_size) { - av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); - return AVERROR(EINVAL); + /* get output buffer */ + ractx->frame.nb_samples = RA288_BLOCK_SIZE * RA288_BLOCKS_PER_FRAME; + if ((ret = avctx->get_buffer(avctx, &ractx->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } + out = (float *)ractx->frame.data[0]; init_get_bits(&gb, buf, avctx->block_align * 8); @@ -208,7 +214,9 @@ static int ra288_decode_frame(AVCodecContext * avctx, void *data, } } - *data_size = out_size; + *got_frame_ptr = 1; + *(AVFrame *)data = ractx->frame; + return avctx->block_align; } @@ -219,5 +227,6 @@ AVCodec ff_ra_288_decoder = { .priv_data_size = sizeof(RA288Context), .init = ra288_decode_init, .decode = ra288_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("RealAudio 2.0 (28.8K)"), }; diff --git a/libavcodec/s302m.c b/libavcodec/s302m.c index f6f096d89f..34018aeb46 100644 --- a/libavcodec/s302m.c +++ b/libavcodec/s302m.c @@ -25,6 +25,10 @@ #define AES3_HEADER_LEN 4 +typedef struct S302MDecodeContext { + AVFrame frame; +} S302MDecodeContext; + static int s302m_parse_frame_header(AVCodecContext *avctx, const uint8_t *buf, int buf_size) { 
@@ -73,10 +77,12 @@ static int s302m_parse_frame_header(AVCodecContext *avctx, const uint8_t *buf, } static int s302m_decode_frame(AVCodecContext *avctx, void *data, - int *data_size, AVPacket *avpkt) + int *got_frame_ptr, AVPacket *avpkt) { + S302MDecodeContext *s = avctx->priv_data; const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; + int block_size, ret; int frame_size = s302m_parse_frame_header(avctx, buf, buf_size); if (frame_size < 0) @@ -85,11 +91,18 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data, buf_size -= AES3_HEADER_LEN; buf += AES3_HEADER_LEN; - if (*data_size < 4 * buf_size * 8 / (avctx->bits_per_coded_sample + 4)) - return -1; + /* get output buffer */ + block_size = (avctx->bits_per_coded_sample + 4) / 4; + s->frame.nb_samples = 2 * (buf_size / block_size) / avctx->channels; + if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; + } + + buf_size = (s->frame.nb_samples * avctx->channels / 2) * block_size; if (avctx->bits_per_coded_sample == 24) { - uint32_t *o = data; + uint32_t *o = (uint32_t *)s->frame.data[0]; for (; buf_size > 6; buf_size -= 7) { *o++ = (av_reverse[buf[2]] << 24) | (av_reverse[buf[1]] << 16) | @@ -100,9 +113,8 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data, (av_reverse[buf[3] & 0x0f] << 4); buf += 7; } - *data_size = (uint8_t*) o - (uint8_t*) data; } else if (avctx->bits_per_coded_sample == 20) { - uint32_t *o = data; + uint32_t *o = (uint32_t *)s->frame.data[0]; for (; buf_size > 5; buf_size -= 6) { *o++ = (av_reverse[buf[2] & 0xf0] << 28) | (av_reverse[buf[1]] << 20) | @@ -112,9 +124,8 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data, (av_reverse[buf[3]] << 12); buf += 6; } - *data_size = (uint8_t*) o - (uint8_t*) data; } else { - uint16_t *o = data; + uint16_t *o = (uint16_t *)s->frame.data[0]; for (; buf_size > 4; buf_size -= 5) { *o++ = (av_reverse[buf[1]] << 8) | av_reverse[buf[0]]; @@ -123,10 +134,22 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data, (av_reverse[buf[2]] >> 4); buf += 5; } - *data_size = (uint8_t*) o - (uint8_t*) data; } - return buf - avpkt->data; + *got_frame_ptr = 1; + *(AVFrame *)data = s->frame; + + return avpkt->size; +} + +static int s302m_decode_init(AVCodecContext *avctx) +{ + S302MDecodeContext *s = avctx->priv_data; + + avcodec_get_frame_defaults(&s->frame); + avctx->coded_frame = &s->frame; + + return 0; } @@ -134,6 +157,9 @@ AVCodec ff_s302m_decoder = { .name = "s302m", .type = AVMEDIA_TYPE_AUDIO, .id = CODEC_ID_S302M, + .priv_data_size = sizeof(S302MDecodeContext), + .init = s302m_decode_init, .decode = s302m_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("SMPTE 302M"), }; diff --git a/libavcodec/shorten.c b/libavcodec/shorten.c index da36bd58eb..da0ef08eee 100644 --- a/libavcodec/shorten.c +++ b/libavcodec/shorten.c @@ -79,6 +79,7 @@ static const uint8_t is_audio_command[10] = { 1, 1, 1, 1, 0, 0, 0, 1, 1, 0 }; typedef struct ShortenContext { AVCodecContext *avctx; + AVFrame frame; GetBitContext gb; int min_framesize, max_framesize; @@ -112,6 +113,9 @@ static av_cold int shorten_decode_init(AVCodecContext * avctx) s->avctx = avctx; avctx->sample_fmt = AV_SAMPLE_FMT_S16; + avcodec_get_frame_defaults(&s->frame); + avctx->coded_frame = &s->frame; + return 0; } @@ -394,15 +398,13 @@ static int read_header(ShortenContext *s) return 0; } -static int shorten_decode_frame(AVCodecContext *avctx, - void *data, int *data_size, - AVPacket 
*avpkt) +static int shorten_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; ShortenContext *s = avctx->priv_data; int i, input_buf_size = 0; - int16_t *samples = data; int ret; /* allocate internal bitstream buffer */ @@ -436,7 +438,7 @@ static int shorten_decode_frame(AVCodecContext *avctx, /* do not decode until buffer has at least max_framesize bytes or the end of the file has been reached */ if (buf_size < s->max_framesize && avpkt->data) { - *data_size = 0; + *got_frame_ptr = 0; return input_buf_size; } } @@ -448,13 +450,13 @@ static int shorten_decode_frame(AVCodecContext *avctx, if (!s->got_header) { if ((ret = read_header(s)) < 0) return ret; - *data_size = 0; + *got_frame_ptr = 0; goto finish_frame; } /* if quit command was read previously, don't decode anything */ if (s->got_quit_command) { - *data_size = 0; + *got_frame_ptr = 0; return avpkt->size; } @@ -464,7 +466,7 @@ static int shorten_decode_frame(AVCodecContext *avctx, int len; if (get_bits_left(&s->gb) < 3+FNSIZE) { - *data_size = 0; + *got_frame_ptr = 0; break; } @@ -472,7 +474,7 @@ static int shorten_decode_frame(AVCodecContext *avctx, if (cmd > FN_VERBATIM) { av_log(avctx, AV_LOG_ERROR, "unknown shorten function %d\n", cmd); - *data_size = 0; + *got_frame_ptr = 0; break; } @@ -507,7 +509,7 @@ static int shorten_decode_frame(AVCodecContext *avctx, break; } if (cmd == FN_BLOCKSIZE || cmd == FN_QUIT) { - *data_size = 0; + *got_frame_ptr = 0; break; } } else { @@ -571,19 +573,23 @@ static int shorten_decode_frame(AVCodecContext *avctx, /* if this is the last channel in the block, output the samples */ s->cur_chan++; if (s->cur_chan == s->channels) { - int out_size = s->blocksize * s->channels * - av_get_bytes_per_sample(avctx->sample_fmt); - if (*data_size < out_size) { - av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); - return AVERROR(EINVAL); + /* get output buffer */ + s->frame.nb_samples = s->blocksize; + if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } - interleave_buffer(samples, s->channels, s->blocksize, s->decoded); - *data_size = out_size; + /* interleave output */ + interleave_buffer((int16_t *)s->frame.data[0], s->channels, + s->blocksize, s->decoded); + + *got_frame_ptr = 1; + *(AVFrame *)data = s->frame; } } } if (s->cur_chan < s->channels) - *data_size = 0; + *got_frame_ptr = 0; finish_frame: s->bitindex = get_bits_count(&s->gb) - 8*((get_bits_count(&s->gb))/8); @@ -614,6 +620,7 @@ static av_cold int shorten_decode_close(AVCodecContext *avctx) } av_freep(&s->bitstream); av_freep(&s->coeffs); + return 0; } @@ -625,6 +632,6 @@ AVCodec ff_shorten_decoder = { .init = shorten_decode_init, .close = shorten_decode_close, .decode = shorten_decode_frame, - .capabilities = CODEC_CAP_DELAY, + .capabilities = CODEC_CAP_DELAY | CODEC_CAP_DR1, .long_name= NULL_IF_CONFIG_SMALL("Shorten"), }; diff --git a/libavcodec/sipr.c b/libavcodec/sipr.c index 10a12c52a5..c832b9b1fd 100644 --- a/libavcodec/sipr.c +++ b/libavcodec/sipr.c @@ -507,20 +507,23 @@ static av_cold int sipr_decoder_init(AVCodecContext * avctx) avctx->sample_fmt = AV_SAMPLE_FMT_FLT; + avcodec_get_frame_defaults(&ctx->frame); + avctx->coded_frame = &ctx->frame; + return 0; } -static int sipr_decode_frame(AVCodecContext *avctx, void *datap, - int *data_size, AVPacket *avpkt) +static int sipr_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket 
*avpkt) { SiprContext *ctx = avctx->priv_data; const uint8_t *buf=avpkt->data; SiprParameters parm; const SiprModeParam *mode_par = &modes[ctx->mode]; GetBitContext gb; - float *data = datap; + float *samples; int subframe_size = ctx->mode == MODE_16k ? L_SUBFR_16k : SUBFR_SIZE; - int i, out_size; + int i, ret; ctx->avctx = avctx; if (avpkt->size < (mode_par->bits_per_frame >> 3)) { @@ -530,27 +533,27 @@ static int sipr_decode_frame(AVCodecContext *avctx, void *datap, return -1; } - out_size = mode_par->frames_per_packet * subframe_size * - mode_par->subframe_count * - av_get_bytes_per_sample(avctx->sample_fmt); - if (*data_size < out_size) { - av_log(avctx, AV_LOG_ERROR, - "Error processing packet: output buffer (%d) too small\n", - *data_size); - return -1; + /* get output buffer */ + ctx->frame.nb_samples = mode_par->frames_per_packet * subframe_size * + mode_par->subframe_count; + if ((ret = avctx->get_buffer(avctx, &ctx->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } + samples = (float *)ctx->frame.data[0]; init_get_bits(&gb, buf, mode_par->bits_per_frame); for (i = 0; i < mode_par->frames_per_packet; i++) { decode_parameters(&parm, &gb, mode_par); - ctx->decode_frame(ctx, &parm, data); + ctx->decode_frame(ctx, &parm, samples); - data += subframe_size * mode_par->subframe_count; + samples += subframe_size * mode_par->subframe_count; } - *data_size = out_size; + *got_frame_ptr = 1; + *(AVFrame *)data = ctx->frame; return mode_par->bits_per_frame >> 3; } @@ -562,5 +565,6 @@ AVCodec ff_sipr_decoder = { .priv_data_size = sizeof(SiprContext), .init = sipr_decoder_init, .decode = sipr_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("RealAudio SIPR / ACELP.NET"), }; diff --git a/libavcodec/smacker.c b/libavcodec/smacker.c index 00ba4b8c5d..ba7da02622 100644 --- a/libavcodec/smacker.c +++ b/libavcodec/smacker.c @@ -558,31 +558,43 @@ static av_cold int decode_end(AVCodecContext *avctx) } +typedef struct SmackerAudioContext { + AVFrame frame; +} SmackerAudioContext; + static av_cold int smka_decode_init(AVCodecContext *avctx) { + SmackerAudioContext *s = avctx->priv_data; + if (avctx->channels < 1 || avctx->channels > 2) { av_log(avctx, AV_LOG_ERROR, "invalid number of channels\n"); return AVERROR(EINVAL); } avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO; avctx->sample_fmt = avctx->bits_per_coded_sample == 8 ? 
AV_SAMPLE_FMT_U8 : AV_SAMPLE_FMT_S16; + + avcodec_get_frame_defaults(&s->frame); + avctx->coded_frame = &s->frame; + return 0; } /** * Decode Smacker audio data */ -static int smka_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) +static int smka_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { + SmackerAudioContext *s = avctx->priv_data; const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; GetBitContext gb; HuffContext h[4]; VLC vlc[4]; - int16_t *samples = data; - uint8_t *samples8 = data; + int16_t *samples; + uint8_t *samples8; int val; - int i, res; + int i, res, ret; int unp_size; int bits, stereo; int pred[2] = {0, 0}; @@ -598,15 +610,11 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data, int *data_size, if(!get_bits1(&gb)){ av_log(avctx, AV_LOG_INFO, "Sound: no data\n"); - *data_size = 0; + *got_frame_ptr = 0; return 1; } stereo = get_bits1(&gb); bits = get_bits1(&gb); - if (unp_size & 0xC0000000 || unp_size > *data_size) { - av_log(avctx, AV_LOG_ERROR, "Frame is too large to fit in buffer\n"); - return -1; - } if (stereo ^ (avctx->channels != 1)) { av_log(avctx, AV_LOG_ERROR, "channels mismatch\n"); return AVERROR(EINVAL); @@ -616,6 +624,15 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data, int *data_size, return AVERROR(EINVAL); } + /* get output buffer */ + s->frame.nb_samples = unp_size / (avctx->channels * (bits + 1)); + if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; + } + samples = (int16_t *)s->frame.data[0]; + samples8 = s->frame.data[0]; + memset(vlc, 0, sizeof(VLC) * 4); memset(h, 0, sizeof(HuffContext) * 4); // Initialize @@ -705,7 +722,9 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data, int *data_size, av_free(h[i].values); } - *data_size = unp_size; + *got_frame_ptr = 1; + *(AVFrame *)data = s->frame; + return buf_size; } @@ -725,8 +744,10 @@ AVCodec ff_smackaud_decoder = { .name = "smackaud", .type = AVMEDIA_TYPE_AUDIO, .id = CODEC_ID_SMACKAUDIO, + .priv_data_size = sizeof(SmackerAudioContext), .init = smka_decode_init, .decode = smka_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Smacker audio"), }; diff --git a/libavcodec/truespeech.c b/libavcodec/truespeech.c index b7a2aa6fba..524884ddf5 100644 --- a/libavcodec/truespeech.c +++ b/libavcodec/truespeech.c @@ -34,6 +34,7 @@ * TrueSpeech decoder context */ typedef struct { + AVFrame frame; DSPContext dsp; /* input data */ uint8_t buffer[32]; @@ -69,6 +70,9 @@ static av_cold int truespeech_decode_init(AVCodecContext * avctx) dsputil_init(&c->dsp, avctx); + avcodec_get_frame_defaults(&c->frame); + avctx->coded_frame = &c->frame; + return 0; } @@ -299,17 +303,16 @@ static void truespeech_save_prevvec(TSContext *c) c->prevfilt[i] = c->cvector[i]; } -static int truespeech_decode_frame(AVCodecContext *avctx, - void *data, int *data_size, - AVPacket *avpkt) +static int truespeech_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; TSContext *c = avctx->priv_data; int i, j; - short *samples = data; - int iterations, out_size; + int16_t *samples; + int iterations, ret; iterations = buf_size / 32; @@ -319,13 +322,15 @@ static int truespeech_decode_frame(AVCodecContext *avctx, return -1; } - out_size = iterations * 240 * av_get_bytes_per_sample(avctx->sample_fmt); - if (*data_size < out_size) { 
- av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); - return AVERROR(EINVAL); + /* get output buffer */ + c->frame.nb_samples = iterations * 240; + if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } + samples = (int16_t *)c->frame.data[0]; - memset(samples, 0, out_size); + memset(samples, 0, iterations * 240 * sizeof(*samples)); for(j = 0; j < iterations; j++) { truespeech_read_frame(c, buf); @@ -345,7 +350,8 @@ static int truespeech_decode_frame(AVCodecContext *avctx, truespeech_save_prevvec(c); } - *data_size = out_size; + *got_frame_ptr = 1; + *(AVFrame *)data = c->frame; return buf_size; } @@ -357,5 +363,6 @@ AVCodec ff_truespeech_decoder = { .priv_data_size = sizeof(TSContext), .init = truespeech_decode_init, .decode = truespeech_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("DSP Group TrueSpeech"), }; diff --git a/libavcodec/tta.c b/libavcodec/tta.c index 3e4adf0c11..6b76f527c4 100644 --- a/libavcodec/tta.c +++ b/libavcodec/tta.c @@ -56,6 +56,7 @@ typedef struct TTAChannel { typedef struct TTAContext { AVCodecContext *avctx; + AVFrame frame; GetBitContext gb; int format, channels, bps, data_length; @@ -276,17 +277,19 @@ static av_cold int tta_decode_init(AVCodecContext * avctx) return -1; } + avcodec_get_frame_defaults(&s->frame); + avctx->coded_frame = &s->frame; + return 0; } -static int tta_decode_frame(AVCodecContext *avctx, - void *data, int *data_size, - AVPacket *avpkt) +static int tta_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; TTAContext *s = avctx->priv_data; - int i, out_size; + int i, ret; int cur_chan = 0, framelen = s->frame_length; int32_t *p; @@ -297,10 +300,11 @@ static int tta_decode_frame(AVCodecContext *avctx, if (!s->total_frames && s->last_frame_length) framelen = s->last_frame_length; - out_size = framelen * s->channels * av_get_bytes_per_sample(avctx->sample_fmt); - if (*data_size < out_size) { - av_log(avctx, AV_LOG_ERROR, "Output buffer size is too small.\n"); - return -1; + /* get output buffer */ + s->frame.nb_samples = framelen; + if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } // decode directly to output buffer for 24-bit sample format @@ -396,19 +400,20 @@ static int tta_decode_frame(AVCodecContext *avctx, // convert to output buffer if (s->bps == 2) { - int16_t *samples = data; + int16_t *samples = (int16_t *)s->frame.data[0]; for (p = s->decode_buffer; p < s->decode_buffer + (framelen * s->channels); p++) *samples++ = *p; } else { // shift samples for 24-bit sample format - int32_t *samples = data; + int32_t *samples = (int32_t *)s->frame.data[0]; for (i = 0; i < framelen * s->channels; i++) *samples++ <<= 8; // reset decode buffer s->decode_buffer = NULL; } - *data_size = out_size; + *got_frame_ptr = 1; + *(AVFrame *)data = s->frame; return buf_size; } @@ -430,5 +435,6 @@ AVCodec ff_tta_decoder = { .init = tta_decode_init, .close = tta_decode_close, .decode = tta_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("True Audio (TTA)"), }; diff --git a/libavcodec/twinvq.c b/libavcodec/twinvq.c index a2851562ee..22be07a5b5 100644 --- a/libavcodec/twinvq.c +++ b/libavcodec/twinvq.c @@ -174,6 +174,7 @@ static const ModeTab mode_44_48 = { typedef struct TwinContext { AVCodecContext *avctx; + AVFrame frame; DSPContext dsp; 
FFTContext mdct_ctx[3]; @@ -195,6 +196,7 @@ typedef struct TwinContext { float *curr_frame; ///< non-interleaved output float *prev_frame; ///< non-interleaved previous frame int last_block_pos[2]; + int discarded_packets; float *cos_tabs[3]; @@ -676,6 +678,9 @@ static void imdct_output(TwinContext *tctx, enum FrameType ftype, int wtype, i); } + if (!out) + return; + size2 = tctx->last_block_pos[0]; size1 = mtab->size - size2; if (tctx->avctx->channels == 2) { @@ -811,16 +816,16 @@ static void read_and_decode_spectrum(TwinContext *tctx, GetBitContext *gb, } static int twin_decode_frame(AVCodecContext * avctx, void *data, - int *data_size, AVPacket *avpkt) + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; TwinContext *tctx = avctx->priv_data; GetBitContext gb; const ModeTab *mtab = tctx->mtab; - float *out = data; + float *out = NULL; enum FrameType ftype; - int window_type, out_size; + int window_type, ret; static const enum FrameType wtype_to_ftype_table[] = { FT_LONG, FT_LONG, FT_SHORT, FT_LONG, FT_MEDIUM, FT_LONG, FT_LONG, FT_MEDIUM, FT_MEDIUM @@ -832,11 +837,14 @@ static int twin_decode_frame(AVCodecContext * avctx, void *data, return AVERROR(EINVAL); } - out_size = mtab->size * avctx->channels * - av_get_bytes_per_sample(avctx->sample_fmt); - if (*data_size < out_size) { - av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n"); - return AVERROR(EINVAL); + /* get output buffer */ + if (tctx->discarded_packets >= 2) { + tctx->frame.nb_samples = mtab->size; + if ((ret = avctx->get_buffer(avctx, &tctx->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; + } + out = (float *)tctx->frame.data[0]; } init_get_bits(&gb, buf, buf_size * 8); @@ -856,12 +864,14 @@ static int twin_decode_frame(AVCodecContext * avctx, void *data, FFSWAP(float*, tctx->curr_frame, tctx->prev_frame); - if (tctx->avctx->frame_number < 2) { - *data_size=0; + if (tctx->discarded_packets < 2) { + tctx->discarded_packets++; + *got_frame_ptr = 0; return buf_size; } - *data_size = out_size; + *got_frame_ptr = 1; + *(AVFrame *)data = tctx->frame;; return buf_size; } @@ -1153,6 +1163,9 @@ static av_cold int twin_decode_init(AVCodecContext *avctx) memset_float(tctx->bark_hist[0][0], 0.1, FF_ARRAY_ELEMS(tctx->bark_hist)); + avcodec_get_frame_defaults(&tctx->frame); + avctx->coded_frame = &tctx->frame; + return 0; } @@ -1164,5 +1177,6 @@ AVCodec ff_twinvq_decoder = { .init = twin_decode_init, .close = twin_decode_close, .decode = twin_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("VQF TwinVQ"), }; diff --git a/libavcodec/utils.c b/libavcodec/utils.c index 998a12c149..c84439972c 100644 --- a/libavcodec/utils.c +++ b/libavcodec/utils.c @@ -222,9 +222,8 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, if(s->codec_id == CODEC_ID_SVQ1 || s->codec_id == CODEC_ID_VP5 || s->codec_id == CODEC_ID_VP6 || s->codec_id == CODEC_ID_VP6F || s->codec_id == CODEC_ID_VP6A) { - linesize_align[0] = - linesize_align[1] = - linesize_align[2] = 16; + for (i = 0; i < AV_NUM_DATA_POINTERS; i++) + linesize_align[i] = 16; } #endif } @@ -241,7 +240,108 @@ void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height){ *width=FFALIGN(*width, align); } -int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){ +static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame) +{ + AVCodecInternal *avci = avctx->internal; + InternalBuffer *buf; + int buf_size, ret, i, needs_extended_data; + 
+ buf_size = av_samples_get_buffer_size(NULL, avctx->channels, + frame->nb_samples, avctx->sample_fmt, + 32); + if (buf_size < 0) + return AVERROR(EINVAL); + + needs_extended_data = av_sample_fmt_is_planar(avctx->sample_fmt) && + avctx->channels > AV_NUM_DATA_POINTERS; + + /* allocate InternalBuffer if needed */ + if (!avci->buffer) { + avci->buffer = av_mallocz(sizeof(InternalBuffer)); + if (!avci->buffer) + return AVERROR(ENOMEM); + } + buf = avci->buffer; + + /* if there is a previously-used internal buffer, check its size and + channel count to see if we can reuse it */ + if (buf->extended_data) { + /* if current buffer is too small, free it */ + if (buf->extended_data[0] && buf_size > buf->audio_data_size) { + av_free(buf->extended_data[0]); + if (buf->extended_data != buf->data) + av_free(&buf->extended_data); + buf->extended_data = NULL; + buf->data[0] = NULL; + } + /* if number of channels has changed, reset and/or free extended data + pointers but leave data buffer in buf->data[0] for reuse */ + if (buf->nb_channels != avctx->channels) { + if (buf->extended_data != buf->data) + av_free(buf->extended_data); + buf->extended_data = NULL; + } + } + + /* if there is no previous buffer or the previous buffer cannot be used + as-is, allocate a new buffer and/or rearrange the channel pointers */ + if (!buf->extended_data) { + /* if the channel pointers will fit, just set extended_data to data, + otherwise allocate the extended_data channel pointers */ + if (needs_extended_data) { + buf->extended_data = av_mallocz(avctx->channels * + sizeof(*buf->extended_data)); + if (!buf->extended_data) + return AVERROR(ENOMEM); + } else { + buf->extended_data = buf->data; + } + + /* if there is a previous buffer and it is large enough, reuse it and + just fill-in new channel pointers and linesize, otherwise allocate + a new buffer */ + if (buf->extended_data[0]) { + ret = av_samples_fill_arrays(buf->extended_data, &buf->linesize[0], + buf->extended_data[0], avctx->channels, + frame->nb_samples, avctx->sample_fmt, + 32); + } else { + ret = av_samples_alloc(buf->extended_data, &buf->linesize[0], + avctx->channels, frame->nb_samples, + avctx->sample_fmt, 32); + } + if (ret) + return ret; + + /* if data was not used for extended_data, we need to copy as many of + the extended_data channel pointers as will fit */ + if (needs_extended_data) { + for (i = 0; i < AV_NUM_DATA_POINTERS; i++) + buf->data[i] = buf->extended_data[i]; + } + buf->audio_data_size = buf_size; + buf->nb_channels = avctx->channels; + } + + /* copy InternalBuffer info to the AVFrame */ + frame->type = FF_BUFFER_TYPE_INTERNAL; + frame->extended_data = buf->extended_data; + frame->linesize[0] = buf->linesize[0]; + memcpy(frame->data, buf->data, sizeof(frame->data)); + + if (avctx->pkt) frame->pkt_pts = avctx->pkt->pts; + else frame->pkt_pts = AV_NOPTS_VALUE; + frame->reordered_opaque = avctx->reordered_opaque; + + if (avctx->debug & FF_DEBUG_BUFFERS) + av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p, " + "internal audio buffer used\n", frame); + + return 0; +} + +static int video_get_buffer(AVCodecContext *s, AVFrame *pic) +{ int i; int w= s->width; int h= s->height; @@ -362,6 +462,7 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){ pic->data[i]= buf->data[i]; pic->linesize[i]= buf->linesize[i]; } + pic->extended_data = pic->data; avci->buffer_count++; if(s->pkt) pic->pkt_pts= s->pkt->pts; @@ -375,11 +476,25 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){ return 0; } +int 
avcodec_default_get_buffer(AVCodecContext *avctx, AVFrame *frame) +{ + switch (avctx->codec_type) { + case AVMEDIA_TYPE_VIDEO: + return video_get_buffer(avctx, frame); + case AVMEDIA_TYPE_AUDIO: + return audio_get_buffer(avctx, frame); + default: + return -1; + } +} + void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic){ int i; InternalBuffer *buf, *last; AVCodecInternal *avci = s->internal; + assert(s->codec_type == AVMEDIA_TYPE_VIDEO); + assert(pic->type==FF_BUFFER_TYPE_INTERNAL); assert(avci->buffer_count); @@ -412,6 +527,8 @@ int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic){ AVFrame temp_pic; int i; + assert(s->codec_type == AVMEDIA_TYPE_VIDEO); + /* If no picture return a new buffer */ if(pic->data[0] == NULL) { /* We will copy from buffer, so must be readable */ @@ -761,11 +878,59 @@ int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *pi return ret; } +#if FF_API_OLD_DECODE_AUDIO int attribute_align_arg avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples, int *frame_size_ptr, AVPacket *avpkt) { - int ret; + AVFrame frame; + int ret, got_frame = 0; + + if (avctx->get_buffer != avcodec_default_get_buffer) { + av_log(avctx, AV_LOG_ERROR, "A custom get_buffer() cannot be used with " + "avcodec_decode_audio3()\n"); + return AVERROR(EINVAL); + } + + ret = avcodec_decode_audio4(avctx, &frame, &got_frame, avpkt); + + if (ret >= 0 && got_frame) { + int ch, plane_size; + int planar = av_sample_fmt_is_planar(avctx->sample_fmt); + int data_size = av_samples_get_buffer_size(&plane_size, avctx->channels, + frame.nb_samples, + avctx->sample_fmt, 1); + if (*frame_size_ptr < data_size) { + av_log(avctx, AV_LOG_ERROR, "output buffer size is too small for " + "the current frame (%d < %d)\n", *frame_size_ptr, data_size); + return AVERROR(EINVAL); + } + + memcpy(samples, frame.extended_data[0], plane_size); + + if (planar && avctx->channels > 1) { + uint8_t *out = ((uint8_t *)samples) + plane_size; + for (ch = 1; ch < avctx->channels; ch++) { + memcpy(out, frame.extended_data[ch], plane_size); + out += plane_size; + } + } + *frame_size_ptr = data_size; + } else { + *frame_size_ptr = 0; + } + return ret; +} +#endif + +int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx, + AVFrame *frame, + int *got_frame_ptr, + AVPacket *avpkt) +{ + int ret = 0; + + *got_frame_ptr = 0; avctx->pkt = avpkt; @@ -774,23 +939,12 @@ int attribute_align_arg avcodec_decode_audio3(AVCodecContext *avctx, int16_t *sa return AVERROR(EINVAL); } - if((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size){ - //FIXME remove the check below _after_ ensuring that all audio check that the available space is enough - if(*frame_size_ptr < AVCODEC_MAX_AUDIO_FRAME_SIZE){ - av_log(avctx, AV_LOG_ERROR, "buffer smaller than AVCODEC_MAX_AUDIO_FRAME_SIZE\n"); - return -1; - } - if(*frame_size_ptr < FF_MIN_BUFFER_SIZE || - *frame_size_ptr < avctx->channels * avctx->frame_size * sizeof(int16_t)){ - av_log(avctx, AV_LOG_ERROR, "buffer %d too small\n", *frame_size_ptr); - return -1; + if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size) { + ret = avctx->codec->decode(avctx, frame, got_frame_ptr, avpkt); + if (ret >= 0 && *got_frame_ptr) { + avctx->frame_number++; + frame->pkt_dts = avpkt->dts; } - - ret = avctx->codec->decode(avctx, samples, frame_size_ptr, avpkt); - avctx->frame_number++; - }else{ - ret= 0; - *frame_size_ptr=0; } return ret; } @@ -1115,7 +1269,8 @@ void avcodec_flush_buffers(AVCodecContext *avctx) avctx->codec->flush(avctx); } 
-void avcodec_default_free_buffers(AVCodecContext *s){ +static void video_free_buffers(AVCodecContext *s) +{ AVCodecInternal *avci = s->internal; int i, j; @@ -1137,6 +1292,37 @@ void avcodec_default_free_buffers(AVCodecContext *s){ avci->buffer_count=0; } +static void audio_free_buffers(AVCodecContext *avctx) +{ + AVCodecInternal *avci = avctx->internal; + InternalBuffer *buf; + + if (!avci->buffer) + return; + buf = avci->buffer; + + if (buf->extended_data) { + av_free(buf->extended_data[0]); + if (buf->extended_data != buf->data) + av_free(buf->extended_data); + } + av_freep(&avci->buffer); +} + +void avcodec_default_free_buffers(AVCodecContext *avctx) +{ + switch (avctx->codec_type) { + case AVMEDIA_TYPE_VIDEO: + video_free_buffers(avctx); + break; + case AVMEDIA_TYPE_AUDIO: + audio_free_buffers(avctx); + break; + default: + break; + } +} + #if FF_API_OLD_FF_PICT_TYPES char av_get_pict_type_char(int pict_type){ return av_get_picture_type_char(pict_type); diff --git a/libavcodec/version.h b/libavcodec/version.h index 7262c81544..6faf793ea1 100644 --- a/libavcodec/version.h +++ b/libavcodec/version.h @@ -21,7 +21,7 @@ #define AVCODEC_VERSION_H #define LIBAVCODEC_VERSION_MAJOR 53 -#define LIBAVCODEC_VERSION_MINOR 24 +#define LIBAVCODEC_VERSION_MINOR 25 #define LIBAVCODEC_VERSION_MICRO 0 #define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \ @@ -113,5 +113,8 @@ #ifndef FF_API_DATA_POINTERS #define FF_API_DATA_POINTERS (LIBAVCODEC_VERSION_MAJOR < 54) #endif +#ifndef FF_API_OLD_DECODE_AUDIO +#define FF_API_OLD_DECODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 54) +#endif #endif /* AVCODEC_VERSION_H */ diff --git a/libavcodec/vmdav.c b/libavcodec/vmdav.c index 772f98c70f..89b5c2bc6a 100644 --- a/libavcodec/vmdav.c +++ b/libavcodec/vmdav.c @@ -473,6 +473,7 @@ static av_cold int vmdvideo_decode_end(AVCodecContext *avctx) #define BLOCK_TYPE_SILENCE 3 typedef struct VmdAudioContext { + AVFrame frame; int out_bps; int chunk_size; } VmdAudioContext; @@ -514,6 +515,9 @@ static av_cold int vmdaudio_decode_init(AVCodecContext *avctx) s->chunk_size = avctx->block_align + avctx->channels * (s->out_bps == 2); + avcodec_get_frame_defaults(&s->frame); + avctx->coded_frame = &s->frame; + av_log(avctx, AV_LOG_DEBUG, "%d channels, %d bits/sample, " "block align = %d, sample rate = %d\n", avctx->channels, avctx->bits_per_coded_sample, avctx->block_align, @@ -551,22 +555,21 @@ static void decode_audio_s16(int16_t *out, const uint8_t *buf, int buf_size, } } -static int vmdaudio_decode_frame(AVCodecContext *avctx, - void *data, int *data_size, - AVPacket *avpkt) +static int vmdaudio_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; const uint8_t *buf_end; int buf_size = avpkt->size; VmdAudioContext *s = avctx->priv_data; int block_type, silent_chunks, audio_chunks; - int nb_samples, out_size; - uint8_t *output_samples_u8 = data; - int16_t *output_samples_s16 = data; + int ret; + uint8_t *output_samples_u8; + int16_t *output_samples_s16; if (buf_size < 16) { av_log(avctx, AV_LOG_WARNING, "skipping small junk packet\n"); - *data_size = 0; + *got_frame_ptr = 0; return buf_size; } @@ -597,10 +600,15 @@ static int vmdaudio_decode_frame(AVCodecContext *avctx, /* ensure output buffer is large enough */ audio_chunks = buf_size / s->chunk_size; - nb_samples = ((silent_chunks + audio_chunks) * avctx->block_align) / avctx->channels; - out_size = nb_samples * avctx->channels * s->out_bps; - if (*data_size < out_size) - return -1; + + /* get 
output buffer */ + s->frame.nb_samples = ((silent_chunks + audio_chunks) * avctx->block_align) / avctx->channels; + if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; + } + output_samples_u8 = s->frame.data[0]; + output_samples_s16 = (int16_t *)s->frame.data[0]; /* decode silent chunks */ if (silent_chunks > 0) { @@ -630,7 +638,9 @@ static int vmdaudio_decode_frame(AVCodecContext *avctx, } } - *data_size = out_size; + *got_frame_ptr = 1; + *(AVFrame *)data = s->frame; + return avpkt->size; } @@ -658,5 +668,6 @@ AVCodec ff_vmdaudio_decoder = { .priv_data_size = sizeof(VmdAudioContext), .init = vmdaudio_decode_init, .decode = vmdaudio_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Sierra VMD audio"), }; diff --git a/libavcodec/vorbisdec.c b/libavcodec/vorbisdec.c index b202249e9b..381b61d060 100644 --- a/libavcodec/vorbisdec.c +++ b/libavcodec/vorbisdec.c @@ -121,6 +121,7 @@ typedef struct { typedef struct vorbis_context_s { AVCodecContext *avccontext; + AVFrame frame; GetBitContext gb; DSPContext dsp; FmtConvertContext fmt_conv; @@ -1033,6 +1034,9 @@ static av_cold int vorbis_decode_init(AVCodecContext *avccontext) avccontext->sample_rate = vc->audio_samplerate; avccontext->frame_size = FFMIN(vc->blocksize[0], vc->blocksize[1]) >> 2; + avcodec_get_frame_defaults(&vc->frame); + avccontext->coded_frame = &vc->frame; + return 0; } @@ -1605,16 +1609,15 @@ static int vorbis_parse_audio_packet(vorbis_context *vc) // Return the decoded audio packet through the standard api -static int vorbis_decode_frame(AVCodecContext *avccontext, - void *data, int *data_size, - AVPacket *avpkt) +static int vorbis_decode_frame(AVCodecContext *avccontext, void *data, + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; vorbis_context *vc = avccontext->priv_data; GetBitContext *gb = &(vc->gb); const float *channel_ptrs[255]; - int i, len, out_size; + int i, len, ret; av_dlog(NULL, "packet length %d \n", buf_size); @@ -1625,18 +1628,18 @@ static int vorbis_decode_frame(AVCodecContext *avccontext, if (!vc->first_frame) { vc->first_frame = 1; - *data_size = 0; + *got_frame_ptr = 0; return buf_size; } av_dlog(NULL, "parsed %d bytes %d bits, returned %d samples (*ch*bits) \n", get_bits_count(gb) / 8, get_bits_count(gb) % 8, len); - out_size = len * vc->audio_channels * - av_get_bytes_per_sample(avccontext->sample_fmt); - if (*data_size < out_size) { - av_log(avccontext, AV_LOG_ERROR, "output buffer is too small\n"); - return AVERROR(EINVAL); + /* get output buffer */ + vc->frame.nb_samples = len; + if ((ret = avccontext->get_buffer(avccontext, &vc->frame)) < 0) { + av_log(avccontext, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } if (vc->audio_channels > 8) { @@ -1649,12 +1652,15 @@ static int vorbis_decode_frame(AVCodecContext *avccontext, } if (avccontext->sample_fmt == AV_SAMPLE_FMT_FLT) - vc->fmt_conv.float_interleave(data, channel_ptrs, len, vc->audio_channels); + vc->fmt_conv.float_interleave((float *)vc->frame.data[0], channel_ptrs, + len, vc->audio_channels); else - vc->fmt_conv.float_to_int16_interleave(data, channel_ptrs, len, + vc->fmt_conv.float_to_int16_interleave((int16_t *)vc->frame.data[0], + channel_ptrs, len, vc->audio_channels); - *data_size = out_size; + *got_frame_ptr = 1; + *(AVFrame *)data = vc->frame; return buf_size; } @@ -1678,6 +1684,7 @@ AVCodec ff_vorbis_decoder = { .init = vorbis_decode_init, .close = vorbis_decode_close, 
.decode = vorbis_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Vorbis"), .channel_layouts = ff_vorbis_channel_layouts, .sample_fmts = (const enum AVSampleFormat[]) { diff --git a/libavcodec/wavpack.c b/libavcodec/wavpack.c index ec46fb166a..e4b7ebe43b 100644 --- a/libavcodec/wavpack.c +++ b/libavcodec/wavpack.c @@ -115,8 +115,6 @@ typedef struct WavpackFrameContext { int float_shift; int float_max_exp; WvChannel ch[2]; - int samples_left; - int max_samples; int pos; SavedContext sc, extra_sc; } WavpackFrameContext; @@ -125,6 +123,7 @@ typedef struct WavpackFrameContext { typedef struct WavpackContext { AVCodecContext *avctx; + AVFrame frame; WavpackFrameContext *fdec[WV_MAX_FRAME_DECODERS]; int fdec_num; @@ -133,7 +132,6 @@ typedef struct WavpackContext { int mkv_mode; int block; int samples; - int samples_left; int ch_offset; } WavpackContext; @@ -485,7 +483,6 @@ static float wv_get_value_float(WavpackFrameContext *s, uint32_t *crc, int S) static void wv_reset_saved_context(WavpackFrameContext *s) { s->pos = 0; - s->samples_left = 0; s->sc.crc = s->extra_sc.crc = 0xFFFFFFFF; } @@ -502,8 +499,7 @@ static inline int wv_unpack_stereo(WavpackFrameContext *s, GetBitContext *gb, vo float *dstfl = dst; const int channel_pad = s->avctx->channels - 2; - if(s->samples_left == s->samples) - s->one = s->zero = s->zeroes = 0; + s->one = s->zero = s->zeroes = 0; do{ L = wv_get_value(s, gb, 0, &last); if(last) break; @@ -594,13 +590,8 @@ static inline int wv_unpack_stereo(WavpackFrameContext *s, GetBitContext *gb, vo dst16 += channel_pad; } count++; - }while(!last && count < s->max_samples); + } while (!last && count < s->samples); - if (last) - s->samples_left = 0; - else - s->samples_left -= count; - if(!s->samples_left){ wv_reset_saved_context(s); if(crc != s->CRC){ av_log(s->avctx, AV_LOG_ERROR, "CRC error\n"); @@ -610,15 +601,7 @@ static inline int wv_unpack_stereo(WavpackFrameContext *s, GetBitContext *gb, vo av_log(s->avctx, AV_LOG_ERROR, "Extra bits CRC error\n"); return -1; } - }else{ - s->pos = pos; - s->sc.crc = crc; - s->sc.bits_used = get_bits_count(&s->gb); - if(s->got_extra_bits){ - s->extra_sc.crc = crc_extra_bits; - s->extra_sc.bits_used = get_bits_count(&s->gb_extra_bits); - } - } + return count * 2; } @@ -635,8 +618,7 @@ static inline int wv_unpack_mono(WavpackFrameContext *s, GetBitContext *gb, void float *dstfl = dst; const int channel_stride = s->avctx->channels; - if(s->samples_left == s->samples) - s->one = s->zero = s->zeroes = 0; + s->one = s->zero = s->zeroes = 0; do{ T = wv_get_value(s, gb, 0, &last); S = 0; @@ -675,13 +657,8 @@ static inline int wv_unpack_mono(WavpackFrameContext *s, GetBitContext *gb, void dst16 += channel_stride; } count++; - }while(!last && count < s->max_samples); + } while (!last && count < s->samples); - if (last) - s->samples_left = 0; - else - s->samples_left -= count; - if(!s->samples_left){ wv_reset_saved_context(s); if(crc != s->CRC){ av_log(s->avctx, AV_LOG_ERROR, "CRC error\n"); @@ -691,15 +668,7 @@ static inline int wv_unpack_mono(WavpackFrameContext *s, GetBitContext *gb, void av_log(s->avctx, AV_LOG_ERROR, "Extra bits CRC error\n"); return -1; } - }else{ - s->pos = pos; - s->sc.crc = crc; - s->sc.bits_used = get_bits_count(&s->gb); - if(s->got_extra_bits){ - s->extra_sc.crc = crc_extra_bits; - s->extra_sc.bits_used = get_bits_count(&s->gb_extra_bits); - } - } + return count; } @@ -743,6 +712,9 @@ static av_cold int wavpack_decode_init(AVCodecContext *avctx) s->fdec_num = 0; + 
avcodec_get_frame_defaults(&s->frame); + avctx->coded_frame = &s->frame; + return 0; } @@ -759,7 +731,7 @@ static av_cold int wavpack_decode_end(AVCodecContext *avctx) } static int wavpack_decode_block(AVCodecContext *avctx, int block_no, - void *data, int *data_size, + void *data, int *got_frame_ptr, const uint8_t *buf, int buf_size) { WavpackContext *wc = avctx->priv_data; @@ -774,7 +746,7 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no, int bpp, chan, chmask; if (buf_size == 0){ - *data_size = 0; + *got_frame_ptr = 0; return 0; } @@ -789,18 +761,16 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no, return -1; } - if(!s->samples_left){ memset(s->decorr, 0, MAX_TERMS * sizeof(Decorr)); memset(s->ch, 0, sizeof(s->ch)); s->extra_bits = 0; s->and = s->or = s->shift = 0; s->got_extra_bits = 0; - } if(!wc->mkv_mode){ s->samples = AV_RL32(buf); buf += 4; if(!s->samples){ - *data_size = 0; + *got_frame_ptr = 0; return 0; } }else{ @@ -829,13 +799,6 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no, wc->ch_offset += 1 + s->stereo; - s->max_samples = *data_size / (bpp * avctx->channels); - s->max_samples = FFMIN(s->max_samples, s->samples); - if(s->samples_left > 0){ - s->max_samples = FFMIN(s->max_samples, s->samples_left); - buf = buf_end; - } - // parse metadata blocks while(buf < buf_end){ id = *buf++; @@ -1064,7 +1027,7 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no, } if(id & WP_IDF_ODD) buf++; } - if(!s->samples_left){ + if(!got_terms){ av_log(avctx, AV_LOG_ERROR, "No block with decorrelation terms\n"); return -1; @@ -1101,16 +1064,6 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no, s->got_extra_bits = 0; } } - s->samples_left = s->samples; - }else{ - init_get_bits(&s->gb, orig_buf + s->sc.offset, s->sc.size); - skip_bits_long(&s->gb, s->sc.bits_used); - if(s->got_extra_bits){ - init_get_bits(&s->gb_extra_bits, orig_buf + s->extra_sc.offset, - s->extra_sc.size); - skip_bits_long(&s->gb_extra_bits, s->extra_sc.bits_used); - } - } if(s->stereo_in){ if(avctx->sample_fmt == AV_SAMPLE_FMT_S16) @@ -1167,7 +1120,7 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no, } } - wc->samples_left = s->samples_left; + *got_frame_ptr = 1; return samplecount * bpp; } @@ -1181,23 +1134,40 @@ static void wavpack_decode_flush(AVCodecContext *avctx) wv_reset_saved_context(s->fdec[i]); } -static int wavpack_decode_frame(AVCodecContext *avctx, - void *data, int *data_size, - AVPacket *avpkt) +static int wavpack_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { WavpackContext *s = avctx->priv_data; const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; - int frame_size; + int frame_size, ret; int samplecount = 0; s->block = 0; - s->samples_left = 0; s->ch_offset = 0; + /* determine number of samples */ if(s->mkv_mode){ s->samples = AV_RL32(buf); buf += 4; + } else { + if (s->multichannel) + s->samples = AV_RL32(buf + 4); + else + s->samples = AV_RL32(buf); + } + if (s->samples <= 0) { + av_log(avctx, AV_LOG_ERROR, "Invalid number of samples: %d\n", + s->samples); + return AVERROR(EINVAL); + } + + /* get output buffer */ + s->frame.nb_samples = s->samples; + if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } + while(buf_size > 0){ if(!s->multichannel){ frame_size = buf_size; @@ -1216,17 +1186,19 @@ static int wavpack_decode_frame(AVCodecContext *avctx, 
wavpack_decode_flush(avctx); return -1; } - if((samplecount = wavpack_decode_block(avctx, s->block, data, - data_size, buf, frame_size)) < 0) { + if((samplecount = wavpack_decode_block(avctx, s->block, s->frame.data[0], + got_frame_ptr, buf, frame_size)) < 0) { wavpack_decode_flush(avctx); return -1; } s->block++; buf += frame_size; buf_size -= frame_size; } - *data_size = samplecount * avctx->channels; - return s->samples_left > 0 ? 0 : avpkt->size; + if (*got_frame_ptr) + *(AVFrame *)data = s->frame; + + return avpkt->size; } AVCodec ff_wavpack_decoder = { @@ -1238,6 +1210,6 @@ AVCodec ff_wavpack_decoder = { .close = wavpack_decode_end, .decode = wavpack_decode_frame, .flush = wavpack_decode_flush, - .capabilities = CODEC_CAP_SUBFRAMES, + .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("WavPack"), }; diff --git a/libavcodec/wma.h b/libavcodec/wma.h index f11d5507dc..4acbf04bbf 100644 --- a/libavcodec/wma.h +++ b/libavcodec/wma.h @@ -65,6 +65,7 @@ typedef struct CoefVLCTable { typedef struct WMACodecContext { AVCodecContext* avctx; + AVFrame frame; GetBitContext gb; PutBitContext pb; int sample_rate; diff --git a/libavcodec/wmadec.c b/libavcodec/wmadec.c index 1e3b7e32a5..5600f9ba90 100644 --- a/libavcodec/wmadec.c +++ b/libavcodec/wmadec.c @@ -124,6 +124,10 @@ static int wma_decode_init(AVCodecContext * avctx) } avctx->sample_fmt = AV_SAMPLE_FMT_S16; + + avcodec_get_frame_defaults(&s->frame); + avctx->coded_frame = &s->frame; + return 0; } @@ -797,14 +801,13 @@ static int wma_decode_frame(WMACodecContext *s, int16_t *samples) return 0; } -static int wma_decode_superframe(AVCodecContext *avctx, - void *data, int *data_size, - AVPacket *avpkt) +static int wma_decode_superframe(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; WMACodecContext *s = avctx->priv_data; - int nb_frames, bit_offset, i, pos, len, out_size; + int nb_frames, bit_offset, i, pos, len, ret; uint8_t *q; int16_t *samples; @@ -818,8 +821,6 @@ static int wma_decode_superframe(AVCodecContext *avctx, return 0; buf_size = s->block_align; - samples = data; - init_get_bits(&s->gb, buf, buf_size*8); if (s->use_bit_reservoir) { @@ -830,12 +831,13 @@ static int wma_decode_superframe(AVCodecContext *avctx, nb_frames = 1; } - out_size = nb_frames * s->frame_len * s->nb_channels * - av_get_bytes_per_sample(avctx->sample_fmt); - if (*data_size < out_size) { - av_log(s->avctx, AV_LOG_ERROR, "Insufficient output space\n"); - goto fail; + /* get output buffer */ + s->frame.nb_samples = nb_frames * s->frame_len; + if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; } + samples = (int16_t *)s->frame.data[0]; if (s->use_bit_reservoir) { bit_offset = get_bits(&s->gb, s->byte_offset_bits + 3); @@ -903,7 +905,9 @@ static int wma_decode_superframe(AVCodecContext *avctx, //av_log(NULL, AV_LOG_ERROR, "%d %d %d %d outbytes:%d eaten:%d\n", s->frame_len_bits, s->block_len_bits, s->frame_len, s->block_len, (int8_t *)samples - (int8_t *)data, s->block_align); - *data_size = out_size; + *got_frame_ptr = 1; + *(AVFrame *)data = s->frame; + return s->block_align; fail: /* when error, we reset the bit reservoir */ @@ -928,6 +932,7 @@ AVCodec ff_wmav1_decoder = { .close = ff_wma_end, .decode = wma_decode_superframe, .flush = flush, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 1"), }; @@ -940,5 +945,6 @@ AVCodec 
ff_wmav2_decoder = { .close = ff_wma_end, .decode = wma_decode_superframe, .flush = flush, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 2"), }; diff --git a/libavcodec/wmaprodec.c b/libavcodec/wmaprodec.c index aaae6e1f3a..c46a983602 100644 --- a/libavcodec/wmaprodec.c +++ b/libavcodec/wmaprodec.c @@ -167,6 +167,7 @@ typedef struct { typedef struct WMAProDecodeCtx { /* generic decoder variables */ AVCodecContext* avctx; ///< codec context for av_log + AVFrame frame; ///< AVFrame for decoded output DSPContext dsp; ///< accelerated DSP functions FmtConvertContext fmt_conv; uint8_t frame_data[MAX_FRAMESIZE + @@ -209,8 +210,6 @@ typedef struct WMAProDecodeCtx { uint32_t frame_num; ///< current frame number (not used for decoding) GetBitContext gb; ///< bitstream reader context int buf_bit_size; ///< buffer size in bits - float* samples; ///< current samplebuffer pointer - float* samples_end; ///< maximum samplebuffer pointer uint8_t drc_gain; ///< gain for the DRC tool int8_t skip_frame; ///< skip output step int8_t parsed_all_subframes; ///< all subframes decoded? @@ -453,6 +452,10 @@ static av_cold int decode_init(AVCodecContext *avctx) dump_context(s); avctx->channel_layout = channel_mask; + + avcodec_get_frame_defaults(&s->frame); + avctx->coded_frame = &s->frame; + return 0; } @@ -1279,22 +1282,15 @@ static int decode_subframe(WMAProDecodeCtx *s) *@return 0 if the trailer bit indicates that this is the last frame, * 1 if there are additional frames */ -static int decode_frame(WMAProDecodeCtx *s) +static int decode_frame(WMAProDecodeCtx *s, int *got_frame_ptr) { + AVCodecContext *avctx = s->avctx; GetBitContext* gb = &s->gb; int more_frames = 0; int len = 0; - int i; + int i, ret; const float *out_ptr[WMAPRO_MAX_CHANNELS]; - - /** check for potential output buffer overflow */ - if (s->num_channels * s->samples_per_frame > s->samples_end - s->samples) { - /** return an error if no frame could be decoded at all */ - av_log(s->avctx, AV_LOG_ERROR, - "not enough space for the output samples\n"); - s->packet_loss = 1; - return 0; - } + float *samples; /** get frame length */ if (s->len_prefix) @@ -1360,10 +1356,19 @@ static int decode_frame(WMAProDecodeCtx *s) } } + /* get output buffer */ + s->frame.nb_samples = s->samples_per_frame; + if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + s->packet_loss = 1; + return 0; + } + samples = (float *)s->frame.data[0]; + /** interleave samples and write them to the output buffer */ for (i = 0; i < s->num_channels; i++) out_ptr[i] = s->channel[i].out; - s->fmt_conv.float_interleave(s->samples, out_ptr, s->samples_per_frame, + s->fmt_conv.float_interleave(samples, out_ptr, s->samples_per_frame, s->num_channels); for (i = 0; i < s->num_channels; i++) { @@ -1375,8 +1380,10 @@ static int decode_frame(WMAProDecodeCtx *s) if (s->skip_frame) { s->skip_frame = 0; - } else - s->samples += s->num_channels * s->samples_per_frame; + *got_frame_ptr = 0; + } else { + *got_frame_ptr = 1; + } if (s->len_prefix) { if (len != (get_bits_count(gb) - s->frame_offset) + 2) { @@ -1473,8 +1480,8 @@ static void save_bits(WMAProDecodeCtx *s, GetBitContext* gb, int len, *@param avpkt input packet *@return number of bytes that were read from the input buffer */ -static int decode_packet(AVCodecContext *avctx, - void *data, int *data_size, AVPacket* avpkt) +static int decode_packet(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket* avpkt) { WMAProDecodeCtx *s = 
avctx->priv_data; GetBitContext* gb = &s->pgb; @@ -1483,9 +1490,7 @@ static int decode_packet(AVCodecContext *avctx, int num_bits_prev_frame; int packet_sequence_number; - s->samples = data; - s->samples_end = (float*)((int8_t*)data + *data_size); - *data_size = 0; + *got_frame_ptr = 0; if (s->packet_done || s->packet_loss) { s->packet_done = 0; @@ -1532,7 +1537,7 @@ static int decode_packet(AVCodecContext *avctx, /** decode the cross packet frame if it is valid */ if (!s->packet_loss) - decode_frame(s); + decode_frame(s, got_frame_ptr); } else if (s->num_saved_bits - s->frame_offset) { av_dlog(avctx, "ignoring %x previously saved bits\n", s->num_saved_bits - s->frame_offset); @@ -1555,7 +1560,7 @@ static int decode_packet(AVCodecContext *avctx, (frame_size = show_bits(gb, s->log2_frame_size)) && frame_size <= remaining_bits(s, gb)) { save_bits(s, gb, frame_size, 0); - s->packet_done = !decode_frame(s); + s->packet_done = !decode_frame(s, got_frame_ptr); } else if (!s->len_prefix && s->num_saved_bits > get_bits_count(&s->gb)) { /** when the frames do not have a length prefix, we don't know @@ -1565,7 +1570,7 @@ static int decode_packet(AVCodecContext *avctx, therefore we save the incoming packet first, then we append the "previous frame" data from the next packet so that we get a buffer that only contains full frames */ - s->packet_done = !decode_frame(s); + s->packet_done = !decode_frame(s, got_frame_ptr); } else s->packet_done = 1; } @@ -1577,10 +1582,14 @@ static int decode_packet(AVCodecContext *avctx, save_bits(s, gb, remaining_bits(s, gb), 0); } - *data_size = (int8_t *)s->samples - (int8_t *)data; s->packet_offset = get_bits_count(gb) & 7; + if (s->packet_loss) + return AVERROR_INVALIDDATA; + + if (*got_frame_ptr) + *(AVFrame *)data = s->frame; - return (s->packet_loss) ? AVERROR_INVALIDDATA : get_bits_count(gb) >> 3; + return get_bits_count(gb) >> 3; } /** @@ -1611,7 +1620,7 @@ AVCodec ff_wmapro_decoder = { .init = decode_init, .close = decode_end, .decode = decode_packet, - .capabilities = CODEC_CAP_SUBFRAMES, + .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1, .flush= flush, .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 9 Professional"), }; diff --git a/libavcodec/wmavoice.c b/libavcodec/wmavoice.c index d6d4cb2963..6f3a6b2372 100644 --- a/libavcodec/wmavoice.c +++ b/libavcodec/wmavoice.c @@ -131,6 +131,7 @@ typedef struct { * @name Global values specified in the stream header / extradata or used all over. * @{ */ + AVFrame frame; GetBitContext gb; ///< packet bitreader. During decoder init, ///< it contains the extradata from the ///< demuxer. During decoding, it contains @@ -438,6 +439,9 @@ static av_cold int wmavoice_decode_init(AVCodecContext *ctx) ctx->sample_fmt = AV_SAMPLE_FMT_FLT; + avcodec_get_frame_defaults(&s->frame); + ctx->coded_frame = &s->frame; + return 0; } @@ -1725,17 +1729,17 @@ static int check_bits_for_superframe(GetBitContext *orig_gb, * @return 0 on success, <0 on error or 1 if there was not enough data to * fully parse the superframe */ -static int synth_superframe(AVCodecContext *ctx, - float *samples, int *data_size) +static int synth_superframe(AVCodecContext *ctx, int *got_frame_ptr) { WMAVoiceContext *s = ctx->priv_data; GetBitContext *gb = &s->gb, s_gb; - int n, res, out_size, n_samples = 480; + int n, res, n_samples = 480; double lsps[MAX_FRAMES][MAX_LSPS]; const double *mean_lsf = s->lsps == 16 ? 
wmavoice_mean_lsf16[s->lsp_def_mode] : wmavoice_mean_lsf10[s->lsp_def_mode]; float excitation[MAX_SIGNAL_HISTORY + MAX_SFRAMESIZE + 12]; float synth[MAX_LSPS + MAX_SFRAMESIZE]; + float *samples; memcpy(synth, s->synth_history, s->lsps * sizeof(*synth)); @@ -1749,7 +1753,7 @@ static int synth_superframe(AVCodecContext *ctx, } if ((res = check_bits_for_superframe(gb, s)) == 1) { - *data_size = 0; + *got_frame_ptr = 0; return 1; } @@ -1792,13 +1796,14 @@ static int synth_superframe(AVCodecContext *ctx, stabilize_lsps(lsps[n], s->lsps); } - out_size = n_samples * av_get_bytes_per_sample(ctx->sample_fmt); - if (*data_size < out_size) { - av_log(ctx, AV_LOG_ERROR, - "Output buffer too small (%d given - %d needed)\n", - *data_size, out_size); - return -1; + /* get output buffer */ + s->frame.nb_samples = 480; + if ((res = ctx->get_buffer(ctx, &s->frame)) < 0) { + av_log(ctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return res; } + s->frame.nb_samples = n_samples; + samples = (float *)s->frame.data[0]; /* Parse frames, optionally preceeded by per-frame (independent) LSPs. */ for (n = 0; n < 3; n++) { @@ -1820,7 +1825,7 @@ static int synth_superframe(AVCodecContext *ctx, lsps[n], n == 0 ? s->prev_lsps : lsps[n - 1], &excitation[s->history_nsamples + n * MAX_FRAMESIZE], &synth[s->lsps + n * MAX_FRAMESIZE]))) { - *data_size = 0; + *got_frame_ptr = 0; return res; } } @@ -1833,8 +1838,7 @@ static int synth_superframe(AVCodecContext *ctx, skip_bits(gb, 10 * (res + 1)); } - /* Specify nr. of output samples */ - *data_size = out_size; + *got_frame_ptr = 1; /* Update history */ memcpy(s->prev_lsps, lsps[2], @@ -1922,7 +1926,7 @@ static void copy_bits(PutBitContext *pb, * For more information about frames, see #synth_superframe(). */ static int wmavoice_decode_packet(AVCodecContext *ctx, void *data, - int *data_size, AVPacket *avpkt) + int *got_frame_ptr, AVPacket *avpkt) { WMAVoiceContext *s = ctx->priv_data; GetBitContext *gb = &s->gb; @@ -1935,7 +1939,7 @@ static int wmavoice_decode_packet(AVCodecContext *ctx, void *data, * capping the packet size at ctx->block_align. */ for (size = avpkt->size; size > ctx->block_align; size -= ctx->block_align); if (!size) { - *data_size = 0; + *got_frame_ptr = 0; return 0; } init_get_bits(&s->gb, avpkt->data, size << 3); @@ -1956,10 +1960,11 @@ static int wmavoice_decode_packet(AVCodecContext *ctx, void *data, copy_bits(&s->pb, avpkt->data, size, gb, s->spillover_nbits); flush_put_bits(&s->pb); s->sframe_cache_size += s->spillover_nbits; - if ((res = synth_superframe(ctx, data, data_size)) == 0 && - *data_size > 0) { + if ((res = synth_superframe(ctx, got_frame_ptr)) == 0 && + *got_frame_ptr) { cnt += s->spillover_nbits; s->skip_bits_next = cnt & 7; + *(AVFrame *)data = s->frame; return cnt >> 3; } else skip_bits_long (gb, s->spillover_nbits - cnt + @@ -1974,11 +1979,12 @@ static int wmavoice_decode_packet(AVCodecContext *ctx, void *data, s->sframe_cache_size = 0; s->skip_bits_next = 0; pos = get_bits_left(gb); - if ((res = synth_superframe(ctx, data, data_size)) < 0) { + if ((res = synth_superframe(ctx, got_frame_ptr)) < 0) { return res; - } else if (*data_size > 0) { + } else if (*got_frame_ptr) { int cnt = get_bits_count(gb); s->skip_bits_next = cnt & 7; + *(AVFrame *)data = s->frame; return cnt >> 3; } else if ((s->sframe_cache_size = pos) > 0) { /* rewind bit reader to start of last (incomplete) superframe... 
*/ @@ -2046,7 +2052,7 @@ AVCodec ff_wmavoice_decoder = { .init = wmavoice_decode_init, .close = wmavoice_decode_end, .decode = wmavoice_decode_packet, - .capabilities = CODEC_CAP_SUBFRAMES, + .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1, .flush = wmavoice_flush, .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio Voice"), }; diff --git a/libavcodec/ws-snd1.c b/libavcodec/ws-snd1.c index dfbe4acab5..b2d086e073 100644 --- a/libavcodec/ws-snd1.c +++ b/libavcodec/ws-snd1.c @@ -37,26 +37,37 @@ static const int8_t ws_adpcm_4bit[] = { 0, 1, 2, 3, 4, 5, 6, 8 }; +typedef struct WSSndContext { + AVFrame frame; +} WSSndContext; + static av_cold int ws_snd_decode_init(AVCodecContext *avctx) { + WSSndContext *s = avctx->priv_data; + if (avctx->channels != 1) { av_log_ask_for_sample(avctx, "unsupported number of channels\n"); return AVERROR(EINVAL); } avctx->sample_fmt = AV_SAMPLE_FMT_U8; + + avcodec_get_frame_defaults(&s->frame); + avctx->coded_frame = &s->frame; + return 0; } static int ws_snd_decode_frame(AVCodecContext *avctx, void *data, - int *data_size, AVPacket *avpkt) + int *got_frame_ptr, AVPacket *avpkt) { + WSSndContext *s = avctx->priv_data; const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; - int in_size, out_size; + int in_size, out_size, ret; int sample = 128; - uint8_t *samples = data; + uint8_t *samples; uint8_t *samples_end; if (!buf_size) @@ -71,19 +82,24 @@ static int ws_snd_decode_frame(AVCodecContext *avctx, void *data, in_size = AV_RL16(&buf[2]); buf += 4; - if (out_size > *data_size) { - av_log(avctx, AV_LOG_ERROR, "Frame is too large to fit in buffer\n"); - return -1; - } if (in_size > buf_size) { av_log(avctx, AV_LOG_ERROR, "Frame data is larger than input buffer\n"); return -1; } + + /* get output buffer */ + s->frame.nb_samples = out_size; + if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); + return ret; + } + samples = s->frame.data[0]; samples_end = samples + out_size; if (in_size == out_size) { memcpy(samples, buf, out_size); - *data_size = out_size; + *got_frame_ptr = 1; + *(AVFrame *)data = s->frame; return buf_size; } @@ -159,7 +175,9 @@ static int ws_snd_decode_frame(AVCodecContext *avctx, void *data, } } - *data_size = samples - (uint8_t *)data; + s->frame.nb_samples = samples - s->frame.data[0]; + *got_frame_ptr = 1; + *(AVFrame *)data = s->frame; return buf_size; } @@ -168,7 +186,9 @@ AVCodec ff_ws_snd1_decoder = { .name = "ws_snd1", .type = AVMEDIA_TYPE_AUDIO, .id = CODEC_ID_WESTWOOD_SND1, + .priv_data_size = sizeof(WSSndContext), .init = ws_snd_decode_init, .decode = ws_snd_decode_frame, + .capabilities = CODEC_CAP_DR1, .long_name = NULL_IF_CONFIG_SMALL("Westwood Audio (SND1)"), }; -- cgit v1.2.3 From fd095539d10bb440042e671f95306b16b0fec674 Mon Sep 17 00:00:00 2001 From: Janne Grunau Date: Fri, 21 Oct 2011 17:25:30 +0200 Subject: latmdec: fix audio specific config parsing Pass the correct size in bits to mpeg4audio_get_config and add a flag to disable parsing of the sync extension when the size is not known. Latm with AudioMuxVersion 0 does not specify the size of the audio specific config. Data after the audio specific config can be misinterpreted as sync extension resulting in random and wrong configs. 
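For reference, the call convention established by the new interface, condensed from the call sites changed below (an illustrative sketch, not part of the patch; cfg, gb, config_start_bit and the error handling are assumed context):

    MPEG4AudioConfig cfg;

    /* Extradata-based callers: the config size is known, so pass it in bits
     * and allow scanning for a trailing sync extension. */
    if (avpriv_mpeg4audio_get_config(&cfg, avctx->extradata,
                                     avctx->extradata_size * 8, 1) < 0)
        return AVERROR_INVALIDDATA;

    /* LATM with AudioMuxVersion 0: the config size is not signalled, so pass
     * the remaining bitstream length and disable the sync-extension scan to
     * avoid misreading whatever data follows the config. */
    if (avpriv_mpeg4audio_get_config(&cfg, gb->buffer + (config_start_bit / 8),
                                     get_bits_left(gb), 0) < 0)
        return AVERROR_INVALIDDATA;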
--- libavcodec/aacdec.c | 35 ++++++++++++++++++++++------------- libavcodec/alsdec.c | 2 +- libavcodec/mpeg4audio.c | 7 ++++--- libavcodec/mpeg4audio.h | 7 +++++-- libavcodec/mpegaudiodec.c | 3 ++- libavformat/adtsenc.c | 2 +- libavformat/flvdec.c | 2 +- libavformat/isom.c | 2 +- libavformat/latmenc.c | 2 +- libavformat/matroskaenc.c | 3 ++- 10 files changed, 40 insertions(+), 25 deletions(-) diff --git a/libavcodec/aacdec.c b/libavcodec/aacdec.c index 672ba1c648..a4fe2ee2cc 100644 --- a/libavcodec/aacdec.c +++ b/libavcodec/aacdec.c @@ -452,15 +452,17 @@ static int decode_ga_specific_config(AACContext *ac, AVCodecContext *avctx, * @param ac pointer to AACContext, may be null * @param avctx pointer to AVCCodecContext, used for logging * @param m4ac pointer to MPEG4AudioConfig, used for parsing - * @param data pointer to AVCodecContext extradata - * @param data_size size of AVCCodecContext extradata + * @param data pointer to buffer holding an audio specific config + * @param bit_size size of audio specific config or data in bits + * @param sync_extension look for an appended sync extension * * @return Returns error status or number of consumed bits. <0 - error */ static int decode_audio_specific_config(AACContext *ac, AVCodecContext *avctx, MPEG4AudioConfig *m4ac, - const uint8_t *data, int data_size) + const uint8_t *data, int bit_size, + int sync_extension) { GetBitContext gb; int i; @@ -470,9 +472,9 @@ static int decode_audio_specific_config(AACContext *ac, av_dlog(avctx, "%02x ", avctx->extradata[i]); av_dlog(avctx, "\n"); - init_get_bits(&gb, data, data_size * 8); + init_get_bits(&gb, data, bit_size); - if ((i = avpriv_mpeg4audio_get_config(m4ac, data, data_size)) < 0) + if ((i = avpriv_mpeg4audio_get_config(m4ac, data, bit_size, sync_extension)) < 0) return -1; if (m4ac->sampling_index > 12) { av_log(avctx, AV_LOG_ERROR, "invalid sampling rate index %d\n", m4ac->sampling_index); @@ -572,7 +574,7 @@ static av_cold int aac_decode_init(AVCodecContext *avctx) if (avctx->extradata_size > 0) { if (decode_audio_specific_config(ac, ac->avctx, &ac->m4ac, avctx->extradata, - avctx->extradata_size) < 0) + avctx->extradata_size*8, 1) < 0) return -1; } else { int sr, i; @@ -2315,12 +2317,19 @@ static inline uint32_t latm_get_value(GetBitContext *b) } static int latm_decode_audio_specific_config(struct LATMContext *latmctx, - GetBitContext *gb) + GetBitContext *gb, int asclen) { AVCodecContext *avctx = latmctx->aac_ctx.avctx; MPEG4AudioConfig m4ac; - int config_start_bit = get_bits_count(gb); - int bits_consumed, esize; + int config_start_bit = get_bits_count(gb); + int sync_extension = 0; + int bits_consumed, esize; + + if (asclen) { + sync_extension = 1; + asclen = FFMIN(asclen, get_bits_left(gb)); + } else + asclen = get_bits_left(gb); if (config_start_bit % 8) { av_log_missing_feature(latmctx->aac_ctx.avctx, "audio specific " @@ -2330,7 +2339,7 @@ static int latm_decode_audio_specific_config(struct LATMContext *latmctx, bits_consumed = decode_audio_specific_config(NULL, avctx, &m4ac, gb->buffer + (config_start_bit / 8), - get_bits_left(gb) / 8); + asclen, sync_extension); if (bits_consumed < 0) return AVERROR_INVALIDDATA; @@ -2388,11 +2397,11 @@ static int read_stream_mux_config(struct LATMContext *latmctx, // for all but first stream: use_same_config = get_bits(gb, 1); if (!audio_mux_version) { - if ((ret = latm_decode_audio_specific_config(latmctx, gb)) < 0) + if ((ret = latm_decode_audio_specific_config(latmctx, gb, 0)) < 0) return ret; } else { int ascLen = latm_get_value(gb); - if ((ret = 
latm_decode_audio_specific_config(latmctx, gb)) < 0) + if ((ret = latm_decode_audio_specific_config(latmctx, gb, ascLen)) < 0) return ret; ascLen -= ret; skip_bits_long(gb, ascLen); @@ -2514,7 +2523,7 @@ static int latm_decode_frame(AVCodecContext *avctx, void *out, } else { if ((err = decode_audio_specific_config( &latmctx->aac_ctx, avctx, &latmctx->aac_ctx.m4ac, - avctx->extradata, avctx->extradata_size)) < 0) + avctx->extradata, avctx->extradata_size*8, 1)) < 0) return err; latmctx->initialized = 1; } diff --git a/libavcodec/alsdec.c b/libavcodec/alsdec.c index 71495803a3..dc4961c9ba 100644 --- a/libavcodec/alsdec.c +++ b/libavcodec/alsdec.c @@ -291,7 +291,7 @@ static av_cold int read_specific_config(ALSDecContext *ctx) init_get_bits(&gb, avctx->extradata, avctx->extradata_size * 8); config_offset = avpriv_mpeg4audio_get_config(&m4ac, avctx->extradata, - avctx->extradata_size); + avctx->extradata_size * 8, 1); if (config_offset < 0) return -1; diff --git a/libavcodec/mpeg4audio.c b/libavcodec/mpeg4audio.c index f9e866f405..0fb9b96c80 100644 --- a/libavcodec/mpeg4audio.c +++ b/libavcodec/mpeg4audio.c @@ -76,12 +76,13 @@ static inline int get_sample_rate(GetBitContext *gb, int *index) avpriv_mpeg4audio_sample_rates[*index]; } -int avpriv_mpeg4audio_get_config(MPEG4AudioConfig *c, const uint8_t *buf, int buf_size) +int avpriv_mpeg4audio_get_config(MPEG4AudioConfig *c, const uint8_t *buf, + int bit_size, int sync_extension) { GetBitContext gb; int specific_config_bitindex; - init_get_bits(&gb, buf, buf_size*8); + init_get_bits(&gb, buf, bit_size); c->object_type = get_object_type(&gb); c->sample_rate = get_sample_rate(&gb, &c->sampling_index); c->chan_config = get_bits(&gb, 4); @@ -117,7 +118,7 @@ int avpriv_mpeg4audio_get_config(MPEG4AudioConfig *c, const uint8_t *buf, int bu return -1; } - if (c->ext_object_type != AOT_SBR) { + if (c->ext_object_type != AOT_SBR && sync_extension) { while (get_bits_left(&gb) > 15) { if (show_bits(&gb, 11) == 0x2b7) { // sync extension get_bits(&gb, 11); diff --git a/libavcodec/mpeg4audio.h b/libavcodec/mpeg4audio.h index d6730b97b3..7560f3f4e4 100644 --- a/libavcodec/mpeg4audio.h +++ b/libavcodec/mpeg4audio.h @@ -42,14 +42,17 @@ typedef struct { extern const int avpriv_mpeg4audio_sample_rates[16]; extern const uint8_t ff_mpeg4audio_channels[8]; + /** * Parse MPEG-4 systems extradata to retrieve audio configuration. * @param[in] c MPEG4AudioConfig structure to fill. * @param[in] buf Extradata from container. - * @param[in] buf_size Extradata size. + * @param[in] bit_size Extradata size in bits. + * @param[in] sync_extension look for a sync extension after config if true. * @return On error -1 is returned, on success AudioSpecificConfig bit index in extradata. 
*/ -int avpriv_mpeg4audio_get_config(MPEG4AudioConfig *c, const uint8_t *buf, int buf_size); +int avpriv_mpeg4audio_get_config(MPEG4AudioConfig *c, const uint8_t *buf, + int bit_size, int sync_extension); enum AudioObjectType { AOT_NULL, diff --git a/libavcodec/mpegaudiodec.c b/libavcodec/mpegaudiodec.c index c819bc546f..eeef470a57 100644 --- a/libavcodec/mpegaudiodec.c +++ b/libavcodec/mpegaudiodec.c @@ -1966,7 +1966,8 @@ static int decode_init_mp3on4(AVCodecContext * avctx) return AVERROR_INVALIDDATA; } - avpriv_mpeg4audio_get_config(&cfg, avctx->extradata, avctx->extradata_size); + avpriv_mpeg4audio_get_config(&cfg, avctx->extradata, + avctx->extradata_size * 8, 1); if (!cfg.chan_config || cfg.chan_config > 7) { av_log(avctx, AV_LOG_ERROR, "Invalid channel config number.\n"); return AVERROR_INVALIDDATA; diff --git a/libavformat/adtsenc.c b/libavformat/adtsenc.c index 55fece5dd6..ef3d8e2c21 100644 --- a/libavformat/adtsenc.c +++ b/libavformat/adtsenc.c @@ -37,7 +37,7 @@ int ff_adts_decode_extradata(AVFormatContext *s, ADTSContext *adts, uint8_t *buf int off; init_get_bits(&gb, buf, size * 8); - off = avpriv_mpeg4audio_get_config(&m4ac, buf, size); + off = avpriv_mpeg4audio_get_config(&m4ac, buf, size * 8, 1); if (off < 0) return off; skip_bits_long(&gb, off); diff --git a/libavformat/flvdec.c b/libavformat/flvdec.c index 5d19dd8413..51a8126904 100644 --- a/libavformat/flvdec.c +++ b/libavformat/flvdec.c @@ -534,7 +534,7 @@ static int flv_read_packet(AVFormatContext *s, AVPacket *pkt) if (st->codec->codec_id == CODEC_ID_AAC) { MPEG4AudioConfig cfg; avpriv_mpeg4audio_get_config(&cfg, st->codec->extradata, - st->codec->extradata_size); + st->codec->extradata_size * 8, 1); st->codec->channels = cfg.channels; if (cfg.ext_sample_rate) st->codec->sample_rate = cfg.ext_sample_rate; diff --git a/libavformat/isom.c b/libavformat/isom.c index b0eef375c6..7b9f91b58a 100644 --- a/libavformat/isom.c +++ b/libavformat/isom.c @@ -436,7 +436,7 @@ int ff_mp4_read_dec_config_descr(AVFormatContext *fc, AVStream *st, AVIOContext if (st->codec->codec_id == CODEC_ID_AAC) { MPEG4AudioConfig cfg; avpriv_mpeg4audio_get_config(&cfg, st->codec->extradata, - st->codec->extradata_size); + st->codec->extradata_size * 8, 1); st->codec->channels = cfg.channels; if (cfg.object_type == 29 && cfg.sampling_index < 3) // old mp3on4 st->codec->sample_rate = avpriv_mpa_freq_tab[cfg.sampling_index]; diff --git a/libavformat/latmenc.c b/libavformat/latmenc.c index 679f2cc9c6..423710ddea 100644 --- a/libavformat/latmenc.c +++ b/libavformat/latmenc.c @@ -54,7 +54,7 @@ static int latm_decode_extradata(LATMContext *ctx, uint8_t *buf, int size) MPEG4AudioConfig m4ac; init_get_bits(&gb, buf, size * 8); - ctx->off = avpriv_mpeg4audio_get_config(&m4ac, buf, size); + ctx->off = avpriv_mpeg4audio_get_config(&m4ac, buf, size * 8, 1); if (ctx->off < 0) return ctx->off; skip_bits_long(&gb, ctx->off); diff --git a/libavformat/matroskaenc.c b/libavformat/matroskaenc.c index b8c73bf950..9f8d5d853b 100644 --- a/libavformat/matroskaenc.c +++ b/libavformat/matroskaenc.c @@ -448,7 +448,8 @@ static void get_aac_sample_rates(AVFormatContext *s, AVCodecContext *codec, int { MPEG4AudioConfig mp4ac; - if (avpriv_mpeg4audio_get_config(&mp4ac, codec->extradata, codec->extradata_size) < 0) { + if (avpriv_mpeg4audio_get_config(&mp4ac, codec->extradata, + codec->extradata_size * 8, 1) < 0) { av_log(s, AV_LOG_WARNING, "Error parsing AAC extradata, unable to determine samplerate.\n"); return; } -- cgit v1.2.3 From d268b79e3436107c11ee8bcdf9f3645368bb3fcd Mon 
Sep 17 00:00:00 2001 From: Janne Grunau Date: Sat, 22 Oct 2011 22:04:00 +0200 Subject: aac_latm: reconfigure decoder on audio specific config changes --- libavcodec/aacdec.c | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/libavcodec/aacdec.c b/libavcodec/aacdec.c index a4fe2ee2cc..8e4b510354 100644 --- a/libavcodec/aacdec.c +++ b/libavcodec/aacdec.c @@ -2319,8 +2319,9 @@ static inline uint32_t latm_get_value(GetBitContext *b) static int latm_decode_audio_specific_config(struct LATMContext *latmctx, GetBitContext *gb, int asclen) { - AVCodecContext *avctx = latmctx->aac_ctx.avctx; - MPEG4AudioConfig m4ac; + AACContext *ac = &latmctx->aac_ctx; + AVCodecContext *avctx = ac->avctx; + MPEG4AudioConfig m4ac = {0}; int config_start_bit = get_bits_count(gb); int sync_extension = 0; int bits_consumed, esize; @@ -2335,18 +2336,23 @@ static int latm_decode_audio_specific_config(struct LATMContext *latmctx, av_log_missing_feature(latmctx->aac_ctx.avctx, "audio specific " "config not byte aligned.\n", 1); return AVERROR_INVALIDDATA; - } else { - bits_consumed = - decode_audio_specific_config(NULL, avctx, &m4ac, + } + bits_consumed = decode_audio_specific_config(NULL, avctx, &m4ac, gb->buffer + (config_start_bit / 8), asclen, sync_extension); - if (bits_consumed < 0) - return AVERROR_INVALIDDATA; + if (bits_consumed < 0) + return AVERROR_INVALIDDATA; + + if (ac->m4ac.sample_rate != m4ac.sample_rate || + ac->m4ac.chan_config != m4ac.chan_config) { + + av_log(avctx, AV_LOG_INFO, "audio config changed\n"); + latmctx->initialized = 0; esize = (bits_consumed+7) / 8; - if (avctx->extradata_size <= esize) { + if (avctx->extradata_size < esize) { av_free(avctx->extradata); avctx->extradata = av_malloc(esize + FF_INPUT_BUFFER_PADDING_SIZE); if (!avctx->extradata) @@ -2356,9 +2362,8 @@ static int latm_decode_audio_specific_config(struct LATMContext *latmctx, avctx->extradata_size = esize; memcpy(avctx->extradata, gb->buffer + (config_start_bit/8), esize); memset(avctx->extradata+esize, 0, FF_INPUT_BUFFER_PADDING_SIZE); - - skip_bits_long(gb, bits_consumed); } + skip_bits_long(gb, bits_consumed); return bits_consumed; } -- cgit v1.2.3
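Condensed for readability, the reconfiguration path this patch adds to latm_decode_audio_specific_config() amounts to the following (identifiers as in the hunk above; the comments are editorial, surrounding declarations are omitted, and the out-of-memory return is reconstructed from context):

    /* m4ac was zero-initialized and filled by decode_audio_specific_config();
     * compare it against the configuration the AAC context is running with. */
    if (ac->m4ac.sample_rate != m4ac.sample_rate ||
        ac->m4ac.chan_config != m4ac.chan_config) {
        av_log(avctx, AV_LOG_INFO, "audio config changed\n");
        latmctx->initialized = 0;            /* force re-init from the new config */

        /* keep the new AudioSpecificConfig as padded extradata */
        esize = (bits_consumed + 7) / 8;
        if (avctx->extradata_size < esize) {
            av_free(avctx->extradata);
            avctx->extradata = av_malloc(esize + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!avctx->extradata)
                return AVERROR(ENOMEM);
        }
        avctx->extradata_size = esize;
        memcpy(avctx->extradata, gb->buffer + (config_start_bit / 8), esize);
        memset(avctx->extradata + esize, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    }
    skip_bits_long(gb, bits_consumed);       /* now skipped unconditionally */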