From 14b35bf065a3df56cccd116a27cae6a772239005 Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Sat, 9 Nov 2013 10:14:46 +0100 Subject: huffyuv: use the AVFrame API properly. --- libavcodec/huffyuv.h | 1 - libavcodec/huffyuvenc.c | 15 +++++++++------ 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/libavcodec/huffyuv.h b/libavcodec/huffyuv.h index c464d833c1..9c875d5310 100644 --- a/libavcodec/huffyuv.h +++ b/libavcodec/huffyuv.h @@ -78,7 +78,6 @@ typedef struct HYuvContext { uint32_t bits[3][256]; uint32_t pix_bgr_map[1<stats_out = av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132 s->version = 2; - avctx->coded_frame = &s->picture; + avctx->coded_frame = av_frame_alloc(); + if (!avctx->coded_frame) + return AVERROR(ENOMEM); + + avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I; + avctx->coded_frame->key_frame = 1; switch (avctx->pix_fmt) { case AV_PIX_FMT_YUV420P: @@ -438,7 +443,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, const int fake_ystride = s->interlaced ? pict->linesize[0]*2 : pict->linesize[0]; const int fake_ustride = s->interlaced ? pict->linesize[1]*2 : pict->linesize[1]; const int fake_vstride = s->interlaced ? pict->linesize[2]*2 : pict->linesize[2]; - AVFrame * const p = &s->picture; + const AVFrame * const p = pict; int i, j, size = 0, ret; if (!pkt->data && @@ -447,10 +452,6 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, return ret; } - *p = *pict; - p->pict_type = AV_PICTURE_TYPE_I; - p->key_frame = 1; - if (s->context) { for (i = 0; i < 3; i++) { ff_huff_gen_len_table(s->len[i], s->stats[i]); @@ -676,6 +677,8 @@ static av_cold int encode_end(AVCodecContext *avctx) av_freep(&avctx->extradata); av_freep(&avctx->stats_out); + av_frame_free(&avctx->coded_frame); + return 0; } -- cgit v1.2.3 From 706a92926ccae390e3f74520a60b4d5790e8ddc4 Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Sat, 9 Nov 2013 10:14:46 +0100 Subject: jpegls: use the AVFrame API properly. 
--- libavcodec/jpegls.h | 1 - libavcodec/jpeglsenc.c | 27 +++++++++++++++------------ 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/libavcodec/jpegls.h b/libavcodec/jpegls.h index 4bf95626f9..eae3943c30 100644 --- a/libavcodec/jpegls.h +++ b/libavcodec/jpegls.h @@ -33,7 +33,6 @@ typedef struct JpeglsContext { AVCodecContext *avctx; - AVFrame picture; } JpeglsContext; typedef struct JLSState { diff --git a/libavcodec/jpeglsenc.c b/libavcodec/jpeglsenc.c index 9f1fc1f618..3af6412669 100644 --- a/libavcodec/jpeglsenc.c +++ b/libavcodec/jpeglsenc.c @@ -248,8 +248,7 @@ static void ls_store_lse(JLSState *state, PutBitContext *pb) static int encode_picture_ls(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pict, int *got_packet) { - JpeglsContext *const s = avctx->priv_data; - AVFrame *const p = &s->picture; + const AVFrame *const p = pict; const int near = avctx->prediction_method; PutBitContext pb, pb2; GetBitContext gb; @@ -258,10 +257,6 @@ static int encode_picture_ls(AVCodecContext *avctx, AVPacket *pkt, int i, size, ret; int comps; - *p = *pict; - p->pict_type = AV_PICTURE_TYPE_I; - p->key_frame = 1; - if (avctx->pix_fmt == AV_PIX_FMT_GRAY8 || avctx->pix_fmt == AV_PIX_FMT_GRAY16) comps = 1; @@ -346,7 +341,7 @@ static int encode_picture_ls(AVCodecContext *avctx, AVPacket *pkt, Rc[j] = last[j]; } last = cur; - cur += s->picture.linesize[0]; + cur += p->linesize[0]; } } else if (avctx->pix_fmt == AV_PIX_FMT_BGR24) { int j, width; @@ -360,7 +355,7 @@ static int encode_picture_ls(AVCodecContext *avctx, AVPacket *pkt, Rc[j] = last[j]; } last = cur; - cur += s->picture.linesize[0]; + cur += p->linesize[0]; } } @@ -400,12 +395,20 @@ static int encode_picture_ls(AVCodecContext *avctx, AVPacket *pkt, return 0; } +static av_cold int encode_close(AVCodecContext *avctx) +{ + av_frame_free(&avctx->coded_frame); + return 0; +} + static av_cold int encode_init_ls(AVCodecContext *ctx) { - JpeglsContext *c = (JpeglsContext *)ctx->priv_data; + ctx->coded_frame = 
av_frame_alloc(); + if (!ctx->coded_frame) + return AVERROR(ENOMEM); - c->avctx = ctx; - ctx->coded_frame = &c->picture; + ctx->coded_frame->pict_type = AV_PICTURE_TYPE_I; + ctx->coded_frame->key_frame = 1; if (ctx->pix_fmt != AV_PIX_FMT_GRAY8 && ctx->pix_fmt != AV_PIX_FMT_GRAY16 && @@ -423,8 +426,8 @@ AVCodec ff_jpegls_encoder = { .long_name = NULL_IF_CONFIG_SMALL("JPEG-LS"), .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_JPEGLS, - .priv_data_size = sizeof(JpeglsContext), .init = encode_init_ls, + .close = encode_close, .encode2 = encode_picture_ls, .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_BGR24, AV_PIX_FMT_RGB24, -- cgit v1.2.3 From a6064b12b481072abe0df53e5996cf103994526f Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Sat, 9 Nov 2013 10:14:46 +0100 Subject: ffv1: use the AVFrame API properly. --- libavcodec/ffv1.c | 4 ---- libavcodec/ffv1.h | 3 ++- libavcodec/ffv1dec.c | 29 ++++++++++++++++++++++------- libavcodec/ffv1enc.c | 35 +++++++++++++++++++++++------------ 4 files changed, 47 insertions(+), 24 deletions(-) diff --git a/libavcodec/ffv1.c b/libavcodec/ffv1.c index a31f2786ee..9e7ba2ef19 100644 --- a/libavcodec/ffv1.c +++ b/libavcodec/ffv1.c @@ -141,8 +141,6 @@ av_cold int ffv1_common_init(AVCodecContext *avctx) if (!avctx->width || !avctx->height) return AVERROR_INVALIDDATA; - avcodec_get_frame_defaults(&s->picture); - ff_dsputil_init(&s->dsp, avctx); s->width = avctx->width; @@ -271,8 +269,6 @@ av_cold int ffv1_close(AVCodecContext *avctx) FFV1Context *s = avctx->priv_data; int i, j; - av_frame_unref(&s->last_picture); - for (j = 0; j < s->slice_count; j++) { FFV1Context *fs = s->slice_context[j]; for (i = 0; i < s->plane_count; i++) { diff --git a/libavcodec/ffv1.h b/libavcodec/ffv1.h index 43c96079ae..40fc3935ff 100644 --- a/libavcodec/ffv1.h +++ b/libavcodec/ffv1.h @@ -79,7 +79,8 @@ typedef struct FFV1Context { int transparency; int flags; int picture_number; - AVFrame picture, last_picture; + AVFrame *frame; + AVFrame *last_picture; 
AVFrame *cur; int plane_count; diff --git a/libavcodec/ffv1dec.c b/libavcodec/ffv1dec.c index f0a409f0a8..5edc667db4 100644 --- a/libavcodec/ffv1dec.c +++ b/libavcodec/ffv1dec.c @@ -784,6 +784,10 @@ static av_cold int ffv1_decode_init(AVCodecContext *avctx) ffv1_common_init(avctx); + f->last_picture = av_frame_alloc(); + if (!f->last_picture) + return AVERROR(ENOMEM); + if (avctx->extradata && (ret = read_extra_header(f)) < 0) return ret; @@ -876,7 +880,7 @@ static int ffv1_decode_frame(AVCodecContext *avctx, void *data, for (i = f->slice_count - 1; i >= 0; i--) { FFV1Context *fs = f->slice_context[i]; int j; - if (fs->slice_damaged && f->last_picture.data[0]) { + if (fs->slice_damaged && f->last_picture->data[0]) { const uint8_t *src[4]; uint8_t *dst[4]; for (j = 0; j < 4; j++) { @@ -884,12 +888,12 @@ static int ffv1_decode_frame(AVCodecContext *avctx, void *data, int sv = (j == 1 || j == 2) ? f->chroma_v_shift : 0; dst[j] = p->data[j] + p->linesize[j] * (fs->slice_y >> sv) + (fs->slice_x >> sh); - src[j] = f->last_picture.data[j] + - f->last_picture.linesize[j] * + src[j] = f->last_picture->data[j] + + f->last_picture->linesize[j] * (fs->slice_y >> sv) + (fs->slice_x >> sh); } av_image_copy(dst, p->linesize, (const uint8_t **)src, - f->last_picture.linesize, + f->last_picture->linesize, avctx->pix_fmt, fs->slice_width, fs->slice_height); } @@ -897,8 +901,8 @@ static int ffv1_decode_frame(AVCodecContext *avctx, void *data, f->picture_number++; - av_frame_unref(&f->last_picture); - if ((ret = av_frame_ref(&f->last_picture, p)) < 0) + av_frame_unref(f->last_picture); + if ((ret = av_frame_ref(f->last_picture, p)) < 0) return ret; f->cur = NULL; @@ -907,6 +911,17 @@ static int ffv1_decode_frame(AVCodecContext *avctx, void *data, return buf_size; } +static av_cold int ffv1_decode_close(AVCodecContext *avctx) +{ + FFV1Context *s = avctx->priv_data; + + av_frame_free(&s->last_picture); + + ffv1_close(avctx); + + return 0; +} + AVCodec ff_ffv1_decoder = { .name = 
"ffv1", .long_name = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"), @@ -914,7 +929,7 @@ AVCodec ff_ffv1_decoder = { .id = AV_CODEC_ID_FFV1, .priv_data_size = sizeof(FFV1Context), .init = ffv1_decode_init, - .close = ffv1_close, + .close = ffv1_decode_close, .decode = ffv1_decode_frame, .capabilities = CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/ | CODEC_CAP_SLICE_THREADS, diff --git a/libavcodec/ffv1enc.c b/libavcodec/ffv1enc.c index 1aec37bcef..179453db5e 100644 --- a/libavcodec/ffv1enc.c +++ b/libavcodec/ffv1enc.c @@ -721,7 +721,12 @@ static av_cold int ffv1_encode_init(AVCodecContext *avctx) if ((ret = ffv1_allocate_initial_states(s)) < 0) return ret; - avctx->coded_frame = &s->picture; + avctx->coded_frame = av_frame_alloc(); + if (!avctx->coded_frame) + return AVERROR(ENOMEM); + + avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I; + if (!s->transparency) s->plane_count = 2; @@ -858,12 +863,12 @@ static void encode_slice_header(FFV1Context *f, FFV1Context *fs) put_symbol(c, state, f->plane[j].quant_table_index, 0); av_assert0(f->plane[j].quant_table_index == f->avctx->context_model); } - if (!f->picture.interlaced_frame) + if (!f->avctx->coded_frame->interlaced_frame) put_symbol(c, state, 3, 0); else - put_symbol(c, state, 1 + !f->picture.top_field_first, 0); - put_symbol(c, state, f->picture.sample_aspect_ratio.num, 0); - put_symbol(c, state, f->picture.sample_aspect_ratio.den, 0); + put_symbol(c, state, 1 + !f->avctx->coded_frame->top_field_first, 0); + put_symbol(c, state, f->avctx->coded_frame->sample_aspect_ratio.num, 0); + put_symbol(c, state, f->avctx->coded_frame->sample_aspect_ratio.den, 0); } static int encode_slice(AVCodecContext *c, void *arg) @@ -874,12 +879,12 @@ static int encode_slice(AVCodecContext *c, void *arg) int height = fs->slice_height; int x = fs->slice_x; int y = fs->slice_y; - AVFrame *const p = &f->picture; + const AVFrame *const p = f->frame; const int ps = (av_pix_fmt_desc_get(c->pix_fmt)->flags & AV_PIX_FMT_FLAG_PLANAR) ? 
(f->bits_per_raw_sample > 8) + 1 : 4; - if (p->key_frame) + if (c->coded_frame->key_frame) ffv1_clear_slice_state(f, fs); if (f->version > 2) { encode_slice_header(f, fs); @@ -926,12 +931,14 @@ static int ffv1_encode_frame(AVCodecContext *avctx, AVPacket *pkt, { FFV1Context *f = avctx->priv_data; RangeCoder *const c = &f->slice_context[0]->c; - AVFrame *const p = &f->picture; + AVFrame *const p = avctx->coded_frame; int used_count = 0; uint8_t keystate = 128; uint8_t *buf_p; int i, ret; + f->frame = pict; + if ((ret = ff_alloc_packet(pkt, avctx->width * avctx->height * ((8 * 2 + 1 + 1) * 4) / 8 + FF_MIN_BUFFER_SIZE)) < 0) { @@ -942,9 +949,6 @@ static int ffv1_encode_frame(AVCodecContext *avctx, AVPacket *pkt, ff_init_range_encoder(c, pkt->data, pkt->size); ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8); - *p = *pict; - p->pict_type = AV_PICTURE_TYPE_I; - if (avctx->gop_size == 0 || f->picture_number % avctx->gop_size == 0) { put_rac(c, &keystate, 1); p->key_frame = 1; @@ -1054,6 +1058,13 @@ static int ffv1_encode_frame(AVCodecContext *avctx, AVPacket *pkt, return 0; } +static av_cold int ffv1_encode_close(AVCodecContext *avctx) +{ + av_frame_free(&avctx->coded_frame); + ffv1_close(avctx); + return 0; +} + #define OFFSET(x) offsetof(FFV1Context, x) #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM static const AVOption options[] = { @@ -1082,7 +1093,7 @@ AVCodec ff_ffv1_encoder = { .priv_data_size = sizeof(FFV1Context), .init = ffv1_encode_init, .encode2 = ffv1_encode_frame, - .close = ffv1_close, + .close = ffv1_encode_close, .capabilities = CODEC_CAP_SLICE_THREADS, .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, -- cgit v1.2.3 From 1c01b0253ebfe05f907c3a723101fe77f6dd2336 Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Sat, 9 Nov 2013 10:14:46 +0100 Subject: mpegvideo_enc: use the AVFrame API properly. 
--- libavcodec/mpegvideo.h | 3 +++ libavcodec/mpegvideo_enc.c | 55 +++++++++++++++++++++++++--------------------- 2 files changed, 33 insertions(+), 25 deletions(-) diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h index 79da9fbec4..1492d9fddc 100644 --- a/libavcodec/mpegvideo.h +++ b/libavcodec/mpegvideo.h @@ -754,6 +754,9 @@ typedef struct MpegEncContext { ERContext er; int error_rate; + + /* temporary frames used by b_frame_strategy = 2 */ + AVFrame *tmp_frames[MAX_B_FRAMES + 2]; } MpegEncContext; #define REBASE_PICTURE(pic, new_ctx, old_ctx) \ diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c index b7dda070c1..74a3468e48 100644 --- a/libavcodec/mpegvideo_enc.c +++ b/libavcodec/mpegvideo_enc.c @@ -221,7 +221,7 @@ static void MPV_encode_defaults(MpegEncContext *s) av_cold int ff_MPV_encode_init(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; - int i; + int i, ret; int chroma_h_shift, chroma_v_shift; MPV_encode_defaults(s); @@ -780,12 +780,29 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx) FF_ENABLE_DEPRECATION_WARNINGS; #endif + if (avctx->b_frame_strategy == 2) { + for (i = 0; i < s->max_b_frames + 2; i++) { + s->tmp_frames[i] = av_frame_alloc(); + if (!s->tmp_frames[i]) + return AVERROR(ENOMEM); + + s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P; + s->tmp_frames[i]->width = s->width >> avctx->brd_scale; + s->tmp_frames[i]->height = s->height >> avctx->brd_scale; + + ret = av_frame_get_buffer(s->tmp_frames[i], 32); + if (ret < 0) + return ret; + } + } + return 0; } av_cold int ff_MPV_encode_end(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; + int i; ff_rate_control_uninit(s); @@ -796,6 +813,9 @@ av_cold int ff_MPV_encode_end(AVCodecContext *avctx) av_freep(&avctx->extradata); + for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++) + av_frame_free(&s->tmp_frames[i]); + return 0; } @@ -1025,7 +1045,6 @@ static int estimate_best_b_count(MpegEncContext *s) { AVCodec *codec = 
avcodec_find_encoder(s->avctx->codec_id); AVCodecContext *c = avcodec_alloc_context3(NULL); - AVFrame input[MAX_B_FRAMES + 2]; const int scale = s->avctx->brd_scale; int i, j, out_size, p_lambda, b_lambda, lambda2; int64_t best_rd = INT64_MAX; @@ -1060,19 +1079,9 @@ static int estimate_best_b_count(MpegEncContext *s) return -1; for (i = 0; i < s->max_b_frames + 2; i++) { - int ysize = c->width * c->height; - int csize = (c->width / 2) * (c->height / 2); Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] : s->next_picture_ptr; - avcodec_get_frame_defaults(&input[i]); - input[i].data[0] = av_malloc(ysize + 2 * csize); - input[i].data[1] = input[i].data[0] + ysize; - input[i].data[2] = input[i].data[1] + csize; - input[i].linesize[0] = c->width; - input[i].linesize[1] = - input[i].linesize[2] = c->width / 2; - if (pre_input_ptr && (!i || s->input_picture[i - 1])) { pre_input = *pre_input_ptr; @@ -1082,13 +1091,13 @@ static int estimate_best_b_count(MpegEncContext *s) pre_input.f.data[2] += INPLACE_OFFSET; } - s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0], + s->dsp.shrink[scale](s->tmp_frames[i]->data[0], s->tmp_frames[i]->linesize[0], pre_input.f.data[0], pre_input.f.linesize[0], c->width, c->height); - s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1], + s->dsp.shrink[scale](s->tmp_frames[i]->data[1], s->tmp_frames[i]->linesize[1], pre_input.f.data[1], pre_input.f.linesize[1], c->width >> 1, c->height >> 1); - s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2], + s->dsp.shrink[scale](s->tmp_frames[i]->data[2], s->tmp_frames[i]->linesize[2], pre_input.f.data[2], pre_input.f.linesize[2], c->width >> 1, c->height >> 1); } @@ -1102,21 +1111,21 @@ static int estimate_best_b_count(MpegEncContext *s) c->error[0] = c->error[1] = c->error[2] = 0; - input[0].pict_type = AV_PICTURE_TYPE_I; - input[0].quality = 1 * FF_QP2LAMBDA; + s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I; + s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA; - 
out_size = encode_frame(c, &input[0]); + out_size = encode_frame(c, s->tmp_frames[0]); //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT; for (i = 0; i < s->max_b_frames + 1; i++) { int is_p = i % (j + 1) == j || i == s->max_b_frames; - input[i + 1].pict_type = is_p ? + s->tmp_frames[i + 1]->pict_type = is_p ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B; - input[i + 1].quality = is_p ? p_lambda : b_lambda; + s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda; - out_size = encode_frame(c, &input[i + 1]); + out_size = encode_frame(c, s->tmp_frames[i + 1]); rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3); } @@ -1138,10 +1147,6 @@ static int estimate_best_b_count(MpegEncContext *s) avcodec_close(c); av_freep(&c); - for (i = 0; i < s->max_b_frames + 2; i++) { - av_freep(&input[i].data[0]); - } - return best_b_count; } -- cgit v1.2.3