summaryrefslogtreecommitdiff
path: root/libavcodec
diff options
context:
space:
mode:
authorMichael Niedermayer <michaelni@gmx.at>2013-11-17 12:17:05 +0100
committerMichael Niedermayer <michaelni@gmx.at>2013-11-17 12:19:55 +0100
commit5b0c70c2499e20529d517b712910d6f4f72e9485 (patch)
tree620207093db182a6b54129a2a166b2f721685378 /libavcodec
parent3ea168edeb7a20eae1fccf7da66ac7b8c8c791ba (diff)
parent57e7b3a89f5a0879ad039e8f04273b48649799a8 (diff)
Merge commit '57e7b3a89f5a0879ad039e8f04273b48649799a8'
* commit '57e7b3a89f5a0879ad039e8f04273b48649799a8':
  dnxhdenc: use the AVFrame API properly.
  libx264: use the AVFrame API properly.
  svq1enc: use the AVFrame API properly.
  gif: use the AVFrame API properly.

Conflicts:
  libavcodec/gif.c
  libavcodec/svq1enc.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Diffstat (limited to 'libavcodec')
-rw-r--r--libavcodec/dnxhdenc.c28
-rw-r--r--libavcodec/dnxhdenc.h1
-rw-r--r--libavcodec/gif.c17
-rw-r--r--libavcodec/libx264.c15
-rw-r--r--libavcodec/svq1enc.c111
5 files changed, 92 insertions, 80 deletions
diff --git a/libavcodec/dnxhdenc.c b/libavcodec/dnxhdenc.c
index 5cf6d8b0f3..c2fbd5c999 100644
--- a/libavcodec/dnxhdenc.c
+++ b/libavcodec/dnxhdenc.c
@@ -329,9 +329,12 @@ static av_cold int dnxhd_encode_init(AVCodecContext *avctx)
FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_bits, ctx->m.mb_num *sizeof(uint16_t), fail);
FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_qscale, ctx->m.mb_num *sizeof(uint8_t), fail);
- ctx->frame.key_frame = 1;
- ctx->frame.pict_type = AV_PICTURE_TYPE_I;
- ctx->m.avctx->coded_frame = &ctx->frame;
+ avctx->coded_frame = av_frame_alloc();
+ if (!avctx->coded_frame)
+ return AVERROR(ENOMEM);
+
+ avctx->coded_frame->key_frame = 1;
+ avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
if (avctx->thread_count > MAX_THREADS) {
av_log(avctx, AV_LOG_ERROR, "too many threads\n");
@@ -922,19 +925,14 @@ static void dnxhd_load_picture(DNXHDEncContext *ctx, const AVFrame *frame)
{
int i;
- for (i = 0; i < 3; i++) {
- ctx->frame.data[i] = frame->data[i];
- ctx->frame.linesize[i] = frame->linesize[i];
- }
-
for (i = 0; i < ctx->m.avctx->thread_count; i++) {
- ctx->thread[i]->m.linesize = ctx->frame.linesize[0]<<ctx->interlaced;
- ctx->thread[i]->m.uvlinesize = ctx->frame.linesize[1]<<ctx->interlaced;
+ ctx->thread[i]->m.linesize = frame->linesize[0] << ctx->interlaced;
+ ctx->thread[i]->m.uvlinesize = frame->linesize[1] << ctx->interlaced;
ctx->thread[i]->dct_y_offset = ctx->m.linesize *8;
ctx->thread[i]->dct_uv_offset = ctx->m.uvlinesize*8;
}
- ctx->frame.interlaced_frame = frame->interlaced_frame;
+ ctx->m.avctx->coded_frame->interlaced_frame = frame->interlaced_frame;
ctx->cur_field = frame->interlaced_frame && !frame->top_field_first;
}
@@ -954,9 +952,9 @@ static int dnxhd_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
encode_coding_unit:
for (i = 0; i < 3; i++) {
- ctx->src[i] = ctx->frame.data[i];
+ ctx->src[i] = frame->data[i];
if (ctx->interlaced && ctx->cur_field)
- ctx->src[i] += ctx->frame.linesize[i];
+ ctx->src[i] += frame->linesize[i];
}
dnxhd_write_header(avctx, buf);
@@ -994,7 +992,7 @@ static int dnxhd_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
goto encode_coding_unit;
}
- ctx->frame.quality = ctx->qscale*FF_QP2LAMBDA;
+ avctx->coded_frame->quality = ctx->qscale * FF_QP2LAMBDA;
pkt->flags |= AV_PKT_FLAG_KEY;
*got_packet = 1;
@@ -1027,6 +1025,8 @@ static av_cold int dnxhd_encode_end(AVCodecContext *avctx)
for (i = 1; i < avctx->thread_count; i++)
av_freep(&ctx->thread[i]);
+ av_frame_free(&avctx->coded_frame);
+
return 0;
}
diff --git a/libavcodec/dnxhdenc.h b/libavcodec/dnxhdenc.h
index 9b59b96d3e..110b0ad9e2 100644
--- a/libavcodec/dnxhdenc.h
+++ b/libavcodec/dnxhdenc.h
@@ -43,7 +43,6 @@ typedef struct DNXHDEncContext {
AVClass *class;
MpegEncContext m; ///< Used for quantization dsp functions
- AVFrame frame;
int cid;
const CIDEntry *cid_table;
uint8_t *msip; ///< Macroblock Scan Indexes Payload
diff --git a/libavcodec/gif.c b/libavcodec/gif.c
index 8b9d95fe09..27d054e512 100644
--- a/libavcodec/gif.c
+++ b/libavcodec/gif.c
@@ -216,6 +216,13 @@ static av_cold int gif_encode_init(AVCodecContext *avctx)
return AVERROR(EINVAL);
}
+ avctx->coded_frame = av_frame_alloc();
+ if (!avctx->coded_frame)
+ return AVERROR(ENOMEM);
+
+ avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
+ avctx->coded_frame->key_frame = 1;
+
s->lzw = av_mallocz(ff_lzw_encode_state_size);
s->buf = av_malloc(avctx->width*avctx->height*2);
s->tmpl = av_malloc(avctx->width);
@@ -232,7 +239,6 @@ static int gif_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pict, int *got_packet)
{
GIFContext *s = avctx->priv_data;
- AVFrame *const p = (AVFrame *)pict;
uint8_t *outbuf_ptr, *end;
const uint32_t *palette = NULL;
int ret;
@@ -242,15 +248,12 @@ static int gif_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
outbuf_ptr = pkt->data;
end = pkt->data + pkt->size;
- p->pict_type = AV_PICTURE_TYPE_I;
- p->key_frame = 1;
-
if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
uint8_t *pal_exdata = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE);
if (!pal_exdata)
return AVERROR(ENOMEM);
- memcpy(pal_exdata, p->data[1], AVPALETTE_SIZE);
- palette = (uint32_t*)p->data[1];
+ memcpy(pal_exdata, pict->data[1], AVPALETTE_SIZE);
+ palette = (uint32_t*)pict->data[1];
}
gif_image_write_image(avctx, &outbuf_ptr, end, palette,
@@ -276,6 +279,8 @@ static int gif_encode_close(AVCodecContext *avctx)
{
GIFContext *s = avctx->priv_data;
+ av_frame_free(&avctx->coded_frame);
+
av_freep(&s->lzw);
av_freep(&s->buf);
av_frame_free(&s->last_frame);
diff --git a/libavcodec/libx264.c b/libavcodec/libx264.c
index 4093510c66..89df55fac6 100644
--- a/libavcodec/libx264.c
+++ b/libavcodec/libx264.c
@@ -44,7 +44,6 @@ typedef struct X264Context {
x264_picture_t pic;
uint8_t *sei;
int sei_size;
- AVFrame out_pic;
char *preset;
char *tune;
char *profile;
@@ -208,20 +207,20 @@ static int X264_frame(AVCodecContext *ctx, AVPacket *pkt, const AVFrame *frame,
switch (pic_out.i_type) {
case X264_TYPE_IDR:
case X264_TYPE_I:
- x4->out_pic.pict_type = AV_PICTURE_TYPE_I;
+ ctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
break;
case X264_TYPE_P:
- x4->out_pic.pict_type = AV_PICTURE_TYPE_P;
+ ctx->coded_frame->pict_type = AV_PICTURE_TYPE_P;
break;
case X264_TYPE_B:
case X264_TYPE_BREF:
- x4->out_pic.pict_type = AV_PICTURE_TYPE_B;
+ ctx->coded_frame->pict_type = AV_PICTURE_TYPE_B;
break;
}
pkt->flags |= AV_PKT_FLAG_KEY*pic_out.b_keyframe;
if (ret)
- x4->out_pic.quality = (pic_out.i_qpplus1 - 1) * FF_QP2LAMBDA;
+ ctx->coded_frame->quality = (pic_out.i_qpplus1 - 1) * FF_QP2LAMBDA;
*got_packet = ret;
return 0;
@@ -237,6 +236,8 @@ static av_cold int X264_close(AVCodecContext *avctx)
if (x4->enc)
x264_encoder_close(x4->enc);
+ av_frame_free(&avctx->coded_frame);
+
return 0;
}
@@ -570,7 +571,9 @@ static av_cold int X264_init(AVCodecContext *avctx)
if (!x4->enc)
return -1;
- avctx->coded_frame = &x4->out_pic;
+ avctx->coded_frame = av_frame_alloc();
+ if (!avctx->coded_frame)
+ return AVERROR(ENOMEM);
if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
x264_nal_t *nal;
diff --git a/libavcodec/svq1enc.c b/libavcodec/svq1enc.c
index a6c94ad006..2d7e848fc7 100644
--- a/libavcodec/svq1enc.c
+++ b/libavcodec/svq1enc.c
@@ -45,9 +45,8 @@ typedef struct SVQ1Context {
AVCodecContext *avctx;
DSPContext dsp;
HpelDSPContext hdsp;
- AVFrame picture;
- AVFrame current_picture;
- AVFrame last_picture;
+ AVFrame *current_picture;
+ AVFrame *last_picture;
PutBitContext pb;
GetBitContext gb;
@@ -264,13 +263,14 @@ static int svq1_encode_plane(SVQ1Context *s, int plane,
unsigned char *decoded_plane,
int width, int height, int src_stride, int stride)
{
+ const AVFrame *f = s->avctx->coded_frame;
int x, y;
int i;
int block_width, block_height;
int level;
int threshold[6];
uint8_t *src = s->scratchbuf + stride * 16;
- const int lambda = (s->picture.quality * s->picture.quality) >>
+ const int lambda = (f->quality * f->quality) >>
(2 * FF_LAMBDA_SHIFT);
/* figure out the acceptable level thresholds in advance */
@@ -281,7 +281,7 @@ static int svq1_encode_plane(SVQ1Context *s, int plane,
block_width = (width + 15) / 16;
block_height = (height + 15) / 16;
- if (s->picture.pict_type == AV_PICTURE_TYPE_P) {
+ if (f->pict_type == AV_PICTURE_TYPE_P) {
s->m.avctx = s->avctx;
s->m.current_picture_ptr = &s->m.current_picture;
s->m.last_picture_ptr = &s->m.last_picture;
@@ -297,13 +297,13 @@ static int svq1_encode_plane(SVQ1Context *s, int plane,
s->m.mb_stride = s->m.mb_width + 1;
s->m.b8_stride = 2 * s->m.mb_width + 1;
s->m.f_code = 1;
- s->m.pict_type = s->picture.pict_type;
+ s->m.pict_type = f->pict_type;
s->m.me_method = s->avctx->me_method;
s->m.me.scene_change_score = 0;
s->m.flags = s->avctx->flags;
// s->m.out_format = FMT_H263;
// s->m.unrestricted_mv = 1;
- s->m.lambda = s->picture.quality;
+ s->m.lambda = f->quality;
s->m.qscale = s->m.lambda * 139 +
FF_LAMBDA_SCALE * 64 >>
FF_LAMBDA_SHIFT + 7;
@@ -396,13 +396,13 @@ static int svq1_encode_plane(SVQ1Context *s, int plane,
ff_init_block_index(&s->m);
ff_update_block_index(&s->m);
- if (s->picture.pict_type == AV_PICTURE_TYPE_I ||
+ if (f->pict_type == AV_PICTURE_TYPE_I ||
(s->m.mb_type[x + y * s->m.mb_stride] &
CANDIDATE_MB_TYPE_INTRA)) {
for (i = 0; i < 6; i++)
init_put_bits(&s->reorder_pb[i], reorder_buffer[0][i],
7 * 32);
- if (s->picture.pict_type == AV_PICTURE_TYPE_P) {
+ if (f->pict_type == AV_PICTURE_TYPE_P) {
const uint8_t *vlc = ff_svq1_block_type_vlc[SVQ1_BLOCK_INTRA];
put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);
score[0] = vlc[1] * lambda;
@@ -418,7 +418,7 @@ static int svq1_encode_plane(SVQ1Context *s, int plane,
best = 0;
- if (s->picture.pict_type == AV_PICTURE_TYPE_P) {
+ if (f->pict_type == AV_PICTURE_TYPE_P) {
const uint8_t *vlc = ff_svq1_block_type_vlc[SVQ1_BLOCK_INTER];
int mx, my, pred_x, pred_y, dxy;
int16_t *motion_ptr;
@@ -498,13 +498,48 @@ static int svq1_encode_plane(SVQ1Context *s, int plane,
return 0;
}
+static av_cold int svq1_encode_end(AVCodecContext *avctx)
+{
+ SVQ1Context *const s = avctx->priv_data;
+ int i;
+
+ av_log(avctx, AV_LOG_DEBUG, "RD: %f\n",
+ s->rd_total / (double)(avctx->width * avctx->height *
+ avctx->frame_number));
+
+ av_freep(&s->m.me.scratchpad);
+ av_freep(&s->m.me.map);
+ av_freep(&s->m.me.score_map);
+ av_freep(&s->mb_type);
+ av_freep(&s->dummy);
+ av_freep(&s->scratchbuf);
+
+ for (i = 0; i < 3; i++) {
+ av_freep(&s->motion_val8[i]);
+ av_freep(&s->motion_val16[i]);
+ }
+
+ av_frame_free(&s->current_picture);
+ av_frame_free(&s->last_picture);
+ av_frame_free(&avctx->coded_frame);
+
+ return 0;
+}
+
static av_cold int svq1_encode_init(AVCodecContext *avctx)
{
SVQ1Context *const s = avctx->priv_data;
ff_dsputil_init(&s->dsp, avctx);
ff_hpeldsp_init(&s->hdsp, avctx->flags);
- avctx->coded_frame = &s->picture;
+
+ avctx->coded_frame = av_frame_alloc();
+ s->current_picture = av_frame_alloc();
+ s->last_picture = av_frame_alloc();
+ if (!avctx->coded_frame || !s->current_picture || !s->last_picture) {
+ svq1_encode_end(avctx);
+ return AVERROR(ENOMEM);
+ }
s->frame_width = avctx->width;
s->frame_height = avctx->height;
@@ -536,8 +571,7 @@ static int svq1_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pict, int *got_packet)
{
SVQ1Context *const s = avctx->priv_data;
- AVFrame *const p = &s->picture;
- AVFrame temp;
+ AVFrame *const p = avctx->coded_frame;
int i, ret;
if ((ret = ff_alloc_packet2(avctx, pkt, s->y_block_width * s->y_block_height *
@@ -549,35 +583,33 @@ static int svq1_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
return -1;
}
- if (!s->current_picture.data[0]) {
- if ((ret = ff_get_buffer(avctx, &s->current_picture, 0))< 0 ||
- (ret = ff_get_buffer(avctx, &s->last_picture, 0)) < 0) {
+ if (!s->current_picture->data[0]) {
+ if ((ret = ff_get_buffer(avctx, s->current_picture, 0))< 0 ||
+ (ret = ff_get_buffer(avctx, s->last_picture, 0)) < 0) {
return ret;
}
- s->scratchbuf = av_malloc(s->current_picture.linesize[0] * 16 * 2);
+ s->scratchbuf = av_malloc(s->current_picture->linesize[0] * 16 * 2);
}
- av_frame_move_ref(&temp, &s->current_picture);
- av_frame_move_ref(&s->current_picture, &s->last_picture);
- av_frame_move_ref(&s->last_picture, &temp);
+ FFSWAP(AVFrame*, s->current_picture, s->last_picture);
init_put_bits(&s->pb, pkt->data, pkt->size);
- *p = *pict;
p->pict_type = avctx->gop_size && avctx->frame_number % avctx->gop_size ?
AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
p->key_frame = p->pict_type == AV_PICTURE_TYPE_I;
+ p->quality = pict->quality;
svq1_write_header(s, p->pict_type);
for (i = 0; i < 3; i++)
if (svq1_encode_plane(s, i,
- s->picture.data[i],
- s->last_picture.data[i],
- s->current_picture.data[i],
+ pict->data[i],
+ s->last_picture->data[i],
+ s->current_picture->data[i],
s->frame_width / (i ? 4 : 1),
s->frame_height / (i ? 4 : 1),
- s->picture.linesize[i],
- s->current_picture.linesize[i]) < 0)
+ pict->linesize[i],
+ s->current_picture->linesize[i]) < 0)
return -1;
// avpriv_align_put_bits(&s->pb);
@@ -594,33 +626,6 @@ static int svq1_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
return 0;
}
-static av_cold int svq1_encode_end(AVCodecContext *avctx)
-{
- SVQ1Context *const s = avctx->priv_data;
- int i;
-
- av_log(avctx, AV_LOG_DEBUG, "RD: %f\n",
- s->rd_total / (double)(avctx->width * avctx->height *
- avctx->frame_number));
-
- av_freep(&s->m.me.scratchpad);
- av_freep(&s->m.me.map);
- av_freep(&s->m.me.score_map);
- av_freep(&s->mb_type);
- av_freep(&s->dummy);
- av_freep(&s->scratchbuf);
-
- for (i = 0; i < 3; i++) {
- av_freep(&s->motion_val8[i]);
- av_freep(&s->motion_val16[i]);
- }
-
- av_frame_unref(&s->current_picture);
- av_frame_unref(&s->last_picture);
-
- return 0;
-}
-
AVCodec ff_svq1_encoder = {
.name = "svq1",
.long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 1 / Sorenson Video 1 / SVQ1"),