path: root/libavcodec/h264.c
author     Michael Niedermayer <michaelni@gmx.at>    2015-04-29 15:44:21 +0200
committer  Michael Niedermayer <michaelni@gmx.at>    2015-04-29 16:35:24 +0200
commit     36f862e04c2afe37c1fd541e01013c6cef4c6015 (patch)
tree       3983371af49a7234d87bf999b35b17875db09a64 /libavcodec/h264.c
parent     07a79cf8694ac685ae8f579ccc33d113eb46fe3d (diff)
parent     a0f2946068c62e18cb05ac25c0df3d86077251a6 (diff)
Merge commit 'a0f2946068c62e18cb05ac25c0df3d86077251a6'
* commit 'a0f2946068c62e18cb05ac25c0df3d86077251a6':
  h264: use properly allocated AVFrames

Conflicts:
	libavcodec/h264.c
	libavcodec/h264.h
	libavcodec/h264_refs.c
	libavcodec/h264_slice.c
	libavcodec/svq3.c
	libavcodec/vda_h264.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Diffstat (limited to 'libavcodec/h264.c')
-rw-r--r--  libavcodec/h264.c  84
1 file changed, 49 insertions(+), 35 deletions(-)
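The merged change replaces the AVFrame embedded in each picture with a heap-allocated one, so init must allocate the frame and teardown must free it instead of merely unref'ing it; this is also why every field access in the diff changes from cur->f.x to cur->f->x. Below is a minimal sketch of that pattern using the public libavutil API; the Picture struct and the picture_init()/picture_close() helpers are illustrative stand-ins for the real H264Picture code, not part of this commit.

#include <libavutil/frame.h>   /* av_frame_alloc(), av_frame_free() */
#include <libavutil/error.h>   /* AVERROR(), pulls in errno.h for ENOMEM */

/* Hypothetical stand-in for H264Picture: the frame is now a pointer. */
typedef struct Picture {
    AVFrame *f;                /* was: AVFrame f; (embedded struct) */
} Picture;

/* Allocate the frame once at decoder init (mirrors h264_init_context()). */
static int picture_init(Picture *pic)
{
    pic->f = av_frame_alloc();
    if (!pic->f)
        return AVERROR(ENOMEM);
    return 0;
}

/* Release the frame at teardown (mirrors ff_h264_free_context() and
 * h264_decode_end()); av_frame_free() also resets the pointer to NULL. */
static void picture_close(Picture *pic)
{
    av_frame_free(&pic->f);
}

Keeping the AVFrame behind a pointer lets pictures be passed around and reference-counted without copying the frame struct itself, which is what the hunks below implement for the DPB, cur_pic and last_pic_for_ec.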
diff --git a/libavcodec/h264.c b/libavcodec/h264.c
index 99b99e7a29..eb9a7d0c97 100644
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -99,7 +99,7 @@ void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl,
int y, int height)
{
AVCodecContext *avctx = h->avctx;
- const AVFrame *src = &h->cur_pic.f;
+ const AVFrame *src = h->cur_pic.f;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
int vshift = desc->log2_chroma_h;
const int field_pic = h->picture_structure != PICT_FRAME;
@@ -621,9 +621,19 @@ static int h264_init_context(AVCodecContext *avctx, H264Context *h)
return AVERROR(ENOMEM);
}
- for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
- av_frame_unref(&h->DPB[i].f);
- av_frame_unref(&h->cur_pic.f);
+ for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
+ h->DPB[i].f = av_frame_alloc();
+ if (!h->DPB[i].f)
+ return AVERROR(ENOMEM);
+ }
+
+ h->cur_pic.f = av_frame_alloc();
+ if (!h->cur_pic.f)
+ return AVERROR(ENOMEM);
+
+ h->last_pic_for_ec.f = av_frame_alloc();
+ if (!h->last_pic_for_ec.f)
+ return AVERROR(ENOMEM);
for (i = 0; i < h->nb_slice_ctx; i++)
h->slice_ctx[i].h264 = h;
@@ -721,7 +731,7 @@ static void decode_postinit(H264Context *h, int setup_finished)
H264Picture *cur = h->cur_pic_ptr;
int i, pics, out_of_order, out_idx;
- h->cur_pic_ptr->f.pict_type = h->pict_type;
+ h->cur_pic_ptr->f->pict_type = h->pict_type;
if (h->next_output_pic)
return;
@@ -739,8 +749,8 @@ static void decode_postinit(H264Context *h, int setup_finished)
return;
}
- cur->f.interlaced_frame = 0;
- cur->f.repeat_pict = 0;
+ cur->f->interlaced_frame = 0;
+ cur->f->repeat_pict = 0;
/* Signal interlacing information externally. */
/* Prioritize picture timing SEI information over used
@@ -752,55 +762,55 @@ static void decode_postinit(H264Context *h, int setup_finished)
break;
case SEI_PIC_STRUCT_TOP_FIELD:
case SEI_PIC_STRUCT_BOTTOM_FIELD:
- cur->f.interlaced_frame = 1;
+ cur->f->interlaced_frame = 1;
break;
case SEI_PIC_STRUCT_TOP_BOTTOM:
case SEI_PIC_STRUCT_BOTTOM_TOP:
if (FIELD_OR_MBAFF_PICTURE(h))
- cur->f.interlaced_frame = 1;
+ cur->f->interlaced_frame = 1;
else
// try to flag soft telecine progressive
- cur->f.interlaced_frame = h->prev_interlaced_frame;
+ cur->f->interlaced_frame = h->prev_interlaced_frame;
break;
case SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM:
/* Signal the possibility of telecined film externally
* (pic_struct 5,6). From these hints, let the applications
* decide if they apply deinterlacing. */
- cur->f.repeat_pict = 1;
+ cur->f->repeat_pict = 1;
break;
case SEI_PIC_STRUCT_FRAME_DOUBLING:
- cur->f.repeat_pict = 2;
+ cur->f->repeat_pict = 2;
break;
case SEI_PIC_STRUCT_FRAME_TRIPLING:
- cur->f.repeat_pict = 4;
+ cur->f->repeat_pict = 4;
break;
}
if ((h->sei_ct_type & 3) &&
h->sei_pic_struct <= SEI_PIC_STRUCT_BOTTOM_TOP)
- cur->f.interlaced_frame = (h->sei_ct_type & (1 << 1)) != 0;
+ cur->f->interlaced_frame = (h->sei_ct_type & (1 << 1)) != 0;
} else {
/* Derive interlacing flag from used decoding process. */
- cur->f.interlaced_frame = FIELD_OR_MBAFF_PICTURE(h);
+ cur->f->interlaced_frame = FIELD_OR_MBAFF_PICTURE(h);
}
- h->prev_interlaced_frame = cur->f.interlaced_frame;
+ h->prev_interlaced_frame = cur->f->interlaced_frame;
if (cur->field_poc[0] != cur->field_poc[1]) {
/* Derive top_field_first from field pocs. */
- cur->f.top_field_first = cur->field_poc[0] < cur->field_poc[1];
+ cur->f->top_field_first = cur->field_poc[0] < cur->field_poc[1];
} else {
- if (cur->f.interlaced_frame || h->sps.pic_struct_present_flag) {
+ if (cur->f->interlaced_frame || h->sps.pic_struct_present_flag) {
/* Use picture timing SEI information. Even if it is a
* information of a past frame, better than nothing. */
if (h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM ||
h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
- cur->f.top_field_first = 1;
+ cur->f->top_field_first = 1;
else
- cur->f.top_field_first = 0;
+ cur->f->top_field_first = 0;
} else {
/* Most likely progressive */
- cur->f.top_field_first = 0;
+ cur->f->top_field_first = 0;
}
}
@@ -809,7 +819,7 @@ static void decode_postinit(H264Context *h, int setup_finished)
h->frame_packing_arrangement_type <= 6 &&
h->content_interpretation_type > 0 &&
h->content_interpretation_type < 3) {
- AVStereo3D *stereo = av_stereo3d_create_side_data(&cur->f);
+ AVStereo3D *stereo = av_stereo3d_create_side_data(cur->f);
if (stereo) {
switch (h->frame_packing_arrangement_type) {
case 0:
@@ -846,7 +856,7 @@ static void decode_postinit(H264Context *h, int setup_finished)
if (h->sei_display_orientation_present &&
(h->sei_anticlockwise_rotation || h->sei_hflip || h->sei_vflip)) {
double angle = h->sei_anticlockwise_rotation * 360 / (double) (1 << 16);
- AVFrameSideData *rotation = av_frame_new_side_data(&cur->f,
+ AVFrameSideData *rotation = av_frame_new_side_data(cur->f,
AV_FRAME_DATA_DISPLAYMATRIX,
sizeof(int32_t) * 9);
if (rotation) {
@@ -885,7 +895,7 @@ static void decode_postinit(H264Context *h, int setup_finished)
}
}
out_of_order = MAX_DELAYED_PIC_COUNT - i;
- if( cur->f.pict_type == AV_PICTURE_TYPE_B
+ if( cur->f->pict_type == AV_PICTURE_TYPE_B
|| (h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > INT_MIN && h->last_pocs[MAX_DELAYED_PIC_COUNT-1] - h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > 2))
out_of_order = FFMAX(out_of_order, 1);
if (out_of_order == MAX_DELAYED_PIC_COUNT) {
@@ -913,7 +923,7 @@ static void decode_postinit(H264Context *h, int setup_finished)
out = h->delayed_pic[0];
out_idx = 0;
for (i = 1; h->delayed_pic[i] &&
- !h->delayed_pic[i]->f.key_frame &&
+ !h->delayed_pic[i]->f->key_frame &&
!h->delayed_pic[i]->mmco_reset;
i++)
if (h->delayed_pic[i]->poc < out->poc) {
@@ -921,7 +931,7 @@ static void decode_postinit(H264Context *h, int setup_finished)
out_idx = i;
}
if (h->avctx->has_b_frames == 0 &&
- (h->delayed_pic[0]->f.key_frame || h->delayed_pic[0]->mmco_reset))
+ (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset))
h->next_outputed_poc = INT_MIN;
out_of_order = out->poc < h->next_outputed_poc;
@@ -934,7 +944,7 @@ static void decode_postinit(H264Context *h, int setup_finished)
}
if (!out_of_order && pics > h->avctx->has_b_frames) {
h->next_output_pic = out;
- if (out_idx == 0 && h->delayed_pic[0] && (h->delayed_pic[0]->f.key_frame || h->delayed_pic[0]->mmco_reset)) {
+ if (out_idx == 0 && h->delayed_pic[0] && (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset)) {
h->next_outputed_poc = INT_MIN;
} else
h->next_outputed_poc = out->poc;
@@ -1496,7 +1506,7 @@ again:
}
}
- h->cur_pic_ptr->f.key_frame |=
+ h->cur_pic_ptr->f->key_frame |=
(h->nal_unit_type == NAL_IDR_SLICE);
if (h->nal_unit_type == NAL_IDR_SLICE ||
@@ -1537,10 +1547,10 @@ again:
goto end;
} else if (CONFIG_H264_VDPAU_DECODER &&
h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) {
- ff_vdpau_add_data_chunk(h->cur_pic_ptr->f.data[0],
+ ff_vdpau_add_data_chunk(h->cur_pic_ptr->f->data[0],
start_code,
sizeof(start_code));
- ff_vdpau_add_data_chunk(h->cur_pic_ptr->f.data[0],
+ ff_vdpau_add_data_chunk(h->cur_pic_ptr->f->data[0],
&buf[buf_index - consumed],
consumed);
} else
@@ -1658,7 +1668,7 @@ static int get_consumed_bytes(int pos, int buf_size)
static int output_frame(H264Context *h, AVFrame *dst, H264Picture *srcp)
{
- AVFrame *src = &srcp->f;
+ AVFrame *src = srcp->f;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(src->format);
int i;
int ret = av_frame_ref(dst, src);
@@ -1732,7 +1742,7 @@ static int h264_decode_frame(AVCodecContext *avctx, void *data,
out_idx = 0;
for (i = 1;
h->delayed_pic[i] &&
- !h->delayed_pic[i]->f.key_frame &&
+ !h->delayed_pic[i]->f->key_frame &&
!h->delayed_pic[i]->mmco_reset;
i++)
if (h->delayed_pic[i]->poc < out->poc) {
@@ -1793,14 +1803,14 @@ static int h264_decode_frame(AVCodecContext *avctx, void *data,
if (h->next_output_pic && (
h->next_output_pic->recovered)) {
if (!h->next_output_pic->recovered)
- h->next_output_pic->f.flags |= AV_FRAME_FLAG_CORRUPT;
+ h->next_output_pic->f->flags |= AV_FRAME_FLAG_CORRUPT;
if (!h->avctx->hwaccel &&
(h->next_output_pic->field_poc[0] == INT_MAX ||
h->next_output_pic->field_poc[1] == INT_MAX)
) {
int p;
- AVFrame *f = &h->next_output_pic->f;
+ AVFrame *f = h->next_output_pic->f;
int field = h->next_output_pic->field_poc[0] == INT_MAX;
uint8_t *dst_data[4];
int linesizes[4];
@@ -1846,8 +1856,10 @@ av_cold void ff_h264_free_context(H264Context *h)
ff_h264_free_tables(h);
- for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
+ for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
ff_h264_unref_picture(h, &h->DPB[i]);
+ av_frame_free(&h->DPB[i].f);
+ }
memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
h->cur_pic_ptr = NULL;
@@ -1872,7 +1884,9 @@ static av_cold int h264_decode_end(AVCodecContext *avctx)
ff_h264_free_context(h);
ff_h264_unref_picture(h, &h->cur_pic);
+ av_frame_free(&h->cur_pic.f);
ff_h264_unref_picture(h, &h->last_pic_for_ec);
+ av_frame_free(&h->last_pic_for_ec.f);
return 0;
}