From f6774f905fb3cfdc319523ac640be30b14c1bc55 Mon Sep 17 00:00:00 2001
From: wm4
Date: Mon, 31 Mar 2014 17:46:29 +0000
Subject: mpegvideo: operate with pointers to AVFrames instead of whole structs

The most interesting parts are initialization in ff_MPV_common_init()
and uninitialization in ff_MPV_common_end().

ff_mpeg_unref_picture and ff_thread_release_buffer have additional NULL
checks for Picture.f, because these functions can be called on
uninitialized or partially initialized Pictures.

NULL pointer checks are added to the ff_thread_release_buffer() stub function.

Signed-off-by: Vittorio Giovara
---
 libavcodec/vc1dec.c | 134 ++++++++++++++++++++++++++--------------------------
 1 file changed, 67 insertions(+), 67 deletions(-)

(limited to 'libavcodec/vc1dec.c')

diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c
index d697b8c13d..5d3ed807bb 100644
--- a/libavcodec/vc1dec.c
+++ b/libavcodec/vc1dec.c
@@ -78,9 +78,9 @@ static void init_block_index(VC1Context *v)
     MpegEncContext *s = &v->s;
     ff_init_block_index(s);
     if (v->field_mode && !(v->second_field ^ v->tff)) {
-        s->dest[0] += s->current_picture_ptr->f.linesize[0];
-        s->dest[1] += s->current_picture_ptr->f.linesize[1];
-        s->dest[2] += s->current_picture_ptr->f.linesize[2];
+        s->dest[0] += s->current_picture_ptr->f->linesize[0];
+        s->dest[1] += s->current_picture_ptr->f->linesize[1];
+        s->dest[2] += s->current_picture_ptr->f->linesize[2];
     }
 }
 
@@ -353,7 +353,7 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
 
     if ((!v->field_mode ||
          (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
-        !v->s.last_picture.f.data[0])
+        !v->s.last_picture.f->data[0])
         return;
 
     mx = s->mv[dir][0][0];
@@ -385,24 +385,24 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
     }
     if (!dir) {
         if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
-            srcY = s->current_picture.f.data[0];
-            srcU = s->current_picture.f.data[1];
-            srcV = s->current_picture.f.data[2];
+            srcY = s->current_picture.f->data[0];
+            srcU = s->current_picture.f->data[1];
+            srcV = s->current_picture.f->data[2];
             luty = v->curr_luty;
             lutuv = v->curr_lutuv;
             use_ic = v->curr_use_ic;
         } else {
-            srcY = s->last_picture.f.data[0];
-            srcU = s->last_picture.f.data[1];
-            srcV = s->last_picture.f.data[2];
+            srcY = s->last_picture.f->data[0];
+            srcU = s->last_picture.f->data[1];
+            srcV = s->last_picture.f->data[2];
             luty = v->last_luty;
             lutuv = v->last_lutuv;
             use_ic = v->last_use_ic;
         }
     } else {
-        srcY = s->next_picture.f.data[0];
-        srcU = s->next_picture.f.data[1];
-        srcV = s->next_picture.f.data[2];
+        srcY = s->next_picture.f->data[0];
+        srcU = s->next_picture.f->data[1];
+        srcV = s->next_picture.f->data[2];
         luty = v->next_luty;
         lutuv = v->next_lutuv;
         use_ic = v->next_use_ic;
@@ -435,9 +435,9 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
     srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
 
     if (v->field_mode && v->ref_field_type[dir]) {
-        srcY += s->current_picture_ptr->f.linesize[0];
-        srcU += s->current_picture_ptr->f.linesize[1];
-        srcV += s->current_picture_ptr->f.linesize[2];
+        srcY += s->current_picture_ptr->f->linesize[0];
+        srcU += s->current_picture_ptr->f->linesize[1];
+        srcV += s->current_picture_ptr->f->linesize[2];
     }
 
     /* for grayscale we should not try to read from unknown area */
@@ -572,7 +572,7 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
 
     if ((!v->field_mode ||
          (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
-        !v->s.last_picture.f.data[0])
+        !v->s.last_picture.f->data[0])
         return;
 
     mx = s->mv[dir][n][0];
@@ -580,16 +580,16 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
 
     if (!dir) {
         if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
-            srcY = s->current_picture.f.data[0];
+            srcY = s->current_picture.f->data[0];
             luty = v->curr_luty;
             use_ic = v->curr_use_ic;
         } else {
-            srcY = s->last_picture.f.data[0];
+            srcY = s->last_picture.f->data[0];
             luty = v->last_luty;
             use_ic = v->last_use_ic;
         }
     } else {
-        srcY = s->next_picture.f.data[0];
+        srcY = s->next_picture.f->data[0];
         luty = v->next_luty;
         use_ic = v->next_use_ic;
     }
@@ -687,7 +687,7 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
     srcY += src_y * s->linesize + src_x;
 
     if (v->field_mode && v->ref_field_type[dir])
-        srcY += s->current_picture_ptr->f.linesize[0];
+        srcY += s->current_picture_ptr->f->linesize[0];
 
     if (fieldmv && !(src_y & 1))
         v_edge_pos--;
@@ -817,7 +817,7 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
     uint8_t (*lutuv)[256];
     int use_ic;
 
-    if (!v->field_mode && !v->s.last_picture.f.data[0])
+    if (!v->field_mode && !v->s.last_picture.f->data[0])
         return;
     if (s->flags & CODEC_FLAG_GRAY)
         return;
@@ -848,7 +848,7 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
         if (dominant)
             chroma_ref_type = !v->cur_field_type;
     }
-    if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f.data[0])
+    if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f->data[0])
         return;
     s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
     s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
@@ -879,19 +879,19 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
     if (!dir) {
         if (v->field_mode && (v->cur_field_type != chroma_ref_type) && v->second_field) {
-            srcU = s->current_picture.f.data[1];
-            srcV = s->current_picture.f.data[2];
+            srcU = s->current_picture.f->data[1];
+            srcV = s->current_picture.f->data[2];
             lutuv = v->curr_lutuv;
             use_ic = v->curr_use_ic;
         } else {
-            srcU = s->last_picture.f.data[1];
-            srcV = s->last_picture.f.data[2];
+            srcU = s->last_picture.f->data[1];
+            srcV = s->last_picture.f->data[2];
             lutuv = v->last_lutuv;
             use_ic = v->last_use_ic;
         }
     } else {
-        srcU = s->next_picture.f.data[1];
-        srcV = s->next_picture.f.data[2];
+        srcU = s->next_picture.f->data[1];
+        srcV = s->next_picture.f->data[2];
         lutuv = v->next_lutuv;
         use_ic = v->next_use_ic;
     }
@@ -906,8 +906,8 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
 
     if (v->field_mode) {
         if (chroma_ref_type) {
-            srcU += s->current_picture_ptr->f.linesize[1];
-            srcV += s->current_picture_ptr->f.linesize[2];
+            srcU += s->current_picture_ptr->f->linesize[1];
+            srcV += s->current_picture_ptr->f->linesize[2];
         }
     }
 
@@ -1012,13 +1012,13 @@ static void vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
         uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
         uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
         if (i < 2 ? dir : dir2) {
-            srcU = s->next_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
-            srcV = s->next_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
+            srcU = s->next_picture.f->data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
+            srcV = s->next_picture.f->data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
             lutuv = v->next_lutuv;
             use_ic = v->next_use_ic;
         } else {
-            srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
-            srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
+            srcU = s->last_picture.f->data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
+            srcV = s->last_picture.f->data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
             lutuv = v->last_lutuv;
             use_ic = v->last_use_ic;
         }
@@ -1908,7 +1908,7 @@ static void vc1_interp_mc(VC1Context *v)
     int v_edge_pos = s->v_edge_pos >> v->field_mode;
     int use_ic = v->next_use_ic;
 
-    if (!v->field_mode && !v->s.next_picture.f.data[0])
+    if (!v->field_mode && !v->s.next_picture.f->data[0])
         return;
 
     mx = s->mv[1][0][0];
@@ -1924,9 +1924,9 @@ static void vc1_interp_mc(VC1Context *v)
         uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
         uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
     }
-    srcY = s->next_picture.f.data[0];
-    srcU = s->next_picture.f.data[1];
-    srcV = s->next_picture.f.data[2];
+    srcY = s->next_picture.f->data[0];
+    srcU = s->next_picture.f->data[1];
+    srcV = s->next_picture.f->data[2];
 
     src_x = s->mb_x * 16 + (mx >> 2);
     src_y = s->mb_y * 16 + (my >> 2);
@@ -1950,9 +1950,9 @@ static void vc1_interp_mc(VC1Context *v)
     srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
 
     if (v->field_mode && v->ref_field_type[1]) {
-        srcY += s->current_picture_ptr->f.linesize[0];
-        srcU += s->current_picture_ptr->f.linesize[1];
-        srcV += s->current_picture_ptr->f.linesize[2];
+        srcY += s->current_picture_ptr->f->linesize[0];
+        srcU += s->current_picture_ptr->f->linesize[1];
+        srcV += s->current_picture_ptr->f->linesize[2];
     }
 
     /* for grayscale we should not try to read from unknown area */
@@ -5160,7 +5160,7 @@ static void vc1_decode_skip_blocks(VC1Context *v)
 {
     MpegEncContext *s = &v->s;
 
-    if (!v->s.last_picture.f.data[0])
+    if (!v->s.last_picture.f->data[0])
        return;
 
     ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
@@ -5169,9 +5169,9 @@ static void vc1_decode_skip_blocks(VC1Context *v)
         s->mb_x = 0;
         init_block_index(v);
         ff_update_block_index(s);
-        memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
-        memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
-        memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
+        memcpy(s->dest[0], s->last_picture.f->data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
+        memcpy(s->dest[1], s->last_picture.f->data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
+        memcpy(s->dest[2], s->last_picture.f->data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
         ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
         s->first_slice_line = 0;
     }
@@ -5370,15 +5370,15 @@ static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
                       v->sprite_output_frame->linesize[plane] * row;
 
         for (sprite = 0; sprite <= v->two_sprites; sprite++) {
-            uint8_t *iplane = s->current_picture.f.data[plane];
-            int iline = s->current_picture.f.linesize[plane];
+            uint8_t *iplane = s->current_picture.f->data[plane];
+            int iline = s->current_picture.f->linesize[plane];
            int ycoord = yoff[sprite] + yadv[sprite] * row;
            int yline = ycoord >> 16;
            int next_line;
            ysub[sprite] = ycoord & 0xFFFF;
            if (sprite) {
-                iplane = s->last_picture.f.data[plane];
-                iline = s->last_picture.f.linesize[plane];
+                iplane = s->last_picture.f->data[plane];
+                iline = s->last_picture.f->linesize[plane];
            }
            next_line = FFMIN(yline + 1, (v->sprite_height >> !!plane) - 1) * iline;
            if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
@@ -5447,12 +5447,12 @@ static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
 
     vc1_parse_sprites(v, gb, &sd);
 
-    if (!s->current_picture.f.data[0]) {
+    if (!s->current_picture.f->data[0]) {
         av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
         return -1;
     }
 
-    if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f.data[0])) {
+    if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f->data[0])) {
         av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
         v->two_sprites = 0;
     }
@@ -5472,7 +5472,7 @@ static void vc1_sprite_flush(AVCodecContext *avctx)
 {
     VC1Context *v = avctx->priv_data;
     MpegEncContext *s = &v->s;
-    AVFrame *f = &s->current_picture.f;
+    AVFrame *f = s->current_picture.f;
     int plane, i;
 
     /* Windows Media Image codecs have a convergence interval of two keyframes.
@@ -5770,7 +5770,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
     if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
         /* special case for last picture */
         if (s->low_delay == 0 && s->next_picture_ptr) {
-            if ((ret = av_frame_ref(pict, &s->next_picture_ptr->f)) < 0)
+            if ((ret = av_frame_ref(pict, s->next_picture_ptr->f)) < 0)
                 return ret;
             s->next_picture_ptr = NULL;
 
@@ -5936,8 +5936,8 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     // for skipping the frame
-    s->current_picture.f.pict_type = s->pict_type;
-    s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
+    s->current_picture.f->pict_type = s->pict_type;
+    s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
 
     /* skip B-frames if we don't have reference frames */
     if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->droppable)) {
@@ -5961,15 +5961,15 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     // process pulldown flags
-    s->current_picture_ptr->f.repeat_pict = 0;
+    s->current_picture_ptr->f->repeat_pict = 0;
     // Pulldown flags are only valid when 'broadcast' has been set.
     // So ticks_per_frame will be 2
     if (v->rff) {
         // repeat field
-        s->current_picture_ptr->f.repeat_pict = 1;
+        s->current_picture_ptr->f->repeat_pict = 1;
     } else if (v->rptfrm) {
         // repeat frames
-        s->current_picture_ptr->f.repeat_pict = v->rptfrm * 2;
+        s->current_picture_ptr->f->repeat_pict = v->rptfrm * 2;
     }
 
     s->me.qpel_put = s->dsp.put_qpel_pixels_tab;
@@ -5990,9 +5990,9 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
     v->bits = buf_size * 8;
     v->end_mb_x = s->mb_width;
     if (v->field_mode) {
-        s->current_picture.f.linesize[0] <<= 1;
-        s->current_picture.f.linesize[1] <<= 1;
-        s->current_picture.f.linesize[2] <<= 1;
+        s->current_picture.f->linesize[0] <<= 1;
+        s->current_picture.f->linesize[1] <<= 1;
+        s->current_picture.f->linesize[2] <<= 1;
         s->linesize <<= 1;
         s->uvlinesize <<= 1;
     }
@@ -6051,9 +6051,9 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
     }
     if (v->field_mode) {
         v->second_field = 0;
-        s->current_picture.f.linesize[0] >>= 1;
-        s->current_picture.f.linesize[1] >>= 1;
-        s->current_picture.f.linesize[2] >>= 1;
+        s->current_picture.f->linesize[0] >>= 1;
+        s->current_picture.f->linesize[1] >>= 1;
+        s->current_picture.f->linesize[2] >>= 1;
         s->linesize >>= 1;
         s->uvlinesize >>= 1;
         if (v->s.pict_type != AV_PICTURE_TYPE_BI && v->s.pict_type != AV_PICTURE_TYPE_B) {
@@ -6086,12 +6086,12 @@ image:
         *got_frame = 1;
     } else {
         if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
-            if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
+            if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
                 goto err;
             ff_print_debug_info(s, s->current_picture_ptr);
             *got_frame = 1;
         } else if (s->last_picture_ptr != NULL) {
-            if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0)
+            if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
                 goto err;
             ff_print_debug_info(s, s->last_picture_ptr);
             *got_frame = 1;
--
cgit v1.2.3
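
As context for the change above: the Picture.f lifecycle that the commit message describes (allocation in ff_MPV_common_init(), NULL-tolerant unref in ff_mpeg_unref_picture()/ff_thread_release_buffer(), teardown in ff_MPV_common_end()) follows roughly the pattern sketched below. This is a minimal illustrative sketch with made-up names (ExamplePicture, example_picture_*), not the actual mpegvideo.c code; only the libavutil frame API calls are real.

/*
 * Minimal sketch of the Picture.f lifecycle described in the commit message.
 * Struct and helper names are illustrative, not the mpegvideo.c implementation.
 */
#include <libavutil/error.h>
#include <libavutil/frame.h>

typedef struct ExamplePicture {
    AVFrame *f;   /* now a pointer; previously an embedded AVFrame */
} ExamplePicture;

/* Allocation side: done once per Picture during common init. */
static int example_picture_init(ExamplePicture *pic)
{
    pic->f = av_frame_alloc();
    return pic->f ? 0 : AVERROR(ENOMEM);
}

/* Unref side: must tolerate uninitialized or partially initialized Pictures,
 * hence the NULL check on pic->f before touching it. */
static void example_picture_unref(ExamplePicture *pic)
{
    if (!pic || !pic->f)
        return;
    av_frame_unref(pic->f);
}

/* Teardown side: done once per Picture during common uninit. */
static void example_picture_free(ExamplePicture *pic)
{
    if (pic)
        av_frame_free(&pic->f);   /* av_frame_free() is safe on a NULL frame */
}

Inside vc1dec.c itself the consequence is purely mechanical, as the diff shows: every "picture.f.field" access becomes "picture.f->field", and &s->xxx_picture_ptr->f becomes s->xxx_picture_ptr->f when passing the frame to av_frame_ref().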