From 1765aacb17fd1df0cd794a9ddc95e22d272434cb Mon Sep 17 00:00:00 2001
From: Mans Rullgard
Date: Sun, 10 Jul 2011 23:55:06 +0100
Subject: hwaccel: unbreak build

Signed-off-by: Mans Rullgard
---
 libavcodec/mpegvideo_xvmc.c | 14 +++++++-------
 libavcodec/vaapi_h264.c     | 10 +++++-----
 libavcodec/vaapi_internal.h |  2 +-
 libavcodec/vaapi_mpeg4.c    |  2 +-
 libavcodec/vdpau.c          | 40 ++++++++++++++++++++--------------------
 5 files changed, 34 insertions(+), 34 deletions(-)

diff --git a/libavcodec/mpegvideo_xvmc.c b/libavcodec/mpegvideo_xvmc.c
index 29d8bbbde6..159fe21b58 100644
--- a/libavcodec/mpegvideo_xvmc.c
+++ b/libavcodec/mpegvideo_xvmc.c
@@ -41,7 +41,7 @@
  */
 void ff_xvmc_init_block(MpegEncContext *s)
 {
-    struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.data[2];
+    struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.f.data[2];
     assert(render && render->xvmc_id == AV_XVMC_ID);
 
     s->block = (DCTELEM (*)[64])(render->data_blocks + render->next_free_data_block_num * 64);
@@ -73,7 +73,7 @@ void ff_xvmc_pack_pblocks(MpegEncContext *s, int cbp)
  */
 int ff_xvmc_field_start(MpegEncContext *s, AVCodecContext *avctx)
 {
-    struct xvmc_pix_fmt *last, *next, *render = (struct xvmc_pix_fmt*)s->current_picture.data[2];
+    struct xvmc_pix_fmt *last, *next, *render = (struct xvmc_pix_fmt*)s->current_picture.f.data[2];
     const int mb_block_count = 4 + (1 << s->chroma_format);
 
     assert(avctx);
@@ -113,7 +113,7 @@ int ff_xvmc_field_start(MpegEncContext *s, AVCodecContext *avctx)
     case AV_PICTURE_TYPE_I:
         return 0; // no prediction from other frames
     case AV_PICTURE_TYPE_B:
-        next = (struct xvmc_pix_fmt*)s->next_picture.data[2];
+        next = (struct xvmc_pix_fmt*)s->next_picture.f.data[2];
         if (!next)
             return -1;
         if (next->xvmc_id != AV_XVMC_ID)
@@ -121,7 +121,7 @@
         render->p_future_surface = next->p_surface;
         // no return here, going to set forward prediction
     case AV_PICTURE_TYPE_P:
-        last = (struct xvmc_pix_fmt*)s->last_picture.data[2];
+        last = (struct xvmc_pix_fmt*)s->last_picture.f.data[2];
         if (!last)
             last = render; // predict second field from the first
         if (last->xvmc_id != AV_XVMC_ID)
@@ -141,7 +141,7 @@ return -1;
  */
 void ff_xvmc_field_end(MpegEncContext *s)
 {
-    struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.data[2];
+    struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.f.data[2];
     assert(render);
 
     if (render->filled_mv_blocks_num > 0)
@@ -179,10 +179,10 @@ void ff_xvmc_decode_mb(MpegEncContext *s)
 
     // Do I need to export quant when I could not perform postprocessing?
     // Anyway, it doesn't hurt.
-    s->current_picture.qscale_table[mb_xy] = s->qscale;
+    s->current_picture.f.qscale_table[mb_xy] = s->qscale;
 
     // start of XVMC-specific code
-    render = (struct xvmc_pix_fmt*)s->current_picture.data[2];
+    render = (struct xvmc_pix_fmt*)s->current_picture.f.data[2];
     assert(render);
     assert(render->xvmc_id == AV_XVMC_ID);
     assert(render->mv_blocks);
diff --git a/libavcodec/vaapi_h264.c b/libavcodec/vaapi_h264.c
index 83bf2907de..f6d1a6d48e 100644
--- a/libavcodec/vaapi_h264.c
+++ b/libavcodec/vaapi_h264.c
@@ -54,7 +54,7 @@ static void fill_vaapi_pic(VAPictureH264 *va_pic,
                            int pic_structure)
 {
     if (pic_structure == 0)
-        pic_structure = pic->reference;
+        pic_structure = pic->f.reference;
     pic_structure &= PICT_FRAME; /* PICT_TOP_FIELD|PICT_BOTTOM_FIELD */
 
     va_pic->picture_id = ff_vaapi_get_surface_id(pic);
@@ -63,7 +63,7 @@ static void fill_vaapi_pic(VAPictureH264 *va_pic,
     va_pic->flags = 0;
     if (pic_structure != PICT_FRAME)
         va_pic->flags |= (pic_structure & PICT_TOP_FIELD) ? VA_PICTURE_H264_TOP_FIELD : VA_PICTURE_H264_BOTTOM_FIELD;
-    if (pic->reference)
+    if (pic->f.reference)
         va_pic->flags |= pic->long_ref ? VA_PICTURE_H264_LONG_TERM_REFERENCE : VA_PICTURE_H264_SHORT_TERM_REFERENCE;
 
     va_pic->TopFieldOrderCnt = 0;
@@ -133,13 +133,13 @@ static int fill_vaapi_ReferenceFrames(VAPictureParameterBufferH264 *pic_param,
 
     for (i = 0; i < h->short_ref_count; i++) {
         Picture * const pic = h->short_ref[i];
-        if (pic && pic->reference && dpb_add(&dpb, pic) < 0)
+        if (pic && pic->f.reference && dpb_add(&dpb, pic) < 0)
             return -1;
     }
 
     for (i = 0; i < 16; i++) {
         Picture * const pic = h->long_ref[i];
-        if (pic && pic->reference && dpb_add(&dpb, pic) < 0)
+        if (pic && pic->f.reference && dpb_add(&dpb, pic) < 0)
             return -1;
     }
     return 0;
@@ -159,7 +159,7 @@ static void fill_vaapi_RefPicList(VAPictureH264 RefPicList[32],
 {
     unsigned int i, n = 0;
     for (i = 0; i < ref_count; i++)
-        if (ref_list[i].reference)
+        if (ref_list[i].f.reference)
             fill_vaapi_pic(&RefPicList[n++], &ref_list[i], 0);
 
     for (; n < 32; n++)
diff --git a/libavcodec/vaapi_internal.h b/libavcodec/vaapi_internal.h
index 38432e1bc0..c6d5d6e42a 100644
--- a/libavcodec/vaapi_internal.h
+++ b/libavcodec/vaapi_internal.h
@@ -38,7 +38,7 @@
 /** Extract VASurfaceID from a Picture */
 static inline VASurfaceID ff_vaapi_get_surface_id(Picture *pic)
 {
-    return (uintptr_t)pic->data[3];
+    return (uintptr_t)pic->f.data[3];
 }
 
 /** Common AVHWAccel.end_frame() implementation */
diff --git a/libavcodec/vaapi_mpeg4.c b/libavcodec/vaapi_mpeg4.c
index fcd429086e..1a756cbaf4 100644
--- a/libavcodec/vaapi_mpeg4.c
+++ b/libavcodec/vaapi_mpeg4.c
@@ -79,7 +79,7 @@ static int vaapi_mpeg4_start_frame(AVCodecContext *avctx, av_unused const uint8_
     pic_param->quant_precision = s->quant_precision;
     pic_param->vop_fields.value = 0; /* reset all bits */
     pic_param->vop_fields.bits.vop_coding_type = s->pict_type - AV_PICTURE_TYPE_I;
-    pic_param->vop_fields.bits.backward_reference_vop_coding_type = s->pict_type == AV_PICTURE_TYPE_B ? s->next_picture.pict_type - AV_PICTURE_TYPE_I : 0;
+    pic_param->vop_fields.bits.backward_reference_vop_coding_type = s->pict_type == AV_PICTURE_TYPE_B ? s->next_picture.f.pict_type - AV_PICTURE_TYPE_I : 0;
     pic_param->vop_fields.bits.vop_rounding_type = s->no_rounding;
     pic_param->vop_fields.bits.intra_dc_vlc_thr = mpeg4_get_intra_dc_vlc_thr(s);
     pic_param->vop_fields.bits.top_field_first = s->top_field_first;
diff --git a/libavcodec/vdpau.c b/libavcodec/vdpau.c
index bdc596ce24..df04ca01fc 100644
--- a/libavcodec/vdpau.c
+++ b/libavcodec/vdpau.c
@@ -46,7 +46,7 @@ void ff_vdpau_h264_set_reference_frames(MpegEncContext *s)
     Picture *pic;
     int i, list, pic_frame_idx;
 
-    render = (struct vdpau_render_state *)s->current_picture_ptr->data[0];
+    render = (struct vdpau_render_state *)s->current_picture_ptr->f.data[0];
     assert(render);
 
     rf = &render->info.h264.referenceFrames[0];
@@ -58,11 +58,11 @@ void ff_vdpau_h264_set_reference_frames(MpegEncContext *s)
 
         for (i = 0; i < ls; ++i) {
             pic = lp[i];
-            if (!pic || !pic->reference)
+            if (!pic || !pic->f.reference)
                 continue;
             pic_frame_idx = pic->long_ref ? pic->pic_id : pic->frame_num;
 
-            render_ref = (struct vdpau_render_state *)pic->data[0];
+            render_ref = (struct vdpau_render_state *)pic->f.data[0];
             assert(render_ref);
 
             rf2 = &render->info.h264.referenceFrames[0];
@@ -76,8 +76,8 @@ void ff_vdpau_h264_set_reference_frames(MpegEncContext *s)
                 ++rf2;
             }
             if (rf2 != rf) {
-                rf2->top_is_reference |= (pic->reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE;
-                rf2->bottom_is_reference |= (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
+                rf2->top_is_reference |= (pic->f.reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE;
+                rf2->bottom_is_reference |= (pic->f.reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
                 continue;
             }
 
@@ -86,8 +86,8 @@ void ff_vdpau_h264_set_reference_frames(MpegEncContext *s)
 
             rf->surface = render_ref->surface;
             rf->is_long_term = pic->long_ref;
-            rf->top_is_reference = (pic->reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE;
-            rf->bottom_is_reference = (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
+            rf->top_is_reference = (pic->f.reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE;
+            rf->bottom_is_reference = (pic->f.reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
             rf->field_order_cnt[0] = pic->field_poc[0];
             rf->field_order_cnt[1] = pic->field_poc[1];
             rf->frame_idx = pic_frame_idx;
@@ -112,7 +112,7 @@ void ff_vdpau_add_data_chunk(MpegEncContext *s,
 {
     struct vdpau_render_state *render;
 
-    render = (struct vdpau_render_state *)s->current_picture_ptr->data[0];
+    render = (struct vdpau_render_state *)s->current_picture_ptr->f.data[0];
     assert(render);
 
     render->bitstream_buffers= av_fast_realloc(
@@ -133,7 +133,7 @@ void ff_vdpau_h264_picture_start(MpegEncContext *s)
     struct vdpau_render_state *render;
     int i;
 
-    render = (struct vdpau_render_state *)s->current_picture_ptr->data[0];
+    render = (struct vdpau_render_state *)s->current_picture_ptr->f.data[0];
     assert(render);
 
     for (i = 0; i < 2; ++i) {
@@ -151,14 +151,14 @@ void ff_vdpau_h264_picture_complete(MpegEncContext *s)
     H264Context *h = s->avctx->priv_data;
     struct vdpau_render_state *render;
 
-    render = (struct vdpau_render_state *)s->current_picture_ptr->data[0];
+    render = (struct vdpau_render_state *)s->current_picture_ptr->f.data[0];
     assert(render);
 
     render->info.h264.slice_count = h->slice_num;
     if (render->info.h264.slice_count < 1)
         return;
 
-    render->info.h264.is_reference = (s->current_picture_ptr->reference & 3) ? VDP_TRUE : VDP_FALSE;
+    render->info.h264.is_reference = (s->current_picture_ptr->f.reference & 3) ? VDP_TRUE : VDP_FALSE;
     render->info.h264.field_pic_flag = s->picture_structure != PICT_FRAME;
     render->info.h264.bottom_field_flag = s->picture_structure == PICT_BOTTOM_FIELD;
     render->info.h264.num_ref_frames = h->sps.ref_frame_count;
@@ -198,7 +198,7 @@ void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf,
 
     if (!s->current_picture_ptr) return;
 
-    render = (struct vdpau_render_state *)s->current_picture_ptr->data[0];
+    render = (struct vdpau_render_state *)s->current_picture_ptr->f.data[0];
     assert(render);
 
     /* fill VdpPictureInfoMPEG1Or2 struct */
@@ -227,12 +227,12 @@ void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf,
 
     switch(s->pict_type){
     case AV_PICTURE_TYPE_B:
-        next = (struct vdpau_render_state *)s->next_picture.data[0];
+        next = (struct vdpau_render_state *)s->next_picture.f.data[0];
         assert(next);
         render->info.mpeg.backward_reference = next->surface;
         // no return here, going to set forward prediction
     case AV_PICTURE_TYPE_P:
-        last = (struct vdpau_render_state *)s->last_picture.data[0];
+        last = (struct vdpau_render_state *)s->last_picture.f.data[0];
         if (!last) // FIXME: Does this test make sense?
             last = render; // predict second field from the first
         render->info.mpeg.forward_reference = last->surface;
@@ -253,7 +253,7 @@ void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf,
     VC1Context *v = s->avctx->priv_data;
     struct vdpau_render_state *render, *last, *next;
 
-    render = (struct vdpau_render_state *)s->current_picture.data[0];
+    render = (struct vdpau_render_state *)s->current_picture.f.data[0];
     assert(render);
 
     /* fill LvPictureInfoVC1 struct */
@@ -297,12 +297,12 @@ void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf,
 
     switch(s->pict_type){
     case AV_PICTURE_TYPE_B:
-        next = (struct vdpau_render_state *)s->next_picture.data[0];
+        next = (struct vdpau_render_state *)s->next_picture.f.data[0];
         assert(next);
         render->info.vc1.backward_reference = next->surface;
         // no break here, going to set forward prediction
     case AV_PICTURE_TYPE_P:
-        last = (struct vdpau_render_state *)s->last_picture.data[0];
+        last = (struct vdpau_render_state *)s->last_picture.f.data[0];
         if (!last) // FIXME: Does this test make sense?
             last = render; // predict second field from the first
         render->info.vc1.forward_reference = last->surface;
@@ -324,7 +324,7 @@ void ff_vdpau_mpeg4_decode_picture(MpegEncContext *s, const uint8_t *buf,
 
     if (!s->current_picture_ptr) return;
 
-    render = (struct vdpau_render_state *)s->current_picture_ptr->data[0];
+    render = (struct vdpau_render_state *)s->current_picture_ptr->f.data[0];
     assert(render);
 
     /* fill VdpPictureInfoMPEG4Part2 struct */
@@ -353,13 +353,13 @@ void ff_vdpau_mpeg4_decode_picture(MpegEncContext *s, const uint8_t *buf,
 
     switch (s->pict_type) {
     case AV_PICTURE_TYPE_B:
-        next = (struct vdpau_render_state *)s->next_picture.data[0];
+        next = (struct vdpau_render_state *)s->next_picture.f.data[0];
         assert(next);
         render->info.mpeg4.backward_reference = next->surface;
         render->info.mpeg4.vop_coding_type = 2;
         // no break here, going to set forward prediction
     case AV_PICTURE_TYPE_P:
-        last = (struct vdpau_render_state *)s->last_picture.data[0];
+        last = (struct vdpau_render_state *)s->last_picture.f.data[0];
         assert(last);
         render->info.mpeg4.forward_reference = last->surface;
     }
-- 
cgit v1.2.3