From 5136ba7c690f50ebe12bba6e3320a18e1d4fd936 Mon Sep 17 00:00:00 2001
From: Jason Garrett-Glaser
Date: Tue, 5 Jul 2011 17:55:14 -0700
Subject: H.264: faster P-SKIP decoding

Inline the relevant parts of fill_decode_caches into P-SKIP mv prediction
to avoid calling the whole thing.
---
 libavcodec/h264.h        |   1 -
 libavcodec/h264_mvpred.h | 111 +++++++++++++++++++++++++++++++++++++++++++----
 2 files changed, 102 insertions(+), 10 deletions(-)

diff --git a/libavcodec/h264.h b/libavcodec/h264.h
index a2abab9d9b..d34e6db573 100644
--- a/libavcodec/h264.h
+++ b/libavcodec/h264.h
@@ -1331,7 +1331,6 @@ static void av_unused decode_mb_skip(H264Context *h){
         mb_type|= MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P1L0|MB_TYPE_SKIP;
 
         fill_decode_neighbors(h, mb_type);
-        fill_decode_caches(h, mb_type); //FIXME check what is needed and what not ...
         pred_pskip_motion(h, &mx, &my);
         fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, 0, 1);
         fill_rectangle(  h->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mx,my), 4);
diff --git a/libavcodec/h264_mvpred.h b/libavcodec/h264_mvpred.h
index c4e65b5847..8159f8a0dc 100644
--- a/libavcodec/h264_mvpred.h
+++ b/libavcodec/h264_mvpred.h
@@ -213,21 +213,114 @@ static av_always_inline void pred_8x16_motion(H264Context * const h, int n, int
     pred_motion(h, n, 2, list, ref, mx, my);
 }
 
-static inline void pred_pskip_motion(H264Context * const h, int * const mx, int * const my){
-    const int top_ref = h->ref_cache[0][ scan8[0] - 8 ];
-    const int left_ref= h->ref_cache[0][ scan8[0] - 1 ];
-
-    tprintf(h->s.avctx, "pred_pskip: (%d) (%d) at %2d %2d\n", top_ref, left_ref, h->s.mb_x, h->s.mb_y);
+#define FIX_MV_MBAFF(type, refn, mvn, idx)\
+    if(FRAME_MBAFF){\
+        if(MB_FIELD){\
+            if(!IS_INTERLACED(type)){\
+                refn <<= 1;\
+                AV_COPY32(mvbuf[idx], mvn);\
+                mvbuf[idx][1] /= 2;\
+                mvn = mvbuf[idx];\
+            }\
+        }else{\
+            if(IS_INTERLACED(type)){\
+                refn >>= 1;\
+                AV_COPY32(mvbuf[idx], mvn);\
+                mvbuf[idx][1] <<= 1;\
+                mvn = mvbuf[idx];\
+            }\
+        }\
+    }
 
-    if(top_ref == PART_NOT_AVAILABLE || left_ref == PART_NOT_AVAILABLE
-       || !( top_ref | AV_RN32A(h->mv_cache[0][ scan8[0] - 8 ]))
-       || !(left_ref | AV_RN32A(h->mv_cache[0][ scan8[0] - 1 ]))){
+static av_always_inline void pred_pskip_motion(H264Context * const h, int * const mx, int * const my){
+    DECLARE_ALIGNED(4, static const int16_t, zeromv)[2] = {0};
+    DECLARE_ALIGNED(4, int16_t, mvbuf)[3][2];
+    MpegEncContext * const s = &h->s;
+    int8_t *ref = s->current_picture.ref_index[0];
+    int16_t (*mv)[2] = s->current_picture.motion_val[0];
+    int top_ref, left_ref, diagonal_ref, match_count;
+    const int16_t *A, *B, *C;
+    int b_stride = h->b_stride;
+
+    /* To avoid doing an entire fill_decode_caches, we inline the relevant parts here.
+     * FIXME: this is a partial duplicate of the logic in fill_decode_caches, but it's
+     * faster this way. Is there a way to avoid this duplication?
+     */
+    if(USES_LIST(h->left_type[LTOP], 0)){
+        left_ref = ref[4*h->left_mb_xy[LTOP] + 1 + (h->left_block[0]&~1)];
+        A = mv[h->mb2b_xy[h->left_mb_xy[LTOP]] + 3 + b_stride*h->left_block[0]];
+        FIX_MV_MBAFF(h->left_type[LTOP], left_ref, A, 0);
+        if(!(left_ref | AV_RN32A(A))){
+            *mx = *my = 0;
+            return;
+        }
+    }else if(h->left_type[LTOP]){
+        left_ref = LIST_NOT_USED;
+        A = zeromv;
+    }else{
+        *mx = *my = 0;
+        return;
+    }
+    if(USES_LIST(h->top_type, 0)){
+        top_ref = ref[4*h->top_mb_xy + 2];
+        B = mv[h->mb2b_xy[h->top_mb_xy] + 3*b_stride];
+        FIX_MV_MBAFF(h->top_type, top_ref, B, 1);
+        if(!(top_ref | AV_RN32A(B))){
+            *mx = *my = 0;
+            return;
+        }
+    }else if(h->top_type){
+        top_ref = LIST_NOT_USED;
+        B = zeromv;
+    }else{
         *mx = *my = 0;
         return;
     }
 
-    pred_motion(h, 0, 4, 0, 0, mx, my);
+    tprintf(h->s.avctx, "pred_pskip: (%d) (%d) at %2d %2d\n", top_ref, left_ref, h->s.mb_x, h->s.mb_y);
+
+    if(USES_LIST(h->topright_type, 0)){
+        diagonal_ref = ref[4*h->topright_mb_xy + 2];
+        C = mv[h->mb2b_xy[h->topright_mb_xy] + 3*b_stride];
+        FIX_MV_MBAFF(h->topright_type, diagonal_ref, C, 2);
+    }else if(h->topright_type){
+        diagonal_ref = LIST_NOT_USED;
+        C = zeromv;
+    }else{
+        if(USES_LIST(h->topleft_type, 0)){
+            diagonal_ref = ref[4*h->topleft_mb_xy + 1 + (h->topleft_partition & 2)];
+            C = mv[h->mb2b_xy[h->topleft_mb_xy] + 3 + b_stride + (h->topleft_partition & 2*b_stride)];
+            FIX_MV_MBAFF(h->topleft_type, diagonal_ref, C, 2);
+        }else if(h->topleft_type){
+            diagonal_ref = LIST_NOT_USED;
+            C = zeromv;
+        }else{
+            diagonal_ref = PART_NOT_AVAILABLE;
+            C = zeromv;
+        }
+    }
+
+    match_count= !diagonal_ref + !top_ref + !left_ref;
+    tprintf(h->s.avctx, "pred_pskip_motion match_count=%d\n", match_count);
+    if(match_count > 1){
+        *mx= mid_pred(A[0], B[0], C[0]);
+        *my= mid_pred(A[1], B[1], C[1]);
+    }else if(match_count==1){
+        if(!left_ref){
+            *mx= A[0];
+            *my= A[1];
+        }else if(!top_ref){
+            *mx= B[0];
+            *my= B[1];
+        }else{
+            *mx= C[0];
+            *my= C[1];
+        }
+    }else{
+        *mx= mid_pred(A[0], B[0], C[0]);
+        *my= mid_pred(A[1], B[1], C[1]);
+    }
 
     return;
 }
--
cgit v1.2.3
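
For readers who do not want to trace the diff, the following standalone sketch illustrates the P-SKIP rule that the new pred_pskip_motion() implements: predict a (0,0) vector when the left or top neighbour is missing or is a zero-motion ref-0 block; otherwise, if exactly one of the A/B/C neighbours uses reference index 0, copy that neighbour's vector, and fall back to the component-wise median in all other cases. The Neighbor struct, pred_pskip_sketch() and the mid_pred() re-implementation are illustrative only, not FFmpeg API; MBAFF field/frame scaling and the top-right/top-left fallback used to pick C are omitted.

#include <stdio.h>
#include <stdint.h>

/* Component-wise median of three integers (same result as FFmpeg's mid_pred()). */
static int mid_pred(int a, int b, int c)
{
    int mn = a < b ? a : b;
    int mx = a < b ? b : a;
    if (c < mn) return mn;
    if (c > mx) return mx;
    return c;
}

/* Hypothetical, simplified neighbour description: for a present but intra or
 * otherwise unusable neighbour, set ref to -1 and mv to {0,0}, mirroring the
 * zeromv/LIST_NOT_USED handling in the patch. */
typedef struct {
    int     available;  /* neighbour macroblock exists inside the picture */
    int     ref;        /* list-0 reference index, or -1 if not used      */
    int16_t mv[2];      /* motion vector {x, y} in quarter-pel units      */
} Neighbor;

/* A = left, B = top, C = top-right (or top-left fallback, resolved by the caller). */
static void pred_pskip_sketch(const Neighbor *A, const Neighbor *B,
                              const Neighbor *C, int *mx, int *my)
{
    /* Zero-MV case: left/top missing, or either one is ref 0 with a zero vector. */
    if (!A->available || !B->available ||
        (A->ref == 0 && !(A->mv[0] | A->mv[1])) ||
        (B->ref == 0 && !(B->mv[0] | B->mv[1]))) {
        *mx = *my = 0;
        return;
    }

    /* Number of neighbours whose list-0 reference index is 0. */
    int match_count = (A->ref == 0) + (B->ref == 0) + (C->ref == 0);

    if (match_count == 1) {
        /* Exactly one match: copy that neighbour's vector. */
        const Neighbor *n = (A->ref == 0) ? A : (B->ref == 0) ? B : C;
        *mx = n->mv[0];
        *my = n->mv[1];
    } else {
        /* Zero or several matches: component-wise median of A, B, C. */
        *mx = mid_pred(A->mv[0], B->mv[0], C->mv[0]);
        *my = mid_pred(A->mv[1], B->mv[1], C->mv[1]);
    }
}

int main(void)
{
    Neighbor A = { 1, 0, {  4, -2 } };
    Neighbor B = { 1, 0, {  8,  0 } };
    Neighbor C = { 1, 1, { 12,  6 } };
    int mx, my;

    pred_pskip_sketch(&A, &B, &C, &mx, &my);
    printf("predicted P-SKIP mv: (%d, %d)\n", mx, my);  /* median path: (8, 0) */
    return 0;
}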
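The FIX_MV_MBAFF macro handles the remaining wrinkle: with MBAFF, a neighbour coded in the other frame/field mode than the current macroblock stores its reference index and vertical motion in different units, so they are rescaled before being compared or used. The sketch below is a hypothetical standalone rendering of that transform; the function name and the cur_is_field/neigh_is_field parameters are illustrative and not part of the patch.

#include <stdio.h>
#include <stdint.h>

/* When the current MB is field-coded and the neighbour is frame-coded, field
 * reference lists have twice as many entries and field lines are twice as far
 * apart, so the ref index is doubled and the vertical MV halved; the opposite
 * adjustment applies in the reverse case. */
static void scale_neighbor_mbaff(int cur_is_field, int neigh_is_field,
                                 int *ref, int16_t mv[2])
{
    if (cur_is_field && !neigh_is_field) {
        *ref <<= 1;
        mv[1] /= 2;
    } else if (!cur_is_field && neigh_is_field) {
        *ref >>= 1;
        mv[1] <<= 1;
    }
}

int main(void)
{
    int ref = 1;
    int16_t mv[2] = { 5, -6 };

    scale_neighbor_mbaff(1 /* current is field */, 0 /* neighbour is frame */, &ref, mv);
    printf("scaled: ref=%d mv=(%d,%d)\n", ref, mv[0], mv[1]);  /* ref=2 mv=(5,-3) */
    return 0;
}

Note that in the patch itself the adjustment is made on a copy in the on-stack mvbuf array (AV_COPY32 followed by the rescale), so the picture's motion_val data is left untouched.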