author    Kaustubh Raste <kaustubh.raste@imgtec.com>      2017-11-06 11:32:57 +0530
committer Michael Niedermayer <michael@niedermayer.cc>    2017-11-08 20:58:56 +0100
commit    4878854eefe6d7f4f18fe3b1909f4e9571e18b48 (patch)
tree      e59c60bf22f1f02993a5a10bff7532ae95dc0ed9 /libavcodec/mips
parent    9d3eb75cf637e6f2a664ad3ab67c4f785226f62e (diff)
avcodec/mips: Improve hevc bi wgt 4 tap hv mc msa functions
Use global mask buffer for appropriate mask load. Use immediate unsigned
saturation for clip to max, saving one vector register.

Signed-off-by: Kaustubh Raste <kaustubh.raste@imgtec.com>
Reviewed-by: Manojkumar Bhosale <Manojkumar.Bhosale@imgtec.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
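The first change the message describes replaces an inline vector literal,
rebuilt in every function body, with a single load from a shared mask table.
A minimal sketch of the idea, assuming a table laid out the way the
ff_hevc_mask_arr offsets in this diff imply (+0 for the 8-width masks, +16
for the 4-width masks, whose new code shuffles two source rows per VSHF);
the table name and the 4-width row contents here are inferred, not quoted
from the source:

    #include <stdint.h>
    #include <msa.h>

    /* One shared, cache-aligned table instead of a per-function vector
     * literal.  Row 0 serves the 8-width filters (loaded from offset 0);
     * row 1 serves the 4-width filters, where indices 16 and up reach
     * into the other source vector of the VSHF operand pair. */
    static const uint8_t hevc_mask_arr[16 * 2] __attribute__((aligned(0x40))) = {
        /* 8 width cases */
        0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
        /* 4 width cases (two rows shuffled at once) */
        0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20
    };

    static inline v16i8 load_mask_4w(void)
    {
        /* FFmpeg spells this LD_SB(ff_hevc_mask_arr + 16). */
        return __msa_ld_b((void *) (hevc_mask_arr + 16), 0);
    }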
Diffstat (limited to 'libavcodec/mips')
-rw-r--r--  libavcodec/mips/hevc_mc_biw_msa.c  1396
1 file changed, 872 insertions(+), 524 deletions(-)
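The second change swaps the generic min/max clip for immediate unsigned
saturation. Roughly how the CLIP_SW_0_255 to CLIP_SH_0_255_MAX_SATU move in
this diff works, sketched here as inline functions rather than FFmpeg's
statement-expression macros, so the register cost is easy to see:

    #include <msa.h>

    /* Old style: clamp against a 255 constant that must live in a register. */
    static inline v8i16 clip_sh_0_255(v8i16 in)
    {
        v8i16 max_m = __msa_ldi_h(255);         /* burns one vector register */
        v8i16 out_m = __msa_maxi_s_h(in, 0);    /* lower bound: 0            */
        return __msa_min_s_h(max_m, out_m);     /* upper bound: 255          */
    }

    /* New style: sat_u.h with immediate 7 saturates each unsigned halfword
     * to 2^8 - 1 = 255, so the 255 constant and its register disappear. */
    static inline v8i16 clip_sh_0_255_max_satu(v8i16 in)
    {
        v8i16 out_m = __msa_maxi_s_h(in, 0);    /* lower bound: 0 */
        return (v8i16) __msa_sat_u_h((v8u16) out_m, 7);
    }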
diff --git a/libavcodec/mips/hevc_mc_biw_msa.c b/libavcodec/mips/hevc_mc_biw_msa.c
index 0e5f8a02cf..ea65f00497 100644
--- a/libavcodec/mips/hevc_mc_biw_msa.c
+++ b/libavcodec/mips/hevc_mc_biw_msa.c
@@ -4495,26 +4495,25 @@ static void hevc_hv_biwgt_4t_4x2_msa(uint8_t *src0_ptr,
int32_t dst_stride,
const int8_t *filter_x,
const int8_t *filter_y,
- int32_t height,
int32_t weight0,
int32_t weight1,
int32_t offset0,
int32_t offset1,
int32_t rnd_val)
{
+ uint64_t tp0, tp1;
int32_t offset, weight;
- v8i16 in0, in1;
+ v8i16 in0 = { 0 };
+ v16u8 out;
v16i8 src0, src1, src2, src3, src4;
v8i16 filt0, filt1;
- v4i32 filt_h0, filt_h1;
- v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+ v8i16 filt_h0, filt_h1;
+ v16i8 mask0 = LD_SB(ff_hevc_mask_arr + 16);
v16i8 mask1;
- v8i16 filter_vec, const_vec;
+ v8i16 filter_vec, tmp, weight_vec;
v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
- v8i16 dst0, dst1, dst2, dst3, dst4;
- v4i32 dst0_r, dst1_r, dst0_l;
- v8i16 dst10_r, dst32_r, dst21_r, dst43_r;
- v4i32 weight_vec, offset_vec, rnd_vec;
+ v8i16 dst20, dst31, dst42, dst10, dst32, dst21, dst43, tmp0, tmp1;
+ v4i32 dst0, dst1, offset_vec, rnd_vec, const_vec;
src0_ptr -= (src_stride + 1);
@@ -4522,10 +4521,9 @@ static void hevc_hv_biwgt_4t_4x2_msa(uint8_t *src0_ptr,
SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
filter_vec = LD_SH(filter_y);
- vec0 = __msa_clti_s_b((v16i8) filter_vec, 0);
- filter_vec = (v8i16) __msa_ilvr_b(vec0, (v16i8) filter_vec);
+ UNPCK_R_SB_SH(filter_vec, filter_vec);
- SPLATI_W2_SW(filter_vec, 0, filt_h0, filt_h1);
+ SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);
mask1 = mask0 + 2;
@@ -4533,56 +4531,44 @@ static void hevc_hv_biwgt_4t_4x2_msa(uint8_t *src0_ptr,
weight0 = weight0 & 0x0000FFFF;
weight = weight0 | (weight1 << 16);
- const_vec = __msa_ldi_h(128);
+ const_vec = __msa_fill_w((128 * weight1));
const_vec <<= 6;
offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
+ weight_vec = (v8i16) __msa_fill_w(weight);
rnd_vec = __msa_fill_w(rnd_val + 1);
+ offset_vec += const_vec;
- LD_SB3(src0_ptr, src_stride, src0, src1, src2);
- src0_ptr += (3 * src_stride);
- XORI_B3_128_SB(src0, src1, src2);
+ LD_SB5(src0_ptr, src_stride, src0, src1, src2, src3, src4);
+ XORI_B5_128_SB(src0, src1, src2, src3, src4);
- VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
- VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
- VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
- dst0 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);
- dst1 = const_vec;
- DPADD_SB2_SH(vec2, vec3, filt0, filt1, dst1, dst1);
- dst2 = const_vec;
- DPADD_SB2_SH(vec4, vec5, filt0, filt1, dst2, dst2);
- ILVR_H2_SH(dst1, dst0, dst2, dst1, dst10_r, dst21_r);
+ VSHF_B2_SB(src0, src2, src0, src2, mask0, mask1, vec0, vec1);
+ VSHF_B2_SB(src1, src3, src1, src3, mask0, mask1, vec2, vec3);
+ VSHF_B2_SB(src2, src4, src2, src4, mask0, mask1, vec4, vec5);
- LD_SB2(src0_ptr, src_stride, src3, src4);
- LD_SH2(src1_ptr, src2_stride, in0, in1);
- in0 = (v8i16) __msa_ilvr_d((v2i64) in1, (v2i64) in0);
- XORI_B2_128_SB(src3, src4);
- /* row 3 */
- VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
- dst3 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst3, dst3);
- dst32_r = __msa_ilvr_h(dst3, dst2);
- dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
- dst0_r >>= 6;
- /* row 4 */
- VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec0, vec1);
- dst4 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst4, dst4);
- dst43_r = __msa_ilvr_h(dst4, dst3);
- dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
- dst1_r >>= 6;
- dst1_r = (v4i32) __msa_pckev_h((v8i16) dst1_r, (v8i16) dst0_r);
+ dst20 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dst31 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dst42 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
- ILVRL_H2_SW(dst1_r, in0, dst0_r, dst0_l);
- dst0_r = __msa_dpadd_s_w(offset_vec, (v8i16) dst0_r, (v8i16) weight_vec);
- dst0_l = __msa_dpadd_s_w(offset_vec, (v8i16) dst0_l, (v8i16) weight_vec);
- SRAR_W2_SW(dst0_r, dst0_l, rnd_vec);
- dst0_r = CLIP_SW_0_255(dst0_r);
- dst0_l = CLIP_SW_0_255(dst0_l);
+ ILVRL_H2_SH(dst31, dst20, dst10, dst32);
+ ILVRL_H2_SH(dst42, dst31, dst21, dst43);
+
+ dst0 = HEVC_FILT_4TAP(dst10, dst32, filt_h0, filt_h1);
+ dst1 = HEVC_FILT_4TAP(dst21, dst43, filt_h0, filt_h1);
+ dst0 >>= 6;
+ dst1 >>= 6;
+ dst0 = (v4i32) __msa_pckev_h((v8i16) dst1, (v8i16) dst0);
- HEVC_PCK_SW_SB2(dst0_l, dst0_r, dst0_r);
- ST4x2_UB(dst0_r, dst, dst_stride);
+ LD2(src1_ptr, src2_stride, tp0, tp1);
+ INSERT_D2_SH(tp0, tp1, in0);
+
+ ILVRL_H2_SH(dst0, in0, tmp0, tmp1);
+ dst0 = __msa_dpadd_s_w(offset_vec, tmp0, weight_vec);
+ dst1 = __msa_dpadd_s_w(offset_vec, tmp1, weight_vec);
+ SRAR_W2_SW(dst0, dst1, rnd_vec);
+ tmp = __msa_pckev_h((v8i16) dst1, (v8i16) dst0);
+ tmp = CLIP_SH_0_255_MAX_SATU(tmp);
+ out = (v16u8) __msa_pckev_b((v16i8) tmp, (v16i8) tmp);
+ ST4x2_UB(out, dst, dst_stride);
}
static void hevc_hv_biwgt_4t_4x4_msa(uint8_t *src0_ptr,
@@ -4593,27 +4579,28 @@ static void hevc_hv_biwgt_4t_4x4_msa(uint8_t *src0_ptr,
int32_t dst_stride,
const int8_t *filter_x,
const int8_t *filter_y,
- int32_t height,
int32_t weight0,
int32_t weight1,
int32_t offset0,
int32_t offset1,
int32_t rnd_val)
{
+ uint64_t tp0, tp1;
int32_t offset, weight;
- v8i16 in0, in1, in2, in3;
+ v16u8 out;
+ v8i16 in0 = { 0 }, in1 = { 0 };
v16i8 src0, src1, src2, src3, src4, src5, src6;
v8i16 filt0, filt1;
- v4i32 filt_h0, filt_h1;
- v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+ v8i16 filt_h0, filt_h1;
+ v16i8 mask0 = LD_SB(ff_hevc_mask_arr + 16);
v16i8 mask1;
- v8i16 filter_vec, const_vec;
- v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
- v8i16 dst0, dst1, dst2, dst3, dst4, dst5;
- v8i16 tmp0, tmp1;
- v4i32 dst0_r, dst1_r, dst2_r, dst3_r;
- v8i16 dst10_r, dst32_r, dst21_r, dst43_r;
- v4i32 weight_vec, offset_vec, rnd_vec;
+ v8i16 filter_vec, weight_vec;
+ v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+ v8i16 tmp0, tmp1, tmp2, tmp3;
+ v8i16 dst30, dst41, dst52, dst63;
+ v8i16 dst10, dst32, dst54, dst21, dst43, dst65;
+ v4i32 offset_vec, rnd_vec, const_vec;
+ v4i32 dst0, dst1, dst2, dst3;
src0_ptr -= (src_stride + 1);
@@ -4621,10 +4608,9 @@ static void hevc_hv_biwgt_4t_4x4_msa(uint8_t *src0_ptr,
SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
filter_vec = LD_SH(filter_y);
- vec0 = __msa_clti_s_b((v16i8) filter_vec, 0);
- filter_vec = (v8i16) __msa_ilvr_b(vec0, (v16i8) filter_vec);
+ UNPCK_R_SB_SH(filter_vec, filter_vec);
- SPLATI_W2_SW(filter_vec, 0, filt_h0, filt_h1);
+ SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);
mask1 = mask0 + 2;
@@ -4632,66 +4618,54 @@ static void hevc_hv_biwgt_4t_4x4_msa(uint8_t *src0_ptr,
weight0 = weight0 & 0x0000FFFF;
weight = weight0 | (weight1 << 16);
- const_vec = __msa_ldi_h(128);
+ const_vec = __msa_fill_w((128 * weight1));
const_vec <<= 6;
offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
+ weight_vec = (v8i16) __msa_fill_w(weight);
rnd_vec = __msa_fill_w(rnd_val + 1);
+ offset_vec += const_vec;
- LD_SB3(src0_ptr, src_stride, src0, src1, src2);
- src0_ptr += (3 * src_stride);
- XORI_B3_128_SB(src0, src1, src2);
+ LD_SB7(src0_ptr, src_stride, src0, src1, src2, src3, src4, src5, src6);
+ XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
- VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
- VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
- VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
- dst0 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);
- dst1 = const_vec;
- DPADD_SB2_SH(vec2, vec3, filt0, filt1, dst1, dst1);
- dst2 = const_vec;
- DPADD_SB2_SH(vec4, vec5, filt0, filt1, dst2, dst2);
- ILVR_H2_SH(dst1, dst0, dst2, dst1, dst10_r, dst21_r);
+ VSHF_B2_SB(src0, src3, src0, src3, mask0, mask1, vec0, vec1);
+ VSHF_B2_SB(src1, src4, src1, src4, mask0, mask1, vec2, vec3);
+ VSHF_B2_SB(src2, src5, src2, src5, mask0, mask1, vec4, vec5);
+ VSHF_B2_SB(src3, src6, src3, src6, mask0, mask1, vec6, vec7);
- LD_SB4(src0_ptr, src_stride, src3, src4, src5, src6);
- LD_SH4(src1_ptr, src2_stride, in0, in1, in2, in3);
- ILVR_D2_SH(in1, in0, in3, in2, in0, in1);
- XORI_B4_128_SB(src3, src4, src5, src6);
- /* row 3 */
- VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
- dst3 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst3, dst3);
- dst32_r = __msa_ilvr_h(dst3, dst2);
- dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
- dst0_r >>= 6;
- /* row 4 */
- VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec0, vec1);
- dst4 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst4, dst4);
- dst43_r = __msa_ilvr_h(dst4, dst3);
- dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
- dst1_r >>= 6;
- /* row 5 */
- VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec0, vec1);
- dst5 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst5, dst5);
- dst10_r = __msa_ilvr_h(dst5, dst4);
- dst2_r = HEVC_FILT_4TAP(dst32_r, dst10_r, filt_h0, filt_h1);
- dst2_r >>= 6;
- /* row 6 */
- VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec0, vec1);
- dst2 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst2, dst2);
- dst21_r = __msa_ilvr_h(dst2, dst5);
- dst3_r = HEVC_FILT_4TAP(dst43_r, dst21_r, filt_h0, filt_h1);
- dst3_r >>= 6;
- PCKEV_H2_SH(dst1_r, dst0_r, dst3_r, dst2_r, tmp0, tmp1);
- HEVC_BIW_RND_CLIP2(tmp0, tmp1, in0, in1,
- weight_vec, rnd_vec, offset_vec,
- tmp0, tmp1);
+ dst30 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dst41 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dst52 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
+ dst63 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
- tmp0 = (v8i16) __msa_pckev_b((v16i8) tmp1, (v16i8) tmp0);
- ST4x4_UB(tmp0, tmp0, 0, 1, 2, 3, dst, dst_stride);
+ ILVRL_H2_SH(dst41, dst30, dst10, dst43);
+ ILVRL_H2_SH(dst52, dst41, dst21, dst54);
+ ILVRL_H2_SH(dst63, dst52, dst32, dst65);
+ dst0 = HEVC_FILT_4TAP(dst10, dst32, filt_h0, filt_h1);
+ dst1 = HEVC_FILT_4TAP(dst21, dst43, filt_h0, filt_h1);
+ dst2 = HEVC_FILT_4TAP(dst32, dst54, filt_h0, filt_h1);
+ dst3 = HEVC_FILT_4TAP(dst43, dst65, filt_h0, filt_h1);
+ SRA_4V(dst0, dst1, dst2, dst3, 6);
+ PCKEV_H2_SH(dst1, dst0, dst3, dst2, tmp1, tmp3);
+
+ LD2(src1_ptr, src2_stride, tp0, tp1);
+ INSERT_D2_SH(tp0, tp1, in0);
+ src1_ptr += (2 * src2_stride);
+ LD2(src1_ptr, src2_stride, tp0, tp1);
+ INSERT_D2_SH(tp0, tp1, in1);
+
+ ILVRL_H2_SH(tmp1, in0, tmp0, tmp1);
+ ILVRL_H2_SH(tmp3, in1, tmp2, tmp3);
+
+ dst0 = __msa_dpadd_s_w(offset_vec, tmp0, weight_vec);
+ dst1 = __msa_dpadd_s_w(offset_vec, tmp1, weight_vec);
+ dst2 = __msa_dpadd_s_w(offset_vec, tmp2, weight_vec);
+ dst3 = __msa_dpadd_s_w(offset_vec, tmp3, weight_vec);
+ SRAR_W4_SW(dst0, dst1, dst2, dst3, rnd_vec);
+ PCKEV_H2_SH(dst1, dst0, dst3, dst2, tmp0, tmp1);
+ CLIP_SH2_0_255_MAX_SATU(tmp0, tmp1);
+ out = (v16u8) __msa_pckev_b((v16i8) tmp1, (v16i8) tmp0);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
}
static void hevc_hv_biwgt_4t_4multx8mult_msa(uint8_t *src0_ptr,
@@ -4710,21 +4684,24 @@ static void hevc_hv_biwgt_4t_4multx8mult_msa(uint8_t *src0_ptr,
int32_t rnd_val)
{
uint32_t loop_cnt;
+ uint64_t tp0, tp1;
int32_t offset, weight;
- v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
+ v16u8 out0, out1;
+ v8i16 in0 = { 0 }, in1 = { 0 }, in2 = { 0 }, in3 = { 0 };
v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
v8i16 filt0, filt1;
- v4i32 filt_h0, filt_h1;
- v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+ v8i16 filt_h0, filt_h1;
+ v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+ v16i8 mask0 = LD_SB(ff_hevc_mask_arr + 16);
v16i8 mask1;
- v8i16 filter_vec, const_vec;
- v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
- v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst8, dst9;
- v4i32 dst0_r, dst1_r, dst2_r, dst3_r, dst4_r, dst5_r, dst6_r, dst7_r;
- v8i16 tmp0, tmp1, tmp2, tmp3;
+ v8i16 filter_vec, weight_vec;
+ v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ v8i16 dst10, dst21, dst22, dst73, dst84, dst95, dst106;
v8i16 dst10_r, dst32_r, dst54_r, dst76_r;
v8i16 dst21_r, dst43_r, dst65_r, dst87_r;
- v4i32 weight_vec, offset_vec, rnd_vec;
+ v8i16 dst98_r, dst109_r;
+ v4i32 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
+ v4i32 offset_vec, rnd_vec, const_vec;
src0_ptr -= (src_stride + 1);
@@ -4732,10 +4709,9 @@ static void hevc_hv_biwgt_4t_4multx8mult_msa(uint8_t *src0_ptr,
SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
filter_vec = LD_SH(filter_y);
- vec0 = __msa_clti_s_b((v16i8) filter_vec, 0);
- filter_vec = (v8i16) __msa_ilvr_b(vec0, (v16i8) filter_vec);
+ UNPCK_R_SB_SH(filter_vec, filter_vec);
- SPLATI_W2_SW(filter_vec, 0, filt_h0, filt_h1);
+ SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);
mask1 = mask0 + 2;
@@ -4743,98 +4719,96 @@ static void hevc_hv_biwgt_4t_4multx8mult_msa(uint8_t *src0_ptr,
weight0 = weight0 & 0x0000FFFF;
weight = weight0 | (weight1 << 16);
- const_vec = __msa_ldi_h(128);
+ const_vec = __msa_fill_w((128 * weight1));
const_vec <<= 6;
offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
+ weight_vec = (v8i16) __msa_fill_w(weight);
rnd_vec = __msa_fill_w(rnd_val + 1);
+ offset_vec += const_vec;
LD_SB3(src0_ptr, src_stride, src0, src1, src2);
src0_ptr += (3 * src_stride);
XORI_B3_128_SB(src0, src1, src2);
- VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
- VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
- VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
- dst0 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);
- dst1 = const_vec;
- DPADD_SB2_SH(vec2, vec3, filt0, filt1, dst1, dst1);
- dst2 = const_vec;
- DPADD_SB2_SH(vec4, vec5, filt0, filt1, dst2, dst2);
- ILVR_H2_SH(dst1, dst0, dst2, dst1, dst10_r, dst21_r);
+ VSHF_B2_SB(src0, src1, src0, src1, mask0, mask1, vec0, vec1);
+ VSHF_B2_SB(src1, src2, src1, src2, mask0, mask1, vec2, vec3);
+ dst10 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dst21 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ ILVRL_H2_SH(dst21, dst10, dst10_r, dst21_r);
+ dst22 = (v8i16) __msa_splati_d((v2i64) dst21, 1);
for (loop_cnt = height >> 3; loop_cnt--;) {
LD_SB8(src0_ptr, src_stride,
src3, src4, src5, src6, src7, src8, src9, src10);
src0_ptr += (8 * src_stride);
- LD_SH8(src1_ptr, src2_stride, in0, in1, in2, in3, in4, in5, in6, in7);
- src1_ptr += (8 * src2_stride);
- ILVR_D2_SH(in1, in0, in3, in2, in0, in1);
- ILVR_D2_SH(in5, in4, in7, in6, in2, in3);
XORI_B8_128_SB(src3, src4, src5, src6, src7, src8, src9, src10);
- /* row 3 */
- VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
- dst3 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst3, dst3);
- dst32_r = __msa_ilvr_h(dst3, dst2);
- dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
- dst0_r >>= 6;
- /* row 4 */
- VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec0, vec1);
- dst4 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst4, dst4);
- dst43_r = __msa_ilvr_h(dst4, dst3);
- dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
- dst1_r >>= 6;
- /* row 5 */
- VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec0, vec1);
- dst5 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst5, dst5);
- dst54_r = __msa_ilvr_h(dst5, dst4);
- dst2_r = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
- dst2_r >>= 6;
- /* row 6 */
- VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec0, vec1);
- dst6 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst6, dst6);
- dst65_r = __msa_ilvr_h(dst6, dst5);
- dst3_r = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
- dst3_r >>= 6;
- VSHF_B2_SB(src7, src7, src7, src7, mask0, mask1, vec0, vec1);
- dst7 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst7, dst7);
- dst76_r = __msa_ilvr_h(dst7, dst6);
- dst4_r = HEVC_FILT_4TAP(dst54_r, dst76_r, filt_h0, filt_h1);
- dst4_r >>= 6;
- VSHF_B2_SB(src8, src8, src8, src8, mask0, mask1, vec0, vec1);
- dst8 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst8, dst8);
- dst87_r = __msa_ilvr_h(dst8, dst7);
- dst5_r = HEVC_FILT_4TAP(dst65_r, dst87_r, filt_h0, filt_h1);
- dst5_r >>= 6;
- VSHF_B2_SB(src9, src9, src9, src9, mask0, mask1, vec0, vec1);
- dst9 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst9, dst9);
- dst10_r = __msa_ilvr_h(dst9, dst8);
- dst6_r = HEVC_FILT_4TAP(dst76_r, dst10_r, filt_h0, filt_h1);
- dst6_r >>= 6;
- VSHF_B2_SB(src10, src10, src10, src10, mask0, mask1, vec0, vec1);
- dst2 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst2, dst2);
- dst21_r = __msa_ilvr_h(dst2, dst9);
- dst7_r = HEVC_FILT_4TAP(dst87_r, dst21_r, filt_h0, filt_h1);
- dst7_r >>= 6;
- PCKEV_H4_SH(dst1_r, dst0_r, dst3_r, dst2_r,
- dst5_r, dst4_r, dst7_r, dst6_r, tmp0, tmp1, tmp2, tmp3);
- HEVC_BIW_RND_CLIP4(tmp0, tmp1, tmp2, tmp3,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- tmp0, tmp1, tmp2, tmp3);
+ VSHF_B2_SB(src3, src7, src3, src7, mask0, mask1, vec0, vec1);
+ VSHF_B2_SB(src4, src8, src4, src8, mask0, mask1, vec2, vec3);
+ VSHF_B2_SB(src5, src9, src5, src9, mask0, mask1, vec4, vec5);
+ VSHF_B2_SB(src6, src10, src6, src10, mask0, mask1, vec6, vec7);
+
+ dst73 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dst84 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dst95 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
+ dst106 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
+
+ dst32_r = __msa_ilvr_h(dst73, dst22);
+ ILVRL_H2_SH(dst84, dst73, dst43_r, dst87_r);
+ ILVRL_H2_SH(dst95, dst84, dst54_r, dst98_r);
+ ILVRL_H2_SH(dst106, dst95, dst65_r, dst109_r);
+ dst22 = (v8i16) __msa_splati_d((v2i64) dst73, 1);
+ dst76_r = __msa_ilvr_h(dst22, dst106);
- PCKEV_B2_SH(tmp1, tmp0, tmp3, tmp2, tmp0, tmp1);
- ST4x8_UB(tmp0, tmp1, dst, dst_stride);
+ LD2(src1_ptr, src2_stride, tp0, tp1);
+ src1_ptr += 2 * src2_stride;
+ INSERT_D2_SH(tp0, tp1, in0);
+ LD2(src1_ptr, src2_stride, tp0, tp1);
+ src1_ptr += 2 * src2_stride;
+ INSERT_D2_SH(tp0, tp1, in1);
+
+ LD2(src1_ptr, src2_stride, tp0, tp1);
+ src1_ptr += 2 * src2_stride;
+ INSERT_D2_SH(tp0, tp1, in2);
+ LD2(src1_ptr, src2_stride, tp0, tp1);
+ src1_ptr += 2 * src2_stride;
+ INSERT_D2_SH(tp0, tp1, in3);
+
+ dst0 = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
+ dst1 = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
+ dst2 = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
+ dst3 = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
+ dst4 = HEVC_FILT_4TAP(dst54_r, dst76_r, filt_h0, filt_h1);
+ dst5 = HEVC_FILT_4TAP(dst65_r, dst87_r, filt_h0, filt_h1);
+ dst6 = HEVC_FILT_4TAP(dst76_r, dst98_r, filt_h0, filt_h1);
+ dst7 = HEVC_FILT_4TAP(dst87_r, dst109_r, filt_h0, filt_h1);
+ SRA_4V(dst0, dst1, dst2, dst3, 6);
+ SRA_4V(dst4, dst5, dst6, dst7, 6);
+ PCKEV_H4_SW(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6, dst0, dst1,
+ dst2, dst3);
+ ILVRL_H2_SH(dst0, in0, tmp0, tmp1);
+ ILVRL_H2_SH(dst1, in1, tmp2, tmp3);
+ ILVRL_H2_SH(dst2, in2, tmp4, tmp5);
+ ILVRL_H2_SH(dst3, in3, tmp6, tmp7);
+ dst0 = __msa_dpadd_s_w(offset_vec, tmp0, weight_vec);
+ dst1 = __msa_dpadd_s_w(offset_vec, tmp1, weight_vec);
+ dst2 = __msa_dpadd_s_w(offset_vec, tmp2, weight_vec);
+ dst3 = __msa_dpadd_s_w(offset_vec, tmp3, weight_vec);
+ dst4 = __msa_dpadd_s_w(offset_vec, tmp4, weight_vec);
+ dst5 = __msa_dpadd_s_w(offset_vec, tmp5, weight_vec);
+ dst6 = __msa_dpadd_s_w(offset_vec, tmp6, weight_vec);
+ dst7 = __msa_dpadd_s_w(offset_vec, tmp7, weight_vec);
+ SRAR_W4_SW(dst0, dst1, dst2, dst3, rnd_vec);
+ SRAR_W4_SW(dst4, dst5, dst6, dst7, rnd_vec);
+ PCKEV_H4_SH(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6, tmp0, tmp1,
+ tmp2, tmp3);
+ CLIP_SH4_0_255_MAX_SATU(tmp0, tmp1, tmp2, tmp3);
+ PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, out0, out1);
+ ST4x8_UB(out0, out1, dst, dst_stride);
dst += (8 * dst_stride);
+
+ dst10_r = dst98_r;
+ dst21_r = dst109_r;
+ dst22 = (v8i16) __msa_splati_d((v2i64) dst106, 1);
}
}
@@ -4856,13 +4830,11 @@ static void hevc_hv_biwgt_4t_4w_msa(uint8_t *src0_ptr,
if (2 == height) {
hevc_hv_biwgt_4t_4x2_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
dst, dst_stride, filter_x, filter_y,
- height, weight0, weight1, offset0, offset1,
- rnd_val);
+ weight0, weight1, offset0, offset1, rnd_val);
} else if (4 == height) {
hevc_hv_biwgt_4t_4x4_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
dst, dst_stride, filter_x, filter_y,
- height, weight0, weight1, offset0, offset1,
- rnd_val);
+ weight0, weight1, offset0, offset1, rnd_val);
} else if (0 == (height % 8)) {
hevc_hv_biwgt_4t_4multx8mult_msa(src0_ptr, src_stride,
src1_ptr, src2_stride,
@@ -4887,22 +4859,28 @@ static void hevc_hv_biwgt_4t_6w_msa(uint8_t *src0_ptr,
int32_t offset1,
int32_t rnd_val)
{
- uint32_t loop_cnt;
+ uint32_t tpw0, tpw1, tpw2, tpw3;
+ uint64_t tp0, tp1;
int32_t offset, weight;
- v16i8 src0, src1, src2, src3, src4, src5, src6;
- v8i16 in0, in1, in2, in3;
+ v16u8 out0, out1, out2;
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
+ v8i16 in0 = { 0 }, in1 = { 0 }, in2 = { 0 }, in3 = { 0 };
+ v8i16 in4 = { 0 }, in5 = { 0 };
v8i16 filt0, filt1;
- v4i32 filt_h0, filt_h1;
- v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+ v8i16 filt_h0, filt_h1, filter_vec;
+ v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+ v16i8 mask0 = LD_SB(ff_hevc_mask_arr);
v16i8 mask1;
- v8i16 filter_vec, const_vec;
- v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
- v8i16 dst0, dst1, dst2, dst3, dst4, dst5;
+ v8i16 dsth0, dsth1, dsth2, dsth3, dsth4, dsth5, dsth6, dsth7, dsth8, dsth9;
+ v8i16 dsth10, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, weight_vec;
+ v8i16 dst10_r, dst32_r, dst54_r, dst76_r, dst98_r, dst21_r, dst43_r;
+ v8i16 dst65_r, dst87_r, dst109_r, dst10_l, dst32_l, dst54_l, dst76_l;
+ v8i16 dst98_l, dst21_l, dst43_l, dst65_l, dst87_l, dst109_l;
+ v8i16 dst1021_l, dst3243_l, dst5465_l, dst7687_l, dst98109_l;
v4i32 dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
- v8i16 tmp0, tmp1, tmp2, tmp3;
- v8i16 dst10_r, dst32_r, dst21_r, dst43_r;
- v8i16 dst10_l, dst32_l, dst21_l, dst43_l;
- v4i32 weight_vec, offset_vec, rnd_vec;
+ v4i32 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
+ v4i32 dst4_r, dst5_r, dst6_r, dst7_r;
+ v4i32 offset_vec, rnd_vec, const_vec;
src0_ptr -= (src_stride + 1);
@@ -4910,10 +4888,9 @@ static void hevc_hv_biwgt_4t_6w_msa(uint8_t *src0_ptr,
SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
filter_vec = LD_SH(filter_y);
- vec0 = __msa_clti_s_b((v16i8) filter_vec, 0);
- filter_vec = (v8i16) __msa_ilvr_b(vec0, (v16i8) filter_vec);
+ UNPCK_R_SB_SH(filter_vec, filter_vec);
- SPLATI_W2_SW(filter_vec, 0, filt_h0, filt_h1);
+ SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);
mask1 = mask0 + 2;
@@ -4921,11 +4898,12 @@ static void hevc_hv_biwgt_4t_6w_msa(uint8_t *src0_ptr,
weight0 = weight0 & 0x0000FFFF;
weight = weight0 | (weight1 << 16);
- const_vec = __msa_ldi_h(128);
+ const_vec = __msa_fill_w((128 * weight1));
const_vec <<= 6;
offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
+ weight_vec = (v8i16) __msa_fill_w(weight);
rnd_vec = __msa_fill_w(rnd_val + 1);
+ offset_vec += const_vec;
LD_SB3(src0_ptr, src_stride, src0, src1, src2);
src0_ptr += (3 * src_stride);
@@ -4934,72 +4912,120 @@ static void hevc_hv_biwgt_4t_6w_msa(uint8_t *src0_ptr,
VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
- dst0 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);
- dst1 = const_vec;
- DPADD_SB2_SH(vec2, vec3, filt0, filt1, dst1, dst1);
- dst2 = const_vec;
- DPADD_SB2_SH(vec4, vec5, filt0, filt1, dst2, dst2);
-
- ILVRL_H2_SH(dst1, dst0, dst10_r, dst10_l);
- ILVRL_H2_SH(dst2, dst1, dst21_r, dst21_l);
+ dsth0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dsth1 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dsth2 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
- for (loop_cnt = height >> 2; loop_cnt--;) {
- LD_SB4(src0_ptr, src_stride, src3, src4, src5, src6);
- src0_ptr += (4 * src_stride);
- LD_SH4(src1_ptr, src2_stride, in0, in1, in2, in3);
- src1_ptr += (4 * src2_stride);
- XORI_B4_128_SB(src3, src4, src5, src6);
-
- VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
- dst3 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst3, dst3);
+ ILVRL_H2_SH(dsth1, dsth0, dst10_r, dst10_l);
+ ILVRL_H2_SH(dsth2, dsth1, dst21_r, dst21_l);
- ILVRL_H2_SH(dst3, dst2, dst32_r, dst32_l);
- dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
- dst0_l = HEVC_FILT_4TAP(dst10_l, dst32_l, filt_h0, filt_h1);
- dst0_r >>= 6;
- dst0_l >>= 6;
+ LD_SB8(src0_ptr, src_stride, src3, src4, src5, src6, src7, src8, src9,
+ src10);
+ XORI_B8_128_SB(src3, src4, src5, src6, src7, src8, src9, src10);
- VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec0, vec1);
- dst4 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst4, dst4);
+ VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
+ VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec2, vec3);
+ VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec4, vec5);
+ VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec6, vec7);
- ILVRL_H2_SH(dst4, dst3, dst43_r, dst43_l);
- dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
- dst1_l = HEVC_FILT_4TAP(dst21_l, dst43_l, filt_h0, filt_h1);
- dst1_r >>= 6;
- dst1_l >>= 6;
+ dsth3 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dsth4 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dsth5 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
+ dsth6 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
- VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec0, vec1);
- dst5 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst5, dst5);
-
- ILVRL_H2_SH(dst5, dst4, dst10_r, dst10_l);
- dst2_r = HEVC_FILT_4TAP(dst32_r, dst10_r, filt_h0, filt_h1);
- dst2_l = HEVC_FILT_4TAP(dst32_l, dst10_l, filt_h0, filt_h1);
- dst2_r >>= 6;
- dst2_l >>= 6;
- VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec0, vec1);
- dst2 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst2, dst2);
-
- ILVRL_H2_SH(dst2, dst5, dst21_r, dst21_l);
- dst3_r = HEVC_FILT_4TAP(dst43_r, dst21_r, filt_h0, filt_h1);
- dst3_l = HEVC_FILT_4TAP(dst43_l, dst21_l, filt_h0, filt_h1);
- dst3_r >>= 6;
- dst3_l >>= 6;
- PCKEV_H4_SH(dst0_l, dst0_r, dst1_l, dst1_r,
- dst2_l, dst2_r, dst3_l, dst3_r, tmp0, tmp1, tmp2, tmp3);
- HEVC_BIW_RND_CLIP4(tmp0, tmp1, tmp2, tmp3,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- tmp0, tmp1, tmp2, tmp3);
+ VSHF_B2_SB(src7, src7, src7, src7, mask0, mask1, vec0, vec1);
+ VSHF_B2_SB(src8, src8, src8, src8, mask0, mask1, vec2, vec3);
+ VSHF_B2_SB(src9, src9, src9, src9, mask0, mask1, vec4, vec5);
+ VSHF_B2_SB(src10, src10, src10, src10, mask0, mask1, vec6, vec7);
+
+ dsth7 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dsth8 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dsth9 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
+ dsth10 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
+
+ ILVRL_H2_SH(dsth3, dsth2, dst32_r, dst32_l);
+ ILVRL_H2_SH(dsth4, dsth3, dst43_r, dst43_l);
+ ILVRL_H2_SH(dsth5, dsth4, dst54_r, dst54_l);
+ ILVRL_H2_SH(dsth6, dsth5, dst65_r, dst65_l);
+ ILVRL_H2_SH(dsth7, dsth6, dst76_r, dst76_l);
+ ILVRL_H2_SH(dsth8, dsth7, dst87_r, dst87_l);
+ ILVRL_H2_SH(dsth9, dsth8, dst98_r, dst98_l);
+ ILVRL_H2_SH(dsth10, dsth9, dst109_r, dst109_l);
+ PCKEV_D2_SH(dst21_l, dst10_l, dst43_l, dst32_l, dst1021_l, dst3243_l);
+ PCKEV_D2_SH(dst65_l, dst54_l, dst87_l, dst76_l, dst5465_l, dst7687_l);
+ dst98109_l = (v8i16) __msa_pckev_d((v2i64) dst109_l, (v2i64) dst98_l);
- PCKEV_B2_SH(tmp1, tmp0, tmp3, tmp2, tmp0, tmp1);
- ST6x4_UB(tmp0, tmp1, dst, dst_stride);
- dst += (4 * dst_stride);
- }
+ dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
+ dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
+ dst2_r = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
+ dst3_r = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
+ dst4_r = HEVC_FILT_4TAP(dst54_r, dst76_r, filt_h0, filt_h1);
+ dst5_r = HEVC_FILT_4TAP(dst65_r, dst87_r, filt_h0, filt_h1);
+ dst6_r = HEVC_FILT_4TAP(dst76_r, dst98_r, filt_h0, filt_h1);
+ dst7_r = HEVC_FILT_4TAP(dst87_r, dst109_r, filt_h0, filt_h1);
+ dst0_l = HEVC_FILT_4TAP(dst1021_l, dst3243_l, filt_h0, filt_h1);
+ dst1_l = HEVC_FILT_4TAP(dst3243_l, dst5465_l, filt_h0, filt_h1);
+ dst2_l = HEVC_FILT_4TAP(dst5465_l, dst7687_l, filt_h0, filt_h1);
+ dst3_l = HEVC_FILT_4TAP(dst7687_l, dst98109_l, filt_h0, filt_h1);
+ SRA_4V(dst0_r, dst1_r, dst2_r, dst3_r, 6);
+ SRA_4V(dst4_r, dst5_r, dst6_r, dst7_r, 6);
+ SRA_4V(dst0_l, dst1_l, dst2_l, dst3_l, 6);
+ PCKEV_H2_SW(dst1_r, dst0_r, dst3_r, dst2_r, dst0, dst1);
+ PCKEV_H2_SW(dst5_r, dst4_r, dst7_r, dst6_r, dst2, dst3);
+
+ LD2(src1_ptr, src2_stride, tp0, tp1);
+ INSERT_D2_SH(tp0, tp1, in0);
+ LD2(src1_ptr + 2 * src2_stride, src2_stride, tp0, tp1);
+ INSERT_D2_SH(tp0, tp1, in1);
+
+ LD2(src1_ptr + 4 * src2_stride, src2_stride, tp0, tp1);
+ INSERT_D2_SH(tp0, tp1, in2);
+ LD2(src1_ptr + 6 * src2_stride, src2_stride, tp0, tp1);
+ INSERT_D2_SH(tp0, tp1, in3);
+
+ ILVRL_H2_SH(dst0, in0, tmp0, tmp1);
+ ILVRL_H2_SH(dst1, in1, tmp2, tmp3);
+ ILVRL_H2_SH(dst2, in2, tmp4, tmp5);
+ ILVRL_H2_SH(dst3, in3, tmp6, tmp7);
+ dst0 = __msa_dpadd_s_w(offset_vec, tmp0, weight_vec);
+ dst1 = __msa_dpadd_s_w(offset_vec, tmp1, weight_vec);
+ dst2 = __msa_dpadd_s_w(offset_vec, tmp2, weight_vec);
+ dst3 = __msa_dpadd_s_w(offset_vec, tmp3, weight_vec);
+ dst4 = __msa_dpadd_s_w(offset_vec, tmp4, weight_vec);
+ dst5 = __msa_dpadd_s_w(offset_vec, tmp5, weight_vec);
+ dst6 = __msa_dpadd_s_w(offset_vec, tmp6, weight_vec);
+ dst7 = __msa_dpadd_s_w(offset_vec, tmp7, weight_vec);
+ SRAR_W4_SW(dst0, dst1, dst2, dst3, rnd_vec);
+ SRAR_W4_SW(dst4, dst5, dst6, dst7, rnd_vec);
+ PCKEV_H4_SH(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6, tmp0, tmp1,
+ tmp2, tmp3);
+ CLIP_SH4_0_255_MAX_SATU(tmp0, tmp1, tmp2, tmp3);
+ PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, out0, out1);
+ ST4x8_UB(out0, out1, dst, dst_stride);
+
+ PCKEV_H2_SW(dst1_l, dst0_l, dst3_l, dst2_l, dst4, dst5);
+
+ LW4(src1_ptr + 4, src2_stride, tpw0, tpw1, tpw2, tpw3);
+ src1_ptr += (4 * src2_stride);
+ INSERT_W4_SH(tpw0, tpw1, tpw2, tpw3, in4);
+ LW4(src1_ptr + 4, src2_stride, tpw0, tpw1, tpw2, tpw3);
+ INSERT_W4_SH(tpw0, tpw1, tpw2, tpw3, in5);
+
+ ILVRL_H2_SH(dst4, in4, tmp0, tmp1);
+ ILVRL_H2_SH(dst5, in5, tmp2, tmp3);
+
+ dst0 = __msa_dpadd_s_w(offset_vec, tmp0, weight_vec);
+ dst1 = __msa_dpadd_s_w(offset_vec, tmp1, weight_vec);
+ dst2 = __msa_dpadd_s_w(offset_vec, tmp2, weight_vec);
+ dst3 = __msa_dpadd_s_w(offset_vec, tmp3, weight_vec);
+ SRAR_W4_SW(dst0, dst1, dst2, dst3, rnd_vec);
+ PCKEV_H2_SH(dst1, dst0, dst3, dst2, tmp4, tmp5);
+
+ CLIP_SH2_0_255_MAX_SATU(tmp4, tmp5);
+ out2 = (v16u8) __msa_pckev_b((v16i8) tmp5, (v16i8) tmp4);
+ ST2x4_UB(out2, 0, dst + 4, dst_stride);
+ dst += 4 * dst_stride;
+ ST2x4_UB(out2, 4, dst + 4, dst_stride);
}
static void hevc_hv_biwgt_4t_8x2_msa(uint8_t *src0_ptr,
@@ -5010,7 +5036,6 @@ static void hevc_hv_biwgt_4t_8x2_msa(uint8_t *src0_ptr,
int32_t dst_stride,
const int8_t *filter_x,
const int8_t *filter_y,
- int32_t height,
int32_t weight0,
int32_t weight1,
int32_t offset0,
@@ -5018,20 +5043,21 @@ static void hevc_hv_biwgt_4t_8x2_msa(uint8_t *src0_ptr,
int32_t rnd_val)
{
int32_t weight, offset;
+ v16u8 out;
v16i8 src0, src1, src2, src3, src4;
v8i16 filt0, filt1;
- v4i32 filt_h0, filt_h1;
- v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+ v8i16 filt_h0, filt_h1;
+ v16i8 mask0 = LD_SB(ff_hevc_mask_arr);
v16i8 mask1;
- v8i16 filter_vec, const_vec;
- v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
+ v8i16 filter_vec, weight_vec;
+ v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9;
v8i16 dst0, dst1, dst2, dst3, dst4;
v8i16 in0, in1;
v4i32 dst0_r, dst0_l, dst1_r, dst1_l;
v8i16 dst10_r, dst32_r, dst21_r, dst43_r;
v8i16 dst10_l, dst32_l, dst21_l, dst43_l;
- v8i16 tmp0, tmp1;
- v4i32 weight_vec, offset_vec, rnd_vec;
+ v8i16 tmp0, tmp1, tmp2, tmp3;
+ v4i32 offset_vec, rnd_vec, const_vec;
src0_ptr -= (src_stride + 1);
@@ -5039,10 +5065,9 @@ static void hevc_hv_biwgt_4t_8x2_msa(uint8_t *src0_ptr,
SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
filter_vec = LD_SH(filter_y);
- vec0 = __msa_clti_s_b((v16i8) filter_vec, 0);
- filter_vec = (v8i16) __msa_ilvr_b(vec0, (v16i8) filter_vec);
+ UNPCK_R_SB_SH(filter_vec, filter_vec);
- SPLATI_W2_SW(filter_vec, 0, filt_h0, filt_h1);
+ SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);
mask1 = mask0 + 2;
@@ -5050,61 +5075,177 @@ static void hevc_hv_biwgt_4t_8x2_msa(uint8_t *src0_ptr,
weight0 = weight0 & 0x0000FFFF;
weight = weight0 | (weight1 << 16);
- const_vec = __msa_ldi_h(128);
+ const_vec = __msa_fill_w((128 * weight1));
const_vec <<= 6;
offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
+ weight_vec = (v8i16) __msa_fill_w(weight);
rnd_vec = __msa_fill_w(rnd_val + 1);
+ offset_vec += const_vec;
- LD_SB3(src0_ptr, src_stride, src0, src1, src2);
- src0_ptr += (3 * src_stride);
- XORI_B3_128_SB(src0, src1, src2);
+ LD_SB5(src0_ptr, src_stride, src0, src1, src2, src3, src4);
+ XORI_B5_128_SB(src0, src1, src2, src3, src4);
+
+ LD_SH2(src1_ptr, src2_stride, in0, in1);
VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
- dst0 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);
- dst1 = const_vec;
- DPADD_SB2_SH(vec2, vec3, filt0, filt1, dst1, dst1);
- dst2 = const_vec;
- DPADD_SB2_SH(vec4, vec5, filt0, filt1, dst2, dst2);
+ VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec6, vec7);
+ VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec8, vec9);
+
+ dst0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dst1 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dst2 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
+ dst3 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
+ dst4 = HEVC_FILT_4TAP_SH(vec8, vec9, filt0, filt1);
ILVRL_H2_SH(dst1, dst0, dst10_r, dst10_l);
ILVRL_H2_SH(dst2, dst1, dst21_r, dst21_l);
+ ILVRL_H2_SH(dst3, dst2, dst32_r, dst32_l);
+ ILVRL_H2_SH(dst4, dst3, dst43_r, dst43_l);
+ dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
+ dst0_l = HEVC_FILT_4TAP(dst10_l, dst32_l, filt_h0, filt_h1);
+ dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
+ dst1_l = HEVC_FILT_4TAP(dst21_l, dst43_l, filt_h0, filt_h1);
+ SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
+ PCKEV_H2_SH(dst0_l, dst0_r, dst1_l, dst1_r, tmp1, tmp3);
+
+ ILVRL_H2_SH(tmp1, in0, tmp0, tmp1);
+ ILVRL_H2_SH(tmp3, in1, tmp2, tmp3);
+
+ dst0_r = __msa_dpadd_s_w(offset_vec, tmp0, weight_vec);
+ dst0_l = __msa_dpadd_s_w(offset_vec, tmp1, weight_vec);
+ dst1_r = __msa_dpadd_s_w(offset_vec, tmp2, weight_vec);
+ dst1_l = __msa_dpadd_s_w(offset_vec, tmp3, weight_vec);
+ SRAR_W4_SW(dst0_r, dst0_l, dst1_r, dst1_l, rnd_vec);
+ PCKEV_H2_SH(dst0_l, dst0_r, dst1_l, dst1_r, tmp0, tmp1);
+ CLIP_SH2_0_255_MAX_SATU(tmp0, tmp1);
+ out = (v16u8) __msa_pckev_b((v16i8) tmp1, (v16i8) tmp0);
+ ST8x2_UB(out, dst, dst_stride);
+}
- LD_SB2(src0_ptr, src_stride, src3, src4);
+static void hevc_hv_biwgt_4t_8multx4_msa(uint8_t *src0_ptr,
+ int32_t src_stride,
+ int16_t *src1_ptr,
+ int32_t src2_stride,
+ uint8_t *dst,
+ int32_t dst_stride,
+ const int8_t *filter_x,
+ const int8_t *filter_y,
+ int32_t weight0,
+ int32_t weight1,
+ int32_t offset0,
+ int32_t offset1,
+ int32_t rnd_val,
+ int32_t width8mult)
+{
+ int32_t weight, offset;
+ uint32_t cnt;
+ v16u8 out0, out1;
+ v16i8 src0, src1, src2, src3, src4, src5, src6, mask0, mask1;
+ v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+ v8i16 filt0, filt1, filt_h0, filt_h1, filter_vec, weight_vec;
+ v8i16 dsth0, dsth1, dsth2, dsth3, dsth4, dsth5, dsth6;
+ v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, in0, in1, in2, in3;
+ v8i16 dst10_r, dst32_r, dst54_r, dst21_r, dst43_r, dst65_r;
+ v8i16 dst10_l, dst32_l, dst54_l, dst21_l, dst43_l, dst65_l;
+ v4i32 dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
+ v4i32 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
+ v4i32 offset_vec, rnd_vec, const_vec;
- LD_SH2(src1_ptr, src2_stride, in0, in1);
- XORI_B2_128_SB(src3, src4);
+ src0_ptr -= (src_stride + 1);
- VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
- dst3 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst3, dst3);
+ filter_vec = LD_SH(filter_x);
+ SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
- ILVRL_H2_SH(dst3, dst2, dst32_r, dst32_l);
- dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
- dst0_l = HEVC_FILT_4TAP(dst10_l, dst32_l, filt_h0, filt_h1);
- dst0_r >>= 6;
- dst0_l >>= 6;
- tmp0 = __msa_pckev_h((v8i16) dst0_l, (v8i16) dst0_r);
+ filter_vec = LD_SH(filter_y);
+ UNPCK_R_SB_SH(filter_vec, filter_vec);
- VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec0, vec1);
- dst4 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst4, dst4);
+ SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);
- ILVRL_H2_SH(dst4, dst3, dst43_r, dst43_l);
- dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
- dst1_l = HEVC_FILT_4TAP(dst21_l, dst43_l, filt_h0, filt_h1);
- dst1_r >>= 6;
- dst1_l >>= 6;
- tmp1 = __msa_pckev_h((v8i16) dst1_l, (v8i16) dst1_r);
+ mask0 = LD_SB(ff_hevc_mask_arr);
+ mask1 = mask0 + 2;
- HEVC_BIW_RND_CLIP2(tmp0, tmp1, in0, in1,
- weight_vec, rnd_vec, offset_vec,
- tmp0, tmp1);
- tmp0 = (v8i16) __msa_pckev_b((v16i8) tmp1, (v16i8) tmp0);
- ST8x2_UB(tmp0, dst, dst_stride);
+ offset = (offset0 + offset1) << rnd_val;
+ weight0 = weight0 & 0x0000FFFF;
+ weight = weight0 | (weight1 << 16);
+
+ const_vec = __msa_fill_w((128 * weight1));
+ const_vec <<= 6;
+ offset_vec = __msa_fill_w(offset);
+ rnd_vec = __msa_fill_w(rnd_val + 1);
+ offset_vec += const_vec;
+ weight_vec = (v8i16) __msa_fill_w(weight);
+
+ for (cnt = width8mult; cnt--;) {
+ LD_SB7(src0_ptr, src_stride, src0, src1, src2, src3, src4, src5, src6);
+ src0_ptr += 8;
+ XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
+
+ LD_SH4(src1_ptr, src2_stride, in0, in1, in2, in3);
+ src1_ptr += 8;
+
+ VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
+ VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
+ VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
+
+ dsth0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dsth1 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dsth2 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
+
+ ILVRL_H2_SH(dsth1, dsth0, dst10_r, dst10_l);
+ ILVRL_H2_SH(dsth2, dsth1, dst21_r, dst21_l);
+
+ VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
+ VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec2, vec3);
+ VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec4, vec5);
+ VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec6, vec7);
+
+ dsth3 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dsth4 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dsth5 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
+ dsth6 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
+
+ ILVRL_H2_SH(dsth3, dsth2, dst32_r, dst32_l);
+ ILVRL_H2_SH(dsth4, dsth3, dst43_r, dst43_l);
+ ILVRL_H2_SH(dsth5, dsth4, dst54_r, dst54_l);
+ ILVRL_H2_SH(dsth6, dsth5, dst65_r, dst65_l);
+
+ dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
+ dst0_l = HEVC_FILT_4TAP(dst10_l, dst32_l, filt_h0, filt_h1);
+ dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
+ dst1_l = HEVC_FILT_4TAP(dst21_l, dst43_l, filt_h0, filt_h1);
+ dst2_r = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
+ dst2_l = HEVC_FILT_4TAP(dst32_l, dst54_l, filt_h0, filt_h1);
+ dst3_r = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
+ dst3_l = HEVC_FILT_4TAP(dst43_l, dst65_l, filt_h0, filt_h1);
+
+ SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
+ SRA_4V(dst2_r, dst2_l, dst3_r, dst3_l, 6);
+ PCKEV_H4_SW(dst0_l, dst0_r, dst1_l, dst1_r, dst2_l, dst2_r, dst3_l,
+ dst3_r, dst0, dst1, dst2, dst3);
+
+ ILVRL_H2_SH(dst0, in0, tmp0, tmp1);
+ ILVRL_H2_SH(dst1, in1, tmp2, tmp3);
+ ILVRL_H2_SH(dst2, in2, tmp4, tmp5);
+ ILVRL_H2_SH(dst3, in3, tmp6, tmp7);
+ dst0 = __msa_dpadd_s_w(offset_vec, tmp0, weight_vec);
+ dst1 = __msa_dpadd_s_w(offset_vec, tmp1, weight_vec);
+ dst2 = __msa_dpadd_s_w(offset_vec, tmp2, weight_vec);
+ dst3 = __msa_dpadd_s_w(offset_vec, tmp3, weight_vec);
+ dst4 = __msa_dpadd_s_w(offset_vec, tmp4, weight_vec);
+ dst5 = __msa_dpadd_s_w(offset_vec, tmp5, weight_vec);
+ dst6 = __msa_dpadd_s_w(offset_vec, tmp6, weight_vec);
+ dst7 = __msa_dpadd_s_w(offset_vec, tmp7, weight_vec);
+ SRAR_W4_SW(dst0, dst1, dst2, dst3, rnd_vec);
+ SRAR_W4_SW(dst4, dst5, dst6, dst7, rnd_vec);
+ PCKEV_H4_SH(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6,
+ tmp0, tmp1, tmp2, tmp3);
+ CLIP_SH4_0_255_MAX_SATU(tmp0, tmp1, tmp2, tmp3);
+ PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, out0, out1);
+ ST8x4_UB(out0, out1, dst, dst_stride);
+ dst += 8;
+ }
}
static void hevc_hv_biwgt_4t_8x6_msa(uint8_t *src0_ptr,
@@ -5115,7 +5256,6 @@ static void hevc_hv_biwgt_4t_8x6_msa(uint8_t *src0_ptr,
int32_t dst_stride,
const int8_t *filter_x,
const int8_t *filter_y,
- int32_t height,
int32_t weight0,
int32_t weight1,
int32_t offset0,
@@ -5123,14 +5263,16 @@ static void hevc_hv_biwgt_4t_8x6_msa(uint8_t *src0_ptr,
int32_t rnd_val)
{
uint32_t offset, weight;
+ v16u8 out0, out1, out2;
v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
v8i16 filt0, filt1;
- v4i32 filt_h0, filt_h1;
- v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+ v8i16 filt_h0, filt_h1;
+ v16i8 mask0 = LD_SB(ff_hevc_mask_arr);
v16i8 mask1;
- v8i16 filter_vec, const_vec;
- v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
- v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst8;
+ v8i16 filter_vec, weight_vec;
+ v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9;
+ v16i8 vec10, vec11, vec12, vec13, vec14, vec15, vec16, vec17;
+ v8i16 dsth0, dsth1, dsth2, dsth3, dsth4, dsth5, dsth6, dsth7, dsth8;
v4i32 dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
v4i32 dst4_r, dst4_l, dst5_r, dst5_l;
v8i16 dst10_r, dst32_r, dst10_l, dst32_l;
@@ -5138,8 +5280,9 @@ static void hevc_hv_biwgt_4t_8x6_msa(uint8_t *src0_ptr,
v8i16 dst54_r, dst54_l, dst65_r, dst65_l;
v8i16 dst76_r, dst76_l, dst87_r, dst87_l;
v8i16 in0, in1, in2, in3, in4, in5;
- v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
- v4i32 weight_vec, offset_vec, rnd_vec;
+ v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ v4i32 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
+ v4i32 offset_vec, rnd_vec, const_vec;
src0_ptr -= (src_stride + 1);
@@ -5147,10 +5290,9 @@ static void hevc_hv_biwgt_4t_8x6_msa(uint8_t *src0_ptr,
SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
filter_vec = LD_SH(filter_y);
- vec0 = __msa_clti_s_b((v16i8) filter_vec, 0);
- filter_vec = (v8i16) __msa_ilvr_b(vec0, (v16i8) filter_vec);
+ UNPCK_R_SB_SH(filter_vec, filter_vec);
- SPLATI_W2_SW(filter_vec, 0, filt_h0, filt_h1);
+ SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);
mask1 = mask0 + 2;
@@ -5158,121 +5300,103 @@ static void hevc_hv_biwgt_4t_8x6_msa(uint8_t *src0_ptr,
weight0 = weight0 & 0x0000FFFF;
weight = weight0 | (weight1 << 16);
- const_vec = __msa_ldi_h(128);
+ const_vec = __msa_fill_w((128 * weight1));
const_vec <<= 6;
offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
+ weight_vec = (v8i16) __msa_fill_w(weight);
rnd_vec = __msa_fill_w(rnd_val + 1);
+ offset_vec += const_vec;
- LD_SB3(src0_ptr, src_stride, src0, src1, src2);
- src0_ptr += (3 * src_stride);
- XORI_B3_128_SB(src0, src1, src2);
+ LD_SB5(src0_ptr, src_stride, src0, src1, src2, src3, src4);
+ src0_ptr += (5 * src_stride);
+ LD_SB4(src0_ptr, src_stride, src5, src6, src7, src8);
+
+ XORI_B5_128_SB(src0, src1, src2, src3, src4);
+ XORI_B4_128_SB(src5, src6, src7, src8);
+
+ LD_SH6(src1_ptr, src2_stride, in0, in1, in2, in3, in4, in5);
VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
- dst0 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);
- dst1 = const_vec;
- DPADD_SB2_SH(vec2, vec3, filt0, filt1, dst1, dst1);
- dst2 = const_vec;
- DPADD_SB2_SH(vec4, vec5, filt0, filt1, dst2, dst2);
-
- ILVRL_H2_SH(dst1, dst0, dst10_r, dst10_l);
- ILVRL_H2_SH(dst2, dst1, dst21_r, dst21_l);
-
- LD_SB2(src0_ptr, src_stride, src3, src4);
- src0_ptr += (2 * src_stride);
- XORI_B2_128_SB(src3, src4);
- LD_SH6(src1_ptr, src2_stride, in0, in1, in2, in3, in4, in5);
- VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
- dst3 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst3, dst3);
+ VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec6, vec7);
+ VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec8, vec9);
+ VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec10, vec11);
+ VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec12, vec13);
+ VSHF_B2_SB(src7, src7, src7, src7, mask0, mask1, vec14, vec15);
+ VSHF_B2_SB(src8, src8, src8, src8, mask0, mask1, vec16, vec17);
+
+ dsth0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dsth1 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dsth2 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
+ dsth3 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
+ dsth4 = HEVC_FILT_4TAP_SH(vec8, vec9, filt0, filt1);
+ dsth5 = HEVC_FILT_4TAP_SH(vec10, vec11, filt0, filt1);
+ dsth6 = HEVC_FILT_4TAP_SH(vec12, vec13, filt0, filt1);
+ dsth7 = HEVC_FILT_4TAP_SH(vec14, vec15, filt0, filt1);
+ dsth8 = HEVC_FILT_4TAP_SH(vec16, vec17, filt0, filt1);
+
+ ILVRL_H2_SH(dsth1, dsth0, dst10_r, dst10_l);
+ ILVRL_H2_SH(dsth2, dsth1, dst21_r, dst21_l);
+ ILVRL_H2_SH(dsth3, dsth2, dst32_r, dst32_l);
+ ILVRL_H2_SH(dsth4, dsth3, dst43_r, dst43_l);
+ ILVRL_H2_SH(dsth5, dsth4, dst54_r, dst54_l);
+ ILVRL_H2_SH(dsth6, dsth5, dst65_r, dst65_l);
+ ILVRL_H2_SH(dsth7, dsth6, dst76_r, dst76_l);
+ ILVRL_H2_SH(dsth8, dsth7, dst87_r, dst87_l);
- ILVRL_H2_SH(dst3, dst2, dst32_r, dst32_l);
dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
dst0_l = HEVC_FILT_4TAP(dst10_l, dst32_l, filt_h0, filt_h1);
- dst0_r >>= 6;
- dst0_l >>= 6;
- tmp0 = __msa_pckev_h((v8i16) dst0_l, (v8i16) dst0_r);
-
- VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec0, vec1);
- dst4 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst4, dst4);
-
- ILVRL_H2_SH(dst4, dst3, dst43_r, dst43_l);
dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
dst1_l = HEVC_FILT_4TAP(dst21_l, dst43_l, filt_h0, filt_h1);
- dst1_r >>= 6;
- dst1_l >>= 6;
- tmp1 = __msa_pckev_h((v8i16) dst1_l, (v8i16) dst1_r);
-
- LD_SB2(src0_ptr, src_stride, src5, src6);
- src0_ptr += (2 * src_stride);
- XORI_B2_128_SB(src5, src6);
-
- VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec0, vec1);
- dst5 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst5, dst5);
-
- ILVRL_H2_SH(dst5, dst4, dst54_r, dst54_l);
dst2_r = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
dst2_l = HEVC_FILT_4TAP(dst32_l, dst54_l, filt_h0, filt_h1);
- dst2_r >>= 6;
- dst2_l >>= 6;
- tmp2 = __msa_pckev_h((v8i16) dst2_l, (v8i16) dst2_r);
-
- VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec0, vec1);
- dst6 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst6, dst6);
-
- ILVRL_H2_SH(dst6, dst5, dst65_r, dst65_l);
dst3_r = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
dst3_l = HEVC_FILT_4TAP(dst43_l, dst65_l, filt_h0, filt_h1);
- dst3_r >>= 6;
- dst3_l >>= 6;
- tmp3 = __msa_pckev_h((v8i16) dst3_l, (v8i16) dst3_r);
-
- HEVC_BIW_RND_CLIP4(tmp0, tmp1, tmp2, tmp3,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- tmp0, tmp1, tmp2, tmp3);
-
- PCKEV_B2_SH(tmp1, tmp0, tmp3, tmp2, tmp0, tmp1);
- ST8x4_UB(tmp0, tmp1, dst, dst_stride);
- dst += (4 * dst_stride);
-
- LD_SB2(src0_ptr, src_stride, src7, src8);
- XORI_B2_128_SB(src7, src8);
-
- VSHF_B2_SB(src7, src7, src7, src7, mask0, mask1, vec0, vec1);
- dst7 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst7, dst7);
-
- ILVRL_H2_SH(dst7, dst6, dst76_r, dst76_l);
dst4_r = HEVC_FILT_4TAP(dst54_r, dst76_r, filt_h0, filt_h1);
dst4_l = HEVC_FILT_4TAP(dst54_l, dst76_l, filt_h0, filt_h1);
- dst4_r >>= 6;
- dst4_l >>= 6;
- tmp4 = __msa_pckev_h((v8i16) dst4_l, (v8i16) dst4_r);
-
- VSHF_B2_SB(src8, src8, src8, src8, mask0, mask1, vec0, vec1);
- dst8 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst8, dst8);
-
- ILVRL_H2_SH(dst8, dst7, dst87_r, dst87_l);
dst5_r = HEVC_FILT_4TAP(dst65_r, dst87_r, filt_h0, filt_h1);
dst5_l = HEVC_FILT_4TAP(dst65_l, dst87_l, filt_h0, filt_h1);
- dst5_r >>= 6;
- dst5_l >>= 6;
- tmp5 = __msa_pckev_h((v8i16) dst5_l, (v8i16) dst5_r);
- HEVC_BIW_RND_CLIP2(tmp4, tmp5, in4, in5,
- weight_vec, rnd_vec, offset_vec,
- tmp4, tmp5);
-
- tmp4 = (v8i16) __msa_pckev_b((v16i8) tmp5, (v16i8) tmp4);
- ST8x2_UB(tmp4, dst, dst_stride);
+ SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
+ SRA_4V(dst2_r, dst2_l, dst3_r, dst3_l, 6);
+ SRA_4V(dst4_r, dst4_l, dst5_r, dst5_l, 6);
+ PCKEV_H4_SW(dst0_l, dst0_r, dst1_l, dst1_r, dst2_l, dst2_r, dst3_l, dst3_r,
+ dst0, dst1, dst2, dst3);
+
+ ILVRL_H2_SH(dst0, in0, tmp0, tmp1);
+ ILVRL_H2_SH(dst1, in1, tmp2, tmp3);
+ ILVRL_H2_SH(dst2, in2, tmp4, tmp5);
+ ILVRL_H2_SH(dst3, in3, tmp6, tmp7);
+ dst0 = __msa_dpadd_s_w(offset_vec, tmp0, weight_vec);
+ dst1 = __msa_dpadd_s_w(offset_vec, tmp1, weight_vec);
+ dst2 = __msa_dpadd_s_w(offset_vec, tmp2, weight_vec);
+ dst3 = __msa_dpadd_s_w(offset_vec, tmp3, weight_vec);
+ dst4 = __msa_dpadd_s_w(offset_vec, tmp4, weight_vec);
+ dst5 = __msa_dpadd_s_w(offset_vec, tmp5, weight_vec);
+ dst6 = __msa_dpadd_s_w(offset_vec, tmp6, weight_vec);
+ dst7 = __msa_dpadd_s_w(offset_vec, tmp7, weight_vec);
+ SRAR_W4_SW(dst0, dst1, dst2, dst3, rnd_vec);
+ SRAR_W4_SW(dst4, dst5, dst6, dst7, rnd_vec);
+ PCKEV_H4_SH(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6,
+ tmp0, tmp1, tmp2, tmp3);
+ CLIP_SH4_0_255_MAX_SATU(tmp0, tmp1, tmp2, tmp3);
+ PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, out0, out1);
+
+ PCKEV_H2_SW(dst4_l, dst4_r, dst5_l, dst5_r, dst0, dst1);
+ ILVRL_H2_SH(dst0, in4, tmp0, tmp1);
+ ILVRL_H2_SH(dst1, in5, tmp2, tmp3);
+ dst0 = __msa_dpadd_s_w(offset_vec, tmp0, weight_vec);
+ dst1 = __msa_dpadd_s_w(offset_vec, tmp1, weight_vec);
+ dst2 = __msa_dpadd_s_w(offset_vec, tmp2, weight_vec);
+ dst3 = __msa_dpadd_s_w(offset_vec, tmp3, weight_vec);
+ SRAR_W4_SW(dst0, dst1, dst2, dst3, rnd_vec);
+ PCKEV_H2_SH(dst1, dst0, dst3, dst2, tmp4, tmp5);
+ CLIP_SH2_0_255_MAX_SATU(tmp4, tmp5);
+ out2 = (v16u8) __msa_pckev_b((v16i8) tmp5, (v16i8) tmp4);
+ ST8x4_UB(out0, out1, dst, dst_stride);
+ dst += (4 * dst_stride);
+ ST8x2_UB(out2, dst, dst_stride);
}
static void hevc_hv_biwgt_4t_8multx4mult_msa(uint8_t *src0_ptr,
@@ -5297,20 +5421,22 @@ static void hevc_hv_biwgt_4t_8multx4mult_msa(uint8_t *src0_ptr,
uint8_t *src0_ptr_tmp;
int16_t *src1_ptr_tmp;
uint8_t *dst_tmp;
+ v16u8 out0, out1;
v16i8 src0, src1, src2, src3, src4, src5, src6;
v8i16 in0, in1, in2, in3;
v8i16 filt0, filt1;
- v4i32 filt_h0, filt_h1;
- v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+ v8i16 filt_h0, filt_h1;
+ v16i8 mask0 = LD_SB(ff_hevc_mask_arr);
v16i8 mask1;
- v8i16 filter_vec, const_vec;
- v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
- v8i16 dst0, dst1, dst2, dst3, dst4, dst5;
+ v8i16 filter_vec;
+ v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+ v8i16 dsth0, dsth1, dsth2, dsth3, dsth4, dsth5, dsth6;
v4i32 dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
- v8i16 tmp0, tmp1, tmp2, tmp3;
- v8i16 dst10_r, dst32_r, dst21_r, dst43_r;
- v8i16 dst10_l, dst32_l, dst21_l, dst43_l;
- v4i32 weight_vec, offset_vec, rnd_vec;
+ v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ v4i32 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
+ v8i16 dst10_r, dst32_r, dst54_r, dst21_r, dst43_r, dst65_r;
+ v8i16 dst10_l, dst32_l, dst54_l, dst21_l, dst43_l, dst65_l, weight_vec;
+ v4i32 offset_vec, rnd_vec, const_vec;
src0_ptr -= (src_stride + 1);
@@ -5318,10 +5444,9 @@ static void hevc_hv_biwgt_4t_8multx4mult_msa(uint8_t *src0_ptr,
SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
filter_vec = LD_SH(filter_y);
- vec0 = __msa_clti_s_b((v16i8) filter_vec, 0);
- filter_vec = (v8i16) __msa_ilvr_b(vec0, (v16i8) filter_vec);
+ UNPCK_R_SB_SH(filter_vec, filter_vec);
- SPLATI_W2_SW(filter_vec, 0, filt_h0, filt_h1);
+ SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);
mask1 = mask0 + 2;
@@ -5329,11 +5454,12 @@ static void hevc_hv_biwgt_4t_8multx4mult_msa(uint8_t *src0_ptr,
weight0 = weight0 & 0x0000FFFF;
weight = weight0 | (weight1 << 16);
- const_vec = __msa_ldi_h(128);
+ const_vec = __msa_fill_w((128 * weight1));
const_vec <<= 6;
offset_vec = __msa_fill_w(offset);
- weight_vec = __msa_fill_w(weight);
+ weight_vec = (v8i16) __msa_fill_w(weight);
rnd_vec = __msa_fill_w(rnd_val + 1);
+ offset_vec += const_vec;
for (cnt = width >> 3; cnt--;) {
src0_ptr_tmp = src0_ptr;
@@ -5347,15 +5473,12 @@ static void hevc_hv_biwgt_4t_8multx4mult_msa(uint8_t *src0_ptr,
VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
- dst0 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);
- dst1 = const_vec;
- DPADD_SB2_SH(vec2, vec3, filt0, filt1, dst1, dst1);
- dst2 = const_vec;
- DPADD_SB2_SH(vec4, vec5, filt0, filt1, dst2, dst2);
+ dsth0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dsth1 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dsth2 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
- ILVRL_H2_SH(dst1, dst0, dst10_r, dst10_l);
- ILVRL_H2_SH(dst2, dst1, dst21_r, dst21_l);
+ ILVRL_H2_SH(dsth1, dsth0, dst10_r, dst10_l);
+ ILVRL_H2_SH(dsth2, dsth1, dst21_r, dst21_l);
for (loop_cnt = height >> 2; loop_cnt--;) {
LD_SB4(src0_ptr_tmp, src_stride, src3, src4, src5, src6);
@@ -5365,55 +5488,59 @@ static void hevc_hv_biwgt_4t_8multx4mult_msa(uint8_t *src0_ptr,
XORI_B4_128_SB(src3, src4, src5, src6);
VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
- dst3 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst3, dst3);
+ VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec2, vec3);
+ VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec4, vec5);
+ VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec6, vec7);
- ILVRL_H2_SH(dst3, dst2, dst32_r, dst32_l);
- dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
- dst0_l = HEVC_FILT_4TAP(dst10_l, dst32_l, filt_h0, filt_h1);
- dst0_r >>= 6;
- dst0_l >>= 6;
+ dsth3 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dsth4 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dsth5 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
+ dsth6 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
- VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec0, vec1);
- dst4 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst4, dst4);
+ ILVRL_H2_SH(dsth3, dsth2, dst32_r, dst32_l);
+ ILVRL_H2_SH(dsth4, dsth3, dst43_r, dst43_l);
+ ILVRL_H2_SH(dsth5, dsth4, dst54_r, dst54_l);
+ ILVRL_H2_SH(dsth6, dsth5, dst65_r, dst65_l);
- ILVRL_H2_SH(dst4, dst3, dst43_r, dst43_l);
+ dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
+ dst0_l = HEVC_FILT_4TAP(dst10_l, dst32_l, filt_h0, filt_h1);
dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
dst1_l = HEVC_FILT_4TAP(dst21_l, dst43_l, filt_h0, filt_h1);
- dst1_r >>= 6;
- dst1_l >>= 6;
-
- VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec0, vec1);
- dst5 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst5, dst5);
-
- ILVRL_H2_SH(dst5, dst4, dst10_r, dst10_l);
- dst2_r = HEVC_FILT_4TAP(dst32_r, dst10_r, filt_h0, filt_h1);
- dst2_l = HEVC_FILT_4TAP(dst32_l, dst10_l, filt_h0, filt_h1);
- dst2_r >>= 6;
- dst2_l >>= 6;
-
- VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec0, vec1);
- dst2 = const_vec;
- DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst2, dst2);
-
- ILVRL_H2_SH(dst2, dst5, dst21_r, dst21_l);
- dst3_r = HEVC_FILT_4TAP(dst43_r, dst21_r, filt_h0, filt_h1);
- dst3_l = HEVC_FILT_4TAP(dst43_l, dst21_l, filt_h0, filt_h1);
- dst3_r >>= 6;
- dst3_l >>= 6;
-
- PCKEV_H4_SH(dst0_l, dst0_r, dst1_l, dst1_r,
- dst2_l, dst2_r, dst3_l, dst3_r, tmp0, tmp1, tmp2, tmp3);
- HEVC_BIW_RND_CLIP4(tmp0, tmp1, tmp2, tmp3,
- in0, in1, in2, in3,
- weight_vec, rnd_vec, offset_vec,
- tmp0, tmp1, tmp2, tmp3);
-
- PCKEV_B2_SH(tmp1, tmp0, tmp3, tmp2, tmp0, tmp1);
- ST8x4_UB(tmp0, tmp1, dst_tmp, dst_stride);
+ dst2_r = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
+ dst2_l = HEVC_FILT_4TAP(dst32_l, dst54_l, filt_h0, filt_h1);
+ dst3_r = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
+ dst3_l = HEVC_FILT_4TAP(dst43_l, dst65_l, filt_h0, filt_h1);
+
+ SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
+ SRA_4V(dst2_r, dst2_l, dst3_r, dst3_l, 6);
+ PCKEV_H4_SW(dst0_l, dst0_r, dst1_l, dst1_r, dst2_l, dst2_r, dst3_l,
+ dst3_r, dst0, dst1, dst2, dst3);
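        /* The interleaves below pair each first-pass sample (odd lanes) with
         * its src1_ptr intermediate (even lanes), so one __msa_dpadd_s_w per
         * vector evaluates in*weight0 + dst*weight1 + offset for four outputs
         * at once; SRAR_W4_SW then rounds and shifts by rnd_val + 1, and
         * CLIP_SH4_0_255_MAX_SATU saturates with an immediate, sparing the
         * extra max-value register the old HEVC_BIW_RND_CLIP4 path needed
         * (lane order inferred from the ILVRL/__msa_fill_w pairing). */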
+ ILVRL_H2_SH(dst0, in0, tmp0, tmp1);
+ ILVRL_H2_SH(dst1, in1, tmp2, tmp3);
+ ILVRL_H2_SH(dst2, in2, tmp4, tmp5);
+ ILVRL_H2_SH(dst3, in3, tmp6, tmp7);
+ dst0 = __msa_dpadd_s_w(offset_vec, tmp0, weight_vec);
+ dst1 = __msa_dpadd_s_w(offset_vec, tmp1, weight_vec);
+ dst2 = __msa_dpadd_s_w(offset_vec, tmp2, weight_vec);
+ dst3 = __msa_dpadd_s_w(offset_vec, tmp3, weight_vec);
+ dst4 = __msa_dpadd_s_w(offset_vec, tmp4, weight_vec);
+ dst5 = __msa_dpadd_s_w(offset_vec, tmp5, weight_vec);
+ dst6 = __msa_dpadd_s_w(offset_vec, tmp6, weight_vec);
+ dst7 = __msa_dpadd_s_w(offset_vec, tmp7, weight_vec);
+ SRAR_W4_SW(dst0, dst1, dst2, dst3, rnd_vec);
+ SRAR_W4_SW(dst4, dst5, dst6, dst7, rnd_vec);
+ PCKEV_H4_SH(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6,
+ tmp0, tmp1, tmp2, tmp3);
+ CLIP_SH4_0_255_MAX_SATU(tmp0, tmp1, tmp2, tmp3);
+ PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, out0, out1);
+ ST8x4_UB(out0, out1, dst_tmp, dst_stride);
dst_tmp += (4 * dst_stride);
+
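            /* Slide the vertical 4-tap window: rows 4-6 of this pass become
             * rows 0-2 of the next, so each iteration filters only the four
             * newly loaded rows instead of rebuilding all seven. */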
+ dst10_r = dst54_r;
+ dst10_l = dst54_l;
+ dst21_r = dst65_r;
+ dst21_l = dst65_l;
+ dsth2 = dsth6;
}
src0_ptr += 8;
@@ -5440,13 +5567,16 @@ static void hevc_hv_biwgt_4t_8w_msa(uint8_t *src0_ptr,
if (2 == height) {
hevc_hv_biwgt_4t_8x2_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
dst, dst_stride, filter_x, filter_y,
- height, weight0, weight1, offset0, offset1,
- rnd_val);
+ weight0, weight1, offset0, offset1, rnd_val);
+ } else if (4 == height) {
+ hevc_hv_biwgt_4t_8multx4_msa(src0_ptr, src_stride, src1_ptr,
+ src2_stride, dst, dst_stride, filter_x,
+ filter_y, weight0, weight1, offset0,
+ offset1, rnd_val, 1);
} else if (6 == height) {
hevc_hv_biwgt_4t_8x6_msa(src0_ptr, src_stride, src1_ptr, src2_stride,
dst, dst_stride, filter_x, filter_y,
- height, weight0, weight1, offset0, offset1,
- rnd_val);
+ weight0, weight1, offset0, offset1, rnd_val);
} else if (0 == (height % 4)) {
hevc_hv_biwgt_4t_8multx4mult_msa(src0_ptr, src_stride,
src1_ptr, src2_stride,
@@ -5471,16 +5601,228 @@ static void hevc_hv_biwgt_4t_12w_msa(uint8_t *src0_ptr,
int32_t offset1,
int32_t rnd_val)
{
- hevc_hv_biwgt_4t_8multx4mult_msa(src0_ptr, src_stride,
- src1_ptr, src2_stride,
- dst, dst_stride,
- filter_x, filter_y, height, weight0,
- weight1, offset0, offset1, rnd_val, 8);
+ uint32_t loop_cnt;
+ uint64_t tp0, tp1;
+ int32_t offset, weight;
+ uint8_t *src0_ptr_tmp, *dst_tmp;
+ int16_t *src1_ptr_tmp;
+ v16u8 out0, out1;
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
+ v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+ v16i8 mask0, mask1, mask2, mask3;
+ v8i16 filt0, filt1, filt_h0, filt_h1, filter_vec;
+ v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ v8i16 dsth0, dsth1, dsth2, dsth3, dsth4, dsth5, dsth6, weight_vec;
+ v8i16 dst10, dst21, dst22, dst73, dst84, dst95, dst106;
+ v8i16 dst76_r, dst98_r, dst87_r, dst109_r;
+ v8i16 in0 = { 0 }, in1 = { 0 }, in2 = { 0 }, in3 = { 0 };
+ v8i16 dst10_r, dst32_r, dst54_r, dst21_r, dst43_r, dst65_r;
+ v8i16 dst10_l, dst32_l, dst54_l, dst21_l, dst43_l, dst65_l;
+ v4i32 dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
+ v4i32 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
+ v4i32 offset_vec, rnd_vec, const_vec;
+
+ src0_ptr -= (src_stride + 1);
+
+ filter_vec = LD_SH(filter_x);
+ SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);
+
+ filter_vec = LD_SH(filter_y);
+ UNPCK_R_SB_SH(filter_vec, filter_vec);
+
+ SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);
+
+ mask0 = LD_SB(ff_hevc_mask_arr);
+ mask1 = mask0 + 2;
+
+ offset = (offset0 + offset1) << rnd_val;
+ weight0 = weight0 & 0x0000FFFF;
+ weight = weight0 | (weight1 << 16);
+
+ const_vec = __msa_fill_w((128 * weight1));
+ const_vec <<= 6;
+ offset_vec = __msa_fill_w(offset);
+ rnd_vec = __msa_fill_w(rnd_val + 1);
+ offset_vec += const_vec;
+ weight_vec = (v8i16) __msa_fill_w(weight);
+
+ src0_ptr_tmp = src0_ptr;
+ dst_tmp = dst;
+ src1_ptr_tmp = src1_ptr;
+
+ LD_SB3(src0_ptr_tmp, src_stride, src0, src1, src2);
+ src0_ptr_tmp += (3 * src_stride);
+
+ XORI_B3_128_SB(src0, src1, src2);
+
+ VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
+ VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
+ VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
+
+ dsth0 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dsth1 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dsth2 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
- hevc_hv_biwgt_4t_4w_msa(src0_ptr + 8, src_stride, src1_ptr + 8, src2_stride,
- dst + 8, dst_stride, filter_x, filter_y,
- height, weight0, weight1, offset0,
- offset1, rnd_val);
+ ILVRL_H2_SH(dsth1, dsth0, dst10_r, dst10_l);
+ ILVRL_H2_SH(dsth2, dsth1, dst21_r, dst21_l);
+
+ for (loop_cnt = 4; loop_cnt--;) {
+ LD_SB4(src0_ptr_tmp, src_stride, src3, src4, src5, src6);
+ src0_ptr_tmp += (4 * src_stride);
+ XORI_B4_128_SB(src3, src4, src5, src6);
+
+ LD_SH4(src1_ptr_tmp, src2_stride, in0, in1, in2, in3);
+ src1_ptr_tmp += (4 * src2_stride);
+
+ VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
+ VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec2, vec3);
+ VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec4, vec5);
+ VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec6, vec7);
+
+ dsth3 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dsth4 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dsth5 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
+ dsth6 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
+
+ ILVRL_H2_SH(dsth3, dsth2, dst32_r, dst32_l);
+ ILVRL_H2_SH(dsth4, dsth3, dst43_r, dst43_l);
+ ILVRL_H2_SH(dsth5, dsth4, dst54_r, dst54_l);
+ ILVRL_H2_SH(dsth6, dsth5, dst65_r, dst65_l);
+
+ dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
+ dst0_l = HEVC_FILT_4TAP(dst10_l, dst32_l, filt_h0, filt_h1);
+ dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
+ dst1_l = HEVC_FILT_4TAP(dst21_l, dst43_l, filt_h0, filt_h1);
+ dst2_r = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
+ dst2_l = HEVC_FILT_4TAP(dst32_l, dst54_l, filt_h0, filt_h1);
+ dst3_r = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
+ dst3_l = HEVC_FILT_4TAP(dst43_l, dst65_l, filt_h0, filt_h1);
+
+ SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
+ SRA_4V(dst2_r, dst2_l, dst3_r, dst3_l, 6);
+ PCKEV_H4_SW(dst0_l, dst0_r, dst1_l, dst1_r, dst2_l, dst2_r, dst3_l,
+ dst3_r, dst0, dst1, dst2, dst3);
+ ILVRL_H2_SH(dst0, in0, tmp0, tmp1);
+ ILVRL_H2_SH(dst1, in1, tmp2, tmp3);
+ ILVRL_H2_SH(dst2, in2, tmp4, tmp5);
+ ILVRL_H2_SH(dst3, in3, tmp6, tmp7);
+ dst0 = __msa_dpadd_s_w(offset_vec, tmp0, weight_vec);
+ dst1 = __msa_dpadd_s_w(offset_vec, tmp1, weight_vec);
+ dst2 = __msa_dpadd_s_w(offset_vec, tmp2, weight_vec);
+ dst3 = __msa_dpadd_s_w(offset_vec, tmp3, weight_vec);
+ dst4 = __msa_dpadd_s_w(offset_vec, tmp4, weight_vec);
+ dst5 = __msa_dpadd_s_w(offset_vec, tmp5, weight_vec);
+ dst6 = __msa_dpadd_s_w(offset_vec, tmp6, weight_vec);
+ dst7 = __msa_dpadd_s_w(offset_vec, tmp7, weight_vec);
+ SRAR_W4_SW(dst0, dst1, dst2, dst3, rnd_vec);
+ SRAR_W4_SW(dst4, dst5, dst6, dst7, rnd_vec);
+ PCKEV_H4_SH(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6,
+ tmp0, tmp1, tmp2, tmp3);
+ CLIP_SH4_0_255_MAX_SATU(tmp0, tmp1, tmp2, tmp3);
+ PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, out0, out1);
+ ST8x4_UB(out0, out1, dst_tmp, dst_stride);
+ dst_tmp += (4 * dst_stride);
+
+ dst10_r = dst54_r;
+ dst10_l = dst54_l;
+ dst21_r = dst65_r;
+ dst21_l = dst65_l;
+ dsth2 = dsth6;
+ }
+
+ src0_ptr += 8;
+ dst += 8;
+ src1_ptr += 8;
+
+ mask2 = LD_SB(ff_hevc_mask_arr + 16);
+ mask3 = mask2 + 2;
+
+ LD_SB3(src0_ptr, src_stride, src0, src1, src2);
+ src0_ptr += (3 * src_stride);
+ XORI_B3_128_SB(src0, src1, src2);
+ VSHF_B2_SB(src0, src1, src0, src1, mask2, mask3, vec0, vec1);
+ VSHF_B2_SB(src1, src2, src1, src2, mask2, mask3, vec2, vec3);
+
+ dst10 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dst21 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+
+ ILVRL_H2_SH(dst21, dst10, dst10_r, dst21_r);
+ dst22 = (v8i16) __msa_splati_d((v2i64) dst21, 1);
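    /* Narrow 4-wide tail: mask2/mask3 shuffle two picture rows into one
     * vector, so dst10 holds filtered rows 0-1 and dst21 rows 1-2; the
     * splat duplicates row 2 so the next interleave pairs line up (an
     * inferred reading of the two-rows-per-vector layout). */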
+
+ for (loop_cnt = 2; loop_cnt--;) {
+ LD_SB8(src0_ptr, src_stride, src3, src4, src5, src6, src7, src8, src9,
+ src10);
+ src0_ptr += (8 * src_stride);
+ XORI_B8_128_SB(src3, src4, src5, src6, src7, src8, src9, src10);
+ VSHF_B2_SB(src3, src7, src3, src7, mask2, mask3, vec0, vec1);
+ VSHF_B2_SB(src4, src8, src4, src8, mask2, mask3, vec2, vec3);
+ VSHF_B2_SB(src5, src9, src5, src9, mask2, mask3, vec4, vec5);
+ VSHF_B2_SB(src6, src10, src6, src10, mask2, mask3, vec6, vec7);
+
+ dst73 = HEVC_FILT_4TAP_SH(vec0, vec1, filt0, filt1);
+ dst84 = HEVC_FILT_4TAP_SH(vec2, vec3, filt0, filt1);
+ dst95 = HEVC_FILT_4TAP_SH(vec4, vec5, filt0, filt1);
+ dst106 = HEVC_FILT_4TAP_SH(vec6, vec7, filt0, filt1);
+
+ dst32_r = __msa_ilvr_h(dst73, dst22);
+ ILVRL_H2_SH(dst84, dst73, dst43_r, dst87_r);
+ ILVRL_H2_SH(dst95, dst84, dst54_r, dst98_r);
+ ILVRL_H2_SH(dst106, dst95, dst65_r, dst109_r);
+ dst22 = (v8i16) __msa_splati_d((v2i64) dst73, 1);
+ dst76_r = __msa_ilvr_h(dst22, dst106);
+
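        /* Each LD2 + INSERT_D2_SH pair below packs two 4-sample rows of the
         * 16-bit src1_ptr intermediate into one v8i16, matching the
         * two-rows-per-vector layout of the filtered data above. */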
+ LD2(src1_ptr, src2_stride, tp0, tp1);
+ src1_ptr += 2 * src2_stride;
+ INSERT_D2_SH(tp0, tp1, in0);
+ LD2(src1_ptr, src2_stride, tp0, tp1);
+ src1_ptr += 2 * src2_stride;
+ INSERT_D2_SH(tp0, tp1, in1);
+
+ LD2(src1_ptr, src2_stride, tp0, tp1);
+ src1_ptr += 2 * src2_stride;
+ INSERT_D2_SH(tp0, tp1, in2);
+ LD2(src1_ptr, src2_stride, tp0, tp1);
+ src1_ptr += 2 * src2_stride;
+ INSERT_D2_SH(tp0, tp1, in3);
+
+ dst0 = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
+ dst1 = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
+ dst2 = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
+ dst3 = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
+ dst4 = HEVC_FILT_4TAP(dst54_r, dst76_r, filt_h0, filt_h1);
+ dst5 = HEVC_FILT_4TAP(dst65_r, dst87_r, filt_h0, filt_h1);
+ dst6 = HEVC_FILT_4TAP(dst76_r, dst98_r, filt_h0, filt_h1);
+ dst7 = HEVC_FILT_4TAP(dst87_r, dst109_r, filt_h0, filt_h1);
+
+ SRA_4V(dst0, dst1, dst2, dst3, 6);
+ SRA_4V(dst4, dst5, dst6, dst7, 6);
+ PCKEV_H4_SW(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6,
+ dst0, dst1, dst2, dst3);
+ ILVRL_H2_SH(dst0, in0, tmp0, tmp1);
+ ILVRL_H2_SH(dst1, in1, tmp2, tmp3);
+ ILVRL_H2_SH(dst2, in2, tmp4, tmp5);
+ ILVRL_H2_SH(dst3, in3, tmp6, tmp7);
+ dst0 = __msa_dpadd_s_w(offset_vec, tmp0, weight_vec);
+ dst1 = __msa_dpadd_s_w(offset_vec, tmp1, weight_vec);
+ dst2 = __msa_dpadd_s_w(offset_vec, tmp2, weight_vec);
+ dst3 = __msa_dpadd_s_w(offset_vec, tmp3, weight_vec);
+ dst4 = __msa_dpadd_s_w(offset_vec, tmp4, weight_vec);
+ dst5 = __msa_dpadd_s_w(offset_vec, tmp5, weight_vec);
+ dst6 = __msa_dpadd_s_w(offset_vec, tmp6, weight_vec);
+ dst7 = __msa_dpadd_s_w(offset_vec, tmp7, weight_vec);
+ SRAR_W4_SW(dst0, dst1, dst2, dst3, rnd_vec);
+ SRAR_W4_SW(dst4, dst5, dst6, dst7, rnd_vec);
+ PCKEV_H4_SH(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6,
+ tmp0, tmp1, tmp2, tmp3);
+ CLIP_SH4_0_255_MAX_SATU(tmp0, tmp1, tmp2, tmp3);
+ PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, out0, out1);
+ ST4x8_UB(out0, out1, dst, dst_stride);
+ dst += (8 * dst_stride);
+
+ dst10_r = dst98_r;
+ dst21_r = dst109_r;
+ dst22 = (v8i16) __msa_splati_d((v2i64) dst106, 1);
+ }
}
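For reference, every path in these hv biwgt functions vectorizes the same per-sample arithmetic. A minimal scalar sketch, assuming filt is the 4-tap HV first-pass value computed on XOR-128 bytes and in the precomputed 16-bit intermediate of the other reference (hypothetical helper, not part of the patch):

#include <stdint.h>

static uint8_t biwgt_hv_sample(int32_t filt, int16_t in,
                               int32_t weight0, int32_t weight1,
                               int32_t offset0, int32_t offset1,
                               int32_t rnd_val)
{
    /* (128 << 6) * weight1 undoes the XOR-128 bias carried by filt */
    int32_t off = ((offset0 + offset1) << rnd_val) + ((128 << 6) * weight1);
    int32_t val = in * weight0 + filt * weight1 + off;
    val = (val + (1 << rnd_val)) >> (rnd_val + 1);   /* SRAR-style rounding */
    return val < 0 ? 0 : (val > 255 ? 255 : (uint8_t) val);
}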
static void hevc_hv_biwgt_4t_16w_msa(uint8_t *src0_ptr,
@@ -5498,11 +5840,17 @@ static void hevc_hv_biwgt_4t_16w_msa(uint8_t *src0_ptr,
int32_t offset1,
int32_t rnd_val)
{
- hevc_hv_biwgt_4t_8multx4mult_msa(src0_ptr, src_stride,
- src1_ptr, src2_stride,
- dst, dst_stride,
- filter_x, filter_y, height, weight0,
- weight1, offset0, offset1, rnd_val, 16);
+ if (4 == height) {
+ hevc_hv_biwgt_4t_8multx4_msa(src0_ptr, src_stride, src1_ptr,
+ src2_stride, dst, dst_stride, filter_x,
+ filter_y, weight0, weight1, offset0,
+ offset1, rnd_val, 2);
+ } else {
+ hevc_hv_biwgt_4t_8multx4mult_msa(src0_ptr, src_stride, src1_ptr,
+ src2_stride, dst, dst_stride,
+ filter_x, filter_y, height, weight0,
+ weight1, offset0, offset1, rnd_val, 16);
+ }
}
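The 16-wide dispatch mirrors the 8-wide one above: a height of 4 needs no vertical loop, so it reuses hevc_hv_biwgt_4t_8multx4_msa, whose trailing argument apparently selects how many 8-column blocks to cover (1 for 8w, 2 for 16w); all other multiple-of-4 heights take the looping 8multx4mult variant.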
static void hevc_hv_biwgt_4t_24w_msa(uint8_t *src0_ptr,