author     Linjie Fu <linjie.fu@intel.com>     2020-03-05 15:48:09 +0800
committer  Anton Khirnov <anton@khirnov.net>  2020-03-27 10:57:40 +0100
commit     8b8492452d53293b2ac8c842877fadf7925fc950
tree       51ad477ae083df79665e2727cdf6ac64f9356f68 /libavcodec
parent     e9abef437f0a348c017d4ac8b23a122881c1dc87
lavc/x86/hevc_add_res: Fix coeff overflow in ADD_RES_SSE_16_32_8
Fix overflow for coeff -32768 in function ADD_RES_SSE_16_32_8 with no
performance drop (SSE2/AVX/AVX2).
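To see why exactly -32768 breaks, here is a minimal scalar model in C of both
strategies (illustrative helper names, not FFmpeg code). The old path packs
the residual and its negation into unsigned byte magnitudes and applies them
with unsigned saturating add/subtract; negating INT16_MIN wraps back to
INT16_MIN, so both magnitudes pack to 0 and that residual is silently
dropped. The new path widens the pixel to 16 bits and uses a signed
saturating add:

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t clip_u8(int v) { return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v; }

    /* Old approach: packuswb the residual and its negation into unsigned
     * byte magnitudes, then paddusb/psubusb against the pixel. */
    static uint8_t add_res_old(uint8_t px, int16_t res)
    {
        uint8_t pos = clip_u8(res);                   /* packuswb(res)     */
        uint8_t neg = clip_u8((int16_t)-res);         /* wraps for -32768  */
        int v = px + pos > 255 ? 255 : px + pos;      /* paddusb           */
        return v - neg < 0 ? 0 : (uint8_t)(v - neg);  /* psubusb           */
    }

    /* New approach: punpcklbw/punpckhbw to 16 bits, paddsw, packuswb. */
    static uint8_t add_res_new(uint8_t px, int16_t res)
    {
        int sum = px + res;                       /* fits easily in int    */
        if (sum < INT16_MIN) sum = INT16_MIN;     /* paddsw saturation     */
        if (sum > INT16_MAX) sum = INT16_MAX;
        return clip_u8(sum);                      /* packuswb              */
    }

    int main(void)
    {
        printf("old: %d  new: %d  expected: 0\n",
               add_res_old(200, INT16_MIN), add_res_new(200, INT16_MIN));
        return 0;
    }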
./checkasm --test=hevc_add_res --bench
Mainline:
- hevc_add_res.add_residual [OK]
hevc_add_res_32x32_8_sse2: 127.5
hevc_add_res_32x32_8_avx: 127.0
hevc_add_res_32x32_8_avx2: 86.5
Add overflow test case:
- hevc_add_res.add_residual [FAILED]
After:
- hevc_add_res.add_residual [OK]
hevc_add_res_32x32_8_sse2: 126.8
hevc_add_res_32x32_8_avx: 128.3
hevc_add_res_32x32_8_avx2: 86.8
Signed-off-by: Xu Guangxin <guangxin.xu@intel.com>
Signed-off-by: Linjie Fu <linjie.fu@intel.com>
Signed-off-by: Anton Khirnov <anton@khirnov.net>
Diffstat (limited to 'libavcodec')
-rw-r--r--  libavcodec/x86/hevc_add_res.asm  40
1 file changed, 21 insertions(+), 19 deletions(-)
diff --git a/libavcodec/x86/hevc_add_res.asm b/libavcodec/x86/hevc_add_res.asm
index e5e9f247bb..c6c40078a0 100644
--- a/libavcodec/x86/hevc_add_res.asm
+++ b/libavcodec/x86/hevc_add_res.asm
@@ -84,34 +84,36 @@ cglobal hevc_add_residual_4_8, 3, 3, 6
 %endmacro
 
 %macro ADD_RES_SSE_16_32_8 3
-    mova          xm2, [r1+%1]
+    mova           m1, [%2]
+    mova           m2, m1
+    punpcklbw      m1, m0
+    punpckhbw      m2, m0
+    mova          xm5, [r1+%1]
     mova          xm6, [r1+%1+16]
 %if cpuflag(avx2)
-    vinserti128    m2, m2, [r1+%1+32], 1
+    vinserti128    m5, m5, [r1+%1+32], 1
     vinserti128    m6, m6, [r1+%1+48], 1
 %endif
-    psubw          m1, m0, m2
-    psubw          m5, m0, m6
-    packuswb       m2, m6
-    packuswb       m1, m5
+    paddsw         m1, m5
+    paddsw         m2, m6
 
-    mova          xm4, [r1+%1+mmsize*2]
+    mova           m3, [%3]
+    mova           m4, m3
+    punpcklbw      m3, m0
+    punpckhbw      m4, m0
+    mova          xm5, [r1+%1+mmsize*2]
     mova          xm6, [r1+%1+mmsize*2+16]
 %if cpuflag(avx2)
-    vinserti128    m4, m4, [r1+%1+96 ], 1
+    vinserti128    m5, m5, [r1+%1+96], 1
     vinserti128    m6, m6, [r1+%1+112], 1
 %endif
-    psubw          m3, m0, m4
-    psubw          m5, m0, m6
-    packuswb       m4, m6
-    packuswb       m3, m5
-
-    paddusb        m2, [%2]
-    paddusb        m4, [%3]
-    psubusb        m2, m1
-    psubusb        m4, m3
-    mova         [%2], m2
-    mova         [%3], m4
+    paddsw         m3, m5
+    paddsw         m4, m6
+
+    packuswb       m1, m2
+    packuswb       m3, m4
+    mova         [%2], m1
+    mova         [%3], m3
 %endmacro
 
 
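For reference, a minimal SSE2 intrinsics sketch of one 16-pixel step of the
patched macro (hypothetical helper, not part of FFmpeg; the real macro also
covers the AVX2 path, strides, and both halves of each row):

    #include <emmintrin.h>
    #include <stdint.h>

    /* One 16-pixel step: widen dst to 16 bits, signed saturating add of
     * the residual, pack back to bytes with unsigned saturation. */
    static void add_res_16px_sse2(uint8_t *dst, const int16_t *res)
    {
        __m128i zero = _mm_setzero_si128();               /* m0          */
        __m128i d    = _mm_loadu_si128((const __m128i *)dst);
        __m128i lo   = _mm_unpacklo_epi8(d, zero);        /* punpcklbw   */
        __m128i hi   = _mm_unpackhi_epi8(d, zero);        /* punpckhbw   */
        __m128i rlo  = _mm_loadu_si128((const __m128i *)res);
        __m128i rhi  = _mm_loadu_si128((const __m128i *)(res + 8));
        lo = _mm_adds_epi16(lo, rlo);                     /* paddsw      */
        hi = _mm_adds_epi16(hi, rhi);                     /* paddsw      */
        _mm_storeu_si128((__m128i *)dst,
                         _mm_packus_epi16(lo, hi));       /* packuswb    */
    }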