summaryrefslogtreecommitdiff
path: root/libavcodec/mips/hevcdsp_mmi.c
diff options
context:
space:
mode:
authorJiaxun Yang <jiaxun.yang@flygoat.com>2021-07-23 13:53:42 +0800
committerMichael Niedermayer <michael@niedermayer.cc>2021-07-28 23:31:48 +0200
commit1042039ccda7e9099f139b14981b48ee59900804 (patch)
tree1fbac90e774eae8a75586139238cfb3da13d9fb3 /libavcodec/mips/hevcdsp_mmi.c
parentb868272d7e9f7a50490abd856a5066b009779e07 (diff)
avcodec/mips: Use MMI macros to replace Loongson3 instructions
Loongson3's extension instructions (prefixed with gs) are widely used in our MMI codebase. However, these instructions are not available on Loongson-2E/F, while MMI code should work on these processors. Previously we introduced mmiutils macros to provide backward compatibility, but newly committed code didn't follow that. In this patch I revised the codebase and converted all these instructions into MMI macros to get Loongson2 supported again. Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com> Reviewed-by: Shiyou Yin <yinshiyou-hf@loongson.cn> Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
Diffstat (limited to 'libavcodec/mips/hevcdsp_mmi.c')
-rw-r--r--libavcodec/mips/hevcdsp_mmi.c251
1 files changed, 102 insertions, 149 deletions
diff --git a/libavcodec/mips/hevcdsp_mmi.c b/libavcodec/mips/hevcdsp_mmi.c
index 87fc2555a4..6583bef5da 100644
--- a/libavcodec/mips/hevcdsp_mmi.c
+++ b/libavcodec/mips/hevcdsp_mmi.c
@@ -35,6 +35,7 @@ void ff_hevc_put_hevc_qpel_h##w##_8_mmi(int16_t *dst, uint8_t *_src, \
double ftmp[15]; \
uint64_t rtmp[1]; \
const int8_t *filter = ff_hevc_qpel_filters[mx - 1]; \
+ DECLARE_VAR_ALL64; \
\
x = x_step; \
y = height; \
@@ -50,14 +51,10 @@ void ff_hevc_put_hevc_qpel_h##w##_8_mmi(int16_t *dst, uint8_t *_src, \
\
"1: \n\t" \
"2: \n\t" \
- "gsldlc1 %[ftmp3], 0x07(%[src]) \n\t" \
- "gsldrc1 %[ftmp3], 0x00(%[src]) \n\t" \
- "gsldlc1 %[ftmp4], 0x08(%[src]) \n\t" \
- "gsldrc1 %[ftmp4], 0x01(%[src]) \n\t" \
- "gsldlc1 %[ftmp5], 0x09(%[src]) \n\t" \
- "gsldrc1 %[ftmp5], 0x02(%[src]) \n\t" \
- "gsldlc1 %[ftmp6], 0x0a(%[src]) \n\t" \
- "gsldrc1 %[ftmp6], 0x03(%[src]) \n\t" \
+ MMI_ULDC1(%[ftmp3], %[src], 0x00) \
+ MMI_ULDC1(%[ftmp4], %[src], 0x01) \
+ MMI_ULDC1(%[ftmp5], %[src], 0x02) \
+ MMI_ULDC1(%[ftmp6], %[src], 0x03) \
"punpcklbh %[ftmp7], %[ftmp3], %[ftmp0] \n\t" \
"punpckhbh %[ftmp8], %[ftmp3], %[ftmp0] \n\t" \
"pmullh %[ftmp7], %[ftmp7], %[ftmp1] \n\t" \
@@ -83,8 +80,7 @@ void ff_hevc_put_hevc_qpel_h##w##_8_mmi(int16_t *dst, uint8_t *_src, \
"paddh %[ftmp3], %[ftmp3], %[ftmp4] \n\t" \
"paddh %[ftmp5], %[ftmp5], %[ftmp6] \n\t" \
"paddh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" \
- "gssdlc1 %[ftmp3], 0x07(%[dst]) \n\t" \
- "gssdrc1 %[ftmp3], 0x00(%[dst]) \n\t" \
+ MMI_ULDC1(%[ftmp3], %[dst], 0x00) \
\
"daddi %[x], %[x], -0x01 \n\t" \
PTR_ADDIU "%[src], %[src], 0x04 \n\t" \
@@ -98,7 +94,8 @@ void ff_hevc_put_hevc_qpel_h##w##_8_mmi(int16_t *dst, uint8_t *_src, \
PTR_ADDU "%[src], %[src], %[stride] \n\t" \
PTR_ADDIU "%[dst], %[dst], 0x80 \n\t" \
"bnez %[y], 1b \n\t" \
- : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \
+ : RESTRICT_ASM_ALL64 \
+ [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \
[ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), \
[ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), \
[ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), \
@@ -134,6 +131,7 @@ void ff_hevc_put_hevc_qpel_hv##w##_8_mmi(int16_t *dst, uint8_t *_src, \
int16_t *tmp = tmp_array; \
double ftmp[15]; \
uint64_t rtmp[1]; \
+ DECLARE_VAR_ALL64; \
\
src -= (QPEL_EXTRA_BEFORE * srcstride + 3); \
filter = ff_hevc_qpel_filters[mx - 1]; \
@@ -151,14 +149,10 @@ void ff_hevc_put_hevc_qpel_hv##w##_8_mmi(int16_t *dst, uint8_t *_src, \
\
"1: \n\t" \
"2: \n\t" \
- "gsldlc1 %[ftmp3], 0x07(%[src]) \n\t" \
- "gsldrc1 %[ftmp3], 0x00(%[src]) \n\t" \
- "gsldlc1 %[ftmp4], 0x08(%[src]) \n\t" \
- "gsldrc1 %[ftmp4], 0x01(%[src]) \n\t" \
- "gsldlc1 %[ftmp5], 0x09(%[src]) \n\t" \
- "gsldrc1 %[ftmp5], 0x02(%[src]) \n\t" \
- "gsldlc1 %[ftmp6], 0x0a(%[src]) \n\t" \
- "gsldrc1 %[ftmp6], 0x03(%[src]) \n\t" \
+ MMI_ULDC1(%[ftmp3], %[src], 0x00) \
+ MMI_ULDC1(%[ftmp4], %[src], 0x01) \
+ MMI_ULDC1(%[ftmp5], %[src], 0x02) \
+ MMI_ULDC1(%[ftmp6], %[src], 0x03) \
"punpcklbh %[ftmp7], %[ftmp3], %[ftmp0] \n\t" \
"punpckhbh %[ftmp8], %[ftmp3], %[ftmp0] \n\t" \
"pmullh %[ftmp7], %[ftmp7], %[ftmp1] \n\t" \
@@ -184,8 +178,7 @@ void ff_hevc_put_hevc_qpel_hv##w##_8_mmi(int16_t *dst, uint8_t *_src, \
"paddh %[ftmp3], %[ftmp3], %[ftmp4] \n\t" \
"paddh %[ftmp5], %[ftmp5], %[ftmp6] \n\t" \
"paddh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" \
- "gssdlc1 %[ftmp3], 0x07(%[tmp]) \n\t" \
- "gssdrc1 %[ftmp3], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp3], %[tmp], 0x00) \
\
"daddi %[x], %[x], -0x01 \n\t" \
PTR_ADDIU "%[src], %[src], 0x04 \n\t" \
@@ -199,7 +192,8 @@ void ff_hevc_put_hevc_qpel_hv##w##_8_mmi(int16_t *dst, uint8_t *_src, \
PTR_ADDU "%[src], %[src], %[stride] \n\t" \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
"bnez %[y], 1b \n\t" \
- : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \
+ : RESTRICT_ASM_ALL64 \
+ [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \
[ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), \
[ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), \
[ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), \
@@ -228,29 +222,21 @@ void ff_hevc_put_hevc_qpel_hv##w##_8_mmi(int16_t *dst, uint8_t *_src, \
\
"1: \n\t" \
"2: \n\t" \
- "gsldlc1 %[ftmp3], 0x07(%[tmp]) \n\t" \
- "gsldrc1 %[ftmp3], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp3], %[tmp], 0x00) \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
- "gsldlc1 %[ftmp4], 0x07(%[tmp]) \n\t" \
- "gsldrc1 %[ftmp4], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp4], %[tmp], 0x00) \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
- "gsldlc1 %[ftmp5], 0x07(%[tmp]) \n\t" \
- "gsldrc1 %[ftmp5], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp5], %[tmp], 0x00) \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
- "gsldlc1 %[ftmp6], 0x07(%[tmp]) \n\t" \
- "gsldrc1 %[ftmp6], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp6], %[tmp], 0x00) \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
- "gsldlc1 %[ftmp7], 0x07(%[tmp]) \n\t" \
- "gsldrc1 %[ftmp7], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp7], %[tmp], 0x00) \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
- "gsldlc1 %[ftmp8], 0x07(%[tmp]) \n\t" \
- "gsldrc1 %[ftmp8], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp8], %[tmp], 0x00) \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
- "gsldlc1 %[ftmp9], 0x07(%[tmp]) \n\t" \
- "gsldrc1 %[ftmp9], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp9], %[tmp], 0x00) \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
- "gsldlc1 %[ftmp10], 0x07(%[tmp]) \n\t" \
- "gsldrc1 %[ftmp10], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp10], %[tmp], 0x00) \
PTR_ADDIU "%[tmp], %[tmp], -0x380 \n\t" \
TRANSPOSE_4H(%[ftmp3], %[ftmp4], %[ftmp5], %[ftmp6], \
%[ftmp11], %[ftmp12], %[ftmp13], %[ftmp14]) \
@@ -275,8 +261,7 @@ void ff_hevc_put_hevc_qpel_hv##w##_8_mmi(int16_t *dst, uint8_t *_src, \
"paddw %[ftmp5], %[ftmp5], %[ftmp6] \n\t" \
"psraw %[ftmp5], %[ftmp5], %[ftmp0] \n\t" \
"packsswh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" \
- "gssdlc1 %[ftmp3], 0x07(%[dst]) \n\t" \
- "gssdrc1 %[ftmp3], 0x00(%[dst]) \n\t" \
+ MMI_USDC1(%[ftmp3], %[dst], 0x00) \
\
"daddi %[x], %[x], -0x01 \n\t" \
PTR_ADDIU "%[dst], %[dst], 0x08 \n\t" \
@@ -290,7 +275,8 @@ void ff_hevc_put_hevc_qpel_hv##w##_8_mmi(int16_t *dst, uint8_t *_src, \
PTR_ADDIU "%[dst], %[dst], 0x80 \n\t" \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
"bnez %[y], 1b \n\t" \
- : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \
+ : RESTRICT_ASM_ALL64 \
+ [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \
[ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), \
[ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), \
[ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), \
@@ -333,6 +319,8 @@ void ff_hevc_put_hevc_qpel_bi_h##w##_8_mmi(uint8_t *_dst, \
uint64_t rtmp[1]; \
union av_intfloat64 shift; \
union av_intfloat64 offset; \
+ DECLARE_VAR_ALL64; \
+ DECLARE_VAR_LOW32; \
shift.i = 7; \
offset.i = 64; \
\
@@ -353,14 +341,10 @@ void ff_hevc_put_hevc_qpel_bi_h##w##_8_mmi(uint8_t *_dst, \
"1: \n\t" \
"li %[x], " #x_step " \n\t" \
"2: \n\t" \
- "gsldlc1 %[ftmp3], 0x07(%[src]) \n\t" \
- "gsldrc1 %[ftmp3], 0x00(%[src]) \n\t" \
- "gsldlc1 %[ftmp4], 0x08(%[src]) \n\t" \
- "gsldrc1 %[ftmp4], 0x01(%[src]) \n\t" \
- "gsldlc1 %[ftmp5], 0x09(%[src]) \n\t" \
- "gsldrc1 %[ftmp5], 0x02(%[src]) \n\t" \
- "gsldlc1 %[ftmp6], 0x0a(%[src]) \n\t" \
- "gsldrc1 %[ftmp6], 0x03(%[src]) \n\t" \
+ MMI_ULDC1(%[ftmp3], %[src], 0x00) \
+ MMI_ULDC1(%[ftmp4], %[src], 0x01) \
+ MMI_ULDC1(%[ftmp5], %[src], 0x02) \
+ MMI_ULDC1(%[ftmp6], %[src], 0x03) \
"punpcklbh %[ftmp7], %[ftmp3], %[ftmp0] \n\t" \
"punpckhbh %[ftmp8], %[ftmp3], %[ftmp0] \n\t" \
"pmullh %[ftmp7], %[ftmp7], %[ftmp1] \n\t" \
@@ -387,8 +371,7 @@ void ff_hevc_put_hevc_qpel_bi_h##w##_8_mmi(uint8_t *_dst, \
"paddh %[ftmp5], %[ftmp5], %[ftmp6] \n\t" \
"paddh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" \
"paddh %[ftmp3], %[ftmp3], %[offset] \n\t" \
- "gsldlc1 %[ftmp4], 0x07(%[src2]) \n\t" \
- "gsldrc1 %[ftmp4], 0x00(%[src2]) \n\t" \
+ MMI_ULDC1(%[ftmp4], %[src2], 0x00) \
"li %[rtmp0], 0x10 \n\t" \
"dmtc1 %[rtmp0], %[ftmp8] \n\t" \
"punpcklhw %[ftmp5], %[ftmp0], %[ftmp3] \n\t" \
@@ -407,8 +390,7 @@ void ff_hevc_put_hevc_qpel_bi_h##w##_8_mmi(uint8_t *_dst, \
"pcmpgth %[ftmp7], %[ftmp5], %[ftmp0] \n\t" \
"pand %[ftmp3], %[ftmp5], %[ftmp7] \n\t" \
"packushb %[ftmp3], %[ftmp3], %[ftmp3] \n\t" \
- "gsswlc1 %[ftmp3], 0x03(%[dst]) \n\t" \
- "gsswrc1 %[ftmp3], 0x00(%[dst]) \n\t" \
+ MMI_USWC1(%[ftmp3], %[dst], 0x00) \
\
"daddi %[x], %[x], -0x01 \n\t" \
PTR_ADDIU "%[src], %[src], 0x04 \n\t" \
@@ -424,7 +406,8 @@ void ff_hevc_put_hevc_qpel_bi_h##w##_8_mmi(uint8_t *_dst, \
PTR_ADDU "%[dst], %[dst], %[dst_stride] \n\t" \
PTR_ADDIU "%[src2], %[src2], 0x80 \n\t" \
"bnez %[y], 1b \n\t" \
- : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \
+ : RESTRICT_ASM_ALL64 RESTRICT_ASM_LOW32 \
+ [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \
[ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), \
[ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), \
[ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), \
@@ -469,6 +452,8 @@ void ff_hevc_put_hevc_qpel_bi_hv##w##_8_mmi(uint8_t *_dst, \
uint64_t rtmp[1]; \
union av_intfloat64 shift; \
union av_intfloat64 offset; \
+ DECLARE_VAR_ALL64; \
+ DECLARE_VAR_LOW32; \
shift.i = 7; \
offset.i = 64; \
\
@@ -488,14 +473,10 @@ void ff_hevc_put_hevc_qpel_bi_hv##w##_8_mmi(uint8_t *_dst, \
\
"1: \n\t" \
"2: \n\t" \
- "gsldlc1 %[ftmp3], 0x07(%[src]) \n\t" \
- "gsldrc1 %[ftmp3], 0x00(%[src]) \n\t" \
- "gsldlc1 %[ftmp4], 0x08(%[src]) \n\t" \
- "gsldrc1 %[ftmp4], 0x01(%[src]) \n\t" \
- "gsldlc1 %[ftmp5], 0x09(%[src]) \n\t" \
- "gsldrc1 %[ftmp5], 0x02(%[src]) \n\t" \
- "gsldlc1 %[ftmp6], 0x0a(%[src]) \n\t" \
- "gsldrc1 %[ftmp6], 0x03(%[src]) \n\t" \
+ MMI_ULDC1(%[ftmp3], %[src], 0x00) \
+ MMI_ULDC1(%[ftmp4], %[src], 0x01) \
+ MMI_ULDC1(%[ftmp5], %[src], 0x02) \
+ MMI_ULDC1(%[ftmp6], %[src], 0x03) \
"punpcklbh %[ftmp7], %[ftmp3], %[ftmp0] \n\t" \
"punpckhbh %[ftmp8], %[ftmp3], %[ftmp0] \n\t" \
"pmullh %[ftmp7], %[ftmp7], %[ftmp1] \n\t" \
@@ -521,8 +502,7 @@ void ff_hevc_put_hevc_qpel_bi_hv##w##_8_mmi(uint8_t *_dst, \
"paddh %[ftmp3], %[ftmp3], %[ftmp4] \n\t" \
"paddh %[ftmp5], %[ftmp5], %[ftmp6] \n\t" \
"paddh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" \
- "gssdlc1 %[ftmp3], 0x07(%[tmp]) \n\t" \
- "gssdrc1 %[ftmp3], 0x00(%[tmp]) \n\t" \
+ MMI_USDC1(%[ftmp3], %[tmp], 0x00) \
\
"daddi %[x], %[x], -0x01 \n\t" \
PTR_ADDIU "%[src], %[src], 0x04 \n\t" \
@@ -536,7 +516,8 @@ void ff_hevc_put_hevc_qpel_bi_hv##w##_8_mmi(uint8_t *_dst, \
PTR_ADDU "%[src], %[src], %[stride] \n\t" \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
"bnez %[y], 1b \n\t" \
- : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \
+ : RESTRICT_ASM_ALL64 \
+ [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \
[ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), \
[ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), \
[ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), \
@@ -567,29 +548,21 @@ void ff_hevc_put_hevc_qpel_bi_hv##w##_8_mmi(uint8_t *_dst, \
"1: \n\t" \
"li %[x], " #x_step " \n\t" \
"2: \n\t" \
- "gsldlc1 %[ftmp3], 0x07(%[tmp]) \n\t" \
- "gsldrc1 %[ftmp3], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp3], %[tmp], 0x00) \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
- "gsldlc1 %[ftmp4], 0x07(%[tmp]) \n\t" \
- "gsldrc1 %[ftmp4], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp4], %[tmp], 0x00) \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
- "gsldlc1 %[ftmp5], 0x07(%[tmp]) \n\t" \
- "gsldrc1 %[ftmp5], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp5], %[tmp], 0x00) \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
- "gsldlc1 %[ftmp6], 0x07(%[tmp]) \n\t" \
- "gsldrc1 %[ftmp6], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp6], %[tmp], 0x00) \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
- "gsldlc1 %[ftmp7], 0x07(%[tmp]) \n\t" \
- "gsldrc1 %[ftmp7], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp7], %[tmp], 0x00) \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
- "gsldlc1 %[ftmp8], 0x07(%[tmp]) \n\t" \
- "gsldrc1 %[ftmp8], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp8], %[tmp], 0x00) \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
- "gsldlc1 %[ftmp9], 0x07(%[tmp]) \n\t" \
- "gsldrc1 %[ftmp9], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp9], %[tmp], 0x00) \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
- "gsldlc1 %[ftmp10], 0x07(%[tmp]) \n\t" \
- "gsldrc1 %[ftmp10], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp10], %[tmp], 0x00) \
PTR_ADDIU "%[tmp], %[tmp], -0x380 \n\t" \
TRANSPOSE_4H(%[ftmp3], %[ftmp4], %[ftmp5], %[ftmp6], \
%[ftmp11], %[ftmp12], %[ftmp13], %[ftmp14]) \
@@ -614,8 +587,7 @@ void ff_hevc_put_hevc_qpel_bi_hv##w##_8_mmi(uint8_t *_dst, \
"paddw %[ftmp5], %[ftmp5], %[ftmp6] \n\t" \
"psraw %[ftmp5], %[ftmp5], %[ftmp0] \n\t" \
"packsswh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" \
- "gsldlc1 %[ftmp4], 0x07(%[src2]) \n\t" \
- "gsldrc1 %[ftmp4], 0x00(%[src2]) \n\t" \
+ MMI_ULDC1(%[ftmp4], %[src2], 0x00) \
"pxor %[ftmp7], %[ftmp7], %[ftmp7] \n\t" \
"li %[rtmp0], 0x10 \n\t" \
"dmtc1 %[rtmp0], %[ftmp8] \n\t" \
@@ -637,8 +609,7 @@ void ff_hevc_put_hevc_qpel_bi_hv##w##_8_mmi(uint8_t *_dst, \
"pcmpgth %[ftmp7], %[ftmp5], %[ftmp7] \n\t" \
"pand %[ftmp3], %[ftmp5], %[ftmp7] \n\t" \
"packushb %[ftmp3], %[ftmp3], %[ftmp3] \n\t" \
- "gsswlc1 %[ftmp3], 0x03(%[dst]) \n\t" \
- "gsswrc1 %[ftmp3], 0x00(%[dst]) \n\t" \
+ MMI_USWC1(%[ftmp3], %[dst], 0x00) \
\
"daddi %[x], %[x], -0x01 \n\t" \
PTR_ADDIU "%[src2], %[src2], 0x08 \n\t" \
@@ -654,7 +625,8 @@ void ff_hevc_put_hevc_qpel_bi_hv##w##_8_mmi(uint8_t *_dst, \
PTR_ADDU "%[dst], %[dst], %[stride] \n\t" \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
"bnez %[y], 1b \n\t" \
- : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \
+ : RESTRICT_ASM_ALL64 RESTRICT_ASM_LOW32 \
+ [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \
[ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), \
[ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), \
[ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), \
@@ -700,6 +672,8 @@ void ff_hevc_put_hevc_epel_bi_hv##w##_8_mmi(uint8_t *_dst, \
uint64_t rtmp[1]; \
union av_intfloat64 shift; \
union av_intfloat64 offset; \
+ DECLARE_VAR_ALL64; \
+ DECLARE_VAR_LOW32; \
shift.i = 7; \
offset.i = 64; \
\
@@ -716,14 +690,10 @@ void ff_hevc_put_hevc_epel_bi_hv##w##_8_mmi(uint8_t *_dst, \
\
"1: \n\t" \
"2: \n\t" \
- "gslwlc1 %[ftmp2], 0x03(%[src]) \n\t" \
- "gslwrc1 %[ftmp2], 0x00(%[src]) \n\t" \
- "gslwlc1 %[ftmp3], 0x04(%[src]) \n\t" \
- "gslwrc1 %[ftmp3], 0x01(%[src]) \n\t" \
- "gslwlc1 %[ftmp4], 0x05(%[src]) \n\t" \
- "gslwrc1 %[ftmp4], 0x02(%[src]) \n\t" \
- "gslwlc1 %[ftmp5], 0x06(%[src]) \n\t" \
- "gslwrc1 %[ftmp5], 0x03(%[src]) \n\t" \
+ MMI_ULDC1(%[ftmp3], %[src], 0x00) \
+ MMI_ULDC1(%[ftmp4], %[src], 0x01) \
+ MMI_ULDC1(%[ftmp5], %[src], 0x02) \
+ MMI_ULDC1(%[ftmp6], %[src], 0x03) \
"punpcklbh %[ftmp2], %[ftmp2], %[ftmp0] \n\t" \
"pmullh %[ftmp2], %[ftmp2], %[ftmp1] \n\t" \
"punpcklbh %[ftmp3], %[ftmp3], %[ftmp0] \n\t" \
@@ -737,8 +707,7 @@ void ff_hevc_put_hevc_epel_bi_hv##w##_8_mmi(uint8_t *_dst, \
"paddh %[ftmp2], %[ftmp2], %[ftmp3] \n\t" \
"paddh %[ftmp4], %[ftmp4], %[ftmp5] \n\t" \
"paddh %[ftmp2], %[ftmp2], %[ftmp4] \n\t" \
- "gssdlc1 %[ftmp2], 0x07(%[tmp]) \n\t" \
- "gssdrc1 %[ftmp2], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp2], %[tmp], 0x00) \
\
"daddi %[x], %[x], -0x01 \n\t" \
PTR_ADDIU "%[src], %[src], 0x04 \n\t" \
@@ -752,7 +721,8 @@ void ff_hevc_put_hevc_epel_bi_hv##w##_8_mmi(uint8_t *_dst, \
PTR_ADDU "%[src], %[src], %[stride] \n\t" \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
"bnez %[y], 1b \n\t" \
- : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \
+ : RESTRICT_ASM_ALL64 \
+ [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \
[ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), \
[ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), \
[ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), \
@@ -782,17 +752,13 @@ void ff_hevc_put_hevc_epel_bi_hv##w##_8_mmi(uint8_t *_dst, \
"1: \n\t" \
"li %[x], " #x_step " \n\t" \
"2: \n\t" \
- "gsldlc1 %[ftmp3], 0x07(%[tmp]) \n\t" \
- "gsldrc1 %[ftmp3], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp3], %[tmp], 0x00) \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
- "gsldlc1 %[ftmp4], 0x07(%[tmp]) \n\t" \
- "gsldrc1 %[ftmp4], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp4], %[tmp], 0x00) \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
- "gsldlc1 %[ftmp5], 0x07(%[tmp]) \n\t" \
- "gsldrc1 %[ftmp5], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp5], %[tmp], 0x00) \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
- "gsldlc1 %[ftmp6], 0x07(%[tmp]) \n\t" \
- "gsldrc1 %[ftmp6], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp6], %[tmp], 0x00) \
PTR_ADDIU "%[tmp], %[tmp], -0x180 \n\t" \
TRANSPOSE_4H(%[ftmp3], %[ftmp4], %[ftmp5], %[ftmp6], \
%[ftmp7], %[ftmp8], %[ftmp9], %[ftmp10]) \
@@ -807,8 +773,7 @@ void ff_hevc_put_hevc_epel_bi_hv##w##_8_mmi(uint8_t *_dst, \
"paddw %[ftmp5], %[ftmp5], %[ftmp6] \n\t" \
"psraw %[ftmp5], %[ftmp5], %[ftmp0] \n\t" \
"packsswh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" \
- "gsldlc1 %[ftmp4], 0x07(%[src2]) \n\t" \
- "gsldrc1 %[ftmp4], 0x00(%[src2]) \n\t" \
+ MMI_ULDC1(%[ftmp4], %[tmp], 0x02) \
"li %[rtmp0], 0x10 \n\t" \
"dmtc1 %[rtmp0], %[ftmp8] \n\t" \
"punpcklhw %[ftmp5], %[ftmp2], %[ftmp3] \n\t" \
@@ -829,8 +794,7 @@ void ff_hevc_put_hevc_epel_bi_hv##w##_8_mmi(uint8_t *_dst, \
"pcmpgth %[ftmp7], %[ftmp5], %[ftmp2] \n\t" \
"pand %[ftmp3], %[ftmp5], %[ftmp7] \n\t" \
"packushb %[ftmp3], %[ftmp3], %[ftmp3] \n\t" \
- "gsswlc1 %[ftmp3], 0x03(%[dst]) \n\t" \
- "gsswrc1 %[ftmp3], 0x00(%[dst]) \n\t" \
+ MMI_USWC1(%[ftmp3], %[dst], 0x0) \
\
"daddi %[x], %[x], -0x01 \n\t" \
PTR_ADDIU "%[src2], %[src2], 0x08 \n\t" \
@@ -846,7 +810,8 @@ void ff_hevc_put_hevc_epel_bi_hv##w##_8_mmi(uint8_t *_dst, \
PTR_ADDU "%[dst], %[dst], %[stride] \n\t" \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
"bnez %[y], 1b \n\t" \
- : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \
+ : RESTRICT_ASM_LOW32 RESTRICT_ASM_ALL64 \
+ [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \
[ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), \
[ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), \
[ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), \
@@ -884,6 +849,7 @@ void ff_hevc_put_hevc_pel_bi_pixels##w##_8_mmi(uint8_t *_dst, \
double ftmp[12]; \
uint64_t rtmp[1]; \
union av_intfloat64 shift; \
+ DECLARE_VAR_ALL64; \
shift.i = 7; \
\
y = height; \
@@ -901,12 +867,9 @@ void ff_hevc_put_hevc_pel_bi_pixels##w##_8_mmi(uint8_t *_dst, \
\
"1: \n\t" \
"2: \n\t" \
- "gsldlc1 %[ftmp5], 0x07(%[src]) \n\t" \
- "gsldrc1 %[ftmp5], 0x00(%[src]) \n\t" \
- "gsldlc1 %[ftmp2], 0x07(%[src2]) \n\t" \
- "gsldrc1 %[ftmp2], 0x00(%[src2]) \n\t" \
- "gsldlc1 %[ftmp3], 0x0f(%[src2]) \n\t" \
- "gsldrc1 %[ftmp3], 0x08(%[src2]) \n\t" \
+ MMI_ULDC1(%[ftmp5], %[src], 0x00) \
+ MMI_ULDC1(%[ftmp2], %[src2], 0x00) \
+ MMI_ULDC1(%[ftmp3], %[src2], 0x08) \
"punpcklbh %[ftmp4], %[ftmp5], %[ftmp0] \n\t" \
"punpckhbh %[ftmp5], %[ftmp5], %[ftmp0] \n\t" \
"psllh %[ftmp4], %[ftmp4], %[ftmp1] \n\t" \
@@ -940,8 +903,7 @@ void ff_hevc_put_hevc_pel_bi_pixels##w##_8_mmi(uint8_t *_dst, \
"pand %[ftmp2], %[ftmp2], %[ftmp3] \n\t" \
"pand %[ftmp4], %[ftmp4], %[ftmp5] \n\t" \
"packushb %[ftmp2], %[ftmp2], %[ftmp4] \n\t" \
- "gssdlc1 %[ftmp2], 0x07(%[dst]) \n\t" \
- "gssdrc1 %[ftmp2], 0x00(%[dst]) \n\t" \
+ MMI_USDC1(%[ftmp2], %[dst], 0x0) \
\
"daddi %[x], %[x], -0x01 \n\t" \
PTR_ADDIU "%[src], %[src], 0x08 \n\t" \
@@ -958,7 +920,8 @@ void ff_hevc_put_hevc_pel_bi_pixels##w##_8_mmi(uint8_t *_dst, \
PTR_ADDU "%[dst], %[dst], %[dststride] \n\t" \
PTR_ADDIU "%[src2], %[src2], 0x80 \n\t" \
"bnez %[y], 1b \n\t" \
- : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \
+ : RESTRICT_ASM_ALL64 \
+ [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \
[ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), \
[ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), \
[ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), \
@@ -1000,6 +963,8 @@ void ff_hevc_put_hevc_qpel_uni_hv##w##_8_mmi(uint8_t *_dst, \
uint64_t rtmp[1]; \
union av_intfloat64 shift; \
union av_intfloat64 offset; \
+ DECLARE_VAR_ALL64; \
+ DECLARE_VAR_LOW32; \
shift.i = 6; \
offset.i = 32; \
\
@@ -1019,14 +984,10 @@ void ff_hevc_put_hevc_qpel_uni_hv##w##_8_mmi(uint8_t *_dst, \
\
"1: \n\t" \
"2: \n\t" \
- "gsldlc1 %[ftmp3], 0x07(%[src]) \n\t" \
- "gsldrc1 %[ftmp3], 0x00(%[src]) \n\t" \
- "gsldlc1 %[ftmp4], 0x08(%[src]) \n\t" \
- "gsldrc1 %[ftmp4], 0x01(%[src]) \n\t" \
- "gsldlc1 %[ftmp5], 0x09(%[src]) \n\t" \
- "gsldrc1 %[ftmp5], 0x02(%[src]) \n\t" \
- "gsldlc1 %[ftmp6], 0x0a(%[src]) \n\t" \
- "gsldrc1 %[ftmp6], 0x03(%[src]) \n\t" \
+ MMI_ULDC1(%[ftmp3], %[src], 0x00) \
+ MMI_ULDC1(%[ftmp4], %[src], 0x01) \
+ MMI_ULDC1(%[ftmp5], %[src], 0x02) \
+ MMI_ULDC1(%[ftmp6], %[src], 0x03) \
"punpcklbh %[ftmp7], %[ftmp3], %[ftmp0] \n\t" \
"punpckhbh %[ftmp8], %[ftmp3], %[ftmp0] \n\t" \
"pmullh %[ftmp7], %[ftmp7], %[ftmp1] \n\t" \
@@ -1052,8 +1013,7 @@ void ff_hevc_put_hevc_qpel_uni_hv##w##_8_mmi(uint8_t *_dst, \
"paddh %[ftmp3], %[ftmp3], %[ftmp4] \n\t" \
"paddh %[ftmp5], %[ftmp5], %[ftmp6] \n\t" \
"paddh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" \
- "gssdlc1 %[ftmp3], 0x07(%[tmp]) \n\t" \
- "gssdrc1 %[ftmp3], 0x00(%[tmp]) \n\t" \
+ MMI_USDC1(%[ftmp3], %[tmp], 0x0) \
\
"daddi %[x], %[x], -0x01 \n\t" \
PTR_ADDIU "%[src], %[src], 0x04 \n\t" \
@@ -1067,7 +1027,8 @@ void ff_hevc_put_hevc_qpel_uni_hv##w##_8_mmi(uint8_t *_dst, \
PTR_ADDU "%[src], %[src], %[stride] \n\t" \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
"bnez %[y], 1b \n\t" \
- : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \
+ : RESTRICT_ASM_ALL64 \
+ [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \
[ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), \
[ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), \
[ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), \
@@ -1099,29 +1060,21 @@ void ff_hevc_put_hevc_qpel_uni_hv##w##_8_mmi(uint8_t *_dst, \
"1: \n\t" \
"li %[x], " #x_step " \n\t" \
"2: \n\t" \
- "gsldlc1 %[ftmp3], 0x07(%[tmp]) \n\t" \
- "gsldrc1 %[ftmp3], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp3], %[tmp], 0x00) \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
- "gsldlc1 %[ftmp4], 0x07(%[tmp]) \n\t" \
- "gsldrc1 %[ftmp4], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp4], %[tmp], 0x00) \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
- "gsldlc1 %[ftmp5], 0x07(%[tmp]) \n\t" \
- "gsldrc1 %[ftmp5], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp5], %[tmp], 0x00) \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
- "gsldlc1 %[ftmp6], 0x07(%[tmp]) \n\t" \
- "gsldrc1 %[ftmp6], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp6], %[tmp], 0x00) \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
- "gsldlc1 %[ftmp7], 0x07(%[tmp]) \n\t" \
- "gsldrc1 %[ftmp7], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp7], %[tmp], 0x00) \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
- "gsldlc1 %[ftmp8], 0x07(%[tmp]) \n\t" \
- "gsldrc1 %[ftmp8], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp8], %[tmp], 0x00) \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
- "gsldlc1 %[ftmp9], 0x07(%[tmp]) \n\t" \
- "gsldrc1 %[ftmp9], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp9], %[tmp], 0x00) \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
- "gsldlc1 %[ftmp10], 0x07(%[tmp]) \n\t" \
- "gsldrc1 %[ftmp10], 0x00(%[tmp]) \n\t" \
+ MMI_ULDC1(%[ftmp10], %[tmp], 0x00) \
PTR_ADDIU "%[tmp], %[tmp], -0x380 \n\t" \
TRANSPOSE_4H(%[ftmp3], %[ftmp4], %[ftmp5], %[ftmp6], \
%[ftmp11], %[ftmp12], %[ftmp13], %[ftmp14]) \
@@ -1152,8 +1105,7 @@ void ff_hevc_put_hevc_qpel_uni_hv##w##_8_mmi(uint8_t *_dst, \
"pcmpgth %[ftmp7], %[ftmp3], %[ftmp7] \n\t" \
"pand %[ftmp3], %[ftmp3], %[ftmp7] \n\t" \
"packushb %[ftmp3], %[ftmp3], %[ftmp3] \n\t" \
- "gsswlc1 %[ftmp3], 0x03(%[dst]) \n\t" \
- "gsswrc1 %[ftmp3], 0x00(%[dst]) \n\t" \
+ MMI_USWC1(%[ftmp3], %[dst], 0x00) \
\
"daddi %[x], %[x], -0x01 \n\t" \
PTR_ADDIU "%[tmp], %[tmp], 0x08 \n\t" \
@@ -1166,7 +1118,8 @@ void ff_hevc_put_hevc_qpel_uni_hv##w##_8_mmi(uint8_t *_dst, \
PTR_ADDU "%[dst], %[dst], %[stride] \n\t" \
PTR_ADDIU "%[tmp], %[tmp], 0x80 \n\t" \
"bnez %[y], 1b \n\t" \
- : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \
+ : RESTRICT_ASM_ALL64 RESTRICT_ASM_LOW32 \
+ [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \
[ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), \
[ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), \
[ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), \