author    David Conrad <lessen42@gmail.com>  2008-09-18 03:07:41 +0000
committer David Conrad <lessen42@gmail.com>  2008-09-18 03:07:41 +0000
commit    3f429d11da91424566fe14e9e52a9b02d0b56d41 (patch)
tree      ab3c3a8d60105a54de7bcf38a7b23f9b2f74e339 /libavcodec
parent    896a22b851ed3904ff78ba21374870cc1f5d0cb0 (diff)
Factorize SSE2_(Row|Column)_IDCT into one macro
Originally committed as revision 15353 to svn://svn.ffmpeg.org/ffmpeg/trunk
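
Note: the removed SSE2_Row_IDCT and the retained column body are the same 1D IDCT; they differ only in the rounding add and the final right shift that the column pass applies before storing each result. The new VP3_1D_IDCT_SSE2 macro therefore takes those two operations as arguments (ADD, SHIFT), and the row pass passes no-ops for both. A minimal C sketch of the same macro-parameterization technique; the names ONE_D_PASS, ROUND8 and idct_sketch are illustrative, not from the patch:

#include <stdint.h>

#define NOP(x)                        /* row pass: expands to nothing */
#define ROUND8(x)  ((x) += 8)         /* rounding bias before the shift */
#define SHIFT4(x)  ((x) >>= 4)        /* drop the 4 fractional bits */

/* Shared 1D pass; the per-pass ADD and SHIFT operations are arguments. */
#define ONE_D_PASS(v, n, ADD, SHIFT)       \
    do {                                   \
        for (int i = 0; i < (n); i++) {    \
            ADD((v)[i]);                   \
            SHIFT((v)[i]);                 \
        }                                  \
    } while (0)

static void idct_sketch(int16_t block[64])
{
    ONE_D_PASS(block, 64, NOP,    NOP);    /* rows: keep full precision */
    /* transpose goes here */
    ONE_D_PASS(block, 64, ROUND8, SHIFT4); /* columns: (x + 8) >> 4 */
}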
Diffstat (limited to 'libavcodec')
-rw-r--r--  libavcodec/i386/vp3dsp_sse2.c | 135
1 file changed, 19 insertions(+), 116 deletions(-)
diff --git a/libavcodec/i386/vp3dsp_sse2.c b/libavcodec/i386/vp3dsp_sse2.c
index 111ff21422..bdf662899f 100644
--- a/libavcodec/i386/vp3dsp_sse2.c
+++ b/libavcodec/i386/vp3dsp_sse2.c
@@ -38,7 +38,7 @@ DECLARE_ALIGNED_16(const uint16_t, ff_vp3_idct_data[7 * 8]) =
};
-#define SSE2_Column_IDCT() \
+#define VP3_1D_IDCT_SSE2(ADD, SHIFT) \
"movdqa "I(3)", %%xmm2 \n\t" /* xmm2 = i3 */ \
"movdqa "C(3)", %%xmm6 \n\t" /* xmm6 = c3 */ \
"movdqa %%xmm2, %%xmm4 \n\t" /* xmm4 = i3 */ \
@@ -113,142 +113,43 @@ DECLARE_ALIGNED_16(const uint16_t, ff_vp3_idct_data[7 * 8]) =
"paddsw %%xmm6, %%xmm2 \n\t" /* xmm2 = F + A. = A.. */ \
"paddw %%xmm3, %%xmm4 \n\t" /* xmm4 = c4 * ( i0 + i4 ) = 3 */ \
"psubsw %%xmm1, %%xmm2 \n\t" /* xmm2 = A.. - H. = R2 */ \
- "paddsw "OC_8", %%xmm2 \n\t" /* Adjust R2 and R1 before shifting */ \
+ ADD(%%xmm2) /* Adjust R2 and R1 before shifting */ \
"paddsw %%xmm1, %%xmm1 \n\t" /* xmm1 = H. + H. */ \
"paddsw %%xmm2, %%xmm1 \n\t" /* xmm1 = A.. + H. = R1 */ \
- "psraw $4, %%xmm2 \n\t" /* xmm2 = op2 */ \
+ SHIFT(%%xmm2) /* xmm2 = op2 */ \
"psubsw %%xmm7, %%xmm4 \n\t" /* xmm4 = E - G = E. */ \
- "psraw $4, %%xmm1 \n\t" /* xmm1 = op1 */ \
+ SHIFT(%%xmm1) /* xmm1 = op1 */ \
"movdqa "I(2)", %%xmm3 \n\t" /* Load D. from I(2) */ \
"paddsw %%xmm7, %%xmm7 \n\t" /* xmm7 = G + G */ \
"movdqa %%xmm2, "O(2)" \n\t" /* Write out op2 */ \
"paddsw %%xmm4, %%xmm7 \n\t" /* xmm7 = E + G = G. */ \
"movdqa %%xmm1, "O(1)" \n\t" /* Write out op1 */ \
"psubsw %%xmm3, %%xmm4 \n\t" /* xmm4 = E. - D. = R4 */ \
- "paddsw "OC_8", %%xmm4 \n\t" /* Adjust R4 and R3 before shifting */ \
+ ADD(%%xmm4) /* Adjust R4 and R3 before shifting */ \
"paddsw %%xmm3, %%xmm3 \n\t" /* xmm3 = D. + D. */ \
"paddsw %%xmm4, %%xmm3 \n\t" /* xmm3 = E. + D. = R3 */ \
- "psraw $4, %%xmm4 \n\t" /* xmm4 = op4 */ \
+ SHIFT(%%xmm4) /* xmm4 = op4 */ \
"psubsw %%xmm5, %%xmm6 \n\t" /* xmm6 = F. - B..= R6 */ \
- "psraw $4, %%xmm3 \n\t" /* xmm3 = op3 */ \
- "paddsw "OC_8", %%xmm6 \n\t" /* Adjust R6 and R5 before shifting */ \
+ SHIFT(%%xmm3) /* xmm3 = op3 */ \
+ ADD(%%xmm6) /* Adjust R6 and R5 before shifting */ \
"paddsw %%xmm5, %%xmm5 \n\t" /* xmm5 = B.. + B.. */ \
"paddsw %%xmm6, %%xmm5 \n\t" /* xmm5 = F. + B.. = R5 */ \
- "psraw $4, %%xmm6 \n\t" /* xmm6 = op6 */ \
+ SHIFT(%%xmm6) /* xmm6 = op6 */ \
"movdqa %%xmm4, "O(4)" \n\t" /* Write out op4 */ \
- "psraw $4, %%xmm5 \n\t" /* xmm5 = op5 */ \
+ SHIFT(%%xmm5) /* xmm5 = op5 */ \
"movdqa %%xmm3, "O(3)" \n\t" /* Write out op3 */ \
"psubsw %%xmm0, %%xmm7 \n\t" /* xmm7 = G. - C. = R7 */ \
- "paddsw "OC_8", %%xmm7 \n\t" /* Adjust R7 and R0 before shifting */ \
+ ADD(%%xmm7) /* Adjust R7 and R0 before shifting */ \
"paddsw %%xmm0, %%xmm0 \n\t" /* xmm0 = C. + C. */ \
"paddsw %%xmm7, %%xmm0 \n\t" /* xmm0 = G. + C. */ \
- "psraw $4, %%xmm7 \n\t" /* xmm7 = op7 */ \
+ SHIFT(%%xmm7) /* xmm7 = op7 */ \
"movdqa %%xmm6, "O(6)" \n\t" /* Write out op6 */ \
- "psraw $4, %%xmm0 \n\t" /* xmm0 = op0 */ \
+ SHIFT(%%xmm0) /* xmm0 = op0 */ \
"movdqa %%xmm5, "O(5)" \n\t" /* Write out op5 */ \
"movdqa %%xmm7, "O(7)" \n\t" /* Write out op7 */ \
"movdqa %%xmm0, "O(0)" \n\t" /* Write out op0 */
-#define SSE2_Row_IDCT() \
- "movdqa "I(3)", %%xmm2 \n\t" /* xmm2 = i3 */ \
- "movdqa "C(3)", %%xmm6 \n\t" /* xmm6 = c3 */ \
- "movdqa %%xmm2, %%xmm4 \n\t" /* xmm4 = i3 */ \
- "movdqa "I(5)", %%xmm7 \n\t" /* xmm7 = i5 */ \
- "pmulhw %%xmm6, %%xmm4 \n\t" /* xmm4 = c3 * i3 - i3 */ \
- "movdqa "C(5)", %%xmm1 \n\t" /* xmm1 = c5 */ \
- "pmulhw %%xmm7, %%xmm6 \n\t" /* xmm6 = c3 * i5 - i5 */ \
- "movdqa %%xmm1, %%xmm5 \n\t" /* xmm5 = c5 */ \
- "pmulhw %%xmm2, %%xmm1 \n\t" /* xmm1 = c5 * i3 - i3 */ \
- "movdqa "I(1)", %%xmm3 \n\t" /* xmm3 = i1 */ \
- "pmulhw %%xmm7, %%xmm5 \n\t" /* xmm5 = c5 * i5 - i5 */ \
- "movdqa "C(1)", %%xmm0 \n\t" /* xmm0 = c1 */ \
- "paddw %%xmm2, %%xmm4 \n\t" /* xmm4 = c3 * i3 */ \
- "paddw %%xmm7, %%xmm6 \n\t" /* xmm6 = c3 * i5 */ \
- "paddw %%xmm1, %%xmm2 \n\t" /* xmm2 = c5 * i3 */ \
- "movdqa "I(7)", %%xmm1 \n\t" /* xmm1 = i7 */ \
- "paddw %%xmm5, %%xmm7 \n\t" /* xmm7 = c5 * i5 */ \
- "movdqa %%xmm0, %%xmm5 \n\t" /* xmm5 = c1 */ \
- "pmulhw %%xmm3, %%xmm0 \n\t" /* xmm0 = c1 * i1 - i1 */ \
- "paddsw %%xmm7, %%xmm4 \n\t" /* xmm4 = c3 * i3 + c5 * i5 = C */ \
- "pmulhw %%xmm1, %%xmm5 \n\t" /* xmm5 = c1 * i7 - i7 */ \
- "movdqa "C(7)", %%xmm7 \n\t" /* xmm7 = c7 */ \
- "psubsw %%xmm2, %%xmm6 \n\t" /* xmm6 = c3 * i5 - c5 * i3 = D */ \
- "paddw %%xmm3, %%xmm0 \n\t" /* xmm0 = c1 * i1 */ \
- "pmulhw %%xmm7, %%xmm3 \n\t" /* xmm3 = c7 * i1 */ \
- "movdqa "I(2)", %%xmm2 \n\t" /* xmm2 = i2 */ \
- "pmulhw %%xmm1, %%xmm7 \n\t" /* xmm7 = c7 * i7 */ \
- "paddw %%xmm1, %%xmm5 \n\t" /* xmm5 = c1 * i7 */ \
- "movdqa %%xmm2, %%xmm1 \n\t" /* xmm1 = i2 */ \
- "pmulhw "C(2)", %%xmm2 \n\t" /* xmm2 = i2 * c2 -i2 */ \
- "psubsw %%xmm5, %%xmm3 \n\t" /* xmm3 = c7 * i1 - c1 * i7 = B */ \
- "movdqa "I(6)", %%xmm5 \n\t" /* xmm5 = i6 */ \
- "paddsw %%xmm7, %%xmm0 \n\t" /* xmm0 = c1 * i1 + c7 * i7 = A */ \
- "movdqa %%xmm5, %%xmm7 \n\t" /* xmm7 = i6 */ \
- "psubsw %%xmm4, %%xmm0 \n\t" /* xmm0 = A - C */ \
- "pmulhw "C(2)", %%xmm5 \n\t" /* xmm5 = c2 * i6 - i6 */ \
- "paddw %%xmm1, %%xmm2 \n\t" /* xmm2 = i2 * c2 */ \
- "pmulhw "C(6)", %%xmm1 \n\t" /* xmm1 = c6 * i2 */ \
- "paddsw %%xmm4, %%xmm4 \n\t" /* xmm4 = C + C */ \
- "paddsw %%xmm0, %%xmm4 \n\t" /* xmm4 = A + C = C. */ \
- "psubsw %%xmm6, %%xmm3 \n\t" /* xmm3 = B - D */ \
- "paddw %%xmm7, %%xmm5 \n\t" /* xmm5 = c2 * i6 */ \
- "paddsw %%xmm6, %%xmm6 \n\t" /* xmm6 = D + D */ \
- "pmulhw "C(6)", %%xmm7 \n\t" /* xmm7 = c6 * i6 */ \
- "paddsw %%xmm3, %%xmm6 \n\t" /* xmm6 = B + D = D. */ \
- "movdqa %%xmm4, "I(1)" \n\t" /* Save C. at I(1) */ \
- "psubsw %%xmm5, %%xmm1 \n\t" /* xmm1 = c6 * i2 - c2 * i6 = H */ \
- "movdqa "C(4)", %%xmm4 \n\t" /* xmm4 = c4 */ \
- "movdqa %%xmm3, %%xmm5 \n\t" /* xmm5 = B - D */ \
- "pmulhw %%xmm4, %%xmm3 \n\t" /* xmm3 = ( c4 -1 ) * ( B - D ) */ \
- "paddsw %%xmm2, %%xmm7 \n\t" /* xmm7 = c2 * i2 + c6 * i6 = G */ \
- "movdqa %%xmm6, "I(2)" \n\t" /* Save D. at I(2) */ \
- "movdqa %%xmm0, %%xmm2 \n\t" /* xmm2 = A - C */ \
- "movdqa "I(0)", %%xmm6 \n\t" /* xmm6 = i0 */ \
- "pmulhw %%xmm4, %%xmm0 \n\t" /* xmm0 = ( c4 - 1 ) * ( A - C ) = A. */ \
- "paddw %%xmm3, %%xmm5 \n\t" /* xmm5 = c4 * ( B - D ) = B. */ \
- "movdqa "I(4)", %%xmm3 \n\t" /* xmm3 = i4 */ \
- "psubsw %%xmm1, %%xmm5 \n\t" /* xmm5 = B. - H = B.. */ \
- "paddw %%xmm0, %%xmm2 \n\t" /* xmm2 = c4 * ( A - C) = A. */ \
- "psubsw %%xmm3, %%xmm6 \n\t" /* xmm6 = i0 - i4 */ \
- "movdqa %%xmm6, %%xmm0 \n\t" /* xmm0 = i0 - i4 */ \
- "pmulhw %%xmm4, %%xmm6 \n\t" /* xmm6 = ( c4 - 1 ) * ( i0 - i4 ) = F */ \
- "paddsw %%xmm3, %%xmm3 \n\t" /* xmm3 = i4 + i4 */ \
- "paddsw %%xmm1, %%xmm1 \n\t" /* xmm1 = H + H */ \
- "paddsw %%xmm0, %%xmm3 \n\t" /* xmm3 = i0 + i4 */ \
- "paddsw %%xmm5, %%xmm1 \n\t" /* xmm1 = B. + H = H. */ \
- "pmulhw %%xmm3, %%xmm4 \n\t" /* xmm4 = ( c4 - 1 ) * ( i0 + i4 ) */ \
- "paddw %%xmm0, %%xmm6 \n\t" /* xmm6 = c4 * ( i0 - i4 ) */ \
- "psubsw %%xmm2, %%xmm6 \n\t" /* xmm6 = F - A. = F. */ \
- "paddsw %%xmm2, %%xmm2 \n\t" /* xmm2 = A. + A. */ \
- "movdqa "I(1)", %%xmm0 \n\t" /* Load C. from I(1) */ \
- "paddsw %%xmm6, %%xmm2 \n\t" /* xmm2 = F + A. = A.. */ \
- "paddw %%xmm3, %%xmm4 \n\t" /* xmm4 = c4 * ( i0 + i4 ) = 3 */ \
- "psubsw %%xmm1, %%xmm2 \n\t" /* xmm2 = A.. - H. = R2 */ \
- "paddsw %%xmm1, %%xmm1 \n\t" /* xmm1 = H. + H. */ \
- "paddsw %%xmm2, %%xmm1 \n\t" /* xmm1 = A.. + H. = R1 */ \
- "psubsw %%xmm7, %%xmm4 \n\t" /* xmm4 = E - G = E. */ \
- "movdqa "I(2)", %%xmm3 \n\t" /* Load D. from I(2) */ \
- "paddsw %%xmm7, %%xmm7 \n\t" /* xmm7 = G + G */ \
- "movdqa %%xmm2, "I(2)" \n\t" /* Write out op2 */ \
- "paddsw %%xmm4, %%xmm7 \n\t" /* xmm7 = E + G = G. */ \
- "movdqa %%xmm1, "I(1)" \n\t" /* Write out op1 */ \
- "psubsw %%xmm3, %%xmm4 \n\t" /* xmm4 = E. - D. = R4 */ \
- "paddsw %%xmm3, %%xmm3 \n\t" /* xmm3 = D. + D. */ \
- "paddsw %%xmm4, %%xmm3 \n\t" /* xmm3 = E. + D. = R3 */ \
- "psubsw %%xmm5, %%xmm6 \n\t" /* xmm6 = F. - B..= R6 */ \
- "paddsw %%xmm5, %%xmm5 \n\t" /* xmm5 = B.. + B.. */ \
- "paddsw %%xmm6, %%xmm5 \n\t" /* xmm5 = F. + B.. = R5 */ \
- "movdqa %%xmm4, "I(4)" \n\t" /* Write out op4 */ \
- "movdqa %%xmm3, "I(3)" \n\t" /* Write out op3 */ \
- "psubsw %%xmm0, %%xmm7 \n\t" /* xmm7 = G. - C. = R7 */ \
- "paddsw %%xmm0, %%xmm0 \n\t" /* xmm0 = C. + C. */ \
- "paddsw %%xmm7, %%xmm0 \n\t" /* xmm0 = G. + C. */ \
- "movdqa %%xmm6, "I(6)" \n\t" /* Write out op6 */ \
- "movdqa %%xmm5, "I(5)" \n\t" /* Write out op5 */ \
- "movdqa %%xmm7, "I(7)" \n\t" /* Write out op7 */ \
- "movdqa %%xmm0, "I(0)" \n\t" /* Write out op0 */
-
#define SSE2_Transpose() \
"movdqa "I(4)", %%xmm4 \n\t" /* xmm4=e7e6e5e4e3e2e1e0 */ \
"movdqa "I(5)", %%xmm0 \n\t" /* xmm4=f7f6f5f4f3f2f1f0 */ \
@@ -305,20 +206,22 @@ DECLARE_ALIGNED_16(const uint16_t, ff_vp3_idct_data[7 * 8]) =
"movdqa %%xmm5, "I(6)" \n\t" /* save I(6) */ \
"movdqa %%xmm7, "I(7)" \n\t" /* save I(7) */
+#define NOP(xmm)
+#define SHIFT4(xmm) "psraw $4, "#xmm"\n\t"
+#define ADD8(xmm) "paddsw %2, "#xmm"\n\t"
+
void ff_vp3_idct_sse2(int16_t *input_data)
{
-#define OC_8 "%2"
-
#define I(x) AV_STRINGIFY(16*x)"(%0)"
#define O(x) I(x)
#define C(x) AV_STRINGIFY(16*(x-1))"(%1)"
asm volatile (
- SSE2_Row_IDCT()
+ VP3_1D_IDCT_SSE2(NOP, NOP)
SSE2_Transpose()
- SSE2_Column_IDCT()
+ VP3_1D_IDCT_SSE2(ADD8, SHIFT4)
:: "r"(input_data), "r"(ff_vp3_idct_data), "m"(ff_pw_8)
);
}
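
For reference, ADD8 reaches the third asm input operand as %2, i.e. "m"(ff_pw_8); by FFmpeg's pw naming convention that should be packed 16-bit words of value 8 (an assumption from the name, not restated in this patch). The ADD8 + SHIFT4 pair in the column pass is the usual rounding descale; a scalar equivalent, as a sketch:

static inline int16_t descale(int x)
{
    return (int16_t)((x + 8) >> 4); /* add the bias, then arithmetic shift right by 4 */
}

Note that paddsw is a saturating add, clamping to the int16_t range; the plain C sketch does not model that saturation.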