summary refs log tree commit diff
path: root/transfer_interp.asm
diff options
context:
space:
mode:
Diffstat (limited to 'transfer_interp.asm')
-rw-r--r--  transfer_interp.asm  194
1 files changed, 166 insertions, 28 deletions
diff --git a/transfer_interp.asm b/transfer_interp.asm
index b7c9af5..110203e 100644
--- a/transfer_interp.asm
+++ b/transfer_interp.asm
@@ -22,54 +22,192 @@
SECTION .text
+; double
+%define ELEM_SIZE 8
+
+; transpose a 4x4 matrix of doubles, supplied in %1-%4
+; %5, %6: temporary registers (clobbered)
+%macro TRANSPOSE4x4 6
+ unpcklpd %5, %1, %2 ; %5 = 00 10 02 12
+ unpckhpd %6, %1, %2 ; %6 = 01 11 03 13
+ unpcklpd %1, %3, %4 ; %1 = 20 30 22 32
+ unpckhpd %4, %3, %4 ; %4 = 21 31 23 33
+
+ vperm2f128 %3, %5, %1, 00110001b ; %3 = 02 12 22 32
+ vinsertf128 %1, %5, xmm%1, 1 ; %1 = 00 10 20 30
+ vinsertf128 %2, %6, xmm%4, 1 ; %2 = 01 11 21 31
+ vperm2f128 %4, %6, %4, 00110001b ; %4 = 03 13 23 33
+%endmacro
+
INIT_YMM avx2
-cglobal transfer_interp_line_cont_4, 7, 8, 6, dst, dst_len, src, src_stride, idx_x, fact_x, fact_y,\
- idx_x_val
+cglobal transfer_interp_line_cont_4, 9, 11, 16, dst, dst_len, src, src_stride, idx_x, fact_x, fact_x_stride, fact_y, fact_y_stride,\
+ idx_x_val, fact_x3
+ shl fact_y_strideq, 3
+ shl fact_x_strideq, 3
shl src_strideq, 3
shl dst_lenq, 3
add dstq, dst_lenq
add idx_xq, dst_lenq
- lea fact_xq, [fact_xq + 4 * dst_lenq]
+ add fact_xq, dst_lenq
neg dst_lenq
; from now on, the register that held the line size is used as the offset into data arrays
%define offsetq dst_lenq
- movu m0, [fact_yq]
- SPLATPD m1, m0, 1 ; fact y + 1 -> m1
- SPLATPD m2, m0, 2 ; fact y + 2 -> m2
- SPLATPD m3, m0, 3 ; fact y + 3 -> m3
- SPLATPD m0, m0, 0 ; fact y + 0 -> m0
+ ; load the y interpolation factors
+ SPLATPD m0, [fact_yq + fact_y_strideq * 0], 0 ; fact y + 0 -> m0
+ SPLATPD m1, [fact_yq + fact_y_strideq * 1], 0 ; fact y + 1 -> m1
+ SPLATPD m2, [fact_yq + fact_y_strideq * 2], 0 ; fact y + 2 -> m2
+ add fact_yq, fact_y_strideq
+ SPLATPD m3, [fact_yq + fact_y_strideq * 2], 0 ; fact y + 3 -> m3
+
+ ; reuse the now unneeded fact_y[_stride] registers
+ %define fact_x1q fact_yq
+ %define fact_x2q fact_y_strideq
+ lea fact_x1q, [fact_xq + fact_x_strideq]
+ lea fact_x2q, [fact_xq + fact_x_strideq * 2]
+ lea fact_x3q, [fact_x1q + fact_x_strideq * 2]
.loop:
- mov idx_x_valq, [idx_xq + offsetq]
- shl idx_x_valq, 3
+ ; load the x interpolation factors
+ movu m4, [fact_xq + offsetq] ; fact x + 0 -> m4
+ movu m5, [fact_x1q + offsetq] ; fact x + 1 -> m5
+ movu m6, [fact_x2q + offsetq] ; fact x + 2 -> m6
+ movu m7, [fact_x3q + offsetq] ; fact x + 3 -> m7
+
+ ; load the x indices of the source values
+ movu m15, [idx_xq + offsetq]
+ psllq m15, m15, 3
+
+ movq idx_x_valq, xm15
+ movu m8, [srcq + idx_x_valq]
+
+ vpermq m13, m15, 1
+ movq idx_x_valq, xm13
+ movu m9, [srcq + idx_x_valq]
+
+ vpermq m13, m15, 2
+ movq idx_x_valq, xm13
+ movu m10, [srcq + idx_x_valq]
+
+ vpermq m13, m15, 3
+ movq idx_x_valq, xm13
+ movu m11, [srcq + idx_x_valq]
+
+ TRANSPOSE4x4 m8, m9, m10, m11, m12, m13
+
+ mulpd m14, m8, m4
+ vfmadd231pd m14, m9, m5
+ vfmadd231pd m14, m10, m6
+ vfmadd231pd m14, m11, m7
+ mulpd m14, m0
+
+ movq xm13, src_strideq
+ vbroadcastsd m13, xm13
+ paddq m15, m13
+
+ movq idx_x_valq, xm15
+ movu m8, [srcq + idx_x_valq]
+
+ vpermq m13, m15, 1
+ movq idx_x_valq, xm13
+ movu m9, [srcq + idx_x_valq]
- xorpd m4, m4
+ vpermq m13, m15, 2
+ movq idx_x_valq, xm13
+ movu m10, [srcq + idx_x_valq]
- movu m5, [fact_xq + 4 * offsetq]
+ vpermq m13, m15, 3
+ movq idx_x_valq, xm13
+ movu m11, [srcq + idx_x_valq]
- mulpd m6, m5, [srcq + idx_x_valq]
- mulpd m6, m0
+ TRANSPOSE4x4 m8, m9, m10, m11, m12, m13
- add idx_x_valq, src_strideq
- mulpd m7, m5, [srcq + idx_x_valq]
- vfmadd231pd m6, m7, m1
+ mulpd m13, m8, m4
+ vfmadd231pd m13, m9, m5
+ vfmadd231pd m13, m10, m6
+ vfmadd231pd m13, m11, m7
+ vfmadd231pd m14, m13, m1
- add idx_x_valq, src_strideq
- mulpd m7, m5, [srcq + idx_x_valq]
- vfmadd231pd m6, m7, m2
+ movq xm13, src_strideq
+ vbroadcastsd m13, xm13
+ paddq m15, m13
- add idx_x_valq, src_strideq
- mulpd m7, m5, [srcq + idx_x_valq]
- vfmadd231pd m6, m7, m3
+ movq idx_x_valq, xm15
+ movu m8, [srcq + idx_x_valq]
- haddpd m6, m6
- vpermq m6, m6, 00001000b
- haddpd m6, m6
+ vpermq m13, m15, 1
+ movq idx_x_valq, xm13
+ movu m9, [srcq + idx_x_valq]
- movq [dstq + offsetq], xm6
- add offsetq, 8
+ vpermq m13, m15, 2
+ movq idx_x_valq, xm13
+ movu m10, [srcq + idx_x_valq]
+
+ vpermq m13, m15, 3
+ movq idx_x_valq, xm13
+ movu m11, [srcq + idx_x_valq]
+
+ TRANSPOSE4x4 m8, m9, m10, m11, m12, m13
+
+ mulpd m13, m8, m4
+ vfmadd231pd m13, m9, m5
+ vfmadd231pd m13, m10, m6
+ vfmadd231pd m13, m11, m7
+ vfmadd231pd m14, m13, m2
+
+ movq xm13, src_strideq
+ vbroadcastsd m13, xm13
+ paddq m15, m13
+
+ movq idx_x_valq, xm15
+ movu m8, [srcq + idx_x_valq]
+
+ vpermq m13, m15, 1
+ movq idx_x_valq, xm13
+ movu m9, [srcq + idx_x_valq]
+
+ vpermq m13, m15, 2
+ movq idx_x_valq, xm13
+ movu m10, [srcq + idx_x_valq]
+
+ vpermq m13, m15, 3
+ movq idx_x_valq, xm13
+ movu m11, [srcq + idx_x_valq]
+
+ TRANSPOSE4x4 m8, m9, m10, m11, m12, m13
+
+ mulpd m13, m8, m4
+ vfmadd231pd m13, m9, m5
+ vfmadd231pd m13, m10, m6
+ vfmadd231pd m13, m11, m7
+ vfmadd231pd m14, m13, m3
+
+ add offsetq, mmsize
+ jg .store_partial
+ movu [dstq + offsetq - mmsize], m14
js .loop
+ jmp .finish
+
+.store_partial:
+ sub offsetq, ELEM_SIZE
+ jz .store3
+ sub offsetq, ELEM_SIZE
+ jz .store2
+
+.store1:
+ ; offsetq is now mmsize - 2*ELEM_SIZE bytes past the write position
+ movq [dstq + offsetq - mmsize + 2 * ELEM_SIZE], xm14
+ jmp .finish
+.store2:
+ ; offsetq is now mmsize - 2*ELEM_SIZE bytes past the write position
+ movu [dstq + offsetq - mmsize + 2 * ELEM_SIZE], xm14
+ jmp .finish
+.store3:
+ ; offsetq is now mmsize - 1*ELEM_SIZE bytes past the write position
+ movu [dstq + offsetq - mmsize + 1 * ELEM_SIZE], xm14
+ vextractf128 xm14, m14, 1
+ movq [dstq + offsetq - mmsize + 3 * ELEM_SIZE], xm14
+.finish:
RET