author    Ronald S. Bultje <rsbultje@gmail.com>    2012-03-12 21:19:02 -0700
committer Ronald S. Bultje <rsbultje@gmail.com>    2012-03-14 20:09:53 -0700
commit    45fdcc8e2d5ba0b8b2996278a4fffa5a57092c79 (patch)
tree      7744ee138794b627fc957c04dfeefb6004e341f5 /libswscale/x86
parent    98b9da2ac783861b73412941093e970f8d41b870 (diff)
swscale: convert hscale() to use named arguments.
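
x86inc.asm's cglobal takes an optional trailing list of argument names after the
register/xmm counts; for each name it defines size-suffixed aliases (nameq, named,
namew, nameb, plus namem/namemp for memory operands) that replace raw r0..rN
references. In the hunks below, r0 becomes pos0q, r1 dstq, r2 wq, r3 srcq (srcmemq
in the variable-filter-size "X" variant), r4 filterq, r5 fltposq, and r6 pos1q
(fltsizeq in the "X" variant). The local defines are renamed to match: r2shr
becomes wshr, r6sub becomes dlt, and src_reg/r1x/filter2 become srcq/pos1q/srcendq.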
Diffstat (limited to 'libswscale/x86')
-rw-r--r--  libswscale/x86/scale.asm  192
1 file changed, 98 insertions, 94 deletions
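
To illustrate the mechanism itself, a minimal before/after sketch under assumed
names (fill_n, dst, val and cnt are invented for this example and appear nowhere
in the patch):

; before: arguments referenced by raw register index
cglobal fill_n, 3, 3, 0
    ; r0 = dst, r1 = val, r2 = cnt -- only the C prototype documents this
.loop:
    mov   byte [r0], r1b       ; *dst = (uint8_t) val
    inc   r0
    dec   r2                   ; assumes cnt > 0
    jg    .loop
    RET

; after: a trailing name list on cglobal; x86inc defines dstq, valb, cntq,
; etc. as aliases for the correct register (or stack slot) on each ABI
cglobal fill_n, 3, 3, 0, dst, val, cnt
.loop:
    mov   byte [dstq], valb    ; same instruction, self-documenting operands
    inc   dstq
    dec   cntq                 ; assumes cnt > 0
    jg    .loop
    RET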
diff --git a/libswscale/x86/scale.asm b/libswscale/x86/scale.asm
index 4e6d9c3762..f7ed45fcf3 100644
--- a/libswscale/x86/scale.asm
+++ b/libswscale/x86/scale.asm
@@ -50,9 +50,13 @@ SECTION .text
; SCALE_FUNC source_width, intermediate_nbits, filtersize, filtersuffix, n_args, n_xmm
%macro SCALE_FUNC 6
-cglobal hscale%1to%2_%4, %5, 7, %6
+%ifnidn %3, X
+cglobal hscale%1to%2_%4, %5, 7, %6, pos0, dst, w, src, filter, fltpos, pos1
+%else
+cglobal hscale%1to%2_%4, %5, 7, %6, pos0, dst, w, srcmem, filter, fltpos, fltsize
+%endif
%if ARCH_X86_64
- movsxd r2, r2d
+ movsxd wq, wd
%define mov32 movsxd
%else ; x86-32
%define mov32 mov
@@ -87,48 +91,48 @@ cglobal hscale%1to%2_%4, %5, 7, %6
; setup loop
%if %3 == 8
- shl r2, 1 ; this allows *16 (i.e. now *8) in lea instructions for the 8-tap filter
-%define r2shr 1
+ shl wq, 1 ; this allows *16 (i.e. now *8) in lea instructions for the 8-tap filter
+%define wshr 1
%else ; %3 == 4
-%define r2shr 0
+%define wshr 0
%endif ; %3 == 8
- lea r4, [r4+r2*8]
+ lea filterq, [filterq+wq*8]
%if %2 == 15
- lea r1, [r1+r2*(2>>r2shr)]
+ lea dstq, [dstq+wq*(2>>wshr)]
%else ; %2 == 19
- lea r1, [r1+r2*(4>>r2shr)]
+ lea dstq, [dstq+wq*(4>>wshr)]
%endif ; %2 == 15/19
- lea r5, [r5+r2*(4>>r2shr)]
- neg r2
+ lea fltposq, [fltposq+wq*(4>>wshr)]
+ neg wq
.loop:
%if %3 == 4 ; filterSize == 4 scaling
; load 2x4 or 4x4 source pixels into m0/m1
- mov32 r0, dword [r5+r2*4+0] ; filterPos[0]
- mov32 r6, dword [r5+r2*4+4] ; filterPos[1]
- movlh m0, [r3+r0*srcmul] ; src[filterPos[0] + {0,1,2,3}]
+ mov32 pos0q, dword [fltposq+wq*4+ 0] ; filterPos[0]
+ mov32 pos1q, dword [fltposq+wq*4+ 4] ; filterPos[1]
+ movlh m0, [srcq+pos0q*srcmul] ; src[filterPos[0] + {0,1,2,3}]
%if mmsize == 8
- movlh m1, [r3+r6*srcmul] ; src[filterPos[1] + {0,1,2,3}]
+ movlh m1, [srcq+pos1q*srcmul] ; src[filterPos[1] + {0,1,2,3}]
%else ; mmsize == 16
%if %1 > 8
- movhps m0, [r3+r6*srcmul] ; src[filterPos[1] + {0,1,2,3}]
+ movhps m0, [srcq+pos1q*srcmul] ; src[filterPos[1] + {0,1,2,3}]
%else ; %1 == 8
- movd m4, [r3+r6*srcmul] ; src[filterPos[1] + {0,1,2,3}]
+ movd m4, [srcq+pos1q*srcmul] ; src[filterPos[1] + {0,1,2,3}]
%endif
- mov32 r0, dword [r5+r2*4+8] ; filterPos[2]
- mov32 r6, dword [r5+r2*4+12] ; filterPos[3]
- movlh m1, [r3+r0*srcmul] ; src[filterPos[2] + {0,1,2,3}]
+ mov32 pos0q, dword [fltposq+wq*4+ 8] ; filterPos[2]
+ mov32 pos1q, dword [fltposq+wq*4+12] ; filterPos[3]
+ movlh m1, [srcq+pos0q*srcmul] ; src[filterPos[2] + {0,1,2,3}]
%if %1 > 8
- movhps m1, [r3+r6*srcmul] ; src[filterPos[3] + {0,1,2,3}]
+ movhps m1, [srcq+pos1q*srcmul] ; src[filterPos[3] + {0,1,2,3}]
%else ; %1 == 8
- movd m5, [r3+r6*srcmul] ; src[filterPos[3] + {0,1,2,3}]
+ movd m5, [srcq+pos1q*srcmul] ; src[filterPos[3] + {0,1,2,3}]
punpckldq m0, m4
punpckldq m1, m5
%endif ; %1 == 8
%endif ; mmsize == 8/16
%if %1 == 8
- punpcklbw m0, m3 ; byte -> word
- punpcklbw m1, m3 ; byte -> word
+ punpcklbw m0, m3 ; byte -> word
+ punpcklbw m1, m3 ; byte -> word
%endif ; %1 == 8
; multiply with filter coefficients
@@ -137,8 +141,8 @@ cglobal hscale%1to%2_%4, %5, 7, %6
psubw m0, m6
psubw m1, m6
%endif ; %1 == 16
- pmaddwd m0, [r4+r2*8+mmsize*0] ; *= filter[{0,1,..,6,7}]
- pmaddwd m1, [r4+r2*8+mmsize*1] ; *= filter[{8,9,..,14,15}]
+ pmaddwd m0, [filterq+wq*8+mmsize*0] ; *= filter[{0,1,..,6,7}]
+ pmaddwd m1, [filterq+wq*8+mmsize*1] ; *= filter[{8,9,..,14,15}]
; add up horizontally (4 srcpix * 4 coefficients -> 1 dstpix)
%if mmsize == 8 ; mmx
@@ -152,32 +156,32 @@ cglobal hscale%1to%2_%4, %5, 7, %6
shufps m4, m1, 11011101b
paddd m0, m4
%else ; ssse3/sse4
- phaddd m0, m1 ; filter[{ 0, 1, 2, 3}]*src[filterPos[0]+{0,1,2,3}],
- ; filter[{ 4, 5, 6, 7}]*src[filterPos[1]+{0,1,2,3}],
- ; filter[{ 8, 9,10,11}]*src[filterPos[2]+{0,1,2,3}],
- ; filter[{12,13,14,15}]*src[filterPos[3]+{0,1,2,3}]
+ phaddd m0, m1 ; filter[{ 0, 1, 2, 3}]*src[filterPos[0]+{0,1,2,3}],
+ ; filter[{ 4, 5, 6, 7}]*src[filterPos[1]+{0,1,2,3}],
+ ; filter[{ 8, 9,10,11}]*src[filterPos[2]+{0,1,2,3}],
+ ; filter[{12,13,14,15}]*src[filterPos[3]+{0,1,2,3}]
%endif ; mmx/sse2/ssse3/sse4
%else ; %3 == 8, i.e. filterSize == 8 scaling
; load 2x8 or 4x8 source pixels into m0, m1, m4 and m5
- mov32 r0, dword [r5+r2*2+0] ; filterPos[0]
- mov32 r6, dword [r5+r2*2+4] ; filterPos[1]
- movbh m0, [r3+ r0 *srcmul] ; src[filterPos[0] + {0,1,2,3,4,5,6,7}]
+ mov32 pos0q, dword [fltposq+wq*2+0] ; filterPos[0]
+ mov32 pos1q, dword [fltposq+wq*2+4] ; filterPos[1]
+ movbh m0, [srcq+ pos0q *srcmul] ; src[filterPos[0] + {0,1,2,3,4,5,6,7}]
%if mmsize == 8
- movbh m1, [r3+(r0+4)*srcmul] ; src[filterPos[0] + {4,5,6,7}]
- movbh m4, [r3+ r6 *srcmul] ; src[filterPos[1] + {0,1,2,3}]
- movbh m5, [r3+(r6+4)*srcmul] ; src[filterPos[1] + {4,5,6,7}]
+ movbh m1, [srcq+(pos0q+4)*srcmul] ; src[filterPos[0] + {4,5,6,7}]
+ movbh m4, [srcq+ pos1q *srcmul] ; src[filterPos[1] + {0,1,2,3}]
+ movbh m5, [srcq+(pos1q+4)*srcmul] ; src[filterPos[1] + {4,5,6,7}]
%else ; mmsize == 16
- movbh m1, [r3+ r6 *srcmul] ; src[filterPos[1] + {0,1,2,3,4,5,6,7}]
- mov32 r0, dword [r5+r2*2+8] ; filterPos[2]
- mov32 r6, dword [r5+r2*2+12] ; filterPos[3]
- movbh m4, [r3+ r0 *srcmul] ; src[filterPos[2] + {0,1,2,3,4,5,6,7}]
- movbh m5, [r3+ r6 *srcmul] ; src[filterPos[3] + {0,1,2,3,4,5,6,7}]
+ movbh m1, [srcq+ pos1q *srcmul] ; src[filterPos[1] + {0,1,2,3,4,5,6,7}]
+ mov32 pos0q, dword [fltposq+wq*2+8] ; filterPos[2]
+ mov32 pos1q, dword [fltposq+wq*2+12] ; filterPos[3]
+ movbh m4, [srcq+ pos0q *srcmul] ; src[filterPos[2] + {0,1,2,3,4,5,6,7}]
+ movbh m5, [srcq+ pos1q *srcmul] ; src[filterPos[3] + {0,1,2,3,4,5,6,7}]
%endif ; mmsize == 8/16
%if %1 == 8
- punpcklbw m0, m3 ; byte -> word
- punpcklbw m1, m3 ; byte -> word
- punpcklbw m4, m3 ; byte -> word
- punpcklbw m5, m3 ; byte -> word
+ punpcklbw m0, m3 ; byte -> word
+ punpcklbw m1, m3 ; byte -> word
+ punpcklbw m4, m3 ; byte -> word
+ punpcklbw m5, m3 ; byte -> word
%endif ; %1 == 8
; multiply
@@ -188,10 +192,10 @@ cglobal hscale%1to%2_%4, %5, 7, %6
psubw m4, m6
psubw m5, m6
%endif ; %1 == 16
- pmaddwd m0, [r4+r2*8+mmsize*0] ; *= filter[{0,1,..,6,7}]
- pmaddwd m1, [r4+r2*8+mmsize*1] ; *= filter[{8,9,..,14,15}]
- pmaddwd m4, [r4+r2*8+mmsize*2] ; *= filter[{16,17,..,22,23}]
- pmaddwd m5, [r4+r2*8+mmsize*3] ; *= filter[{24,25,..,30,31}]
+ pmaddwd m0, [filterq+wq*8+mmsize*0] ; *= filter[{0,1,..,6,7}]
+ pmaddwd m1, [filterq+wq*8+mmsize*1] ; *= filter[{8,9,..,14,15}]
+ pmaddwd m4, [filterq+wq*8+mmsize*2] ; *= filter[{16,17,..,22,23}]
+ pmaddwd m5, [filterq+wq*8+mmsize*3] ; *= filter[{24,25,..,30,31}]
; add up horizontally (8 srcpix * 8 coefficients -> 1 dstpix)
%if mmsize == 8
@@ -226,55 +230,55 @@ cglobal hscale%1to%2_%4, %5, 7, %6
; of 3 x phaddd here, faster on older cpus
phaddd m0, m1
phaddd m4, m5
- phaddd m0, m4 ; filter[{ 0, 1,..., 6, 7}]*src[filterPos[0]+{0,1,...,6,7}],
- ; filter[{ 8, 9,...,14,15}]*src[filterPos[1]+{0,1,...,6,7}],
- ; filter[{16,17,...,22,23}]*src[filterPos[2]+{0,1,...,6,7}],
- ; filter[{24,25,...,30,31}]*src[filterPos[3]+{0,1,...,6,7}]
+ phaddd m0, m4 ; filter[{ 0, 1,..., 6, 7}]*src[filterPos[0]+{0,1,...,6,7}],
+ ; filter[{ 8, 9,...,14,15}]*src[filterPos[1]+{0,1,...,6,7}],
+ ; filter[{16,17,...,22,23}]*src[filterPos[2]+{0,1,...,6,7}],
+ ; filter[{24,25,...,30,31}]*src[filterPos[3]+{0,1,...,6,7}]
%endif ; mmx/sse2/ssse3/sse4
%endif ; %3 == 4/8
%else ; %3 == X, i.e. any filterSize scaling
%ifidn %4, X4
-%define r6sub 4
+%define dlt 4
%else ; %4 == X || %4 == X8
-%define r6sub 0
+%define dlt 0
%endif ; %4 ==/!= X4
%if ARCH_X86_64
push r12
- movsxd r6, r6d ; filterSize
- lea r12, [r3+(r6-r6sub)*srcmul] ; &src[filterSize&~4]
-%define src_reg r11
-%define r1x r10
-%define filter2 r12
+%define srcq r11
+%define pos1q r10
+%define srcendq r12
+ movsxd fltsizeq, fltsized ; filterSize
+ lea srcendq, [srcmemq+(fltsizeq-dlt)*srcmul] ; &src[filterSize&~4]
%else ; x86-32
- lea r0, [r3+(r6-r6sub)*srcmul] ; &src[filterSize&~4]
- mov r6m, r0
-%define src_reg r3
-%define r1x r1
-%define filter2 r6m
+%define srcq srcmemq
+%define pos1q dstq
+%define srcendq r6m
+ lea pos0q, [srcmemq+(fltsizeq-dlt)*srcmul] ; &src[filterSize&~4]
+ mov srcendq, pos0q
%endif ; x86-32/64
- lea r5, [r5+r2*4]
+ lea fltposq, [fltposq+wq*4]
%if %2 == 15
- lea r1, [r1+r2*2]
+ lea dstq, [dstq+wq*2]
%else ; %2 == 19
- lea r1, [r1+r2*4]
+ lea dstq, [dstq+wq*4]
%endif ; %2 == 15/19
- movifnidn r1mp, r1
- neg r2
+ movifnidn dstmp, dstq
+ neg wq
.loop:
- mov32 r0, dword [r5+r2*4+0] ; filterPos[0]
- mov32 r1x, dword [r5+r2*4+4] ; filterPos[1]
+ mov32 pos0q, dword [fltposq+wq*4+0] ; filterPos[0]
+ mov32 pos1q, dword [fltposq+wq*4+4] ; filterPos[1]
; FIXME maybe do 4px/iteration on x86-64 (x86-32 wouldn't have enough regs)?
pxor m4, m4
pxor m5, m5
- mov src_reg, r3mp
+ mov srcq, srcmemmp
.innerloop:
; load 2x4 (mmx) or 2x8 (sse) source pixels into m0/m1 -> m4/m5
- movbh m0, [src_reg+r0 *srcmul] ; src[filterPos[0] + {0,1,2,3(,4,5,6,7)}]
- movbh m1, [src_reg+(r1x+r6sub)*srcmul] ; src[filterPos[1] + {0,1,2,3(,4,5,6,7)}]
+ movbh m0, [srcq+ pos0q *srcmul] ; src[filterPos[0] + {0,1,2,3(,4,5,6,7)}]
+ movbh m1, [srcq+(pos1q+dlt)*srcmul] ; src[filterPos[1] + {0,1,2,3(,4,5,6,7)}]
%if %1 == 8
punpcklbw m0, m3
punpcklbw m1, m3
@@ -286,23 +290,23 @@ cglobal hscale%1to%2_%4, %5, 7, %6
psubw m0, m6
psubw m1, m6
%endif ; %1 == 16
- pmaddwd m0, [r4 ] ; filter[{0,1,2,3(,4,5,6,7)}]
- pmaddwd m1, [r4+(r6+r6sub)*2] ; filter[filtersize+{0,1,2,3(,4,5,6,7)}]
+ pmaddwd m0, [filterq] ; filter[{0,1,2,3(,4,5,6,7)}]
+ pmaddwd m1, [filterq+(fltsizeq+dlt)*2]; filter[filtersize+{0,1,2,3(,4,5,6,7)}]
paddd m4, m0
paddd m5, m1
- add r4, mmsize
- add src_reg, srcmul*mmsize/2
- cmp src_reg, filter2 ; while (src += 4) < &src[filterSize]
+ add filterq, mmsize
+ add srcq, srcmul*mmsize/2
+ cmp srcq, srcendq ; while (src += 4) < &src[filterSize]
jl .innerloop
%ifidn %4, X4
- mov32 r1x, dword [r5+r2*4+4] ; filterPos[1]
- movlh m0, [src_reg+r0 *srcmul] ; split last 4 srcpx of dstpx[0]
- sub r1x, r6 ; and first 4 srcpx of dstpx[1]
+ mov32 pos1q, dword [fltposq+wq*4+4] ; filterPos[1]
+ movlh m0, [srcq+ pos0q *srcmul] ; split last 4 srcpx of dstpx[0]
+ sub pos1q, fltsizeq ; and first 4 srcpx of dstpx[1]
%if %1 > 8
- movhps m0, [src_reg+(r1x+r6sub)*srcmul]
+ movhps m0, [srcq+(pos1q+dlt)*srcmul]
%else ; %1 == 8
- movd m1, [src_reg+(r1x+r6sub)*srcmul]
+ movd m1, [srcq+(pos1q+dlt)*srcmul]
punpckldq m0, m1
%endif ; %1 == 8
%if %1 == 8
@@ -312,10 +316,10 @@ cglobal hscale%1to%2_%4, %5, 7, %6
; add back 0x8000 * sum(coeffs) after the horizontal add
psubw m0, m6
%endif ; %1 == 16
- pmaddwd m0, [r4]
+ pmaddwd m0, [filterq]
%endif ; %4 == X4
- lea r4, [r4+(r6+r6sub)*2]
+ lea filterq, [filterq+(fltsizeq+dlt)*2]
%if mmsize == 8 ; mmx
movq m0, m4
@@ -352,14 +356,14 @@ cglobal hscale%1to%2_%4, %5, 7, %6
; clip, store
psrad m0, 14 + %1 - %2
%ifidn %3, X
- movifnidn r1, r1mp
+ movifnidn dstq, dstmp
%endif ; %3 == X
%if %2 == 15
packssdw m0, m0
%ifnidn %3, X
- movh [r1+r2*(2>>r2shr)], m0
+ movh [dstq+wq*(2>>wshr)], m0
%else ; %3 == X
- movd [r1+r2*2], m0
+ movd [dstq+wq*2], m0
%endif ; %3 ==/!= X
%else ; %2 == 19
%if mmsize == 8
@@ -372,16 +376,16 @@ cglobal hscale%1to%2_%4, %5, 7, %6
cvtps2dq m0, m0
%endif ; mmx/sse2/ssse3/sse4
%ifnidn %3, X
- mova [r1+r2*(4>>r2shr)], m0
+ mova [dstq+wq*(4>>wshr)], m0
%else ; %3 == X
- movq [r1+r2*4], m0
+ movq [dstq+wq*4], m0
%endif ; %3 ==/!= X
%endif ; %2 == 15/19
%ifnidn %3, X
- add r2, (mmsize<<r2shr)/4 ; both 8tap and 4tap really only do 4 pixels (or for mmx: 2 pixels)
- ; per iteration. see "shl r2,1" above as for why we do this
+ add wq, (mmsize<<wshr)/4 ; both 8tap and 4tap really only do 4 pixels (or for mmx: 2 pixels)
+ ; per iteration. see "shl wq,1" above as for why we do this
%else ; %3 == X
- add r2, 2
+ add wq, 2
%endif ; %3 ==/!= X
jl .loop
%ifnidn %3, X