summary refs log tree commit diff
path: root/libswscale/x86
diff options
context:
space:
mode:
authorRonald S. Bultje <rsbultje@gmail.com>2012-01-23 17:45:58 +0800
committerRonald S. Bultje <rsbultje@gmail.com>2012-01-27 10:19:57 +0800
commit3b15a6d742edd368696a1feb6fa99892768e8a73 (patch)
tree0ba73cc23175f3fb0e99cb842b8c2119c9cdf352 /libswscale/x86
parent08628b6afbc9b708b46f871f25a7a6be76ba4337 (diff)
config.asm: change %ifdef directives to %if directives.
This allows combining multiple conditionals in a single statement.
Diffstat (limited to 'libswscale/x86')
-rw-r--r--libswscale/x86/input.asm8
-rw-r--r--libswscale/x86/output.asm16
-rw-r--r--libswscale/x86/scale.asm8
3 files changed, 16 insertions, 16 deletions
diff --git a/libswscale/x86/input.asm b/libswscale/x86/input.asm
index a23cf05301..e6dde7c875 100644
--- a/libswscale/x86/input.asm
+++ b/libswscale/x86/input.asm
@@ -64,7 +64,7 @@ SECTION .text
; split the loop in an aligned and unaligned case
%macro YUYV_TO_Y_FN 2-3
cglobal %2ToY, 3, 3, %1, dst, src, w
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
movsxd wq, wd
%endif
add dstq, wq
@@ -134,7 +134,7 @@ cglobal %2ToY, 3, 3, %1, dst, src, w
; split the loop in an aligned and unaligned case
%macro YUYV_TO_UV_FN 2-3
cglobal %2ToUV, 3, 4, %1, dstU, dstV, src, w
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
movsxd wq, dword r4m
%else ; x86-32
mov wq, r4m
@@ -189,7 +189,7 @@ cglobal %2ToUV, 3, 4, %1, dstU, dstV, src, w
; %2 = nv12 or nv21
%macro NVXX_TO_UV_FN 2
cglobal %2ToUV, 3, 4, %1, dstU, dstV, src, w
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
movsxd wq, dword r4m
%else ; x86-32
mov wq, r4m
@@ -215,7 +215,7 @@ cglobal %2ToUV, 3, 4, %1, dstU, dstV, src, w
%endif ; mmsize == 8/16
%endmacro
-%ifdef ARCH_X86_32
+%if ARCH_X86_32
INIT_MMX mmx
YUYV_TO_Y_FN 0, yuyv
YUYV_TO_Y_FN 0, uyvy
diff --git a/libswscale/x86/output.asm b/libswscale/x86/output.asm
index ae2929c84e..a288f08867 100644
--- a/libswscale/x86/output.asm
+++ b/libswscale/x86/output.asm
@@ -58,7 +58,7 @@ SECTION .text
%macro yuv2planeX_fn 3
-%ifdef ARCH_X86_32
+%if ARCH_X86_32
%define cntr_reg r1
%define movsx mov
%else
@@ -72,7 +72,7 @@ cglobal yuv2planeX_%1, %3, 7, %2
%endif ; %1 == 8/9/10
%if %1 == 8
-%ifdef ARCH_X86_32
+%if ARCH_X86_32
%assign pad 0x2c - (stack_offset & 15)
SUB rsp, pad
%define m_dith m7
@@ -91,7 +91,7 @@ cglobal yuv2planeX_%1, %3, 7, %2
.no_rot:
%if mmsize == 16
punpcklbw m_dith, m6
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
punpcklwd m8, m_dith, m6
pslld m8, 12
%else ; x86-32
@@ -100,7 +100,7 @@ cglobal yuv2planeX_%1, %3, 7, %2
%endif ; x86-32/64
punpckhwd m_dith, m6
pslld m_dith, 12
-%ifdef ARCH_X86_32
+%if ARCH_X86_32
mova [rsp+ 0], m5
mova [rsp+16], m_dith
%endif
@@ -135,7 +135,7 @@ cglobal yuv2planeX_%1, %3, 7, %2
%endif ; %1 == 8
%if %1 == 8
-%ifdef ARCH_X86_32
+%if ARCH_X86_32
mova m2, [rsp+mmsize*(0+%%i)]
mova m1, [rsp+mmsize*(1+%%i)]
%else ; x86-64
@@ -233,7 +233,7 @@ cglobal yuv2planeX_%1, %3, 7, %2
jg .pixelloop
%if %1 == 8
-%ifdef ARCH_X86_32
+%if ARCH_X86_32
ADD rsp, pad
RET
%else ; x86-64
@@ -245,7 +245,7 @@ cglobal yuv2planeX_%1, %3, 7, %2
%endmacro
%define PALIGNR PALIGNR_MMX
-%ifdef ARCH_X86_32
+%if ARCH_X86_32
INIT_MMX mmx2
yuv2planeX_fn 8, 0, 7
yuv2planeX_fn 9, 0, 5
@@ -382,7 +382,7 @@ cglobal yuv2plane1_%1, %3, %3, %2
REP_RET
%endmacro
-%ifdef ARCH_X86_32
+%if ARCH_X86_32
INIT_MMX mmx
yuv2plane1_fn 8, 0, 5
yuv2plane1_fn 16, 0, 3
diff --git a/libswscale/x86/scale.asm b/libswscale/x86/scale.asm
index d35589419c..0d367f7a14 100644
--- a/libswscale/x86/scale.asm
+++ b/libswscale/x86/scale.asm
@@ -51,7 +51,7 @@ SECTION .text
; SCALE_FUNC source_width, intermediate_nbits, filtersize, filtersuffix, opt, n_args, n_xmm
%macro SCALE_FUNC 7
cglobal hscale%1to%2_%4_%5, %6, 7, %7
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
movsxd r2, r2d
%endif ; x86-64
%if %2 == 19
@@ -237,7 +237,7 @@ cglobal hscale%1to%2_%4_%5, %6, 7, %7
%else ; %4 == X || %4 == X8
%define r6sub 0
%endif ; %4 ==/!= X4
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
push r12
movsxd r6, r6d ; filterSize
lea r12, [r3+(r6-r6sub)*srcmul] ; &src[filterSize&~4]
@@ -384,7 +384,7 @@ cglobal hscale%1to%2_%4_%5, %6, 7, %7
%ifnidn %3, X
REP_RET
%else ; %3 == X
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
pop r12
RET
%else ; x86-32
@@ -419,7 +419,7 @@ SCALE_FUNCS 10, 19, %1, %3
SCALE_FUNCS 16, 19, %1, %4
%endmacro
-%ifdef ARCH_X86_32
+%if ARCH_X86_32
INIT_MMX
SCALE_FUNCS2 mmx, 0, 0, 0
%endif