author    Andreas Rheinhardt <andreas.rheinhardt@outlook.com>  2022-06-10 21:00:55 +0200
committer Andreas Rheinhardt <andreas.rheinhardt@outlook.com>  2022-06-22 13:34:32 +0200
commit    4038b5b209cd8a0a3cb559d1073e9b22196a8ace (patch)
tree      66f2865debfb7a933f2c233b5dbd6589e7dc452a
parent    9426a2f8ff4607b7293e6140e56b8cc44e629dbd (diff)
avcodec/x86/fmtconvert: Remove obsolete SSE functions
x64 always has MMX, MMXEXT, SSE and SSE2, which means that some functions for MMX, MMXEXT and 3dnow are always overridden by other functions on x64 (unless one explicitly disables SSE2, for example). Given that the only systems that still benefit from these functions are truly ancient 32-bit x86s, they are removed.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
-rw-r--r--  libavcodec/x86/fmtconvert.asm      | 36
-rw-r--r--  libavcodec/x86/fmtconvert_init.c   |  7
2 files changed, 0 insertions, 43 deletions
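To illustrate why the SSE versions were dead code on x86-64: FFmpeg's per-arch init functions assign function pointers in order of increasing CPU capability, so a later, more capable match overwrites an earlier one. The sketch below is a minimal, hypothetical rendering of that pattern; the names are placeholders, not the real ff_ symbols. Since every x86-64 CPU reports SSE2, the SSE assignment could never be observed there.

    /* Minimal sketch of the override pattern in ff_fmt_convert_init_x86():
     * assignments run from least to most capable, so when both flags are set
     * the SSE2 pointer replaces the SSE one. On x86-64, SSE2 is always set. */
    #include <stddef.h>
    #include <stdint.h>

    typedef void (*fmul_scalar_fn)(float *dst, const int32_t *src,
                                   float mul, int len);

    /* Placeholder bodies standing in for the real assembly implementations. */
    static void fmul_scalar_sse (float *dst, const int32_t *src, float mul, int len) {}
    static void fmul_scalar_sse2(float *dst, const int32_t *src, float mul, int len) {}

    static fmul_scalar_fn select_fmul_scalar(int have_sse, int have_sse2)
    {
        fmul_scalar_fn fn = NULL;      /* the C fallback would go here */
        if (have_sse)
            fn = fmul_scalar_sse;      /* dead on x86-64: always shadowed below */
        if (have_sse2)
            fn = fmul_scalar_sse2;
        return fn;
    }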
diff --git a/libavcodec/x86/fmtconvert.asm b/libavcodec/x86/fmtconvert.asm
index 8f62a0a093..e70df4662d 100644
--- a/libavcodec/x86/fmtconvert.asm
+++ b/libavcodec/x86/fmtconvert.asm
@@ -44,35 +44,17 @@ cglobal int32_to_float_fmul_scalar, 4, 4, %1, dst, src, mul, len
add dstq, lenq
neg lenq
.loop:
-%if cpuflag(sse2)
cvtdq2ps m1, [srcq+lenq ]
cvtdq2ps m2, [srcq+lenq+16]
-%else
- cvtpi2ps m1, [srcq+lenq ]
- cvtpi2ps m3, [srcq+lenq+ 8]
- cvtpi2ps m2, [srcq+lenq+16]
- cvtpi2ps m4, [srcq+lenq+24]
- movlhps m1, m3
- movlhps m2, m4
-%endif
mulps m1, m0
mulps m2, m0
mova [dstq+lenq ], m1
mova [dstq+lenq+16], m2
add lenq, 32
jl .loop
-%if notcpuflag(sse2)
- ;; cvtpi2ps switches to MMX even if the source is a memory location
- ;; possible an error in documentation since every tested CPU disagrees with
- ;; that. Use emms anyway since the vast majority of machines will use the
- ;; SSE2 variant
- emms
-%endif
RET
%endmacro
-INIT_XMM sse
-INT32_TO_FLOAT_FMUL_SCALAR 5
INIT_XMM sse2
INT32_TO_FLOAT_FMUL_SCALAR 3
@@ -89,17 +71,8 @@ cglobal int32_to_float_fmul_array8, 5, 5, 5, c, dst, src, mul, len
.loop:
movss m0, [mulq]
SPLATD m0
-%if cpuflag(sse2)
cvtdq2ps m1, [srcq+lenq ]
cvtdq2ps m2, [srcq+lenq+16]
-%else
- cvtpi2ps m1, [srcq+lenq ]
- cvtpi2ps m3, [srcq+lenq+ 8]
- cvtpi2ps m2, [srcq+lenq+16]
- cvtpi2ps m4, [srcq+lenq+24]
- movlhps m1, m3
- movlhps m2, m4
-%endif
mulps m1, m0
mulps m2, m0
mova [dstq+lenq ], m1
@@ -107,18 +80,9 @@ cglobal int32_to_float_fmul_array8, 5, 5, 5, c, dst, src, mul, len
add mulq, 4
add lenq, 32
jl .loop
-%if notcpuflag(sse2)
- ;; cvtpi2ps switches to MMX even if the source is a memory location
- ;; possible an error in documentation since every tested CPU disagrees with
- ;; that. Use emms anyway since the vast majority of machines will use the
- ;; SSE2 variant
- emms
-%endif
RET
%endmacro
-INIT_XMM sse
-INT32_TO_FLOAT_FMUL_ARRAY8
INIT_XMM sse2
INT32_TO_FLOAT_FMUL_ARRAY8
diff --git a/libavcodec/x86/fmtconvert_init.c b/libavcodec/x86/fmtconvert_init.c
index df097054e4..58b396856e 100644
--- a/libavcodec/x86/fmtconvert_init.c
+++ b/libavcodec/x86/fmtconvert_init.c
@@ -29,10 +29,7 @@
#if HAVE_X86ASM
-void ff_int32_to_float_fmul_scalar_sse (float *dst, const int32_t *src, float mul, int len);
void ff_int32_to_float_fmul_scalar_sse2(float *dst, const int32_t *src, float mul, int len);
-void ff_int32_to_float_fmul_array8_sse (FmtConvertContext *c, float *dst, const int32_t *src,
- const float *mul, int len);
void ff_int32_to_float_fmul_array8_sse2(FmtConvertContext *c, float *dst, const int32_t *src,
const float *mul, int len);
@@ -43,10 +40,6 @@ av_cold void ff_fmt_convert_init_x86(FmtConvertContext *c, AVCodecContext *avctx
#if HAVE_X86ASM
int cpu_flags = av_get_cpu_flags();
- if (EXTERNAL_SSE(cpu_flags)) {
- c->int32_to_float_fmul_scalar = ff_int32_to_float_fmul_scalar_sse;
- c->int32_to_float_fmul_array8 = ff_int32_to_float_fmul_array8_sse;
- }
if (EXTERNAL_SSE2(cpu_flags)) {
c->int32_to_float_fmul_scalar = ff_int32_to_float_fmul_scalar_sse2;
c->int32_to_float_fmul_array8 = ff_int32_to_float_fmul_array8_sse2;
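For reference, a plain-C sketch of what the two kernels compute (the real scalar fallbacks live in libavcodec/fmtconvert.c; treat the exact signatures here as an approximation, e.g. the real array8 variant also takes a FmtConvertContext pointer): int32_to_float_fmul_scalar multiplies every int32 sample by one scalar, while int32_to_float_fmul_array8 advances to a new scalar every 8 samples, matching the "add mulq, 4" per 32-byte iteration in the assembly above.

    /* Approximate C equivalents of the assembly kernels (sketch only). */
    #include <stdint.h>

    static void int32_to_float_fmul_scalar_c(float *dst, const int32_t *src,
                                             float mul, int len)
    {
        for (int i = 0; i < len; i++)
            dst[i] = src[i] * mul;     /* convert and scale each sample */
    }

    static void int32_to_float_fmul_array8_c(float *dst, const int32_t *src,
                                             const float *mul, int len)
    {
        /* One scalar per block of 8 samples, mirroring "add mulq, 4" per
         * 32-byte (8 x int32) loop iteration in the assembly. */
        for (int i = 0; i < len; i += 8)
            int32_to_float_fmul_scalar_c(dst + i, src + i, *mul++, 8);
    }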