author    James Almer <jamrial@gmail.com>           2014-03-17 17:07:34 -0300
committer Michael Niedermayer <michaelni@gmx.at>    2014-03-18 15:00:50 +0100
commit    7c8bf09eddf4fe738eb05bec68479024a66bbde9 (patch)
tree      593c8e43f2d4045af13976ec7bd206a69a9f4195 /libswresample
parent    c56d25c4764fee4b7b0e94212ff279f34c8ba0c8 (diff)
swresample: change COMMON_CORE_INT16 asm from SSSE3 to SSE2
pshufd+paddd is slightly faster than phaddd. The real gain is on pre-SSSE3
processors like the AMD K8 and K10, which get a big performance boost over
the mmxext version.

Signed-off-by: James Almer <jamrial@gmail.com>
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
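The change boils down to replacing an SSSE3 horizontal add with an SSE2
shuffle-and-add reduction. A minimal standalone sketch of the sequence the
patch adopts, written with intrinsics (my illustration, not part of the
patch; the helper name is hypothetical):

    /* Horizontal sum of four 32-bit ints using only SSE2, mirroring the
     * pshufd/paddd pairs the patch introduces. Build with e.g. gcc -msse2. */
    #include <emmintrin.h>
    #include <stdio.h>

    static int hsum_epi32_sse2(__m128i v)
    {
        __m128i t = _mm_shuffle_epi32(v, 0x0E); /* pshufd $0x0E: high qword down */
        v = _mm_add_epi32(v, t);                /* paddd: v0+v2, v1+v3 */
        t = _mm_shuffle_epi32(v, 0x01);         /* pshufd $0x01: lane 1 down */
        v = _mm_add_epi32(v, t);                /* paddd: total in lane 0 */
        return _mm_cvtsi128_si32(v);            /* movd */
    }

    int main(void)
    {
        __m128i v = _mm_set_epi32(4, 3, 2, 1);
        printf("%d\n", hsum_epi32_sse2(v));     /* prints 10 */
        return 0;
    }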
Diffstat (limited to 'libswresample')
-rw-r--r--  libswresample/resample.c          | 10 +++++-----
-rw-r--r--  libswresample/resample_template.c |  8 ++++----
-rw-r--r--  libswresample/x86/resample_mmx.h  | 10 ++++++----
3 files changed, 15 insertions(+), 13 deletions(-)
diff --git a/libswresample/resample.c b/libswresample/resample.c
index 8b1b6ca9af..034b47ae59 100644
--- a/libswresample/resample.c
+++ b/libswresample/resample.c
@@ -302,10 +302,10 @@ static int set_compensation(ResampleContext *c, int sample_delta, int compensati
#include "resample_template.c"
#undef TEMPLATE_RESAMPLE_S16_MMX2
-#if HAVE_SSSE3_INLINE
-#define TEMPLATE_RESAMPLE_S16_SSSE3
+#if HAVE_SSE2_INLINE
+#define TEMPLATE_RESAMPLE_S16_SSE2
#include "resample_template.c"
-#undef TEMPLATE_RESAMPLE_S16_SSSE3
+#undef TEMPLATE_RESAMPLE_S16_SSE2
#endif
#endif // HAVE_MMXEXT_INLINE
@@ -317,8 +317,8 @@ static int multiple_resample(ResampleContext *c, AudioData *dst, int dst_size, A
for(i=0; i<dst->ch_count; i++){
#if HAVE_MMXEXT_INLINE
-#if HAVE_SSSE3_INLINE
- if(c->format == AV_SAMPLE_FMT_S16P && (mm_flags&AV_CPU_FLAG_SSSE3)) ret= swri_resample_int16_ssse3(c, (int16_t*)dst->ch[i], (const int16_t*)src->ch[i], consumed, src_size, dst_size, i+1==dst->ch_count);
+#if HAVE_SSE2_INLINE
+ if(c->format == AV_SAMPLE_FMT_S16P && (mm_flags&AV_CPU_FLAG_SSE2)) ret= swri_resample_int16_sse2 (c, (int16_t*)dst->ch[i], (const int16_t*)src->ch[i], consumed, src_size, dst_size, i+1==dst->ch_count);
else
#endif
if(c->format == AV_SAMPLE_FMT_S16P && (mm_flags&AV_CPU_FLAG_MMX2 )){
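The hunk above shows the runtime-dispatch idiom: the SSE2 core is used only
when it was both compiled in (HAVE_SSE2_INLINE) and the host CPU reports the
flag. A minimal sketch of the same pattern using libavutil's CPU detection
(the helper name and the plain-C fallback name are my assumptions, not taken
from the patch):

    /* Pick the best compiled-in int16 resampling core at runtime. */
    #include <stdint.h>
    #include "libavutil/cpu.h"

    typedef int (*resample_core)(struct ResampleContext *c, int16_t *dst,
                                 const int16_t *src, int *consumed,
                                 int src_size, int dst_size, int update_ctx);

    static resample_core pick_int16_core(void)
    {
        int mm_flags = av_get_cpu_flags();   /* runtime CPU feature bits */
    #if HAVE_SSE2_INLINE
        if (mm_flags & AV_CPU_FLAG_SSE2)     /* compiled in AND supported */
            return swri_resample_int16_sse2;
    #endif
    #if HAVE_MMXEXT_INLINE
        if (mm_flags & AV_CPU_FLAG_MMX2)
            return swri_resample_int16_mmx2;
    #endif
        return swri_resample_int16;          /* plain C template instance */
    }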
diff --git a/libswresample/resample_template.c b/libswresample/resample_template.c
index 5bc12bcb71..bdb038a56f 100644
--- a/libswresample/resample_template.c
+++ b/libswresample/resample_template.c
@@ -57,7 +57,7 @@
#elif defined(TEMPLATE_RESAMPLE_S16) \
|| defined(TEMPLATE_RESAMPLE_S16_MMX2) \
- || defined(TEMPLATE_RESAMPLE_S16_SSSE3)
+ || defined(TEMPLATE_RESAMPLE_S16_SSE2)
# define FILTER_SHIFT 15
# define DELEM int16_t
@@ -74,9 +74,9 @@
# elif defined(TEMPLATE_RESAMPLE_S16_MMX2)
# define COMMON_CORE COMMON_CORE_INT16_MMX2
# define RENAME(N) N ## _int16_mmx2
-# elif defined(TEMPLATE_RESAMPLE_S16_SSSE3)
-# define COMMON_CORE COMMON_CORE_INT16_SSSE3
-# define RENAME(N) N ## _int16_ssse3
+# elif defined(TEMPLATE_RESAMPLE_S16_SSE2)
+# define COMMON_CORE COMMON_CORE_INT16_SSE2
+# define RENAME(N) N ## _int16_sse2
# endif
#endif
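For readers unfamiliar with the idiom: resample_template.c is #included once
per instruction set, and the RENAME/COMMON_CORE macros pick the emitted
function name and the inner-loop implementation. A toy standalone version of
the same trick (all names here hypothetical):

    /* sum_template.c -- included, never compiled on its own */
    DELEM RENAME(sum)(const DELEM *v, int n)
    {
        DELEM acc = 0;
        for (int i = 0; i < n; i++)
            acc += v[i];
        return acc;
    }

    /* user.c -- each define/include pair emits another copy */
    #include <stdint.h>
    #define DELEM int16_t
    #define RENAME(N) N ## _int16
    #include "sum_template.c"       /* emits sum_int16() */
    #undef RENAME
    #undef DELEM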
diff --git a/libswresample/x86/resample_mmx.h b/libswresample/x86/resample_mmx.h
index d96fd5a9d2..fab52f704a 100644
--- a/libswresample/x86/resample_mmx.h
+++ b/libswresample/x86/resample_mmx.h
@@ -23,7 +23,7 @@
#include "libswresample/swresample_internal.h"
int swri_resample_int16_mmx2 (struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
-int swri_resample_int16_ssse3(struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
+int swri_resample_int16_sse2 (struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
DECLARE_ALIGNED(16, const uint64_t, ff_resample_int16_rounder)[2] = { 0x0000000000004000ULL, 0x0000000000000000ULL};
@@ -48,7 +48,7 @@ __asm__ volatile(\
"r" (dst+dst_index)\
);
-#define COMMON_CORE_INT16_SSSE3 \
+#define COMMON_CORE_INT16_SSE2 \
x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
"movdqa "MANGLE(ff_resample_int16_rounder)", %%xmm0 \n\t"\
@@ -58,8 +58,10 @@ __asm__ volatile(\
"paddd %%xmm1, %%xmm0 \n\t"\
"add $16, %0 \n\t"\
" js 1b \n\t"\
- "phaddd %%xmm0, %%xmm0 \n\t"\
- "phaddd %%xmm0, %%xmm0 \n\t"\
+ "pshufd $0x0E, %%xmm0, %%xmm1 \n\t"\
+ "paddd %%xmm1, %%xmm0 \n\t"\
+ "pshufd $0x01, %%xmm0, %%xmm1 \n\t"\
+ "paddd %%xmm1, %%xmm0 \n\t"\
"psrad $15, %%xmm0 \n\t"\
"packssdw %%xmm0, %%xmm0 \n\t"\
"movd %%xmm0, (%3) \n\t"\