author    James Almer <jamrial@gmail.com>        2014-03-23 19:05:16 -0300
committer Michael Niedermayer <michaelni@gmx.at> 2014-03-24 02:33:16 +0100
commit    fa25c4c400649bcc7693107d2d4b9d1fa137173e
tree      548982ee6e1c438be8f89881e6ccca6f704d36c8 /libswresample/x86
parent    ffd77f94a26be22b8ead3178ceec3ed39e68abc5
swresample/resample: mmx2/sse2 int16 linear interpolation
About three times faster.

Signed-off-by: James Almer <jamrial@gmail.com>
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
Diffstat (limited to 'libswresample/x86')
-rw-r--r-- libswresample/x86/resample_mmx.h | 61
1 file changed, 61 insertions(+), 0 deletions(-)
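For context before the patch itself, here is a minimal scalar sketch of what the new LINEAR_CORE_INT16 macros compute (an intrinsics mirror of the SSE2 core follows after the diff). The function name linear_core_int16_c and its flattened parameters are hypothetical, but the shape follows the patch: one pass over the taps accumulates two int16 dot products, one against the current filter phase (filter) and one against the next phase (filter + c->filter_alloc), so the caller can blend the two results for linear interpolation.

    #include <stdint.h>

    /* Hypothetical scalar reference for the SIMD macros in this patch:
     * a single pass over the taps accumulates two dot products,
     * one per adjacent filter phase. */
    static void linear_core_int16_c(const int16_t *src, const int16_t *filter,
                                    int filter_length, int filter_alloc,
                                    int *val, int *v2)
    {
        int accum1 = 0, accum2 = 0;
        for (int i = 0; i < filter_length; i++) {
            accum1 += src[i] * (int)filter[i];                /* current phase */
            accum2 += src[i] * (int)filter[i + filter_alloc]; /* next phase    */
        }
        *val = accum1;
        *v2  = accum2;
    }

The caller is then expected to blend the two sums, e.g. something of the form val + (v2 - val) * frac / src_incr in fixed point; that blend lives in the shared resample template rather than in this patch, so its exact spelling here is an assumption.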
diff --git a/libswresample/x86/resample_mmx.h b/libswresample/x86/resample_mmx.h
index ba36de9e5c..28a317ce78 100644
--- a/libswresample/x86/resample_mmx.h
+++ b/libswresample/x86/resample_mmx.h
@@ -50,6 +50,34 @@ __asm__ volatile(\
NAMED_CONSTRAINTS_ADD(ff_resample_int16_rounder)\
);
+#define LINEAR_CORE_INT16_MMX2 \
+ x86_reg len= -2*c->filter_length;\
+__asm__ volatile(\
+ "pxor %%mm0, %%mm0 \n\t"\
+ "pxor %%mm2, %%mm2 \n\t"\
+ "1: \n\t"\
+ "movq (%3, %0), %%mm1 \n\t"\
+ "movq %%mm1, %%mm3 \n\t"\
+ "pmaddwd (%4, %0), %%mm1 \n\t"\
+ "pmaddwd (%5, %0), %%mm3 \n\t"\
+ "paddd %%mm1, %%mm0 \n\t"\
+ "paddd %%mm3, %%mm2 \n\t"\
+ "add $8, %0 \n\t"\
+ " js 1b \n\t"\
+ "pshufw $0x0E, %%mm0, %%mm1 \n\t"\
+ "pshufw $0x0E, %%mm2, %%mm3 \n\t"\
+ "paddd %%mm1, %%mm0 \n\t"\
+ "paddd %%mm3, %%mm2 \n\t"\
+ "movd %%mm0, %1 \n\t"\
+ "movd %%mm2, %2 \n\t"\
+ : "+r" (len),\
+ "=r" (val),\
+ "=r" (v2)\
+ : "r" (((uint8_t*)(src+sample_index))-len),\
+ "r" (((uint8_t*)filter)-len),\
+ "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
+);
+
#define COMMON_CORE_INT16_SSE2 \
x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
@@ -74,6 +102,39 @@ __asm__ volatile(\
NAMED_CONSTRAINTS_ADD(ff_resample_int16_rounder)\
);
+#define LINEAR_CORE_INT16_SSE2 \
+ x86_reg len= -2*c->filter_length;\
+__asm__ volatile(\
+ "pxor %%xmm0, %%xmm0 \n\t"\
+ "pxor %%xmm2, %%xmm2 \n\t"\
+ "1: \n\t"\
+ "movdqu (%3, %0), %%xmm1 \n\t"\
+ "movdqa %%xmm1, %%xmm3 \n\t"\
+ "pmaddwd (%4, %0), %%xmm1 \n\t"\
+ "pmaddwd (%5, %0), %%xmm3 \n\t"\
+ "paddd %%xmm1, %%xmm0 \n\t"\
+ "paddd %%xmm3, %%xmm2 \n\t"\
+ "add $16, %0 \n\t"\
+ " js 1b \n\t"\
+ "pshufd $0x0E, %%xmm0, %%xmm1 \n\t"\
+ "pshufd $0x0E, %%xmm2, %%xmm3 \n\t"\
+ "paddd %%xmm1, %%xmm0 \n\t"\
+ "paddd %%xmm3, %%xmm2 \n\t"\
+ "pshufd $0x01, %%xmm0, %%xmm1 \n\t"\
+ "pshufd $0x01, %%xmm2, %%xmm3 \n\t"\
+ "paddd %%xmm1, %%xmm0 \n\t"\
+ "paddd %%xmm3, %%xmm2 \n\t"\
+ "movd %%xmm0, %1 \n\t"\
+ "movd %%xmm2, %2 \n\t"\
+ : "+r" (len),\
+ "=r" (val),\
+ "=r" (v2)\
+ : "r" (((uint8_t*)(src+sample_index))-len),\
+ "r" (((uint8_t*)filter)-len),\
+ "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
+ XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
+);
+
#define COMMON_CORE_FLT_SSE \
x86_reg len= -4*c->filter_length;\
__asm__ volatile(\
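For readers more at home with intrinsics than inline asm, here is a hypothetical mirror of LINEAR_CORE_INT16_SSE2 above (the function name and the assumption that c->filter_length is a multiple of 8 are mine; the movdqu in the asm suggests unaligned source loads are expected). pmaddwd multiplies int16 pairs and adds adjacent products into int32 lanes; the two trailing pshufd/paddd pairs in the asm are the horizontal sum mirrored at the end of this sketch.

    #include <stdint.h>
    #include <emmintrin.h>

    /* Hypothetical intrinsics mirror of LINEAR_CORE_INT16_SSE2: assumes
     * filter_length is a multiple of 8 (one xmm register of int16 per step). */
    static void linear_core_int16_sse2(const int16_t *src, const int16_t *filter,
                                       int filter_length, int filter_alloc,
                                       int *val, int *v2)
    {
        __m128i sum1 = _mm_setzero_si128();  /* pxor %%xmm0, %%xmm0 */
        __m128i sum2 = _mm_setzero_si128();  /* pxor %%xmm2, %%xmm2 */
        for (int i = 0; i < filter_length; i += 8) {
            __m128i s  = _mm_loadu_si128((const __m128i *)(src + i));    /* movdqu */
            __m128i f1 = _mm_loadu_si128((const __m128i *)(filter + i));
            __m128i f2 = _mm_loadu_si128((const __m128i *)(filter + filter_alloc + i));
            sum1 = _mm_add_epi32(sum1, _mm_madd_epi16(s, f1)); /* pmaddwd + paddd */
            sum2 = _mm_add_epi32(sum2, _mm_madd_epi16(s, f2));
        }
        /* Horizontal sum of the four int32 lanes, matching the
         * pshufd $0x0E / pshufd $0x01 + paddd sequence in the asm. */
        sum1 = _mm_add_epi32(sum1, _mm_shuffle_epi32(sum1, 0x0E));
        sum1 = _mm_add_epi32(sum1, _mm_shuffle_epi32(sum1, 0x01));
        sum2 = _mm_add_epi32(sum2, _mm_shuffle_epi32(sum2, 0x0E));
        sum2 = _mm_add_epi32(sum2, _mm_shuffle_epi32(sum2, 0x01));
        *val = _mm_cvtsi128_si32(sum1);  /* movd %%xmm0, %1 */
        *v2  = _mm_cvtsi128_si32(sum2);  /* movd %%xmm2, %2 */
    }

The asm walks the buffers with a negative offset counted up to zero (add $8/$16; js 1b), while this sketch uses a plain forward loop; the two are functionally equivalent ways of iterating the same taps.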