diff --git a/libavresample/x86/audio_convert.asm b/libavresample/x86/audio_convert.asm
index ba59f3314f..7b3cc223c7 100644
--- a/libavresample/x86/audio_convert.asm
+++ b/libavresample/x86/audio_convert.asm
@@ -1,6 +1,7 @@
;******************************************************************************
;* x86 optimized Format Conversion Utils
;* Copyright (c) 2008 Loren Merritt
+;* Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
;*
;* This file is part of Libav.
;*
@@ -21,9 +22,217 @@
%include "x86inc.asm"
%include "x86util.asm"
+%include "util.asm"
+
+SECTION_RODATA 32
+
+pf_s32_inv_scale: times 8 dd 0x30000000 ; 1/2^31 as float
+pf_s32_scale:     times 8 dd 0x4f000000 ; 2^31 as float
+pf_s16_inv_scale: times 4 dd 0x38000000 ; 1/2^15 as float
+pf_s16_scale:     times 4 dd 0x47000000 ; 2^15 as float
+
SECTION_TEXT
+;------------------------------------------------------------------------------
+; void ff_conv_s16_to_s32(int32_t *dst, const int16_t *src, int len);
+;------------------------------------------------------------------------------
+
+INIT_XMM sse2
+cglobal conv_s16_to_s32, 3,3,3, dst, src, len
+ lea lenq, [2*lend]
+ lea dstq, [dstq+2*lenq]
+ add srcq, lenq
+ neg lenq
+.loop:
+ mova m2, [srcq+lenq]
+ pxor m0, m0
+ pxor m1, m1
+ ; interleaving zeros below each 16-bit sample leaves it in the
+ ; high half of a 32-bit dword, i.e. dst = src << 16
+ punpcklwd m0, m2
+ punpckhwd m1, m2
+ mova [dstq+2*lenq ], m0
+ mova [dstq+2*lenq+mmsize], m1
+ add lenq, mmsize
+ jl .loop
+ REP_RET
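
For reference, a scalar C sketch of what this routine computes (the _ref name is illustrative, not part of the patch): pairing punpcklwd/punpckhwd with a zeroed register places each 16-bit sample in the high half of a 32-bit word, which is a multiply by 2^16.

    #include <stdint.h>

    /* Scalar model of conv_s16_to_s32. The shift is done on an unsigned
     * copy to avoid undefined behavior when left-shifting negative
     * values in C; the result is the same dst = src << 16. */
    static void conv_s16_to_s32_ref(int32_t *dst, const int16_t *src, int len)
    {
        for (int i = 0; i < len; i++)
            dst[i] = (int32_t)((uint32_t)src[i] << 16);
    }
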
+
+;------------------------------------------------------------------------------
+; void ff_conv_s16_to_flt(float *dst, const int16_t *src, int len);
+;------------------------------------------------------------------------------
+
+%macro CONV_S16_TO_FLT 0
+cglobal conv_s16_to_flt, 3,3,3, dst, src, len
+ lea lenq, [2*lend]
+ add srcq, lenq
+ lea dstq, [dstq + 2*lenq]
+ neg lenq
+ mova m2, [pf_s16_inv_scale]
+ ALIGN 16
+.loop:
+ mova m0, [srcq+lenq]
+ S16_TO_S32_SX 0, 1
+ cvtdq2ps m0, m0
+ cvtdq2ps m1, m1
+ mulps m0, m2
+ mulps m1, m2
+ mova [dstq+2*lenq ], m0
+ mova [dstq+2*lenq+mmsize], m1
+ add lenq, mmsize
+ jl .loop
+ REP_RET
+%endmacro
+
+INIT_XMM sse2
+CONV_S16_TO_FLT
+INIT_XMM sse4
+CONV_S16_TO_FLT
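
A scalar model of the same conversion, as an illustrative sketch (the helper name is hypothetical): sign-extend each sample, convert to float, and scale by 2^-15 (the value of pf_s16_inv_scale), so a full-scale s16 sample lands in [-1.0, 1.0).

    #include <stdint.h>

    /* Scalar model of conv_s16_to_flt: S16_TO_S32_SX + cvtdq2ps + mulps
     * collapse to one multiply per sample in scalar code. */
    static void conv_s16_to_flt_ref(float *dst, const int16_t *src, int len)
    {
        for (int i = 0; i < len; i++)
            dst[i] = src[i] * (1.0f / 32768.0f); /* 2^-15 */
    }
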
+
+;------------------------------------------------------------------------------
+; void ff_conv_s32_to_s16(int16_t *dst, const int32_t *src, int len);
+;------------------------------------------------------------------------------
+
+%macro CONV_S32_TO_S16 0
+cglobal conv_s32_to_s16, 3,3,4, dst, src, len
+ lea lenq, [2*lend]
+ lea srcq, [srcq+2*lenq]
+ add dstq, lenq
+ neg lenq
+.loop:
+ mova m0, [srcq+2*lenq ]
+ mova m1, [srcq+2*lenq+ mmsize]
+ mova m2, [srcq+2*lenq+2*mmsize]
+ mova m3, [srcq+2*lenq+3*mmsize]
+ psrad m0, 16
+ psrad m1, 16
+ psrad m2, 16
+ psrad m3, 16
+ packssdw m0, m1
+ packssdw m2, m3
+ mova [dstq+lenq ], m0
+ mova [dstq+lenq+mmsize], m2
+ add lenq, mmsize*2
+ jl .loop
+%if mmsize == 8
+ emms
+ RET
+%else
+ REP_RET
+%endif
+%endmacro
+
+INIT_MMX mmx
+CONV_S32_TO_S16
+INIT_XMM sse2
+CONV_S32_TO_S16
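
The scalar equivalent, again as a sketch with an illustrative name: psrad by 16 keeps the high word of each sample, so every value already fits in 16 bits and the packssdw saturation never fires; the pack is a plain narrowing.

    #include <stdint.h>

    /* Scalar model of conv_s32_to_s16. Assumes >> on a signed value is
     * an arithmetic shift, as it is on every target this asm supports. */
    static void conv_s32_to_s16_ref(int16_t *dst, const int32_t *src, int len)
    {
        for (int i = 0; i < len; i++)
            dst[i] = (int16_t)(src[i] >> 16);
    }
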
+
+;------------------------------------------------------------------------------
+; void ff_conv_s32_to_flt(float *dst, const int32_t *src, int len);
+;------------------------------------------------------------------------------
+
+%macro CONV_S32_TO_FLT 0
+cglobal conv_s32_to_flt, 3,3,3, dst, src, len
+ lea lenq, [4*lend]
+ add srcq, lenq
+ add dstq, lenq
+ neg lenq
+ mova m0, [pf_s32_inv_scale]
+ ALIGN 16
+.loop:
+ cvtdq2ps m1, [srcq+lenq ]
+ cvtdq2ps m2, [srcq+lenq+mmsize]
+ mulps m1, m1, m0
+ mulps m2, m2, m0
+ mova [dstq+lenq ], m1
+ mova [dstq+lenq+mmsize], m2
+ add lenq, mmsize*2
+ jl .loop
+%if mmsize == 32
+ vzeroupper
+ RET
+%else
+ REP_RET
+%endif
+%endmacro
+
+INIT_XMM sse2
+CONV_S32_TO_FLT
+%if HAVE_AVX
+INIT_YMM avx
+CONV_S32_TO_FLT
+%endif
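
In scalar terms (sketch only, name not part of the patch): cvtdq2ps followed by a multiply by 2^-31 (pf_s32_inv_scale) maps a full-scale s32 sample to [-1.0, 1.0).

    #include <stdint.h>

    /* Scalar model of conv_s32_to_flt. */
    static void conv_s32_to_flt_ref(float *dst, const int32_t *src, int len)
    {
        for (int i = 0; i < len; i++)
            dst[i] = src[i] * (1.0f / 2147483648.0f); /* 2^-31 */
    }
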
+
+;------------------------------------------------------------------------------
+; void ff_conv_flt_to_s16(int16_t *dst, const float *src, int len);
+;------------------------------------------------------------------------------
+
+INIT_XMM sse2
+cglobal conv_flt_to_s16, 3,3,5, dst, src, len
+ lea lenq, [2*lend]
+ lea srcq, [srcq+2*lenq]
+ add dstq, lenq
+ neg lenq
+ mova m4, [pf_s16_scale]
+.loop:
+ mova m0, [srcq+2*lenq ]
+ mova m1, [srcq+2*lenq+1*mmsize]
+ mova m2, [srcq+2*lenq+2*mmsize]
+ mova m3, [srcq+2*lenq+3*mmsize]
+ mulps m0, m4
+ mulps m1, m4
+ mulps m2, m4
+ mulps m3, m4
+ cvtps2dq m0, m0
+ cvtps2dq m1, m1
+ cvtps2dq m2, m2
+ cvtps2dq m3, m3
+ packssdw m0, m1
+ packssdw m2, m3
+ mova [dstq+lenq ], m0
+ mova [dstq+lenq+mmsize], m2
+ add lenq, mmsize*2
+ jl .loop
+ REP_RET
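
A scalar sketch of the float-to-s16 path (illustrative name, not the Libav source): scale by 2^15 (pf_s16_scale), round to nearest as cvtps2dq does, and saturate to the s16 range as packssdw does. Clamping before the convert gives the same results for this range while keeping lrintf in range.

    #include <math.h>
    #include <stdint.h>

    /* Scalar model of conv_flt_to_s16. */
    static void conv_flt_to_s16_ref(int16_t *dst, const float *src, int len)
    {
        for (int i = 0; i < len; i++) {
            float v = src[i] * 32768.0f; /* 2^15 */
            if (v >  32767.0f) v =  32767.0f;
            if (v < -32768.0f) v = -32768.0f;
            dst[i] = (int16_t)lrintf(v);
        }
    }
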
+
+;------------------------------------------------------------------------------
+; void ff_conv_flt_to_s32(int32_t *dst, const float *src, int len);
+;------------------------------------------------------------------------------
+
+%macro CONV_FLT_TO_S32 0
+cglobal conv_flt_to_s32, 3,3,5, dst, src, len
+ lea lenq, [lend*4]
+ add srcq, lenq
+ add dstq, lenq
+ neg lenq
+ mova m4, [pf_s32_scale]
+.loop:
+ mulps m0, m4, [srcq+lenq ]
+ mulps m1, m4, [srcq+lenq+1*mmsize]
+ mulps m2, m4, [srcq+lenq+2*mmsize]
+ mulps m3, m4, [srcq+lenq+3*mmsize]
+ cvtps2dq m0, m0
+ cvtps2dq m1, m1
+ cvtps2dq m2, m2
+ cvtps2dq m3, m3
+ mova [dstq+lenq ], m0
+ mova [dstq+lenq+1*mmsize], m1
+ mova [dstq+lenq+2*mmsize], m2
+ mova [dstq+lenq+3*mmsize], m3
+ add lenq, mmsize*4
+ jl .loop
+%if mmsize == 32
+ vzeroupper
+ RET
+%else
+ REP_RET
+%endif
+%endmacro
+
+INIT_XMM sse2
+CONV_FLT_TO_S32
+%if HAVE_AVX
+INIT_YMM avx
+CONV_FLT_TO_S32
+%endif
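
And the scalar sketch for the float-to-s32 path (hypothetical helper, assumes finite input): scale by 2^31 (pf_s32_scale) and convert with rounding. cvtps2dq returns 0x80000000 for anything that does not fit in an int32, which the clamp below imitates.

    #include <math.h>
    #include <stdint.h>

    /* Scalar model of conv_flt_to_s32. */
    static void conv_flt_to_s32_ref(int32_t *dst, const float *src, int len)
    {
        for (int i = 0; i < len; i++) {
            long long v = llrintf(src[i] * 2147483648.0f); /* 2^31 */
            if (v > INT32_MAX || v < INT32_MIN)
                v = INT32_MIN; /* matches cvtps2dq overflow behavior */
            dst[i] = (int32_t)v;
        }
    }
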
+
;-----------------------------------------------------------------------------
; void ff_conv_fltp_to_flt_6ch(float *dst, float *const *src, int len,
; int channels);