author    Michael Niedermayer <michaelni@gmx.at>    2012-08-13 14:38:43 +0200
committer Michael Niedermayer <michaelni@gmx.at>    2012-08-13 14:38:43 +0200
commit    d8c3170c9ff81b5563eba543ff56687bcb7f5127 (patch)
tree      3d99afbb09f2032ef8851736d5f4801a2ba17586 /libavcodec/x86/vc1dsp.asm
parent    bd70a527129a1c049a8ab38236bf87f7d459df10 (diff)
parent    69665bd6f40f02ecf822f80c05dd2765da2dfa7b (diff)
Merge remote-tracking branch 'qatar/master'
* qatar/master: (22 commits)
  g723.1: do not pass large structs by value
  g723.1: do not bounce intermediate values via memory
  g723.1: declare a variable in the block it is used
  g723.1: avoid saving/restoring excitation
  g723.1: avoid unnecessary memcpy() in residual_interp()
  g723.1: make postfilter write directly to output buffer
  g723.1: drop unnecessary variable buf_ptr in formant_postfilter()
  g723.1: make scale_vector() output to a separate buffer
  g723.1: make autocorr_max() work on an arbitrary buffer
  g723.1: do not needlessly use int64_t
  g723.1: use saturating addition functions
  g723.1: optimise scale_vector()
  g723.1: remove useless uses of MUL64()
  g723.1: remove unnecessary argument 'shift' from dot_product()
  g723.1: deobfuscate "(x << 4) - x" to "15 * x"
  celp: optimise ff_celp_lp_synthesis_filter()
  libavutil: add saturating addition functions
  cllc: Implement ARGB support
  cllc: Add support for QRGB
  cllc: Rename some funcs to represent what they actually do
  ...

Conflicts:
	LICENSE
	libavcodec/g723_1.c
	libavcodec/x86/Makefile

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Diffstat (limited to 'libavcodec/x86/vc1dsp.asm')
-rw-r--r--  libavcodec/x86/vc1dsp.asm  320
1 file changed, 320 insertions, 0 deletions
diff --git a/libavcodec/x86/vc1dsp.asm b/libavcodec/x86/vc1dsp.asm
new file mode 100644
index 0000000000..590aa509a7
--- /dev/null
+++ b/libavcodec/x86/vc1dsp.asm
@@ -0,0 +1,320 @@
+;******************************************************************************
+;* VC1 deblocking optimizations
+;* Copyright (c) 2009 David Conrad
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86inc.asm"
+%include "libavutil/x86/x86util.asm"
+
+cextern pw_4
+cextern pw_5
+
+section .text
+
+; unpack suffix, dst_low, dst_high (src), zero
+; zero-extends one vector from 8 to 16 bits
+%macro UNPACK_8TO16 4
+ mova m%2, m%3
+ punpckh%1 m%3, m%4
+ punpckl%1 m%2, m%4
+%endmacro
+
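+; %1-%4: destination addresses, %5: vector register holding (at least) 4 packed
+; words, %6: general-purpose register used as scratch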
+%macro STORE_4_WORDS_MMX 6
+ movd %6d, %5
+%if mmsize==16
+ psrldq %5, 4
+%else
+ psrlq %5, 32
+%endif
+ mov %1, %6w
+ shr %6, 16
+ mov %2, %6w
+ movd %6d, %5
+ mov %3, %6w
+ shr %6, 16
+ mov %4, %6w
+%endmacro
+
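+; same stores done with SSE4.1 pextrw-to-memory; %6 is the word index of the
+; first pixel within %5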
+%macro STORE_4_WORDS_SSE4 6
+ pextrw %1, %5, %6+0
+ pextrw %2, %5, %6+1
+ pextrw %3, %5, %6+2
+ pextrw %4, %5, %6+3
+%endmacro
+
+; in: p1 p0 q0 q1, clobbers p0
+; out: p1 = (2*(p1 - q1) - 5*(p0 - q0) + 4) >> 3
+%macro VC1_LOOP_FILTER_A0 4
+ psubw %1, %4
+ psubw %2, %3
+ paddw %1, %1
+ pmullw %2, [pw_5]
+ psubw %1, %2
+ paddw %1, [pw_4]
+ psraw %1, 3
+%endmacro
+
+; in: p0 q0 a0 a1 a2
+; m0 m1 m7 m6 m5
+; %1: size
+; out: m0=p0' m1=q0'
+%macro VC1_FILTER 1
+ PABSW m4, m7
+ PABSW m3, m6
+ PABSW m2, m5
+ mova m6, m4
+ pminsw m3, m2
+ pcmpgtw m6, m3 ; if (a2 < a0 || a1 < a0)
+ psubw m3, m4
+ pmullw m3, [pw_5] ; 5*(a3 - a0)
+ PABSW m2, m3
+ psraw m2, 3 ; abs(d/8)
+ pxor m7, m3 ; d_sign ^= a0_sign
+
+ pxor m5, m5
+ movd m3, r2d
+%if %1 > 4
+ punpcklbw m3, m3
+%endif
+ punpcklbw m3, m5
+ pcmpgtw m3, m4 ; if (a0 < pq)
+ pand m6, m3
+
+ mova m3, m0
+ psubw m3, m1
+ PABSW m4, m3
+ psraw m4, 1
+ pxor m3, m7 ; d_sign ^ clip_sign
+ psraw m3, 15
+ pminsw m2, m4 ; min(d, clip)
+ pcmpgtw m4, m5
+ pand m6, m4 ; filt3 (C return value)
+
+; no pixel in a group of 4 is filtered unless the group's 3rd pixel is
+%if mmsize==16
+ pshuflw m4, m6, 0xaa
+%if %1 > 4
+ pshufhw m4, m4, 0xaa
+%endif
+%else
+ pshufw m4, m6, 0xaa
+%endif
+ pandn m3, m4
+ pand m2, m6
+ pand m3, m2 ; d final
+
+ psraw m7, 15
+ pxor m3, m7
+ psubw m3, m7
+ psubw m0, m3
+ paddw m1, m3
+ packuswb m0, m0
+ packuswb m1, m1
+%endmacro
+
+; 1st param: size of filter
+; 2nd param: mov suffix matching the filter size (d for 4 pixels, q for 8)
+%macro VC1_V_LOOP_FILTER 2
+ pxor m5, m5
+ mov%2 m6, [r4]
+ mov%2 m4, [r4+r1]
+ mov%2 m7, [r4+2*r1]
+ mov%2 m0, [r4+r3]
+ punpcklbw m6, m5
+ punpcklbw m4, m5
+ punpcklbw m7, m5
+ punpcklbw m0, m5
+
+ VC1_LOOP_FILTER_A0 m6, m4, m7, m0
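+    ; -> m6 = a1 (from the 4 rows above the edge)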
+ mov%2 m1, [r0]
+ mov%2 m2, [r0+r1]
+ punpcklbw m1, m5
+ punpcklbw m2, m5
+ mova m4, m0
+ VC1_LOOP_FILTER_A0 m7, m4, m1, m2
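+    ; -> m7 = a0 (from the 4 rows straddling the edge)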
+ mov%2 m3, [r0+2*r1]
+ mov%2 m4, [r0+r3]
+ punpcklbw m3, m5
+ punpcklbw m4, m5
+ mova m5, m1
+ VC1_LOOP_FILTER_A0 m5, m2, m3, m4
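+    ; -> m5 = a2 (from the 4 rows below the edge)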
+
+ VC1_FILTER %1
+ mov%2 [r4+r3], m0
+ mov%2 [r0], m1
+%endmacro
+
+; 1st param: size of filter
+; NOTE: when passed to UNPACK_8TO16, this is the number of 8-bit values held in half a register
+; 2nd (optional) param: temp register to use for storing words
+%macro VC1_H_LOOP_FILTER 1-2
+%if %1 == 4
+ movq m0, [r0 -4]
+ movq m1, [r0+ r1-4]
+ movq m2, [r0+2*r1-4]
+ movq m3, [r0+ r3-4]
+ TRANSPOSE4x4B 0, 1, 2, 3, 4
+%else
+ movq m0, [r0 -4]
+ movq m4, [r0+ r1-4]
+ movq m1, [r0+2*r1-4]
+ movq m5, [r0+ r3-4]
+ movq m2, [r4 -4]
+ movq m6, [r4+ r1-4]
+ movq m3, [r4+2*r1-4]
+ movq m7, [r4+ r3-4]
+ punpcklbw m0, m4
+ punpcklbw m1, m5
+ punpcklbw m2, m6
+ punpcklbw m3, m7
+ TRANSPOSE4x4W 0, 1, 2, 3, 4
+%endif
+ pxor m5, m5
+
+ UNPACK_8TO16 bw, 6, 0, 5
+ UNPACK_8TO16 bw, 7, 1, 5
+ VC1_LOOP_FILTER_A0 m6, m0, m7, m1
+ UNPACK_8TO16 bw, 4, 2, 5
+ mova m0, m1 ; m0 = p0
+ VC1_LOOP_FILTER_A0 m7, m1, m4, m2
+ UNPACK_8TO16 bw, 1, 3, 5
+ mova m5, m4
+ VC1_LOOP_FILTER_A0 m5, m2, m1, m3
+ SWAP 1, 4 ; m1 = q0
+
+ VC1_FILTER %1
+ punpcklbw m0, m1
+%if %0 > 1
+ STORE_4_WORDS_MMX [r0-1], [r0+r1-1], [r0+2*r1-1], [r0+r3-1], m0, %2
+%if %1 > 4
+ psrldq m0, 4
+ STORE_4_WORDS_MMX [r4-1], [r4+r1-1], [r4+2*r1-1], [r4+r3-1], m0, %2
+%endif
+%else
+ STORE_4_WORDS_SSE4 [r0-1], [r0+r1-1], [r0+2*r1-1], [r0+r3-1], m0, 0
+ STORE_4_WORDS_SSE4 [r4-1], [r4+r1-1], [r4+2*r1-1], [r4+r3-1], m0, 4
+%endif
+%endmacro
+
+
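+; in: r0 = src, r1 = stride, r2 = pq
+; sets r4 = src - 4*stride, r3 = 3*stride and splats pq into all 4 bytes of r2d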
+%macro START_V_FILTER 0
+ mov r4, r0
+ lea r3, [4*r1]
+ sub r4, r3
+ lea r3, [r1+2*r1]
+ imul r2, 0x01010101
+%endmacro
+
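+; %1: filter size; sets r3 = 3*stride (and r4 = src + 4*stride when filtering
+; 8 pixels) and splats pq into all 4 bytes of r2d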
+%macro START_H_FILTER 1
+ lea r3, [r1+2*r1]
+%if %1 > 4
+ lea r4, [r0+4*r1]
+%endif
+ imul r2, 0x01010101
+%endmacro
+
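+; instantiates the internal 4-pixel filters and the public 4- and 8-pixel
+; entry points for one instruction set; %1 is the function name suffix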
+%macro VC1_LF_MMX 1
+INIT_MMX
+cglobal vc1_v_loop_filter_internal_%1
+ VC1_V_LOOP_FILTER 4, d
+ ret
+
+cglobal vc1_h_loop_filter_internal_%1
+ VC1_H_LOOP_FILTER 4, r4
+ ret
+
+; void ff_vc1_v_loop_filter4_mmx2(uint8_t *src, int stride, int pq)
+cglobal vc1_v_loop_filter4_%1, 3,5,0
+ START_V_FILTER
+ call vc1_v_loop_filter_internal_%1
+ RET
+
+; void ff_vc1_h_loop_filter4_mmx2(uint8_t *src, int stride, int pq)
+cglobal vc1_h_loop_filter4_%1, 3,5,0
+ START_H_FILTER 4
+ call vc1_h_loop_filter_internal_%1
+ RET
+
+; void ff_vc1_v_loop_filter8_mmx2(uint8_t *src, int stride, int pq)
+cglobal vc1_v_loop_filter8_%1, 3,5,0
+ START_V_FILTER
+ call vc1_v_loop_filter_internal_%1
+ add r4, 4
+ add r0, 4
+ call vc1_v_loop_filter_internal_%1
+ RET
+
+; void ff_vc1_h_loop_filter8_mmx2(uint8_t *src, int stride, int pq)
+cglobal vc1_h_loop_filter8_%1, 3,5,0
+ START_H_FILTER 4
+ call vc1_h_loop_filter_internal_%1
+ lea r0, [r0+4*r1]
+ call vc1_h_loop_filter_internal_%1
+ RET
+%endmacro
+
+%define PABSW PABSW_MMX2
+VC1_LF_MMX mmx2
+
+INIT_XMM
+; void ff_vc1_v_loop_filter8_sse2(uint8_t *src, int stride, int pq)
+cglobal vc1_v_loop_filter8_sse2, 3,5,8
+ START_V_FILTER
+ VC1_V_LOOP_FILTER 8, q
+ RET
+
+; void ff_vc1_h_loop_filter8_sse2(uint8_t *src, int stride, int pq)
+cglobal vc1_h_loop_filter8_sse2, 3,6,8
+ START_H_FILTER 8
+ VC1_H_LOOP_FILTER 8, r5
+ RET
+
+%define PABSW PABSW_SSSE3
+
+INIT_MMX
+; void ff_vc1_v_loop_filter4_ssse3(uint8_t *src, int stride, int pq)
+cglobal vc1_v_loop_filter4_ssse3, 3,5,0
+ START_V_FILTER
+ VC1_V_LOOP_FILTER 4, d
+ RET
+
+; void ff_vc1_h_loop_filter4_ssse3(uint8_t *src, int stride, int pq)
+cglobal vc1_h_loop_filter4_ssse3, 3,5,0
+ START_H_FILTER 4
+ VC1_H_LOOP_FILTER 4, r4
+ RET
+
+INIT_XMM
+; void ff_vc1_v_loop_filter8_ssse3(uint8_t *src, int stride, int pq)
+cglobal vc1_v_loop_filter8_ssse3, 3,5,8
+ START_V_FILTER
+ VC1_V_LOOP_FILTER 8, q
+ RET
+
+; void ff_vc1_h_loop_filter8_ssse3(uint8_t *src, int stride, int pq)
+cglobal vc1_h_loop_filter8_ssse3, 3,6,8
+ START_H_FILTER 8
+ VC1_H_LOOP_FILTER 8, r5
+ RET
+
+; void ff_vc1_h_loop_filter8_sse4(uint8_t *src, int stride, int pq)
+cglobal vc1_h_loop_filter8_sse4, 3,5,8
+ START_H_FILTER 8
+ VC1_H_LOOP_FILTER 8
+ RET
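
For reference, here is a rough scalar sketch of the per-pixel filter that VC1_LOOP_FILTER_A0 and VC1_FILTER vectorize, reconstructed from the comments and code above rather than copied from FFmpeg's C reference (the canonical version lives in libavcodec/vc1dsp.c). The function name is hypothetical, FFABS/FFMIN are the usual libavutil macros, and the SIMD-specific rule noted in VC1_FILTER (a group of 4 pixels is only filtered when its 3rd pixel passes the test) is left out.

#include <stdint.h>
#include "libavutil/common.h"   /* FFABS, FFMIN */

/* src points at q0, the first pixel after the edge; stride steps across the
 * edge; pq is the quantizer-derived threshold passed to the asm in r2. */
static int vc1_filter_line_sketch(uint8_t *src, int stride, int pq)
{
    /* (2*(p1 - q1) - 5*(p0 - q0) + 4) >> 3 over three pixel quads:
     * across the edge (a0), above it (a1) and below it (a2) */
    int a0 = (2 * (src[-2*stride] - src[ 1*stride]) - 5 * (src[-1*stride] - src[ 0*stride]) + 4) >> 3;
    int a1 = (2 * (src[-4*stride] - src[-1*stride]) - 5 * (src[-3*stride] - src[-2*stride]) + 4) >> 3;
    int a2 = (2 * (src[ 0*stride] - src[ 3*stride]) - 5 * (src[ 1*stride] - src[ 2*stride]) + 4) >> 3;
    int aa0 = FFABS(a0), aa1 = FFABS(a1), aa2 = FFABS(a2);
    int clip, clip_neg, a3, d, d_neg;

    if (aa0 >= pq || (aa1 >= aa0 && aa2 >= aa0))
        return 0;                       /* needs a0 < pq and (a1 < a0 || a2 < a0) */

    clip     = src[-1*stride] - src[0*stride];
    clip_neg = clip < 0;
    clip     = FFABS(clip) >> 1;        /* maximum allowed change */
    if (!clip)
        return 0;

    a3    = FFMIN(aa1, aa2);
    d     = 5 * (a3 - aa0);             /* always negative here, since a3 < aa0 */
    d_neg = (d < 0) ^ (a0 < 0);         /* d_sign ^ a0_sign */
    d     = FFABS(d) >> 3;
    if (d_neg != clip_neg)              /* correction must pull p0 and q0 together */
        return 0;

    d = FFMIN(d, clip);                 /* |d| <= clip, so results stay in [0,255] */
    if (d_neg)
        d = -d;
    src[-1*stride] -= d;                /* p0' */
    src[ 0*stride] += d;                /* q0' */
    return 1;
}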