author     Michael Niedermayer <michaelni@gmx.at>  2013-01-27 13:32:47 +0100
committer  Michael Niedermayer <michaelni@gmx.at>  2013-01-27 13:32:56 +0100
commit     6b2f7fd1c7285974898c31d48d477728a30199f9 (patch)
tree       205aa6d68a223538dcbbb699bd1a0b4651639059 /libavcodec/x86
parent     f2b6aabd3da7d0d15c7cea0a9fb649b530e2d3cb (diff)
parent     f90ff772e7e35b4923c2de429d1fab9f2569b568 (diff)
Merge commit 'f90ff772e7e35b4923c2de429d1fab9f2569b568'
* commit 'f90ff772e7e35b4923c2de429d1fab9f2569b568':
  Move H264/QPEL specific asm from dsputil.asm to h264_qpel_*.asm.
  doc: update the reference for the title

Merged-by: Michael Niedermayer <michaelni@gmx.at>
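For reference, the pixels*_l2 routines being moved blend two source blocks into the destination with pavgb, which computes a per-byte rounded average, (a + b + 1) >> 1. A minimal C model of the width-8 put variant (the _model name and the C rendering are illustrative, not FFmpeg's actual C code):

#include <stdint.h>

/* Rough C model of put_pixels8_l2: each output byte is the rounded
 * average of src1 and src2. As in the asm, src2 is packed (it advances
 * by the block width, 8) while src1 and dst use their own strides. */
static void put_pixels8_l2_model(uint8_t *dst, const uint8_t *src1,
                                 const uint8_t *src2, int dstStride,
                                 int src1Stride, int h)
{
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < 8; x++)
            dst[x] = (src1[x] + src2[x] + 1) >> 1;
        dst  += dstStride;
        src1 += src1Stride;
        src2 += 8;
    }
}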
Diffstat (limited to 'libavcodec/x86')
-rw-r--r--  libavcodec/x86/dsputil.asm        | 188 -
-rw-r--r--  libavcodec/x86/h264_qpel_8bit.asm | 169 +
2 files changed, 169 insertions(+), 188 deletions(-)
diff --git a/libavcodec/x86/dsputil.asm b/libavcodec/x86/dsputil.asm
index 98fed1734f..6a76655a8b 100644
--- a/libavcodec/x86/dsputil.asm
+++ b/libavcodec/x86/dsputil.asm
@@ -649,194 +649,6 @@ BSWAP32_BUF
INIT_XMM ssse3
BSWAP32_BUF
-%macro op_avgh 3
- movh %3, %2
- pavgb %1, %3
- movh %2, %1
-%endmacro
-
-%macro op_avg 2
- pavgb %1, %2
- mova %2, %1
-%endmacro
-
-%macro op_puth 2-3
- movh %2, %1
-%endmacro
-
-%macro op_put 2
- mova %2, %1
-%endmacro
-
-; void pixels4_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
-%macro PIXELS4_L2 1
-%define OP op_%1h
-cglobal %1_pixels4_l2, 6,6
- movsxdifnidn r3, r3d
- movsxdifnidn r4, r4d
- test r5d, 1
- je .loop
- movd m0, [r1]
- movd m1, [r2]
- add r1, r4
- add r2, 4
- pavgb m0, m1
- OP m0, [r0], m3
- add r0, r3
- dec r5d
-.loop:
- mova m0, [r1]
- mova m1, [r1+r4]
- lea r1, [r1+2*r4]
- pavgb m0, [r2]
- pavgb m1, [r2+4]
- OP m0, [r0], m3
- OP m1, [r0+r3], m3
- lea r0, [r0+2*r3]
- mova m0, [r1]
- mova m1, [r1+r4]
- lea r1, [r1+2*r4]
- pavgb m0, [r2+8]
- pavgb m1, [r2+12]
- OP m0, [r0], m3
- OP m1, [r0+r3], m3
- lea r0, [r0+2*r3]
- add r2, 16
- sub r5d, 4
- jne .loop
- REP_RET
-%endmacro
-
-INIT_MMX mmxext
-PIXELS4_L2 put
-PIXELS4_L2 avg
-
-; void pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
-%macro PIXELS8_L2 1
-%define OP op_%1
-cglobal %1_pixels8_l2, 6,6
- movsxdifnidn r3, r3d
- movsxdifnidn r4, r4d
- test r5d, 1
- je .loop
- mova m0, [r1]
- mova m1, [r2]
- add r1, r4
- add r2, 8
- pavgb m0, m1
- OP m0, [r0]
- add r0, r3
- dec r5d
-.loop:
- mova m0, [r1]
- mova m1, [r1+r4]
- lea r1, [r1+2*r4]
- pavgb m0, [r2]
- pavgb m1, [r2+8]
- OP m0, [r0]
- OP m1, [r0+r3]
- lea r0, [r0+2*r3]
- mova m0, [r1]
- mova m1, [r1+r4]
- lea r1, [r1+2*r4]
- pavgb m0, [r2+16]
- pavgb m1, [r2+24]
- OP m0, [r0]
- OP m1, [r0+r3]
- lea r0, [r0+2*r3]
- add r2, 32
- sub r5d, 4
- jne .loop
- REP_RET
-%endmacro
-
-INIT_MMX mmxext
-PIXELS8_L2 put
-PIXELS8_L2 avg
-
-; void pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
-%macro PIXELS16_L2 1
-%define OP op_%1
-cglobal %1_pixels16_l2, 6,6
- movsxdifnidn r3, r3d
- movsxdifnidn r4, r4d
- test r5d, 1
- je .loop
- mova m0, [r1]
- mova m1, [r1+8]
- pavgb m0, [r2]
- pavgb m1, [r2+8]
- add r1, r4
- add r2, 16
- OP m0, [r0]
- OP m1, [r0+8]
- add r0, r3
- dec r5d
-.loop:
- mova m0, [r1]
- mova m1, [r1+8]
- add r1, r4
- pavgb m0, [r2]
- pavgb m1, [r2+8]
- OP m0, [r0]
- OP m1, [r0+8]
- add r0, r3
- mova m0, [r1]
- mova m1, [r1+8]
- add r1, r4
- pavgb m0, [r2+16]
- pavgb m1, [r2+24]
- OP m0, [r0]
- OP m1, [r0+8]
- add r0, r3
- add r2, 32
- sub r5d, 2
- jne .loop
- REP_RET
-%endmacro
-
-INIT_MMX mmxext
-PIXELS16_L2 put
-PIXELS16_L2 avg
-
-INIT_MMX mmxext
-; void pixels(uint8_t *block, const uint8_t *pixels, int line_size, int h)
-%macro PIXELS48 2
-%if %2 == 4
-%define OP movh
-%else
-%define OP mova
-%endif
-cglobal %1_pixels%2, 4,5
- movsxdifnidn r2, r2d
- lea r4, [r2*3]
-.loop:
- OP m0, [r1]
- OP m1, [r1+r2]
- OP m2, [r1+r2*2]
- OP m3, [r1+r4]
- lea r1, [r1+r2*4]
-%ifidn %1, avg
- pavgb m0, [r0]
- pavgb m1, [r0+r2]
- pavgb m2, [r0+r2*2]
- pavgb m3, [r0+r4]
-%endif
- OP [r0], m0
- OP [r0+r2], m1
- OP [r0+r2*2], m2
- OP [r0+r4], m3
- sub r3d, 4
- lea r0, [r0+r2*4]
- jne .loop
- RET
-%endmacro
-
-PIXELS48 put, 4
-PIXELS48 avg, 4
-PIXELS48 put, 8
-PIXELS48 avg, 8
-
INIT_XMM sse2
; void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
cglobal put_pixels16, 4,5,4
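
The avg variants deleted above differ from put only in the OP macro: op_avg folds the existing destination back in with a second pavgb instead of storing the blend directly. A hedged C sketch of that difference (hypothetical _model name, same rounded-average semantics):

#include <stdint.h>

/* Rough C model of avg_pixels8_l2: blend src1 and src2, then average
 * the result with what is already in dst (the op_avg pavgb step). */
static void avg_pixels8_l2_model(uint8_t *dst, const uint8_t *src1,
                                 const uint8_t *src2, int dstStride,
                                 int src1Stride, int h)
{
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < 8; x++) {
            int blend = (src1[x] + src2[x] + 1) >> 1;
            dst[x] = (dst[x] + blend + 1) >> 1;
        }
        dst  += dstStride;
        src1 += src1Stride;
        src2 += 8;
    }
}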
diff --git a/libavcodec/x86/h264_qpel_8bit.asm b/libavcodec/x86/h264_qpel_8bit.asm
index 2d287ba443..3039f17206 100644
--- a/libavcodec/x86/h264_qpel_8bit.asm
+++ b/libavcodec/x86/h264_qpel_8bit.asm
@@ -860,3 +860,172 @@ INIT_XMM ssse3
QPEL16_H_LOWPASS_L2_OP put
QPEL16_H_LOWPASS_L2_OP avg
%endif
+
+; void pixels4_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
+%macro PIXELS4_L2 1
+%define OP op_%1h
+cglobal %1_pixels4_l2, 6,6
+ movsxdifnidn r3, r3d
+ movsxdifnidn r4, r4d
+ test r5d, 1
+ je .loop
+ movd m0, [r1]
+ movd m1, [r2]
+ add r1, r4
+ add r2, 4
+ pavgb m0, m1
+ OP m0, [r0], m3
+ add r0, r3
+ dec r5d
+.loop:
+ mova m0, [r1]
+ mova m1, [r1+r4]
+ lea r1, [r1+2*r4]
+ pavgb m0, [r2]
+ pavgb m1, [r2+4]
+ OP m0, [r0], m3
+ OP m1, [r0+r3], m3
+ lea r0, [r0+2*r3]
+ mova m0, [r1]
+ mova m1, [r1+r4]
+ lea r1, [r1+2*r4]
+ pavgb m0, [r2+8]
+ pavgb m1, [r2+12]
+ OP m0, [r0], m3
+ OP m1, [r0+r3], m3
+ lea r0, [r0+2*r3]
+ add r2, 16
+ sub r5d, 4
+ jne .loop
+ REP_RET
+%endmacro
+
+INIT_MMX mmxext
+PIXELS4_L2 put
+PIXELS4_L2 avg
+
+; void pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
+%macro PIXELS8_L2 1
+%define OP op_%1
+cglobal %1_pixels8_l2, 6,6
+ movsxdifnidn r3, r3d
+ movsxdifnidn r4, r4d
+ test r5d, 1
+ je .loop
+ mova m0, [r1]
+ mova m1, [r2]
+ add r1, r4
+ add r2, 8
+ pavgb m0, m1
+ OP m0, [r0]
+ add r0, r3
+ dec r5d
+.loop:
+ mova m0, [r1]
+ mova m1, [r1+r4]
+ lea r1, [r1+2*r4]
+ pavgb m0, [r2]
+ pavgb m1, [r2+8]
+ OP m0, [r0]
+ OP m1, [r0+r3]
+ lea r0, [r0+2*r3]
+ mova m0, [r1]
+ mova m1, [r1+r4]
+ lea r1, [r1+2*r4]
+ pavgb m0, [r2+16]
+ pavgb m1, [r2+24]
+ OP m0, [r0]
+ OP m1, [r0+r3]
+ lea r0, [r0+2*r3]
+ add r2, 32
+ sub r5d, 4
+ jne .loop
+ REP_RET
+%endmacro
+
+INIT_MMX mmxext
+PIXELS8_L2 put
+PIXELS8_L2 avg
+
+; void pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
+%macro PIXELS16_L2 1
+%define OP op_%1
+cglobal %1_pixels16_l2, 6,6
+ movsxdifnidn r3, r3d
+ movsxdifnidn r4, r4d
+ test r5d, 1
+ je .loop
+ mova m0, [r1]
+ mova m1, [r1+8]
+ pavgb m0, [r2]
+ pavgb m1, [r2+8]
+ add r1, r4
+ add r2, 16
+ OP m0, [r0]
+ OP m1, [r0+8]
+ add r0, r3
+ dec r5d
+.loop:
+ mova m0, [r1]
+ mova m1, [r1+8]
+ add r1, r4
+ pavgb m0, [r2]
+ pavgb m1, [r2+8]
+ OP m0, [r0]
+ OP m1, [r0+8]
+ add r0, r3
+ mova m0, [r1]
+ mova m1, [r1+8]
+ add r1, r4
+ pavgb m0, [r2+16]
+ pavgb m1, [r2+24]
+ OP m0, [r0]
+ OP m1, [r0+8]
+ add r0, r3
+ add r2, 32
+ sub r5d, 2
+ jne .loop
+ REP_RET
+%endmacro
+
+INIT_MMX mmxext
+PIXELS16_L2 put
+PIXELS16_L2 avg
+
+INIT_MMX mmxext
+; void pixels(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+%macro PIXELS48 2
+%if %2 == 4
+%define OP movh
+%else
+%define OP mova
+%endif
+cglobal %1_pixels%2, 4,5
+ movsxdifnidn r2, r2d
+ lea r4, [r2*3]
+.loop:
+ OP m0, [r1]
+ OP m1, [r1+r2]
+ OP m2, [r1+r2*2]
+ OP m3, [r1+r4]
+ lea r1, [r1+r2*4]
+%ifidn %1, avg
+ pavgb m0, [r0]
+ pavgb m1, [r0+r2]
+ pavgb m2, [r0+r2*2]
+ pavgb m3, [r0+r4]
+%endif
+ OP [r0], m0
+ OP [r0+r2], m1
+ OP [r0+r2*2], m2
+ OP [r0+r4], m3
+ sub r3d, 4
+ lea r0, [r0+r2*4]
+ jne .loop
+ RET
+%endmacro
+
+PIXELS48 put, 4
+PIXELS48 avg, 4
+PIXELS48 put, 8
+PIXELS48 avg, 8
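
The PIXELS48 macro at the end generates the plain block copies: movh or mova selects a 4- or 8-byte row, and the avg flavor pavgb-blends with the destination before storing. A hedged C equivalent of the width-8 expansions (assumed _model names, not FFmpeg identifiers):

#include <stdint.h>

/* Rough C models of the PIXELS48 width-8 expansions: put is a straight
 * row copy; avg is a rounded average with the existing destination. */
static void put_pixels8_model(uint8_t *block, const uint8_t *pixels,
                              int line_size, int h)
{
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < 8; x++)
            block[x] = pixels[x];
        block  += line_size;
        pixels += line_size;
    }
}

static void avg_pixels8_model(uint8_t *block, const uint8_t *pixels,
                              int line_size, int h)
{
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < 8; x++)
            block[x] = (block[x] + pixels[x] + 1) >> 1;
        block  += line_size;
        pixels += line_size;
    }
}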