Diffstat (limited to 'libavcodec/x86/videodsp.asm')
-rw-r--r--  libavcodec/x86/videodsp.asm  134
1 file changed, 74 insertions(+), 60 deletions(-)
diff --git a/libavcodec/x86/videodsp.asm b/libavcodec/x86/videodsp.asm
index 59f19378ca..1ac02574d6 100644
--- a/libavcodec/x86/videodsp.asm
+++ b/libavcodec/x86/videodsp.asm
@@ -2,20 +2,20 @@
;* Core video DSP functions
;* Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com>
;*
-;* This file is part of Libav.
+;* This file is part of FFmpeg.
;*
-;* Libav is free software; you can redistribute it and/or
+;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
-;* Libav is distributed in the hope that it will be useful,
+;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
-;* License along with Libav; if not, write to the Free Software
+;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
@@ -54,13 +54,13 @@ SECTION .text
; | | <- bottom is copied from last line in body of source
; '----' <- bh
%if ARCH_X86_64
-cglobal emu_edge_vvar, 7, 8, 1, dst, src, dst_stride, src_stride, \
+cglobal emu_edge_vvar, 7, 8, 1, dst, dst_stride, src, src_stride, \
start_y, end_y, bh, w
%else ; x86-32
cglobal emu_edge_vvar, 1, 6, 1, dst, src, start_y, end_y, bh, w
%define src_strideq r3mp
-%define dst_strideq r2mp
- mov srcq, r1mp
+%define dst_strideq r1mp
+ mov srcq, r2mp
mov start_yq, r4mp
mov end_yq, r5mp
mov bhq, r6mp
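
For orientation, here is a minimal C model of the vertical extension these functions perform, following the diagram above: rows above start_y repeat the first source row, rows at or past end_y repeat the last body row, and the body is copied through. emu_edge_v_ref and its exact clamping are an illustrative assumption based on that diagram, not FFmpeg API.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Illustrative model of emu_edge_vvar: src points at the first
     * valid row; top and bottom rows are clamped copies of it. */
    static void emu_edge_v_ref(uint8_t *dst, ptrdiff_t dst_stride,
                               const uint8_t *src, ptrdiff_t src_stride,
                               int start_y, int end_y, int bh, int w)
    {
        for (int y = 0; y < bh; y++) {
            int sy = y < start_y ? start_y : y >= end_y ? end_y - 1 : y;
            memcpy(dst + y * dst_stride,
                   src + (sy - start_y) * src_stride, w);
        }
    }
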
@@ -102,8 +102,8 @@ cglobal emu_edge_hvar, 5, 6, 1, dst, dst_stride, start_x, n_words, h, w
imul wd, 0x01010101 ; w *= 0x01010101
movd m0, wd
mov wq, n_wordsq ; initialize w
-%if cpuflag(sse)
- shufps m0, m0, q0000 ; splat
+%if cpuflag(sse2)
+ pshufd m0, m0, q0000 ; splat
%else ; mmx
punpckldq m0, m0 ; splat
%endif ; mmx/sse
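
The splat above replicates one byte across a whole register: multiplying by 0x01010101 copies the byte into all four bytes of a dword, and the shuffle then broadcasts that dword. pshufd is an SSE2 integer-domain shuffle, while shufps is an SSE float-domain one, which is why the function is now flagged sse2. A rough sketch of the same splat using the standard Intel intrinsics:

    #include <emmintrin.h> /* SSE2 */
    #include <stdint.h>

    static __m128i splat_byte_sse2(uint8_t v)
    {
        uint32_t d = v * 0x01010101u;                /* imul   wd, 0x01010101 */
        __m128i  m = _mm_cvtsi32_si128((int32_t)d);  /* movd   m0, wd         */
        return _mm_shuffle_epi32(m, 0);              /* pshufd m0, m0, q0000  */
    }
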
@@ -124,7 +124,7 @@ INIT_MMX mmx
hvar_fn
%endif
-INIT_XMM sse
+INIT_XMM sse2
hvar_fn
; macro to read/write a horizontal number of pixels (%2) to/from registers
@@ -137,42 +137,49 @@ hvar_fn
; - if (%2 & 3) fills 1, 2 or 4 bytes in eax
; writing data out is in the same way
%macro READ_NUM_BYTES 2
-%assign %%off 0 ; offset in source buffer
-%assign %%idx 0 ; mmx/xmm register index
+%assign %%off 0 ; offset in source buffer
+%assign %%mmx_idx 0 ; mmx register index
+%assign %%xmm_idx 0 ; xmm register index
%rep %2/mmsize
- movu m %+ %%idx, [srcq+%%off]
+%if mmsize == 16
+ movu xmm %+ %%xmm_idx, [srcq+%%off]
+%assign %%xmm_idx %%xmm_idx+1
+%else ; mmx
+ movu mm %+ %%mmx_idx, [srcq+%%off]
+%assign %%mmx_idx %%mmx_idx+1
+%endif
%assign %%off %%off+mmsize
-%assign %%idx %%idx+1
%endrep ; %2/mmsize
%if mmsize == 16
%if (%2-%%off) >= 8
%if %2 > 16 && (%2-%%off) > 8
- movu m %+ %%idx, [srcq+%2-16]
+ movu xmm %+ %%xmm_idx, [srcq+%2-16]
+%assign %%xmm_idx %%xmm_idx+1
%assign %%off %2
%else
- movq m %+ %%idx, [srcq+%%off]
+ movq mm %+ %%mmx_idx, [srcq+%%off]
+%assign %%mmx_idx %%mmx_idx+1
%assign %%off %%off+8
%endif
-%assign %%idx %%idx+1
%endif ; (%2-%%off) >= 8
%endif
%if (%2-%%off) >= 4
%if %2 > 8 && (%2-%%off) > 4
- movq m %+ %%idx, [srcq+%2-8]
+ movq mm %+ %%mmx_idx, [srcq+%2-8]
%assign %%off %2
%else
- movd m %+ %%idx, [srcq+%%off]
+ movd mm %+ %%mmx_idx, [srcq+%%off]
%assign %%off %%off+4
%endif
-%assign %%idx %%idx+1
+%assign %%mmx_idx %%mmx_idx+1
%endif ; (%2-%%off) >= 4
%if (%2-%%off) >= 1
%if %2 >= 4
- movd m %+ %%idx, [srcq+%2-4]
+ movd mm %+ %%mmx_idx, [srcq+%2-4]
%elif (%2-%%off) == 1
mov valb, [srcq+%2-1]
%elif (%2-%%off) == 2
@@ -180,48 +187,55 @@ hvar_fn
%elifidn %1, body
mov vald, [srcq+%2-3]
%else
- movd m %+ %%idx, [srcq+%2-3]
+ movd mm %+ %%mmx_idx, [srcq+%2-3]
%endif
%endif ; (%2-%%off) >= 1
%endmacro ; READ_NUM_BYTES
%macro WRITE_NUM_BYTES 2
-%assign %%off 0 ; offset in destination buffer
-%assign %%idx 0 ; mmx/xmm register index
+%assign %%off 0 ; offset in destination buffer
+%assign %%mmx_idx 0 ; mmx register index
+%assign %%xmm_idx 0 ; xmm register index
%rep %2/mmsize
- movu [dstq+%%off], m %+ %%idx
+%if mmsize == 16
+ movu [dstq+%%off], xmm %+ %%xmm_idx
+%assign %%xmm_idx %%xmm_idx+1
+%else ; mmx
+ movu [dstq+%%off], mm %+ %%mmx_idx
+%assign %%mmx_idx %%mmx_idx+1
+%endif
%assign %%off %%off+mmsize
-%assign %%idx %%idx+1
%endrep ; %2/mmsize
%if mmsize == 16
%if (%2-%%off) >= 8
%if %2 > 16 && (%2-%%off) > 8
- movu [dstq+%2-16], m %+ %%idx
+ movu [dstq+%2-16], xmm %+ %%xmm_idx
+%assign %%xmm_idx %%xmm_idx+1
%assign %%off %2
%else
- movq [dstq+%%off], m %+ %%idx
+ movq [dstq+%%off], mm %+ %%mmx_idx
+%assign %%mmx_idx %%mmx_idx+1
%assign %%off %%off+8
%endif
-%assign %%idx %%idx+1
%endif ; (%2-%%off) >= 8
%endif
%if (%2-%%off) >= 4
%if %2 > 8 && (%2-%%off) > 4
- movq [dstq+%2-8], m %+ %%idx
+ movq [dstq+%2-8], mm %+ %%mmx_idx
%assign %%off %2
%else
- movd [dstq+%%off], m %+ %%idx
+ movd [dstq+%%off], mm %+ %%mmx_idx
%assign %%off %%off+4
%endif
-%assign %%idx %%idx+1
+%assign %%mmx_idx %%mmx_idx+1
%endif ; (%2-%%off) >= 4
%if (%2-%%off) >= 1
%if %2 >= 4
- movd [dstq+%2-4], m %+ %%idx
+ movd [dstq+%2-4], mm %+ %%mmx_idx
%elif (%2-%%off) == 1
mov [dstq+%2-1], valb
%elif (%2-%%off) == 2
@@ -231,7 +245,7 @@ hvar_fn
shr vald, 16
mov [dstq+%2-1], valb
%else
- movd vald, m %+ %%idx
+ movd vald, mm %+ %%mmx_idx
mov [dstq+%2-3], valw
shr vald, 16
mov [dstq+%2-1], valb
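
READ_NUM_BYTES and WRITE_NUM_BYTES above move a constant byte count per row: full 16-byte (or 8-byte mmx) chunks first, then one chunk deliberately overlapping the end of the block instead of a byte loop; only a 1-3 byte remainder ever goes through eax. The split mmx/xmm counters let the sub-16-byte pieces stay in mmx registers. A hedged C sketch of that strategy follows; copy_n is a hypothetical name, and memcpy with a constant size stands in for movu/movq/movd:

    #include <stdint.h>
    #include <string.h>

    static void copy_n(uint8_t *dst, const uint8_t *src, int n)
    {
        int off = 0;
        for (; off + 16 <= n; off += 16)               /* movu xmm$idx */
            memcpy(dst + off, src + off, 16);
        if (n - off >= 8) {
            if (n > 16 && n - off > 8) {               /* overlapping 16-byte tail */
                memcpy(dst + n - 16, src + n - 16, 16);
                return;
            }
            memcpy(dst + off, src + off, 8);           /* movq mm$idx */
            off += 8;
        }
        if (n - off >= 4) {
            if (n > 8 && n - off > 4) {                /* overlapping 8-byte tail */
                memcpy(dst + n - 8, src + n - 8, 8);
                return;
            }
            memcpy(dst + off, src + off, 4);           /* movd mm$idx */
            off += 4;
        }
        if (n - off >= 1) {
            if (n >= 4)                                /* overlapping 4-byte tail */
                memcpy(dst + n - 4, src + n - 4, 4);
            else
                memcpy(dst + off, src + off, n - off); /* 1-3 bytes via eax */
        }
    }
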
@@ -248,30 +262,30 @@ hvar_fn
%rep 1+%2-%1
%if %%n <= 3
%if ARCH_X86_64
-cglobal emu_edge_vfix %+ %%n, 6, 8, 0, dst, src, dst_stride, src_stride, \
+cglobal emu_edge_vfix %+ %%n, 6, 8, 0, dst, dst_stride, src, src_stride, \
start_y, end_y, val, bh
mov bhq, r6mp ; r6mp = bhmp
%else ; x86-32
cglobal emu_edge_vfix %+ %%n, 0, 6, 0, val, dst, src, start_y, end_y, bh
mov dstq, r0mp
- mov srcq, r1mp
+ mov srcq, r2mp
mov start_yq, r4mp
mov end_yq, r5mp
mov bhq, r6mp
-%define dst_strideq r2mp
+%define dst_strideq r1mp
%define src_strideq r3mp
%endif ; x86-64/32
%else
%if ARCH_X86_64
-cglobal emu_edge_vfix %+ %%n, 7, 7, 1, dst, src, dst_stride, src_stride, \
+cglobal emu_edge_vfix %+ %%n, 7, 7, 1, dst, dst_stride, src, src_stride, \
start_y, end_y, bh
%else ; x86-32
cglobal emu_edge_vfix %+ %%n, 1, 5, 1, dst, src, start_y, end_y, bh
- mov srcq, r1mp
+ mov srcq, r2mp
mov start_yq, r4mp
mov end_yq, r5mp
mov bhq, r6mp
-%define dst_strideq r2mp
+%define dst_strideq r1mp
%define src_strideq r3mp
%endif ; x86-64/32
%endif
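
VERTICAL_EXTEND stamps out one emu_edge_vfix%n function per block width, so each READ/WRITE_NUM_BYTES expansion becomes straight-line code for a compile-time-constant width, with the variable-width vvar version apparently covering everything else. A C analogue of that per-width specialization and dispatch, with all names illustrative:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    typedef void (*vfix_fn)(uint8_t *dst, ptrdiff_t dst_stride,
                            const uint8_t *src, ptrdiff_t src_stride,
                            int start_y, int end_y, int bh);

    /* One function per width W; the constant memcpy size lets the
     * compiler emit fixed-size moves, like the asm templates. */
    #define VFIX(W)                                                       \
    static void vfix##W(uint8_t *dst, ptrdiff_t dst_stride,               \
                        const uint8_t *src, ptrdiff_t src_stride,         \
                        int start_y, int end_y, int bh)                   \
    {                                                                     \
        for (int y = 0; y < bh; y++) {                                    \
            memcpy(dst, src, W);                                          \
            dst += dst_stride;                                            \
            if (y >= start_y && y < end_y - 1)                            \
                src += src_stride;                                        \
        }                                                                 \
    }

    VFIX(1) VFIX(2) VFIX(3) VFIX(4) /* ... one per supported width */

    static const vfix_fn vfix_tab[] = { NULL, vfix1, vfix2, vfix3, vfix4 };
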
@@ -330,25 +344,23 @@ VERTICAL_EXTEND 16, 22
; obviously not the same on both sides.
%macro READ_V_PIXEL 2
-%if %1 == 2
- movzx valw, byte %2
- imul valw, 0x0101
-%else
movzx vald, byte %2
imul vald, 0x01010101
%if %1 >= 8
movd m0, vald
%if mmsize == 16
- shufps m0, m0, q0000
+ pshufd m0, m0, q0000
%else
punpckldq m0, m0
-%endif
-%endif ; %1 >= 8
-%endif
+%endif ; mmsize == 16
+%endif ; %1 >= 8
%endmacro ; READ_V_PIXEL
%macro WRITE_V_PIXEL 2
%assign %%off 0
+
+%if %1 >= 8
+
%rep %1/mmsize
movu [%2+%%off], m0
%assign %%off %%off+mmsize
@@ -364,27 +376,29 @@ VERTICAL_EXTEND 16, 22
%assign %%off %%off+8
%endif
%endif ; %1-%%off >= 8
-%endif
+%endif ; mmsize == 16
%if %1-%%off >= 4
-%if %1 > 8 %% %1-%%off > 4
+%if %1 > 8 && %1-%%off > 4
movq [%2+%1-8], m0
%assign %%off %1
-%elif %1 >= 8 && %1-%%off >= 4
- movd [%2+%%off], m0
-%assign %%off %%off+4
%else
- mov [%2+%%off], vald
+ movd [%2+%%off], m0
%assign %%off %%off+4
%endif
%endif ; %1-%%off >= 4
-%if %1-%%off >= 2
-%if %1 >= 8
- movd [%2+%1-4], m0
-%else
+%else ; %1 < 8
+
+%rep %1/4
+ mov [%2+%%off], vald
+%assign %%off %%off+4
+%endrep ; %1/4
+
+%endif ; %1 >=/< 8
+
+%if %1-%%off == 2
mov [%2+%%off], valw
-%endif
%endif ; (%1-%%off)/2
%endmacro ; WRITE_V_PIXEL
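
READ_V_PIXEL splats the single edge pixel (via the 0x01010101 multiply, plus a register splat once the width reaches 8), and the reworked WRITE_V_PIXEL stores it across the row: SIMD stores for wide rows, plain dword stores from the GPR for narrow ones, and a word store for the 2-byte tail (the H_EXTEND widths are all even). A small C sketch of the same fill; fill_edge_row is a hypothetical name:

    #include <stdint.h>
    #include <string.h>

    static void fill_edge_row(uint8_t *dst, uint8_t pix, int w)
    {
        uint32_t v = pix * 0x01010101u;   /* all four bytes equal pix */
        int off = 0;
        for (; off + 4 <= w; off += 4)    /* mov [dst+off], vald */
            memcpy(dst + off, &v, 4);
        if (w - off >= 2) {               /* mov [dst+off], valw */
            memcpy(dst + off, &v, 2);
            off += 2;
        }
        if (w - off)                      /* odd widths don't occur in the asm */
            dst[off] = pix;
    }
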
@@ -409,7 +423,7 @@ H_EXTEND 2, 14
H_EXTEND 16, 22
%endif
-INIT_XMM sse
+INIT_XMM sse2
H_EXTEND 16, 22
%macro PREFETCH_FN 1