path: root/libavcodec/x86/lossless_videoencdsp.asm
;************************************************************************
;* SIMD-optimized lossless video encoding functions
;* Copyright (c) 2000, 2001 Fabrice Bellard
;* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
;*
;* MMX optimization by Nick Kurshev <nickols_k@mail.ru>
;* Conversion to NASM format by Tiancheng "Timothy" Gu <timothygu99@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

cextern pb_80

SECTION .text

; void ff_diff_bytes(uint8_t *dst, const uint8_t *src1, const uint8_t *src2,
;                    intptr_t w);
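;
; Scalar reference for what diff_bytes computes (illustrative sketch only;
; byte arithmetic wraps modulo 256, and the GPR tail loop below is exactly
; this loop):
;
;     for (i = 0; i < w; i++)
;         dst[i] = src1[i] - src2[i];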
%macro DIFF_BYTES_PROLOGUE 0
%if ARCH_X86_32
cglobal diff_bytes, 3,5,2, dst, src1, src2
%define wq r4q
    DECLARE_REG_TMP 3
    mov               wq, r3mp
%else
cglobal diff_bytes, 4,5,2, dst, src1, src2, w
    DECLARE_REG_TMP 4
%endif ; ARCH_X86_32
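; t0 is the spare register: t0q is the vector-loop index i defined below,
; and t0b is the byte temporary used by the scalar tail loop.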
%define i t0q
%endmacro

; %1: label to jump to if w < 2 * regsize, %2: label to jump to if w < 0
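; Rounds w down to a multiple of 2 * regsize into i, advances the three
; pointers past that region and negates i, so the main loop can address
; [ptr + i] with a negative offset that simply counts up towards zero.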
%macro DIFF_BYTES_LOOP_PREP 2
    mov                i, wq
    and                i, -2 * regsize
        js            %2
        jz            %1
    add             dstq, i
    add            src1q, i
    add            src2q, i
    neg                i
%endmacro

; %1: mov type for loads from src1q, %2: mov type for stores to dstq,
; %3, %4: the two vector registers used; 2 * regsize bytes per expansion
%macro DIFF_BYTES_LOOP_CORE 4
%if mmsize != 16
    mov%1             %3, [src1q + i]
    mov%1             %4, [src1q + i + regsize]
    psubb             %3, [src2q + i]
    psubb             %4, [src2q + i + regsize]
    mov%2           [dstq + i], %3
    mov%2 [regsize + dstq + i], %4
%else
    ; SSE enforces alignment of psubb operand
    mov%1             %3, [src1q + i]
    movu              %4, [src2q + i]
    psubb             %3, %4
    mov%2     [dstq + i], %3
    mov%1             %3, [src1q + i + regsize]
    movu              %4, [src2q + i + regsize]
    psubb             %3, %4
    mov%2 [regsize + dstq + i], %3
%endif
%endmacro

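; Emits one complete variant of the function body: the main loop handling
; 2 * regsize bytes per iteration, an xmm fallback when building the ymm
; version, and a byte-at-a-time GPR tail for whatever remains of w.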
%macro DIFF_BYTES_BODY 2 ; mov type used for src1q, for dstq
    %define regsize mmsize
.loop_%1%2:
    DIFF_BYTES_LOOP_CORE %1, %2, m0, m1
    add                i, 2 * regsize
        jl    .loop_%1%2
.skip_main_%1%2:
    and               wq, 2 * regsize - 1
        jz     .end_%1%2
%if mmsize > 16
    ; fall back to narrower xmm
    %define regsize (mmsize / 2)
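    ; the _aa tail labels are shared by every variant: the scalar tail does
    ; not depend on alignment, and the a,a body is always emitted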
    DIFF_BYTES_LOOP_PREP .setup_loop_gpr_aa, .end_aa
.loop2_%1%2:
    DIFF_BYTES_LOOP_CORE %1, %2, xm0, xm1
    add                i, 2 * regsize
        jl   .loop2_%1%2
.setup_loop_gpr_%1%2:
    and               wq, 2 * regsize - 1
        jz     .end_%1%2
%endif
    add             dstq, wq
    add            src1q, wq
    add            src2q, wq
    neg               wq
.loop_gpr_%1%2:
    mov              t0b, [src1q + wq]
    sub              t0b, [src2q + wq]
    mov      [dstq + wq], t0b
    inc               wq
        jl .loop_gpr_%1%2
.end_%1%2:
    REP_RET
%endmacro

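; Both entry points below probe the alignment of dst and src1 and dispatch to
; the body variant whose suffix names the mov types used for src1 and dst,
; in that order: aa, ua or uu.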
INIT_XMM sse2
DIFF_BYTES_PROLOGUE
    %define regsize mmsize
    DIFF_BYTES_LOOP_PREP .skip_main_aa, .end_aa
    test            dstq, regsize - 1
        jnz     .loop_uu
    test           src1q, regsize - 1
        jnz     .loop_ua
    DIFF_BYTES_BODY    a, a
    DIFF_BYTES_BODY    u, a
    DIFF_BYTES_BODY    u, u
%undef i

%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
DIFF_BYTES_PROLOGUE
    %define regsize mmsize
    ; For widths too small for the main ymm loop, go straight to the
    ; unaligned xmm/scalar tail; this is marginally faster than branching
    ; on the alignment of the arguments.
    DIFF_BYTES_LOOP_PREP .skip_main_uu, .end_uu
    test            dstq, regsize - 1
        jnz     .loop_uu
    test           src1q, regsize - 1
        jnz     .loop_ua
    DIFF_BYTES_BODY    a, a
    DIFF_BYTES_BODY    u, a
    DIFF_BYTES_BODY    u, u
%undef i
%endif


;--------------------------------------------------------------------------------------------------
;void sub_left_predict(uint8_t *dst, uint8_t *src, ptrdiff_t stride, ptrdiff_t width, int height)
;--------------------------------------------------------------------------------------------------
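;
; Scalar reference (illustrative sketch only; byte arithmetic wraps modulo
; 256, and output rows are packed back to back, so the effective dst stride
; is width):
;
;     uint8_t prev = 0x80;
;     for (j = 0; j < height; j++) {
;         for (i = 0; i < width; i++) {
;             *dst++ = src[i] - prev;
;             prev   = src[i];
;         }
;         src += stride;
;     }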

INIT_XMM avx
cglobal sub_left_predict, 5,6,5, dst, src, stride, width, height, x
    mova             m1, [pb_80] ; prev initial
    add            dstq, widthq
    add            srcq, widthq
    lea              xd, [widthq-1]
    neg          widthq
    and              xd, 15
    pinsrb           m4, m1, xd, 15
    mov              xq, widthq
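
    ; Only byte 15 of m1 matters below: it is the byte to the left of the
    ; block about to be processed, 0x80 before the very first pixel.  m4 is a
    ; pshufb control that moves a row's last source byte into byte 15 of m1
    ; (zeroing the rest), and xq keeps a copy of -width so the row loop can
    ; be restarted cheaply.  Each .loop iteration handles 32 source bytes:
    ; palignr prepends the previous block's last byte to build the vector of
    ; left neighbours, which is then subtracted from the source bytes.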

    .loop:
        movu                     m0, [srcq + widthq]
        palignr                  m2, m0, m1, 15
        movu                     m1, [srcq + widthq + 16]
        palignr                  m3, m1, m0, 15
        psubb                    m2, m0, m2
        psubb                    m3, m1, m3
        movu        [dstq + widthq], m2
        movu   [dstq + widthq + 16], m3
        add                  widthq, 2 * 16
        jl .loop

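    ; Row epilogue: step src by its stride and dst by width (the output is
    ; packed), then put the row's last source byte into byte 15 of m1 so the
    ; next row's first palignr sees the correct left neighbour.  That byte is
    ; in m0 when bit 4 of (width - 1) is clear and in m1 otherwise; since
    ; -width is the bitwise NOT of width - 1, testing bit 4 of xd (= -width)
    ; selects the right register.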
    add   srcq, strideq
    sub   dstq, xq ; dst + width
    test    xd, 16
    jz .mod32
    mova    m1, m0

.mod32:
    pshufb    m1, m4
    mov   widthq, xq
    dec  heightd
    jg .loop
    RET