; *****************************************************************************
; * SIMD-optimized add_residual functions for HEVC decoding
; * Copyright (c) 2014 Pierre-Edouard LEPERE
; *
; * This file is part of FFmpeg.
; *
; * FFmpeg is free software; you can redistribute it and/or
; * modify it under the terms of the GNU Lesser General Public
; * License as published by the Free Software Foundation; either
; * version 2.1 of the License, or (at your option) any later version.
; *
; * FFmpeg is distributed in the hope that it will be useful,
; * but WITHOUT ANY WARRANTY; without even the implied warranty of
; * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
; * Lesser General Public License for more details.
; *
; * You should have received a copy of the GNU Lesser General Public
; * License along with FFmpeg; if not, write to the Free Software
; * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
; ******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION .text

cextern pw_1023
%define max_pixels_10 pw_1023
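
; pw_1023 is a vector of 16-bit words holding 1023 = (1 << 10) - 1, the
; largest valid 10-bit sample; the 10-bit functions below clamp every result
; word against it with CLIPW.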

; The add_res macros and functions below were largely inspired by h264_idct.asm from the x264 project.
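;
; Every function in this file performs the same operation at a different
; block size and bit depth: add an int16_t residual block to the destination
; pixels and clip the result to the valid sample range. Roughly, for the
; 8-bit case (an illustrative scalar sketch; av_clip_uint8 is FFmpeg's clip
; helper):
;
;     for (y = 0; y < size; y++) {
;         for (x = 0; x < size; x++)
;             dst[x] = av_clip_uint8(dst[x] + *res++);
;         dst += stride;
;     }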
%macro ADD_RES_MMX_4_8 0
    mova              m0, [r1]
    mova              m2, [r1+8]

    movd              m1, [r0]
    movd              m3, [r0+r2]
    punpcklbw         m1, m4
    punpcklbw         m3, m4

    paddsw            m0, m1
    paddsw            m2, m3
    packuswb          m0, m4
    packuswb          m2, m4

    movd            [r0], m0
    movd         [r0+r2], m2
%endmacro
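
; ADD_RES_MMX_4_8 adds two 4-pixel rows per invocation: the destination
; bytes are widened to words against the zero register m4, added to the
; residual with paddsw, and repacked with packuswb, whose unsigned
; saturation doubles as the clip to [0, 255].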


INIT_MMX mmxext
; void ff_hevc_add_residual_4_8_mmxext(uint8_t *dst, int16_t *res, ptrdiff_t stride)
cglobal hevc_add_residual_4_8, 3, 3, 6
    pxor              m4, m4
    ADD_RES_MMX_4_8
    add               r1, 16
    lea               r0, [r0+r2*2]
    ADD_RES_MMX_4_8
    RET

%macro ADD_RES_SSE_8_8 0
    movq              m0, [r0]
    movq              m1, [r0+r2]
    punpcklbw         m0, m4
    punpcklbw         m1, m4
    mova              m2, [r1]
    mova              m3, [r1+16]
    paddsw            m0, m2
    paddsw            m1, m3
    packuswb          m0, m1

    movq              m2, [r0+r2*2]
    movq              m3, [r0+r3]
    punpcklbw         m2, m4
    punpcklbw         m3, m4
    mova              m6, [r1+32]
    mova              m7, [r1+48]
    paddsw            m2, m6
    paddsw            m3, m7
    packuswb          m2, m3

    movq            [r0], m0
    movhps       [r0+r2], m0
    movq       [r0+r2*2], m2
    movhps       [r0+r3], m2
%endmacro
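
; ADD_RES_SSE_8_8 covers four 8-pixel rows per invocation; packuswb packs
; two finished rows into one XMM register, which is then stored as a movq
; (low half) plus a movhps (high half) to two different rows.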

%macro ADD_RES_SSE_16_32_8 3
    mova              m1, [%2]
    mova              m2, m1
    punpcklbw         m1, m0
    punpckhbw         m2, m0
    mova             xm5, [r1+%1]
    mova             xm6, [r1+%1+16]
%if cpuflag(avx2)
    vinserti128       m5, m5, [r1+%1+32], 1
    vinserti128       m6, m6, [r1+%1+48], 1
%endif
    paddsw            m1, m5
    paddsw            m2, m6

    mova              m3, [%3]
    mova              m4, m3
    punpcklbw         m3, m0
    punpckhbw         m4, m0
    mova             xm5, [r1+%1+mmsize*2]
    mova             xm6, [r1+%1+mmsize*2+16]
%if cpuflag(avx2)
    vinserti128       m5, m5, [r1+%1+96], 1
    vinserti128       m6, m6, [r1+%1+112], 1
%endif
    paddsw            m3, m5
    paddsw            m4, m6

    packuswb          m1, m2
    packuswb          m3, m4
    mova            [%2], m1
    mova            [%3], m3
%endmacro
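; ADD_RES_SSE_16_32_8 adds the residual at offset %1 to the 16 (XMM) or
; 32 (YMM) destination bytes at addresses %2 and %3. With AVX2,
; punpcklbw/punpckhbw interleave within each 128-bit lane, so the matching
; residual words are gathered with vinserti128 loads rather than one
; contiguous 32-byte load.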


%macro TRANSFORM_ADD_8 0
; void ff_hevc_add_residual_8_8_<opt>(uint8_t *dst, int16_t *res, ptrdiff_t stride)
cglobal hevc_add_residual_8_8, 3, 4, 8
    pxor              m4, m4
    lea               r3, [r2*3]
    ADD_RES_SSE_8_8
    add               r1, 64
    lea               r0, [r0+r2*4]
    ADD_RES_SSE_8_8
    RET

; void ff_hevc_add_residual_16_8_<opt>(uint8_t *dst, int16_t *res, ptrdiff_t stride)
cglobal hevc_add_residual_16_8, 3, 5, 7
    pxor                m0, m0
    lea                 r3, [r2*3]
    mov                r4d, 4
.loop:
    ADD_RES_SSE_16_32_8  0, r0,      r0+r2
    ADD_RES_SSE_16_32_8 64, r0+r2*2, r0+r3
    add                 r1, 128
    lea                 r0, [r0+r2*4]
    dec                r4d
    jg .loop
    RET

; void ff_hevc_add_residual_32_8_<opt>(uint8_t *dst, int16_t *res, ptrdiff_t stride)
cglobal hevc_add_residual_32_8, 3, 5, 7
    pxor                m0, m0
    mov                r4d, 16
.loop:
    ADD_RES_SSE_16_32_8  0, r0,    r0+16
    ADD_RES_SSE_16_32_8 64, r0+r2, r0+r2+16
    add                 r1, 128
    lea                 r0, [r0+r2*2]
    dec                r4d
    jg .loop
    RET
%endmacro

INIT_XMM sse2
TRANSFORM_ADD_8
INIT_XMM avx
TRANSFORM_ADD_8

%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
; void ff_hevc_add_residual_32_8_avx2(uint8_t *dst, int16_t *res, ptrdiff_t stride)
cglobal hevc_add_residual_32_8, 3, 5, 7
    pxor                 m0, m0
    lea                  r3, [r2*3]
    mov                 r4d, 8
.loop:
    ADD_RES_SSE_16_32_8   0, r0,      r0+r2
    ADD_RES_SSE_16_32_8 128, r0+r2*2, r0+r3
    add                  r1, 256
    lea                  r0, [r0+r2*4]
    dec                 r4d
    jg .loop
    RET
%endif ;HAVE_AVX2_EXTERNAL

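; 10-bit versions: the samples are already 16-bit, so no unpacking or
; repacking is needed; the residual is added with plain paddw and the result
; is clamped explicitly with CLIPW between m4 (zero) and m5 (max_pixels_10).
; An illustrative scalar sketch (av_clip is FFmpeg's clip helper; the stride
; argument is in bytes):
;
;     uint16_t *dst;
;     for (y = 0; y < size; y++) {
;         for (x = 0; x < size; x++)
;             dst[x] = av_clip(dst[x] + *res++, 0, 1023);
;         dst += stride / sizeof(*dst);
;     }
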
%macro ADD_RES_SSE_8_10 4
    mova              m0, [%4]
    mova              m1, [%4+16]
    mova              m2, [%4+32]
    mova              m3, [%4+48]
    paddw             m0, [%1+0]
    paddw             m1, [%1+%2]
    paddw             m2, [%1+%2*2]
    paddw             m3, [%1+%3]
    CLIPW             m0, m4, m5
    CLIPW             m1, m4, m5
    CLIPW             m2, m4, m5
    CLIPW             m3, m4, m5
    mova          [%1+0], m0
    mova         [%1+%2], m1
    mova       [%1+%2*2], m2
    mova         [%1+%3], m3
%endmacro

%macro ADD_RES_MMX_4_10 3
    mova              m0, [%1+0]
    mova              m1, [%1+%2]
    paddw             m0, [%3]
    paddw             m1, [%3+8]
    CLIPW             m0, m2, m3
    CLIPW             m1, m2, m3
    mova          [%1+0], m0
    mova         [%1+%2], m1
%endmacro

%macro ADD_RES_SSE_16_10 3
    mova              m0, [%3]
    mova              m1, [%3+16]
    mova              m2, [%3+32]
    mova              m3, [%3+48]
    paddw             m0, [%1]
    paddw             m1, [%1+16]
    paddw             m2, [%1+%2]
    paddw             m3, [%1+%2+16]
    CLIPW             m0, m4, m5
    CLIPW             m1, m4, m5
    CLIPW             m2, m4, m5
    CLIPW             m3, m4, m5
    mova            [%1], m0
    mova         [%1+16], m1
    mova         [%1+%2], m2
    mova      [%1+%2+16], m3
%endmacro

%macro ADD_RES_SSE_32_10 2
    mova              m0, [%2]
    mova              m1, [%2+16]
    mova              m2, [%2+32]
    mova              m3, [%2+48]

    paddw             m0, [%1]
    paddw             m1, [%1+16]
    paddw             m2, [%1+32]
    paddw             m3, [%1+48]
    CLIPW             m0, m4, m5
    CLIPW             m1, m4, m5
    CLIPW             m2, m4, m5
    CLIPW             m3, m4, m5
    mova            [%1], m0
    mova         [%1+16], m1
    mova         [%1+32], m2
    mova         [%1+48], m3
%endmacro
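
; ADD_RES_SSE_16_10 handles two 16-sample rows (32 bytes each) per call,
; while ADD_RES_SSE_32_10 handles one 32-sample row of 64 bytes; both
; consume 64 bytes of residual per invocation.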

%macro ADD_RES_AVX2_16_10 4
    mova              m0, [%4]
    mova              m1, [%4+32]
    mova              m2, [%4+64]
    mova              m3, [%4+96]

    paddw             m0, [%1+0]
    paddw             m1, [%1+%2]
    paddw             m2, [%1+%2*2]
    paddw             m3, [%1+%3]

    CLIPW             m0, m4, m5
    CLIPW             m1, m4, m5
    CLIPW             m2, m4, m5
    CLIPW             m3, m4, m5
    mova          [%1+0], m0
    mova         [%1+%2], m1
    mova       [%1+%2*2], m2
    mova         [%1+%3], m3
%endmacro

%macro ADD_RES_AVX2_32_10 3
    mova              m0, [%3]
    mova              m1, [%3+32]
    mova              m2, [%3+64]
    mova              m3, [%3+96]

    paddw             m0, [%1]
    paddw             m1, [%1+32]
    paddw             m2, [%1+%2]
    paddw             m3, [%1+%2+32]

    CLIPW             m0, m4, m5
    CLIPW             m1, m4, m5
    CLIPW             m2, m4, m5
    CLIPW             m3, m4, m5
    mova            [%1], m0
    mova         [%1+32], m1
    mova         [%1+%2], m2
    mova      [%1+%2+32], m3
%endmacro
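
; The AVX2 10-bit variants follow the same pattern with 16 samples per YMM
; register; because everything is word-sized, the 32-byte loads line up with
; the rows directly and no cross-lane fixups are needed, unlike the 8-bit
; AVX2 path above.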

; void ff_hevc_add_residual_<4|8|16|32>_10(uint16_t *dst, int16_t *res, ptrdiff_t stride)
INIT_MMX mmxext
cglobal hevc_add_residual_4_10, 3, 3, 6
    pxor              m2, m2
    mova              m3, [max_pixels_10]
    ADD_RES_MMX_4_10  r0, r2, r1
    add               r1, 16
    lea               r0, [r0+2*r2]
    ADD_RES_MMX_4_10  r0, r2, r1
    RET

INIT_XMM sse2
cglobal hevc_add_residual_8_10, 3, 4, 6
    pxor              m4, m4
    mova              m5, [max_pixels_10]
    lea               r3, [r2*3]

    ADD_RES_SSE_8_10  r0, r2, r3, r1
    lea               r0, [r0+r2*4]
    add               r1, 64
    ADD_RES_SSE_8_10  r0, r2, r3, r1
    RET

cglobal hevc_add_residual_16_10, 3, 5, 6
    pxor              m4, m4
    mova              m5, [max_pixels_10]

    mov              r4d, 8
.loop:
    ADD_RES_SSE_16_10 r0, r2, r1
    lea               r0, [r0+r2*2]
    add               r1, 64
    dec              r4d
    jg .loop
    RET

cglobal hevc_add_residual_32_10, 3, 5, 6
    pxor              m4, m4
    mova              m5, [max_pixels_10]

    mov              r4d, 32
.loop:
    ADD_RES_SSE_32_10 r0, r1
    lea               r0, [r0+r2]
    add               r1, 64
    dec              r4d
    jg .loop
    RET

%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
cglobal hevc_add_residual_16_10, 3, 5, 6
    pxor               m4, m4
    mova               m5, [max_pixels_10]
    lea                r3, [r2*3]

    mov               r4d, 4
.loop:
    ADD_RES_AVX2_16_10 r0, r2, r3, r1
    lea                r0, [r0+r2*4]
    add                r1, 128
    dec               r4d
    jg .loop
    RET

cglobal hevc_add_residual_32_10, 3, 5, 6
    pxor               m4, m4
    mova               m5, [max_pixels_10]

    mov               r4d, 16
.loop:
    ADD_RES_AVX2_32_10 r0, r2, r1
    lea                r0, [r0+r2*2]
    add                r1, 128
    dec               r4d
    jg .loop
    RET
%endif ;HAVE_AVX2_EXTERNAL