; Chinese AVS video (AVS1-P2, JiZhun profile) decoder
; Copyright (c) 2006  Stefan Gehrer <stefan.gehrer@gmx.de>
;
; MMX-optimized DSP functions, based on H.264 optimizations by
; Michael Niedermayer and Loren Merritt
; Conversion from gcc syntax to x264asm syntax with modifications
; by Ronald S. Bultje <rsbultje@gmail.com>
;
; This file is part of FFmpeg.
;
; FFmpeg is free software; you can redistribute it and/or
; modify it under the terms of the GNU Lesser General Public
; License as published by the Free Software Foundation; either
; version 2.1 of the License, or (at your option) any later version.
;
; FFmpeg is distributed in the hope that it will be useful,
; but WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
; Lesser General Public License for more details.
;
; You should have received a copy of the GNU Lesser General Public License
; along with FFmpeg; if not, write to the Free Software Foundation,
; Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

%include "libavutil/x86/x86util.asm"

cextern pw_4
cextern pw_64

SECTION .text

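; CAVS_IDCT8_1D: one 1-D pass of the 8x8 CAVS inverse transform over eight
; rows of packed words spaced 16 bytes apart.
; %1 = source base address
; %2 = rounding bias folded into the even half (pw_4 for the first pass,
;      pw_64 for the second)
; %3 = init_load: 1 loads the odd coefficients from memory, 0 reuses the
;      values a previous pass left in registers
;
; Scalar outline of one pass, as derived from the instruction stream below:
;   a0 = 3*s1 - 2*s7    a1 = 3*s3 + 2*s5    a2 = 2*s3 - 3*s5    a3 = 2*s1 + 3*s7
;   b4 = 2*(a0+a1+a3) + a1    b5 = 2*(a0-a1+a2) + a0
;   b6 = 2*(a3-a2-a1) + a3    b7 = 2*(a0-a2-a3) - a2
;   a6 = 10*s2 + 4*s6         a7 = 4*s2 - 10*s6
;   a4 = 8*(s0+s4) + bias     a5 = 8*(s0-s4) + bias
;   dst0/7 = (a4+a6) +/- b4   dst1/6 = (a5+a7) +/- b5
;   dst2/5 = (a5-a7) +/- b6   dst3/4 = (a4-a6) +/- b7
; On exit: m7/m5/m3/m1 = dst0..dst3, m0/m2/m4/m6 = dst4..dst7.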
%macro CAVS_IDCT8_1D 2-3 1 ; source, round, init_load
%if %3 == 1
    mova            m4, [%1+7*16]       ; m4 = src7
    mova            m5, [%1+1*16]       ; m5 = src1
    mova            m2, [%1+5*16]       ; m2 = src5
    mova            m7, [%1+3*16]       ; m7 = src3
%else
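    ; the odd coefficients are still live from the previous pass
    ; (m5 = src1, m1 = src3, m2 = src5, m6 = src7); swap so they sit in
    ; the slots expected below (m7 = src3, m4 = src7)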
    SWAP             1, 7
    SWAP             4, 6
%endif
    mova            m0, m4
    mova            m3, m5
    mova            m6, m2
    mova            m1, m7

    paddw           m4, m4              ; m4 = 2*src7
    paddw           m3, m3              ; m3 = 2*src1
    paddw           m6, m6              ; m6 = 2*src5
    paddw           m1, m1              ; m1 = 2*src3
    paddw           m0, m4              ; m0 = 3*src7
    paddw           m5, m3              ; m5 = 3*src1
    paddw           m2, m6              ; m2 = 3*src5
    paddw           m7, m1              ; m7 = 3*src3
    psubw           m5, m4              ; m5 = 3*src1 - 2*src7 = a0
    paddw           m7, m6              ; m7 = 3*src3 + 2*src5 = a1
    psubw           m1, m2              ; m1 = 2*src3 - 3*src5 = a2
    paddw           m3, m0              ; m3 = 2*src1 + 3*src7 = a3

    mova            m4, m5              ; m4 = a0
    mova            m6, m7              ; m6 = a1
    mova            m0, m3              ; m0 = a3
    mova            m2, m1              ; m2 = a2
    SUMSUB_BA     w, 7, 5               ; m7 = a0 + a1, m5 = a0 - a1
    paddw           m7, m3              ; m7 = a0 + a1 + a3
    paddw           m5, m1              ; m5 = a0 - a1 + a2
    paddw           m7, m7              ; m7 = 2*(a0 + a1 + a3)
    paddw           m5, m5              ; m5 = 2*(a0 - a1 + a2)
    paddw           m7, m6              ; m7 = 2*(a0 + a1 + a3) + a1 = b4
    paddw           m5, m4              ; m5 = 2*(a0 - a1 + a2) + a0 = b5

    SUMSUB_BA     w, 1, 3               ; m1 = a3 + a2, m3 = a3 - a2
    psubw           m4, m1              ; m4 = a0 - a2 - a3
    mova            m1, m4              ; m1 = a0 - a2 - a3
    psubw           m3, m6              ; m3 = a3 - a2 - a1
    paddw           m1, m1              ; m1 = 2*(a0 - a2 - a3)
    paddw           m3, m3              ; m3 = 2*(a3 - a2 - a1)
    psubw           m1, m2              ; m1 = 2*(a0 - a2 - a3) - a2 = b7
    paddw           m3, m0              ; m3 = 2*(a3 - a2 - a1) + a3 = b6

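    ; even half: a6/a7 from src2/src6, a4/a5 from src0/src4 (with the
    ; rounding bias folded into a4/a5)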
    mova            m2, [%1+2*16]       ; m2 = src2
    mova            m6, [%1+6*16]       ; m6 = src6
    mova            m4, m2
    mova            m0, m6
    psllw           m4, 2               ; m4 = 4*src2
    psllw           m6, 2               ; m6 = 4*src6
    paddw           m2, m4              ; m2 = 5*src2
    paddw           m0, m6              ; m0 = 5*src6
    paddw           m2, m2              ; m2 = 10*src2
    paddw           m0, m0              ; m0 = 10*src6
    psubw           m4, m0              ; m4 = 4*src2 - 10*src6 = a7
    paddw           m6, m2              ; m6 = 4*src6 + 10*src2 = a6

    mova            m2, [%1+0*16]       ; m2 = src0
    mova            m0, [%1+4*16]       ; m0 = src4
    SUMSUB_BA     w, 0, 2               ; m0 = src0 + src4, m2 = src0 - src4
    psllw           m0, 3               ; m0 = 8*(src0 + src4)
    psllw           m2, 3               ; m2 = 8*(src0 - src4)
    paddw           m0, %2              ; m0 = 8*(src0 + src4) + bias = a4
    paddw           m2, %2              ; m2 = 8*(src0 - src4) + bias = a5

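    ; final butterflies: a4+/-a6 and a5+/-a7 combined with b4..b7 yield
    ; dst0..dst7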
    SUMSUB_BA     w, 6, 0               ; m6 = a4 + a6, m0 = a4 - a6
    SUMSUB_BA     w, 4, 2               ; m4 = a5 + a7, m2 = a5 - a7
    SUMSUB_BA     w, 7, 6               ; m7 = dst0, m6 = dst7
    SUMSUB_BA     w, 5, 4               ; m5 = dst1, m4 = dst6
    SUMSUB_BA     w, 3, 2               ; m3 = dst2, m2 = dst5
    SUMSUB_BA     w, 1, 0               ; m1 = dst3, m0 = dst4
%endmacro

INIT_MMX mmx
cglobal cavs_idct8, 2, 4, 8, 8 * 16, out, in, cnt, tmp
    mov           cntd, 2
    mov           tmpq, rsp

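; pass 1 (vertical): mmx registers hold 4 words, so each iteration
; transforms half of the block and stores it transposed into the
; stack buffer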
.loop_1:
    CAVS_IDCT8_1D  inq, [pw_4]
    psraw           m7, 3               ; (x + 4) >> 3: first-pass descale
    psraw           m6, 3
    psraw           m5, 3
    psraw           m4, 3
    psraw           m3, 3
    psraw           m2, 3
    psraw           m1, 3
    psraw           m0, 3
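    ; transpose the 8x4 result as two 4x4 blocks, spilling m7 (dst0) for use
    ; as scratch: dst4..7 fill the high quadwords and dst0..3 the low
    ; quadwords of four 16-byte tmp rows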
    mova        [tmpq], m7
    TRANSPOSE4x4W    0, 2, 4, 6, 7
    mova    [tmpq+1*8], m0
    mova    [tmpq+3*8], m2
    mova    [tmpq+5*8], m4
    mova    [tmpq+7*8], m6
    mova            m7, [tmpq]
    TRANSPOSE4x4W    7, 5, 3, 1, 0
    mova    [tmpq+0*8], m7
    mova    [tmpq+2*8], m5
    mova    [tmpq+4*8], m3
    mova    [tmpq+6*8], m1

    add            inq, mmsize
    add           tmpq, 64
    dec           cntd
    jg .loop_1

    mov           cntd, 2
    mov           tmpq, rsp
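; pass 2 (horizontal): repeat the transform on the transposed data and
; write the fully descaled block to out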
.loop_2:
    CAVS_IDCT8_1D tmpq, [pw_64]
    psraw           m7, 7               ; (x + 64) >> 7: second-pass descale
    psraw           m6, 7
    psraw           m5, 7
    psraw           m4, 7
    psraw           m3, 7
    psraw           m2, 7
    psraw           m1, 7
    psraw           m0, 7

    mova   [outq+0*16], m7
    mova   [outq+1*16], m5
    mova   [outq+2*16], m3
    mova   [outq+3*16], m1
    mova   [outq+4*16], m0
    mova   [outq+5*16], m2
    mova   [outq+6*16], m4
    mova   [outq+7*16], m6

    add           outq, mmsize
    add           tmpq, mmsize
    dec           cntd
    jg .loop_2

    RET

INIT_XMM sse2
cglobal cavs_idct8, 2, 2, 8 + ARCH_X86_64, 0 - 8 * 16, out, in
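    ; xmm registers hold a full 8-word row, so one pass covers the whole
    ; block and no loop is needed; x86-64 has a ninth register to use as
    ; scratch for the in-register 8x8 transpose, x86-32 goes through the
    ; stack instead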
    CAVS_IDCT8_1D  inq, [pw_4]
    psraw           m7, 3
    psraw           m6, 3
    psraw           m5, 3
    psraw           m4, 3
    psraw           m3, 3
    psraw           m2, 3
    psraw           m1, 3
    psraw           m0, 3
%if ARCH_X86_64
    TRANSPOSE8x8W    7, 5, 3, 1, 0, 2, 4, 6, 8
    mova    [rsp+4*16], m0
%else
    mova    [rsp+0*16], m4
    TRANSPOSE8x8W    7, 5, 3, 1, 0, 2, 4, 6, [rsp+0*16], [rsp+4*16], 1
%endif
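    ; spill the even transposed rows for the second pass (row 4 is already
    ; at rsp+4*16); the odd rows stay in m5/m1/m2/m6 and are picked up via
    ; init_load = 0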
    mova    [rsp+0*16], m7
    mova    [rsp+2*16], m3
    mova    [rsp+6*16], m4
    CAVS_IDCT8_1D  rsp, [pw_64], 0
    psraw           m7, 7
    psraw           m6, 7
    psraw           m5, 7
    psraw           m4, 7
    psraw           m3, 7
    psraw           m2, 7
    psraw           m1, 7
    psraw           m0, 7

    mova   [outq+0*16], m7
    mova   [outq+1*16], m5
    mova   [outq+2*16], m3
    mova   [outq+3*16], m1
    mova   [outq+4*16], m0
    mova   [outq+5*16], m2
    mova   [outq+6*16], m4
    mova   [outq+7*16], m6
    RET