/*
 * Loongson LSX optimized add_residual functions for HEVC decoding
 *
 * Copyright (c) 2023 Loongson Technology Corporation Limited
 * Contributed by jinbo <jinbo@loongson.cn>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "loongson_asm.S"
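
/*
 * All four functions below implement the same scalar operation, sketched
 * here in C for reference (illustrative only: av_clip_uint8() is FFmpeg's
 * clip-to-[0,255] helper, and add_residual_c is a hypothetical name, not
 * the actual C reference in hevcdsp):
 *
 *     static void add_residual_c(uint8_t *dst, const int16_t *res,
 *                                ptrdiff_t stride, int size)
 *     {
 *         for (int y = 0; y < size; y++) {
 *             for (int x = 0; x < size; x++)
 *                 dst[x] = av_clip_uint8(dst[x] + res[x]);
 *             dst += stride;  // dst rows are stride bytes apart
 *             res += size;    // residuals are packed row-major
 *         }
 *     }
 *
 * The LSX versions vectorize this pattern: widen dst bytes to halfwords,
 * add the residuals, then narrow back to bytes with unsigned saturation.
 */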

/*
 * void ff_hevc_add_residual4x4_8_lsx(uint8_t *dst, const int16_t *res, ptrdiff_t stride)
 */
.macro ADD_RES_LSX_4x4_8
    vldrepl.w      vr0,    a0,     0           // load 4 dst pixels (row 0)
    add.d          t0,     a0,     a2          // t0 = dst + stride
    vldrepl.w      vr1,    t0,     0           // load 4 dst pixels (row 1)
    vld            vr2,    a1,     0           // load 8 residuals (rows 0-1)

    vilvl.w        vr1,    vr1,    vr0         // pack row 0 | row 1 in the low 64 bits
    vsllwil.hu.bu  vr1,    vr1,    0           // zero-extend the 8 pixels to halfwords
    vadd.h         vr1,    vr1,    vr2         // add the residuals
    vssrani.bu.h   vr1,    vr1,    0           // narrow to bytes, clipping to [0, 255]

    vstelm.w       vr1,    a0,     0,    0     // store row 0
    vstelm.w       vr1,    t0,     0,    1     // store row 1
.endm

function ff_hevc_add_residual4x4_8_lsx
    ADD_RES_LSX_4x4_8                          // rows 0-1
    alsl.d         a0,     a2,     a0,   1     // dst += 2 * stride
    addi.d         a1,     a1,     16          // res += 8 coefficients
    ADD_RES_LSX_4x4_8                          // rows 2-3
endfunc

/*
 * void ff_hevc_add_residual8x8_8_lsx(uint8_t *dst, const int16_t *res, ptrdiff_t stride)
 */
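/*
 * One macro invocation covers four rows: each dst row is loaded as a
 * single 64-bit element and each residual row fills one 16-byte vector;
 * the function body runs the macro twice for all eight rows.
 */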
.macro ADD_RES_LSX_8x8_8
    vldrepl.d      vr0,    a0,     0           // load 8 dst pixels (row 0)
    add.d          t0,     a0,     a2
    vldrepl.d      vr1,    t0,     0           // row 1
    add.d          t1,     t0,     a2
    vldrepl.d      vr2,    t1,     0           // row 2
    add.d          t2,     t1,     a2
    vldrepl.d      vr3,    t2,     0           // row 3

    vld            vr4,    a1,     0           // residual rows 0-3, 16 bytes each
    addi.d         t3,     zero,   16
    vldx           vr5,    a1,     t3
    addi.d         t4,     a1,     32
    vld            vr6,    t4,     0
    vldx           vr7,    t4,     t3

    vsllwil.hu.bu  vr0,    vr0,    0           // zero-extend each dst row to halfwords
    vsllwil.hu.bu  vr1,    vr1,    0
    vsllwil.hu.bu  vr2,    vr2,    0
    vsllwil.hu.bu  vr3,    vr3,    0
    vadd.h         vr0,    vr0,    vr4         // add the residuals
    vadd.h         vr1,    vr1,    vr5
    vadd.h         vr2,    vr2,    vr6
    vadd.h         vr3,    vr3,    vr7
    vssrani.bu.h   vr1,    vr0,    0           // rows 0-1 back to bytes, clipped to [0, 255]
    vssrani.bu.h   vr3,    vr2,    0           // rows 2-3

    vstelm.d       vr1,    a0,     0,     0    // store row 0
    vstelm.d       vr1,    t0,     0,     1    // store row 1
    vstelm.d       vr3,    t1,     0,     0    // store row 2
    vstelm.d       vr3,    t2,     0,     1    // store row 3
.endm

function ff_hevc_add_residual8x8_8_lsx
    ADD_RES_LSX_8x8_8                          // rows 0-3
    alsl.d         a0,     a2,     a0,    2    // dst += 4 * stride
    addi.d         a1,     a1,     64          // res += 32 coefficients
    ADD_RES_LSX_8x8_8                          // rows 4-7
endfunc

/*
 * void ff_hevc_add_residual16x16_8_lsx(uint8_t *dst, const int16_t *res, ptrdiff_t stride)
 */
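/*
 * Two full 16-pixel rows per .rept iteration, eight iterations in all.
 * Each row spans two residual vectors, so the low and high halves of a
 * dst row are widened separately (vsllwil.hu.bu / vexth.hu.bu).
 */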
function ff_hevc_add_residual16x16_8_lsx
.rept 8
    vld            vr0,    a0,     0           // load 16 dst pixels (row 0)
    vldx           vr2,    a0,     a2          // row 1

    vld            vr4,    a1,     0           // residuals: row 0 low, row 0 high,
    addi.d         t0,     zero,   16          // row 1 low, row 1 high
    vldx           vr5,    a1,     t0
    addi.d         t1,     a1,     32
    vld            vr6,    t1,     0
    vldx           vr7,    t1,     t0

    vexth.hu.bu    vr1,    vr0                 // zero-extend the high 8 pixels of each row
    vsllwil.hu.bu  vr0,    vr0,    0           // ... and the low 8
    vexth.hu.bu    vr3,    vr2
    vsllwil.hu.bu  vr2,    vr2,    0
    vadd.h         vr0,    vr0,    vr4         // add the residuals
    vadd.h         vr1,    vr1,    vr5
    vadd.h         vr2,    vr2,    vr6
    vadd.h         vr3,    vr3,    vr7

    vssrani.bu.h   vr1,    vr0,    0           // row 0 back to bytes, clipped to [0, 255]
    vssrani.bu.h   vr3,    vr2,    0           // row 1

    vst            vr1,    a0,     0           // store row 0
    vstx           vr3,    a0,     a2          // store row 1

    alsl.d         a0,     a2,     a0,   1     // dst += 2 * stride
    addi.d         a1,     a1,     64          // res += 32 coefficients
.endr
endfunc

/*
 * void ff_hevc_add_residual32x32_8_lsx(uint8_t *dst, const int16_t *res, ptrdiff_t stride)
 */
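/*
 * One 32-pixel row per .rept iteration, 32 iterations in all; the row is
 * processed as two 16-byte halves with the same widen/add/narrow pattern.
 */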
function ff_hevc_add_residual32x32_8_lsx
.rept 32
    vld            vr0,    a0,     0           // load dst pixels 0-15 of the row
    addi.w         t0,     zero,   16
    vldx           vr2,    a0,     t0          // pixels 16-31

    vld            vr4,    a1,     0           // the row's 32 residuals
    vldx           vr5,    a1,     t0
    addi.d         t1,     a1,     32
    vld            vr6,    t1,     0
    vldx           vr7,    t1,     t0

    vexth.hu.bu    vr1,    vr0                 // zero-extend the high 8 pixels of each half
    vsllwil.hu.bu  vr0,    vr0,    0           // ... and the low 8
    vexth.hu.bu    vr3,    vr2
    vsllwil.hu.bu  vr2,    vr2,    0
    vadd.h         vr0,    vr0,    vr4         // add the residuals
    vadd.h         vr1,    vr1,    vr5
    vadd.h         vr2,    vr2,    vr6
    vadd.h         vr3,    vr3,    vr7

    vssrani.bu.h   vr1,    vr0,    0           // pixels 0-15 back to bytes, clipped to [0, 255]
    vssrani.bu.h   vr3,    vr2,    0           // pixels 16-31

    vst            vr1,    a0,     0           // store the row
    vstx           vr3,    a0,     t0

    add.d          a0,     a0,     a2          // dst += stride
    addi.d         a1,     a1,     64          // res += 32 coefficients
.endr
endfunc