summaryrefslogtreecommitdiff
path: root/libswscale/aarch64
diff options
context:
space:
mode:
Diffstat (limited to 'libswscale/aarch64')
-rw-r--r--libswscale/aarch64/Makefile6
-rw-r--r--libswscale/aarch64/hscale.S59
-rw-r--r--libswscale/aarch64/output.S66
-rw-r--r--libswscale/aarch64/swscale.c44
-rw-r--r--libswscale/aarch64/swscale_unscaled.c132
-rw-r--r--libswscale/aarch64/yuv2rgb_neon.S210
6 files changed, 517 insertions, 0 deletions
diff --git a/libswscale/aarch64/Makefile b/libswscale/aarch64/Makefile
new file mode 100644
index 0000000000..64a3fe208d
--- /dev/null
+++ b/libswscale/aarch64/Makefile
@@ -0,0 +1,6 @@
+OBJS += aarch64/swscale.o \
+ aarch64/swscale_unscaled.o \
+
+NEON-OBJS += aarch64/hscale.o \
+ aarch64/output.o \
+ aarch64/yuv2rgb_neon.o \
diff --git a/libswscale/aarch64/hscale.S b/libswscale/aarch64/hscale.S
new file mode 100644
index 0000000000..cc78c1901d
--- /dev/null
+++ b/libswscale/aarch64/hscale.S
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2016 Clément Bœsch <clement stupeflix.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/aarch64/asm.S"
+
+function ff_hscale_8_to_15_neon, export=1
+ add x10, x4, w6, UXTW #1 // filter2 = filter + filterSize*2 (x2 because int16)
+1: ldr w8, [x5], #4 // filterPos[0]
+ ldr w9, [x5], #4 // filterPos[1]
+ movi v4.4S, #0 // val sum part 1 (for dst[0])
+ movi v5.4S, #0 // val sum part 2 (for dst[1])
+ mov w7, w6 // filterSize counter (8 taps consumed per inner-loop pass)
+ mov x13, x3 // srcp = src
+2: add x11, x13, w8, UXTW // srcp + filterPos[0]
+ add x12, x13, w9, UXTW // srcp + filterPos[1]
+ ld1 {v0.8B}, [x11] // srcp[filterPos[0] + {0..7}]
+ ld1 {v1.8B}, [x12] // srcp[filterPos[1] + {0..7}]
+ ld1 {v2.8H}, [x4], #16 // load 8x16-bit filter values, part 1
+ ld1 {v3.8H}, [x10], #16 // ditto at filter+filterSize for part 2
+ uxtl v0.8H, v0.8B // unpack part 1 to 16-bit
+ uxtl v1.8H, v1.8B // unpack part 2 to 16-bit
+ smull v16.4S, v0.4H, v2.4H // v16.i32{0..3} = part 1 of: srcp[filterPos[0] + {0..7}] * filter[{0..7}]
+ smull v18.4S, v1.4H, v3.4H // v18.i32{0..3} = part 1 of: srcp[filterPos[1] + {0..7}] * filter[{0..7}]
+ smull2 v17.4S, v0.8H, v2.8H // v17.i32{0..3} = part 2 of: srcp[filterPos[0] + {0..7}] * filter[{0..7}]
+ smull2 v19.4S, v1.8H, v3.8H // v19.i32{0..3} = part 2 of: srcp[filterPos[1] + {0..7}] * filter[{0..7}]
+ addp v16.4S, v16.4S, v17.4S // horizontal pair adding of the 8x32-bit multiplied values for part 1 into 4x32-bit
+ addp v18.4S, v18.4S, v19.4S // horizontal pair adding of the 8x32-bit multiplied values for part 2 into 4x32-bit
+ add v4.4S, v4.4S, v16.4S // update val accumulator for part 1
+ add v5.4S, v5.4S, v18.4S // update val accumulator for part 2
+ add x13, x13, #8 // srcp += 8
+ subs w7, w7, #8 // processed 8/filterSize
+ b.gt 2b // inner loop if filterSize not consumed completely
+ mov x4, x10 // filter = filter2
+ add x10, x10, w6, UXTW #1 // filter2 += filterSize*2
+ addp v4.4S, v4.4S, v5.4S // horizontal pair adding of the 8x32-bit sums into 4x32-bit
+ addp v4.4S, v4.4S, v4.4S // horizontal pair adding of the 4x32-bit sums into 2x32-bit
+ sqshrn v4.4H, v4.4S, #7 // sums >> 7 with signed saturation to the 2x16-bit final values
+ st1 {v4.S}[0], [x1], #4 // write the two 16-bit results, dst += 2
+ subs w2, w2, #2 // dstW -= 2
+ b.gt 1b // loop until end of line
+ ret
+endfunc
diff --git a/libswscale/aarch64/output.S b/libswscale/aarch64/output.S
new file mode 100644
index 0000000000..90d3b57b10
--- /dev/null
+++ b/libswscale/aarch64/output.S
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2016 Clément Bœsch <clement stupeflix.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/aarch64/asm.S"
+
+function ff_yuv2planeX_8_neon, export=1
+ ld1 {v0.8B}, [x5] // load 8x8-bit dither
+ cbz w6, 1f // check if offsetting present
+ ext v0.8B, v0.8B, v0.8B, #3 // honor offsetting which can be 0 or 3 only
+1: uxtl v0.8H, v0.8B // extend dither to 16-bit
+ ushll v1.4S, v0.4H, #12 // extend dither to 32-bit with left shift by 12 (part 1)
+ ushll2 v2.4S, v0.8H, #12 // extend dither to 32-bit with left shift by 12 (part 2)
+ mov x7, #0 // i = 0
+2: mov v3.16B, v1.16B // initialize accumulator part 1 with dithering value
+ mov v4.16B, v2.16B // initialize accumulator part 2 with dithering value
+ mov w8, w1 // tmpfilterSize = filterSize
+ mov x9, x2 // srcp = src
+ mov x10, x0 // filterp = filter
+3: ldp x11, x12, [x9], #16 // get 2 pointers: src[j] and src[j+1]
+ add x11, x11, x7, lsl #1 // &src[j ][i]
+ add x12, x12, x7, lsl #1 // &src[j+1][i]
+ ld1 {v5.8H}, [x11] // read 8x16-bit @ src[j ][i + {0..7}]: A,B,C,D,E,F,G,H
+ ld1 {v6.8H}, [x12] // read 8x16-bit @ src[j+1][i + {0..7}]: I,J,K,L,M,N,O,P
+ ldr w11, [x10], #4 // read 2x16-bit coeffs (X, Y) at (filter[j], filter[j+1])
+ zip1 v16.8H, v5.8H, v6.8H // A,I,B,J,C,K,D,L
+ zip2 v17.8H, v5.8H, v6.8H // E,M,F,N,G,O,H,P
+ dup v7.4S, w11 // X,Y,X,Y,X,Y,X,Y
+ smull v18.4S, v16.4H, v7.4H // A.X I.Y B.X J.Y
+ smull v20.4S, v17.4H, v7.4H // E.X M.Y F.X N.Y
+ smull2 v19.4S, v16.8H, v7.8H // C.X K.Y D.X L.Y
+ smull2 v21.4S, v17.8H, v7.8H // G.X O.Y H.X P.Y
+ addp v16.4S, v18.4S, v19.4S // A.X+I.Y B.X+J.Y C.X+K.Y D.X+L.Y
+ addp v17.4S, v20.4S, v21.4S // E.X+M.Y F.X+N.Y G.X+O.Y H.X+P.Y
+ add v3.4S, v3.4S, v16.4S // update val accumulator for part 1
+ add v4.4S, v4.4S, v17.4S // update val accumulator for part 2
+ subs w8, w8, #2 // tmpfilterSize -= 2
+ b.gt 3b // loop until filterSize consumed
+ sshr v3.4S, v3.4S, #19 // val>>19 (part 1)
+ sshr v4.4S, v4.4S, #19 // val>>19 (part 2)
+ sqxtun v3.4H, v3.4S // clip16(val>>19) (part 1)
+ sqxtun v4.4H, v4.4S // clip16(val>>19) (part 2)
+ mov v3.D[1], v4.D[0] // merge part 1 and part 2
+ uqxtn v3.8B, v3.8H // clip8(val>>19)
+ st1 {v3.1D}, [x3], #8 // write to destination
+ add x7, x7, #8 // i += 8
+ subs w4, w4, #8 // dstW -= 8
+ b.gt 2b // loop until width consumed
+ ret
+endfunc
diff --git a/libswscale/aarch64/swscale.c b/libswscale/aarch64/swscale.c
new file mode 100644
index 0000000000..54a3beabe8
--- /dev/null
+++ b/libswscale/aarch64/swscale.c
@@ -0,0 +1,44 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "libswscale/swscale.h"
+#include "libswscale/swscale_internal.h"
+#include "libavutil/aarch64/cpu.h"
+
+void ff_hscale_8_to_15_neon(SwsContext *c, int16_t *dst, int dstW,
+ const uint8_t *src, const int16_t *filter,
+ const int32_t *filterPos, int filterSize); /* implemented in hscale.S */
+
+void ff_yuv2planeX_8_neon(const int16_t *filter, int filterSize,
+ const int16_t **src, uint8_t *dest, int dstW,
+ const uint8_t *dither, int offset); /* implemented in output.S */
+
+av_cold void ff_sws_init_swscale_aarch64(SwsContext *c)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (have_neon(cpu_flags)) {
+ if (c->srcBpc == 8 && c->dstBpc <= 14) { /* 8-bit input, 15-bit intermediate (see ff_hscale_8_to_15_neon) */
+ c->hyScale = c->hcScale = ff_hscale_8_to_15_neon;
+ }
+ if (c->dstBpc == 8) {
+ c->yuv2planeX = ff_yuv2planeX_8_neon;
+ }
+ }
+}
diff --git a/libswscale/aarch64/swscale_unscaled.c b/libswscale/aarch64/swscale_unscaled.c
new file mode 100644
index 0000000000..551daad9e3
--- /dev/null
+++ b/libswscale/aarch64/swscale_unscaled.c
@@ -0,0 +1,132 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "libswscale/swscale.h"
+#include "libswscale/swscale_internal.h"
+#include "libavutil/aarch64/cpu.h"
+
+#define YUV_TO_RGB_TABLE /* coefficient order expected by the asm table load: v2r, u2g, v2g, u2b */ \
+ c->yuv2rgb_v2r_coeff, \
+ c->yuv2rgb_u2g_coeff, \
+ c->yuv2rgb_v2g_coeff, \
+ c->yuv2rgb_u2b_coeff, \
+
+#define DECLARE_FF_YUVX_TO_RGBX_FUNCS(ifmt, ofmt) /* asm prototype + SwsFunc wrapper, planar input (separate U/V planes) */ \
+int ff_##ifmt##_to_##ofmt##_neon(int w, int h, \
+ uint8_t *dst, int linesize, \
+ const uint8_t *srcY, int linesizeY, \
+ const uint8_t *srcU, int linesizeU, \
+ const uint8_t *srcV, int linesizeV, \
+ const int16_t *table, \
+ int y_offset, \
+ int y_coeff); \
+ \
+static int ifmt##_to_##ofmt##_neon_wrapper(SwsContext *c, const uint8_t *src[], \
+ int srcStride[], int srcSliceY, int srcSliceH, \
+ uint8_t *dst[], int dstStride[]) { \
+ const int16_t yuv2rgb_table[] = { YUV_TO_RGB_TABLE }; \
+ \
+ ff_##ifmt##_to_##ofmt##_neon(c->srcW, srcSliceH, \
+ dst[0] + srcSliceY * dstStride[0], dstStride[0], \
+ src[0], srcStride[0], \
+ src[1], srcStride[1], \
+ src[2], srcStride[2], \
+ yuv2rgb_table, \
+ c->yuv2rgb_y_offset >> 6, \
+ c->yuv2rgb_y_coeff); \
+ return 0; \
+} \
+
+#define DECLARE_FF_YUVX_TO_ALL_RGBX_FUNCS(yuvx) /* instantiate the 4 output orderings for one planar format */ \
+DECLARE_FF_YUVX_TO_RGBX_FUNCS(yuvx, argb) \
+DECLARE_FF_YUVX_TO_RGBX_FUNCS(yuvx, rgba) \
+DECLARE_FF_YUVX_TO_RGBX_FUNCS(yuvx, abgr) \
+DECLARE_FF_YUVX_TO_RGBX_FUNCS(yuvx, bgra) \
+
+DECLARE_FF_YUVX_TO_ALL_RGBX_FUNCS(yuv420p)
+DECLARE_FF_YUVX_TO_ALL_RGBX_FUNCS(yuv422p)
+
+#define DECLARE_FF_NVX_TO_RGBX_FUNCS(ifmt, ofmt) /* asm prototype + SwsFunc wrapper, semi-planar input (single chroma plane srcC) */ \
+int ff_##ifmt##_to_##ofmt##_neon(int w, int h, \
+ uint8_t *dst, int linesize, \
+ const uint8_t *srcY, int linesizeY, \
+ const uint8_t *srcC, int linesizeC, \
+ const int16_t *table, \
+ int y_offset, \
+ int y_coeff); \
+ \
+static int ifmt##_to_##ofmt##_neon_wrapper(SwsContext *c, const uint8_t *src[], \
+ int srcStride[], int srcSliceY, int srcSliceH, \
+ uint8_t *dst[], int dstStride[]) { \
+ const int16_t yuv2rgb_table[] = { YUV_TO_RGB_TABLE }; \
+ \
+ ff_##ifmt##_to_##ofmt##_neon(c->srcW, srcSliceH, \
+ dst[0] + srcSliceY * dstStride[0], dstStride[0], \
+ src[0], srcStride[0], src[1], srcStride[1], \
+ yuv2rgb_table, \
+ c->yuv2rgb_y_offset >> 6, \
+ c->yuv2rgb_y_coeff); \
+ \
+ return 0; \
+} \
+
+#define DECLARE_FF_NVX_TO_ALL_RGBX_FUNCS(nvx) /* instantiate the 4 output orderings for one semi-planar format */ \
+DECLARE_FF_NVX_TO_RGBX_FUNCS(nvx, argb) \
+DECLARE_FF_NVX_TO_RGBX_FUNCS(nvx, rgba) \
+DECLARE_FF_NVX_TO_RGBX_FUNCS(nvx, abgr) \
+DECLARE_FF_NVX_TO_RGBX_FUNCS(nvx, bgra) \
+
+DECLARE_FF_NVX_TO_ALL_RGBX_FUNCS(nv12)
+DECLARE_FF_NVX_TO_ALL_RGBX_FUNCS(nv21)
+
+/* We need a 16 pixel width alignment. This constraint can easily be removed
+ * for input reading but for the output which is 4-bytes per pixel (RGBA) the
+ * assembly might be writing as much as 4*15=60 extra bytes at the end of the
+ * line, which won't fit the 32-bytes buffer alignment. */
+#define SET_FF_NVX_TO_RGBX_FUNC(ifmt, IFMT, ofmt, OFMT, accurate_rnd) do { \
+ if (c->srcFormat == AV_PIX_FMT_##IFMT \
+ && c->dstFormat == AV_PIX_FMT_##OFMT \
+ && !(c->srcH & 1) /* even height only */ \
+ && !(c->srcW & 15) /* 16-pixel width alignment, see note above */ \
+ && !accurate_rnd) \
+ c->swscale = ifmt##_to_##ofmt##_neon_wrapper; \
+} while (0)
+
+#define SET_FF_NVX_TO_ALL_RGBX_FUNC(nvx, NVX, accurate_rnd) do { \
+ SET_FF_NVX_TO_RGBX_FUNC(nvx, NVX, argb, ARGB, accurate_rnd); \
+ SET_FF_NVX_TO_RGBX_FUNC(nvx, NVX, rgba, RGBA, accurate_rnd); \
+ SET_FF_NVX_TO_RGBX_FUNC(nvx, NVX, abgr, ABGR, accurate_rnd); \
+ SET_FF_NVX_TO_RGBX_FUNC(nvx, NVX, bgra, BGRA, accurate_rnd); \
+} while (0)
+
+static void get_unscaled_swscale_neon(SwsContext *c) {
+ int accurate_rnd = c->flags & SWS_ACCURATE_RND; /* NEON paths are only installed without SWS_ACCURATE_RND */
+
+ SET_FF_NVX_TO_ALL_RGBX_FUNC(nv12, NV12, accurate_rnd);
+ SET_FF_NVX_TO_ALL_RGBX_FUNC(nv21, NV21, accurate_rnd);
+ SET_FF_NVX_TO_ALL_RGBX_FUNC(yuv420p, YUV420P, accurate_rnd);
+ SET_FF_NVX_TO_ALL_RGBX_FUNC(yuv422p, YUV422P, accurate_rnd);
+}
+
+void ff_get_unscaled_swscale_aarch64(SwsContext *c)
+{
+ int cpu_flags = av_get_cpu_flags(); /* runtime CPU feature detection */
+ if (have_neon(cpu_flags))
+ get_unscaled_swscale_neon(c);
+}
diff --git a/libswscale/aarch64/yuv2rgb_neon.S b/libswscale/aarch64/yuv2rgb_neon.S
new file mode 100644
index 0000000000..b7446aa105
--- /dev/null
+++ b/libswscale/aarch64/yuv2rgb_neon.S
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2016 Matthieu Bouron <matthieu.bouron stupeflix.com>
+ * Copyright (c) 2016 Clément Bœsch <clement stupeflix.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/aarch64/asm.S"
+
+.macro load_yoff_ycoeff yoff ycoeff // load stack args: w9 = y_offset, w10 = y_coeff
+#if defined(__APPLE__)
+ ldp w9, w10, [sp, #\yoff] // Darwin packs both 32-bit args into adjacent 4-byte slots
+#else
+ ldr w9, [sp, #\yoff] // elsewhere each arg occupies its own 8-byte stack slot
+ ldr w10, [sp, #\ycoeff]
+#endif
+.endm
+
+.macro load_args_nv12
+ ldr x8, [sp] // table
+ load_yoff_ycoeff 8, 16 // y_offset, y_coeff
+ ld1 {v1.1D}, [x8] // v1 = {v2r, u2g, v2g, u2b} coefficients
+ dup v0.8H, w10 // v0 = y_coeff
+ dup v3.8H, w9 // v3 = y_offset
+ sub w3, w3, w0, lsl #2 // w3 = linesize - width * 4 (padding)
+ sub w5, w5, w0 // w5 = linesizeY - width (paddingY)
+ sub w7, w7, w0 // w7 = linesizeC - width (paddingC)
+ neg w11, w0 // w11 = -width (rewinds srcC on even lines, see increment_nv12)
+.endm
+
+.macro load_args_nv21
+ load_args_nv12 // identical argument layout; only the chroma byte order differs
+.endm
+
+.macro load_args_yuv420p
+ ldr x13, [sp] // srcV
+ ldr w14, [sp, #8] // linesizeV
+ ldr x8, [sp, #16] // table
+ load_yoff_ycoeff 24, 32 // y_offset, y_coeff
+ ld1 {v1.1D}, [x8] // v1 = {v2r, u2g, v2g, u2b} coefficients
+ dup v0.8H, w10 // v0 = y_coeff
+ dup v3.8H, w9 // v3 = y_offset
+ sub w3, w3, w0, lsl #2 // w3 = linesize - width * 4 (padding)
+ sub w5, w5, w0 // w5 = linesizeY - width (paddingY)
+ sub w7, w7, w0, lsr #1 // w7 = linesizeU - width / 2 (paddingU)
+ sub w14, w14, w0, lsr #1 // w14 = linesizeV - width / 2 (paddingV)
+ lsr w11, w0, #1 // w11 = width / 2
+ neg w11, w11 // w11 = -(width / 2) (rewinds srcU/srcV on even lines)
+.endm
+
+.macro load_args_yuv422p
+ ldr x13, [sp] // srcV
+ ldr w14, [sp, #8] // linesizeV
+ ldr x8, [sp, #16] // table
+ load_yoff_ycoeff 24, 32 // y_offset, y_coeff
+ ld1 {v1.1D}, [x8] // v1 = {v2r, u2g, v2g, u2b} coefficients
+ dup v0.8H, w10 // v0 = y_coeff
+ dup v3.8H, w9 // v3 = y_offset
+ sub w3, w3, w0, lsl #2 // w3 = linesize - width * 4 (padding)
+ sub w5, w5, w0 // w5 = linesizeY - width (paddingY)
+ sub w7, w7, w0, lsr #1 // w7 = linesizeU - width / 2 (paddingU)
+ sub w14, w14, w0, lsr #1 // w14 = linesizeV - width / 2 (paddingV)
+.endm
+
+.macro load_chroma_nv12
+ ld2 {v16.8B, v17.8B}, [x6], #16 // deinterleave UV: v16 = U, v17 = V
+ ushll v18.8H, v16.8B, #3 // v18 = U * (1<<3)
+ ushll v19.8H, v17.8B, #3 // v19 = V * (1<<3)
+.endm
+
+.macro load_chroma_nv21
+ ld2 {v16.8B, v17.8B}, [x6], #16 // deinterleave VU: v16 = V, v17 = U
+ ushll v19.8H, v16.8B, #3 // v19 = V * (1<<3)
+ ushll v18.8H, v17.8B, #3 // v18 = U * (1<<3)
+.endm
+
+.macro load_chroma_yuv420p
+ ld1 {v16.8B}, [ x6], #8 // load 8 U bytes from srcU
+ ld1 {v17.8B}, [x13], #8 // load 8 V bytes from srcV
+ ushll v18.8H, v16.8B, #3 // v18 = U * (1<<3)
+ ushll v19.8H, v17.8B, #3 // v19 = V * (1<<3)
+.endm
+
+.macro load_chroma_yuv422p
+ load_chroma_yuv420p // same layout: separate U and V planes
+.endm
+
+.macro increment_nv12
+ ands w15, w1, #1 // test parity of remaining height (chroma advances every 2nd line)
+ csel w16, w7, w11, ne // incC = (h & 1) ? paddingC : -width
+ add x6, x6, w16, SXTW // srcC += incC
+.endm
+
+.macro increment_nv21
+ increment_nv12
+.endm
+
+.macro increment_yuv420p
+ ands w15, w1, #1 // test parity of remaining height (chroma advances every 2nd line)
+ csel w16, w7, w11, ne // incU = (h & 1) ? paddingU : -width/2
+ csel w17, w14, w11, ne // incV = (h & 1) ? paddingV : -width/2
+ add x6, x6, w16, SXTW // srcU += incU
+ add x13, x13, w17, SXTW // srcV += incV
+.endm
+
+.macro increment_yuv422p
+ add x6, x6, w7, UXTW // srcU += incU
+ add x13, x13, w14, UXTW // srcV += incV
+.endm
+
+.macro compute_rgba r1 g1 b1 a1 r2 g2 b2 a2
+ add v20.8H, v26.8H, v20.8H // Y1 + R1
+ add v21.8H, v27.8H, v21.8H // Y2 + R2
+ add v22.8H, v26.8H, v22.8H // Y1 + G1
+ add v23.8H, v27.8H, v23.8H // Y2 + G2
+ add v24.8H, v26.8H, v24.8H // Y1 + B1
+ add v25.8H, v27.8H, v25.8H // Y2 + B2
+ sqrshrun \r1, v20.8H, #1 // clip_u8((Y1 + R1) >> 1)
+ sqrshrun \r2, v21.8H, #1 // clip_u8((Y2 + R2) >> 1)
+ sqrshrun \g1, v22.8H, #1 // clip_u8((Y1 + G1) >> 1)
+ sqrshrun \g2, v23.8H, #1 // clip_u8((Y2 + G2) >> 1)
+ sqrshrun \b1, v24.8H, #1 // clip_u8((Y1 + B1) >> 1)
+ sqrshrun \b2, v25.8H, #1 // clip_u8((Y2 + B2) >> 1)
+ movi \a1, #255
+ movi \a2, #255
+.endm
+
+.macro declare_func ifmt ofmt
+function ff_\ifmt\()_to_\ofmt\()_neon, export=1
+ load_args_\ifmt
+1:
+ mov w8, w0 // w8 = width
+2:
+ movi v5.8H, #4, lsl #8 // 128 * (1<<3)
+ load_chroma_\ifmt
+ sub v18.8H, v18.8H, v5.8H // U*(1<<3) - 128*(1<<3)
+ sub v19.8H, v19.8H, v5.8H // V*(1<<3) - 128*(1<<3)
+ sqdmulh v20.8H, v19.8H, v1.H[0] // V * v2r (R)
+ sqdmulh v22.8H, v18.8H, v1.H[1] // U * u2g
+ sqdmulh v19.8H, v19.8H, v1.H[2] // V * v2g
+ add v22.8H, v22.8H, v19.8H // U * u2g + V * v2g (G)
+ sqdmulh v24.8H, v18.8H, v1.H[3] // U * u2b (B)
+ zip2 v21.8H, v20.8H, v20.8H // R2
+ zip1 v20.8H, v20.8H, v20.8H // R1
+ zip2 v23.8H, v22.8H, v22.8H // G2
+ zip1 v22.8H, v22.8H, v22.8H // G1
+ zip2 v25.8H, v24.8H, v24.8H // B2
+ zip1 v24.8H, v24.8H, v24.8H // B1
+ ld1 {v2.16B}, [x4], #16 // load luma
+ ushll v26.8H, v2.8B, #3 // Y1*(1<<3)
+ ushll2 v27.8H, v2.16B, #3 // Y2*(1<<3)
+ sub v26.8H, v26.8H, v3.8H // Y1*(1<<3) - y_offset
+ sub v27.8H, v27.8H, v3.8H // Y2*(1<<3) - y_offset
+ sqdmulh v26.8H, v26.8H, v0.8H // ((Y1*(1<<3) - y_offset) * y_coeff) >> 15
+ sqdmulh v27.8H, v27.8H, v0.8H // ((Y2*(1<<3) - y_offset) * y_coeff) >> 15
+
+.ifc \ofmt,argb // 1 2 3 0
+ compute_rgba v5.8B,v6.8B,v7.8B,v4.8B, v17.8B,v18.8B,v19.8B,v16.8B
+.endif
+
+.ifc \ofmt,rgba // 0 1 2 3
+ compute_rgba v4.8B,v5.8B,v6.8B,v7.8B, v16.8B,v17.8B,v18.8B,v19.8B
+.endif
+
+.ifc \ofmt,abgr // 3 2 1 0
+ compute_rgba v7.8B,v6.8B,v5.8B,v4.8B, v19.8B,v18.8B,v17.8B,v16.8B
+.endif
+
+.ifc \ofmt,bgra // 2 1 0 3
+ compute_rgba v6.8B,v5.8B,v4.8B,v7.8B, v18.8B,v17.8B,v16.8B,v19.8B
+.endif
+
+ st4 { v4.8B, v5.8B, v6.8B, v7.8B}, [x2], #32 // store interleaved pixels 0-7 (32 bytes)
+ st4 {v16.8B,v17.8B,v18.8B,v19.8B}, [x2], #32 // store interleaved pixels 8-15 (32 bytes)
+ subs w8, w8, #16 // width -= 16
+ b.gt 2b
+ add x2, x2, w3, UXTW // dst += padding
+ add x4, x4, w5, UXTW // srcY += paddingY
+ increment_\ifmt
+ subs w1, w1, #1 // height -= 1
+ b.gt 1b
+ ret
+endfunc
+.endm
+
+.macro declare_rgb_funcs ifmt // emit the 4 RGBA-ordering variants for one input format
+ declare_func \ifmt, argb
+ declare_func \ifmt, rgba
+ declare_func \ifmt, abgr
+ declare_func \ifmt, bgra
+.endm
+
+declare_rgb_funcs nv12
+declare_rgb_funcs nv21
+declare_rgb_funcs yuv420p
+declare_rgb_funcs yuv422p