From 3a09494939ddb2f2fd0f8d015162d5174ec07d4c Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Tue, 24 Dec 2013 16:17:03 -0500 Subject: vp9mc/x86: add 16px functions (64bit only). Signed-off-by: Anton Khirnov --- libavcodec/x86/vp9mc.asm | 122 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 122 insertions(+) (limited to 'libavcodec/x86/vp9mc.asm') diff --git a/libavcodec/x86/vp9mc.asm b/libavcodec/x86/vp9mc.asm index 59e56687f2..152715c9b9 100644 --- a/libavcodec/x86/vp9mc.asm +++ b/libavcodec/x86/vp9mc.asm @@ -144,6 +144,62 @@ INIT_XMM ssse3 filter_h_fn put filter_h_fn avg +%if ARCH_X86_64 +%macro filter_hx2_fn 1 +%assign %%px mmsize +cglobal %1_8tap_1d_h_ %+ %%px, 6, 6, 14, dst, src, dstride, sstride, h, filtery + mova m13, [pw_256] + mova m8, [filteryq+ 0] + mova m9, [filteryq+16] + mova m10, [filteryq+32] + mova m11, [filteryq+48] +.loop: + movu m0, [srcq-3] + movu m1, [srcq-2] + movu m2, [srcq-1] + movu m3, [srcq+0] + movu m4, [srcq+1] + movu m5, [srcq+2] + movu m6, [srcq+3] + movu m7, [srcq+4] + add srcq, sstrideq + SBUTTERFLY bw, 0, 1, 12 + SBUTTERFLY bw, 2, 3, 12 + SBUTTERFLY bw, 4, 5, 12 + SBUTTERFLY bw, 6, 7, 12 + pmaddubsw m0, m8 + pmaddubsw m1, m8 + pmaddubsw m2, m9 + pmaddubsw m3, m9 + pmaddubsw m4, m10 + pmaddubsw m5, m10 + pmaddubsw m6, m11 + pmaddubsw m7, m11 + paddw m0, m2 + paddw m1, m3 + paddw m4, m6 + paddw m5, m7 + paddsw m0, m4 + paddsw m1, m5 + pmulhrsw m0, m13 + pmulhrsw m1, m13 + packuswb m0, m1 +%ifidn %1, avg + pavgb m0, [dstq] +%endif + mova [dstq], m0 + add dstq, dstrideq + dec hd + jg .loop + RET +%endmacro + +INIT_XMM ssse3 +filter_hx2_fn put +filter_hx2_fn avg + +%endif ; ARCH_X86_64 + %macro filter_v_fn 1 %assign %%px mmsize/2 %if ARCH_X86_64 @@ -218,6 +274,72 @@ INIT_XMM ssse3 filter_v_fn put filter_v_fn avg +%if ARCH_X86_64 + +%macro filter_vx2_fn 1 +%assign %%px mmsize +cglobal %1_8tap_1d_v_ %+ %%px, 6, 8, 14, dst, src, dstride, sstride, h, filtery, src4, sstride3 + sub srcq, sstrideq + lea sstride3q, [sstrideq*3] + sub srcq, sstrideq + mova m13, [pw_256] + sub srcq, sstrideq + mova m8, [filteryq+ 0] + lea src4q, [srcq+sstrideq*4] + mova m9, [filteryq+16] + mova m10, [filteryq+32] + mova m11, [filteryq+48] +.loop: + ; FIXME maybe reuse loads from previous rows, or just + ; more generally unroll this to prevent multiple loads of + ; the same data? + movu m0, [srcq] + movu m1, [srcq+sstrideq] + movu m2, [srcq+sstrideq*2] + movu m3, [srcq+sstride3q] + movu m4, [src4q] + movu m5, [src4q+sstrideq] + movu m6, [src4q+sstrideq*2] + movu m7, [src4q+sstride3q] + add srcq, sstrideq + add src4q, sstrideq + SBUTTERFLY bw, 0, 1, 12 + SBUTTERFLY bw, 2, 3, 12 + SBUTTERFLY bw, 4, 5, 12 + SBUTTERFLY bw, 6, 7, 12 + pmaddubsw m0, m8 + pmaddubsw m1, m8 + pmaddubsw m2, m9 + pmaddubsw m3, m9 + pmaddubsw m4, m10 + pmaddubsw m5, m10 + pmaddubsw m6, m11 + pmaddubsw m7, m11 + paddw m0, m2 + paddw m1, m3 + paddw m4, m6 + paddw m5, m7 + paddsw m0, m4 + paddsw m1, m5 + pmulhrsw m0, m13 + pmulhrsw m1, m13 + packuswb m0, m1 +%ifidn %1, avg + pavgb m0, [dstq] +%endif + mova [dstq], m0 + add dstq, dstrideq + dec hd + jg .loop + RET +%endmacro + +INIT_XMM ssse3 +filter_vx2_fn put +filter_vx2_fn avg + +%endif ; ARCH_X86_64 + %macro fpel_fn 6 %if %2 == 4 %define %%srcfn movh -- cgit v1.2.3 From 6ab642d69d18b4ecf1ea65a4dceca159f03a0313 Mon Sep 17 00:00:00 2001 From: Clément Bœsch Date: Wed, 15 Jan 2014 22:35:43 +0100 Subject: vp9mc/x86: simplify a few inits. 
Signed-off-by: Anton Khirnov --- libavcodec/x86/vp9mc.asm | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) (limited to 'libavcodec/x86/vp9mc.asm') diff --git a/libavcodec/x86/vp9mc.asm b/libavcodec/x86/vp9mc.asm index 152715c9b9..43989dee73 100644 --- a/libavcodec/x86/vp9mc.asm +++ b/libavcodec/x86/vp9mc.asm @@ -209,13 +209,11 @@ cglobal %1_8tap_1d_v_ %+ %%px, 4, 7, 11, dst, src, dstride, sstride, filtery, sr mov filteryq, r5mp %define hd r4mp %endif - sub srcq, sstrideq - lea sstride3q, [sstrideq*3] - sub srcq, sstrideq mova m6, [pw_256] - sub srcq, sstrideq + lea sstride3q, [sstrideq*3] + lea src4q, [srcq+sstrideq] + sub srcq, sstride3q mova m7, [filteryq+ 0] - lea src4q, [srcq+sstrideq*4] %if ARCH_X86_64 && mmsize > 8 mova m8, [filteryq+16] mova m9, [filteryq+32] @@ -279,13 +277,11 @@ filter_v_fn avg %macro filter_vx2_fn 1 %assign %%px mmsize cglobal %1_8tap_1d_v_ %+ %%px, 6, 8, 14, dst, src, dstride, sstride, h, filtery, src4, sstride3 - sub srcq, sstrideq - lea sstride3q, [sstrideq*3] - sub srcq, sstrideq mova m13, [pw_256] - sub srcq, sstrideq + lea sstride3q, [sstrideq*3] + lea src4q, [srcq+sstrideq] + sub srcq, sstride3q mova m8, [filteryq+ 0] - lea src4q, [srcq+sstrideq*4] mova m9, [filteryq+16] mova m10, [filteryq+32] mova m11, [filteryq+48] -- cgit v1.2.3 From 8be8444d01d850b7ff2363f6886bfa8a8ea4a449 Mon Sep 17 00:00:00 2001 From: James Almer Date: Sat, 18 Jan 2014 02:29:22 -0300 Subject: vp9mc/x86: rename ff_avg[48]_sse to ff_avg[48]_mmxext pavgb is an sse integer instruction, so the mmxext flag is enough Signed-off-by: James Almer Reviewed-by: "Ronald S. Bultje" Signed-off-by: Anton Khirnov --- libavcodec/x86/vp9dsp_init.c | 11 +++++++---- libavcodec/x86/vp9mc.asm | 2 +- 2 files changed, 8 insertions(+), 5 deletions(-) (limited to 'libavcodec/x86/vp9mc.asm') diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c index dc08e60662..2b94af3480 100644 --- a/libavcodec/x86/vp9dsp_init.c +++ b/libavcodec/x86/vp9dsp_init.c @@ -40,8 +40,8 @@ fpel_func(put, 8, mmx); fpel_func(put, 16, sse); fpel_func(put, 32, sse); fpel_func(put, 64, sse); -fpel_func(avg, 4, sse); -fpel_func(avg, 8, sse); +fpel_func(avg, 4, mmxext); +fpel_func(avg, 8, mmxext); fpel_func(avg, 16, sse2); fpel_func(avg, 32, sse2); fpel_func(avg, 64, sse2); @@ -222,12 +222,15 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp) init_fpel(3, 0, 8, put, mmx); } + if (EXTERNAL_MMXEXT(cpu_flags)) { + init_fpel(4, 1, 4, avg, mmxext); + init_fpel(3, 1, 8, avg, mmxext); + } + if (EXTERNAL_SSE(cpu_flags)) { init_fpel(2, 0, 16, put, sse); init_fpel(1, 0, 32, put, sse); init_fpel(0, 0, 64, put, sse); - init_fpel(4, 1, 4, avg, sse); - init_fpel(3, 1, 8, avg, sse); } if (EXTERNAL_SSE2(cpu_flags)) { diff --git a/libavcodec/x86/vp9mc.asm b/libavcodec/x86/vp9mc.asm index 43989dee73..4c747ab1bb 100644 --- a/libavcodec/x86/vp9mc.asm +++ b/libavcodec/x86/vp9mc.asm @@ -379,7 +379,7 @@ cglobal %1%2, 5, 5, 4, dst, src, dstride, sstride, h INIT_MMX mmx fpel_fn put, 4, strideq, strideq*2, stride3q, 4 fpel_fn put, 8, strideq, strideq*2, stride3q, 4 -INIT_MMX sse +INIT_MMX mmxext fpel_fn avg, 4, strideq, strideq*2, stride3q, 4 fpel_fn avg, 8, strideq, strideq*2, stride3q, 4 INIT_XMM sse -- cgit v1.2.3 From 3cda179f180e48cda9afee9b1875f10e89a848a6 Mon Sep 17 00:00:00 2001 From: Clément Bœsch Date: Fri, 28 Mar 2014 22:33:51 +0100 Subject: vp9mc/x86: rename ff_* to ff_vp9_* Signed-off-by: Anton Khirnov --- libavcodec/x86/vp9dsp_init.c | 94 ++++++++++++++++++++++---------------------- libavcodec/x86/vp9mc.asm 
| 14 +++---- 2 files changed, 54 insertions(+), 54 deletions(-) (limited to 'libavcodec/x86/vp9mc.asm') diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c index 2b94af3480..13662961af 100644 --- a/libavcodec/x86/vp9dsp_init.c +++ b/libavcodec/x86/vp9dsp_init.c @@ -29,11 +29,11 @@ #if HAVE_YASM -#define fpel_func(avg, sz, opt) \ -void ff_ ## avg ## sz ## _ ## opt(uint8_t *dst, const uint8_t *src, \ - ptrdiff_t dst_stride, \ - ptrdiff_t src_stride, \ - int h, int mx, int my) +#define fpel_func(avg, sz, opt) \ +void ff_vp9_ ## avg ## sz ## _ ## opt(uint8_t *dst, const uint8_t *src, \ + ptrdiff_t dst_stride, \ + ptrdiff_t src_stride, \ + int h, int mx, int my) fpel_func(put, 4, mmx); fpel_func(put, 8, mmx); @@ -47,14 +47,14 @@ fpel_func(avg, 32, sse2); fpel_func(avg, 64, sse2); #undef fpel_func -#define mc_func(avg, sz, dir, opt) \ -void \ -ff_ ## avg ## _8tap_1d_ ## dir ## _ ## sz ## _ ## opt(uint8_t *dst, \ - const uint8_t *src, \ - ptrdiff_t dst_stride, \ - ptrdiff_t src_stride, \ - int h, \ - const int8_t (*filter)[16]) +#define mc_func(avg, sz, dir, opt) \ +void \ +ff_vp9_ ## avg ## _8tap_1d_ ## dir ## _ ## sz ## _ ## opt(uint8_t *dst, \ + const uint8_t *src, \ + ptrdiff_t dst_stride, \ + ptrdiff_t src_stride, \ + int h, \ + const int8_t (*filter)[16]) #define mc_funcs(sz) \ mc_func(put, sz, h, ssse3); \ @@ -73,19 +73,19 @@ mc_funcs(16); #define mc_rep_func(avg, sz, hsz, dir, opt) \ static av_always_inline void \ -ff_ ## avg ## _8tap_1d_ ## dir ## _ ## sz ## _ ## opt(uint8_t *dst, \ +ff_vp9_ ## avg ## _8tap_1d_ ## dir ## _ ## sz ## _ ## opt(uint8_t *dst, \ const uint8_t *src, \ ptrdiff_t dst_stride, \ ptrdiff_t src_stride, \ int h, \ const int8_t (*filter)[16]) \ { \ - ff_ ## avg ## _8tap_1d_ ## dir ## _ ## hsz ## _ ## opt(dst, src, \ + ff_vp9_ ## avg ## _8tap_1d_ ## dir ## _ ## hsz ## _ ## opt(dst, src, \ dst_stride, \ src_stride, \ h, \ filter); \ - ff_ ## avg ## _8tap_1d_ ## dir ## _ ## hsz ## _ ## opt(dst + hsz, \ + ff_vp9_ ## avg ## _8tap_1d_ ## dir ## _ ## hsz ## _ ## opt(dst + hsz, \ src + hsz, \ dst_stride, \ src_stride, \ @@ -109,23 +109,23 @@ mc_rep_funcs(64, 32); extern const int8_t ff_filters_ssse3[3][15][4][16]; -#define filter_8tap_2d_fn(op, sz, f, fname) \ -static void \ -op ## _8tap_ ## fname ## _ ## sz ## hv_ssse3(uint8_t *dst, \ - const uint8_t *src, \ - ptrdiff_t dst_stride, \ - ptrdiff_t src_stride, \ - int h, int mx, int my) \ -{ \ - LOCAL_ALIGNED_16(uint8_t, temp, [71 * 64]); \ - ff_put_8tap_1d_h_ ## sz ## _ssse3(temp, src - 3 * src_stride, \ - 64, src_stride, \ - h + 7, \ - ff_filters_ssse3[f][mx - 1]); \ - ff_ ## op ## _8tap_1d_v_ ## sz ## _ssse3(dst, temp + 3 * 64, \ - dst_stride, 64, \ - h, \ - ff_filters_ssse3[f][my - 1]); \ +#define filter_8tap_2d_fn(op, sz, f, fname) \ +static void \ +op ## _8tap_ ## fname ## _ ## sz ## hv_ssse3(uint8_t *dst, \ + const uint8_t *src, \ + ptrdiff_t dst_stride, \ + ptrdiff_t src_stride, \ + int h, int mx, int my) \ +{ \ + LOCAL_ALIGNED_16(uint8_t, temp, [71 * 64]); \ + ff_vp9_put_8tap_1d_h_ ## sz ## _ssse3(temp, src - 3 * src_stride, \ + 64, src_stride, \ + h + 7, \ + ff_filters_ssse3[f][mx - 1]); \ + ff_vp9_ ## op ## _8tap_1d_v_ ## sz ## _ssse3(dst, temp + 3 * 64, \ + dst_stride, 64, \ + h, \ + ff_filters_ssse3[f][my - 1]); \ } #define filters_8tap_2d_fn(op, sz) \ @@ -147,19 +147,19 @@ filters_8tap_2d_fn2(avg) #undef filters_8tap_2d_fn #undef filter_8tap_2d_fn -#define filter_8tap_1d_fn(op, sz, f, fname, dir, dvar) \ -static void \ -op ## _8tap_ ## fname ## _ ## sz ## dir ## _ssse3(uint8_t *dst, \ - const 
uint8_t *src, \ - ptrdiff_t dst_stride, \ - ptrdiff_t src_stride, \ - int h, int mx, \ - int my) \ -{ \ - ff_ ## op ## _8tap_1d_ ## dir ## _ ## sz ## _ssse3(dst, src, \ - dst_stride, \ - src_stride, h, \ - ff_filters_ssse3[f][dvar - 1]); \ +#define filter_8tap_1d_fn(op, sz, f, fname, dir, dvar) \ +static void \ +op ## _8tap_ ## fname ## _ ## sz ## dir ## _ssse3(uint8_t *dst, \ + const uint8_t *src, \ + ptrdiff_t dst_stride, \ + ptrdiff_t src_stride, \ + int h, int mx, \ + int my) \ +{ \ + ff_vp9_ ## op ## _8tap_1d_ ## dir ## _ ## sz ## _ssse3(dst, src, \ + dst_stride, \ + src_stride, h,\ + ff_filters_ssse3[f][dvar - 1]); \ } #define filters_8tap_1d_fn(op, sz, dir, dvar) \ @@ -197,7 +197,7 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp) dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][0][0] = \ dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][0][0] = \ dsp->mc[idx1][FILTER_8TAP_SHARP ][idx2][0][0] = \ - dsp->mc[idx1][FILTER_BILINEAR ][idx2][0][0] = ff_ ## type ## sz ## _ ## opt + dsp->mc[idx1][FILTER_BILINEAR ][idx2][0][0] = ff_vp9_ ## type ## sz ## _ ## opt #define init_subpel1(idx1, idx2, idxh, idxv, sz, dir, type, opt) \ diff --git a/libavcodec/x86/vp9mc.asm b/libavcodec/x86/vp9mc.asm index 4c747ab1bb..41b22204c0 100644 --- a/libavcodec/x86/vp9mc.asm +++ b/libavcodec/x86/vp9mc.asm @@ -86,7 +86,7 @@ SECTION .text %macro filter_h_fn 1 %assign %%px mmsize/2 -cglobal %1_8tap_1d_h_ %+ %%px, 6, 6, 11, dst, src, dstride, sstride, h, filtery +cglobal vp9_%1_8tap_1d_h_ %+ %%px, 6, 6, 11, dst, src, dstride, sstride, h, filtery mova m6, [pw_256] mova m7, [filteryq+ 0] %if ARCH_X86_64 && mmsize > 8 @@ -147,7 +147,7 @@ filter_h_fn avg %if ARCH_X86_64 %macro filter_hx2_fn 1 %assign %%px mmsize -cglobal %1_8tap_1d_h_ %+ %%px, 6, 6, 14, dst, src, dstride, sstride, h, filtery +cglobal vp9_%1_8tap_1d_h_ %+ %%px, 6, 6, 14, dst, src, dstride, sstride, h, filtery mova m13, [pw_256] mova m8, [filteryq+ 0] mova m9, [filteryq+16] @@ -203,9 +203,9 @@ filter_hx2_fn avg %macro filter_v_fn 1 %assign %%px mmsize/2 %if ARCH_X86_64 -cglobal %1_8tap_1d_v_ %+ %%px, 6, 8, 11, dst, src, dstride, sstride, h, filtery, src4, sstride3 +cglobal vp9_%1_8tap_1d_v_ %+ %%px, 6, 8, 11, dst, src, dstride, sstride, h, filtery, src4, sstride3 %else -cglobal %1_8tap_1d_v_ %+ %%px, 4, 7, 11, dst, src, dstride, sstride, filtery, src4, sstride3 +cglobal vp9_%1_8tap_1d_v_ %+ %%px, 4, 7, 11, dst, src, dstride, sstride, filtery, src4, sstride3 mov filteryq, r5mp %define hd r4mp %endif @@ -276,7 +276,7 @@ filter_v_fn avg %macro filter_vx2_fn 1 %assign %%px mmsize -cglobal %1_8tap_1d_v_ %+ %%px, 6, 8, 14, dst, src, dstride, sstride, h, filtery, src4, sstride3 +cglobal vp9_%1_8tap_1d_v_ %+ %%px, 6, 8, 14, dst, src, dstride, sstride, h, filtery, src4, sstride3 mova m13, [pw_256] lea sstride3q, [sstrideq*3] lea src4q, [srcq+sstrideq] @@ -346,11 +346,11 @@ filter_vx2_fn avg %endif %if %2 <= 16 -cglobal %1%2, 5, 7, 4, dst, src, dstride, sstride, h, dstride3, sstride3 +cglobal vp9_%1%2, 5, 7, 4, dst, src, dstride, sstride, h, dstride3, sstride3 lea sstride3q, [sstrideq*3] lea dstride3q, [dstrideq*3] %else -cglobal %1%2, 5, 5, 4, dst, src, dstride, sstride, h +cglobal vp9_%1%2, 5, 5, 4, dst, src, dstride, sstride, h %endif .loop: %%srcfn m0, [srcq] -- cgit v1.2.3 From 67922b4ee48b5a5850ebf2cb6fcddf5979a26f68 Mon Sep 17 00:00:00 2001 From: James Almer Date: Mon, 22 Sep 2014 21:55:13 -0300 Subject: vp9mc/x86: add AVX and AVX2 MC Roughly 25% faster MC than ssse3 for blocksizes 32 and 64. Reviewed-by: Ronald S. 
Bultje Signed-off-by: James Almer Signed-off-by: Anton Khirnov --- libavcodec/x86/constants.c | 3 +- libavcodec/x86/constants.h | 2 +- libavcodec/x86/vp9dsp_init.c | 210 ++++++++++++++++++++++++++----------------- libavcodec/x86/vp9mc.asm | 74 +++++++++------ 4 files changed, 178 insertions(+), 111 deletions(-) (limited to 'libavcodec/x86/vp9mc.asm') diff --git a/libavcodec/x86/constants.c b/libavcodec/x86/constants.c index 47f6ef53ae..6f7dd7346e 100644 --- a/libavcodec/x86/constants.c +++ b/libavcodec/x86/constants.c @@ -43,7 +43,8 @@ DECLARE_ALIGNED(16, const xmm_reg, ff_pw_64) = { 0x0040004000400040ULL, 0x004 DECLARE_ALIGNED(8, const uint64_t, ff_pw_96) = 0x0060006000600060ULL; DECLARE_ALIGNED(8, const uint64_t, ff_pw_128) = 0x0080008000800080ULL; DECLARE_ALIGNED(8, const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL; -DECLARE_ALIGNED(16, const xmm_reg, ff_pw_256) = { 0x0100010001000100ULL, 0x0100010001000100ULL }; +DECLARE_ALIGNED(32, const ymm_reg, ff_pw_256) = { 0x0100010001000100ULL, 0x0100010001000100ULL, + 0x0100010001000100ULL, 0x0100010001000100ULL }; DECLARE_ALIGNED(16, const xmm_reg, ff_pw_512) = { 0x0200020002000200ULL, 0x0200020002000200ULL }; DECLARE_ALIGNED(16, const xmm_reg, ff_pw_1019) = { 0x03FB03FB03FB03FBULL, 0x03FB03FB03FB03FBULL }; DECLARE_ALIGNED(16, const xmm_reg, ff_pw_m1) = { 0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFFFFFFFFFFULL }; diff --git a/libavcodec/x86/constants.h b/libavcodec/x86/constants.h index c3b8d501cd..59ff94725d 100644 --- a/libavcodec/x86/constants.h +++ b/libavcodec/x86/constants.h @@ -42,7 +42,7 @@ extern const xmm_reg ff_pw_64; extern const uint64_t ff_pw_96; extern const uint64_t ff_pw_128; extern const uint64_t ff_pw_255; -extern const xmm_reg ff_pw_256; +extern const ymm_reg ff_pw_256; extern const xmm_reg ff_pw_512; extern const xmm_reg ff_pw_m1; diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c index 13662961af..8c4af8368c 100644 --- a/libavcodec/x86/vp9dsp_init.c +++ b/libavcodec/x86/vp9dsp_init.c @@ -45,6 +45,10 @@ fpel_func(avg, 8, mmxext); fpel_func(avg, 16, sse2); fpel_func(avg, 32, sse2); fpel_func(avg, 64, sse2); +fpel_func(put, 32, avx); +fpel_func(put, 64, avx); +fpel_func(avg, 32, avx2); +fpel_func(avg, 64, avx2); #undef fpel_func #define mc_func(avg, sz, dir, opt) \ @@ -54,18 +58,19 @@ ff_vp9_ ## avg ## _8tap_1d_ ## dir ## _ ## sz ## _ ## opt(uint8_t *dst, ptrdiff_t dst_stride, \ ptrdiff_t src_stride, \ int h, \ - const int8_t (*filter)[16]) + const int8_t (*filter)[32]) -#define mc_funcs(sz) \ - mc_func(put, sz, h, ssse3); \ - mc_func(avg, sz, h, ssse3); \ - mc_func(put, sz, v, ssse3); \ - mc_func(avg, sz, v, ssse3) +#define mc_funcs(sz, opt) \ + mc_func(put, sz, h, opt); \ + mc_func(avg, sz, h, opt); \ + mc_func(put, sz, v, opt); \ + mc_func(avg, sz, v, opt) -mc_funcs(4); -mc_funcs(8); +mc_funcs(4, ssse3); +mc_funcs(8, ssse3); #if ARCH_X86_64 -mc_funcs(16); +mc_funcs(16, ssse3); +mc_funcs(32, avx2); #endif #undef mc_funcs @@ -78,7 +83,7 @@ ff_vp9_ ## avg ## _8tap_1d_ ## dir ## _ ## sz ## _ ## opt(uint8_t *dst, \ ptrdiff_t dst_stride, \ ptrdiff_t src_stride, \ int h, \ - const int8_t (*filter)[16]) \ + const int8_t (*filter)[32]) \ { \ ff_vp9_ ## avg ## _8tap_1d_ ## dir ## _ ## hsz ## _ ## opt(dst, src, \ dst_stride, \ @@ -92,94 +97,109 @@ ff_vp9_ ## avg ## _8tap_1d_ ## dir ## _ ## sz ## _ ## opt(uint8_t *dst, \ h, filter); \ } -#define mc_rep_funcs(sz, hsz) \ - mc_rep_func(put, sz, hsz, h, ssse3); \ - mc_rep_func(avg, sz, hsz, h, ssse3); \ - mc_rep_func(put, sz, hsz, v, ssse3); \ - mc_rep_func(avg, sz, hsz, v, 
ssse3) +#define mc_rep_funcs(sz, hsz, opt) \ + mc_rep_func(put, sz, hsz, h, opt); \ + mc_rep_func(avg, sz, hsz, h, opt); \ + mc_rep_func(put, sz, hsz, v, opt); \ + mc_rep_func(avg, sz, hsz, v, opt) #if ARCH_X86_32 -mc_rep_funcs(16, 8); +mc_rep_funcs(16, 8, ssse3); +#endif +mc_rep_funcs(32, 16, ssse3); +mc_rep_funcs(64, 32, ssse3); +#if ARCH_X86_64 && HAVE_AVX2_EXTERNAL +mc_rep_funcs(64, 32, avx2); #endif -mc_rep_funcs(32, 16); -mc_rep_funcs(64, 32); #undef mc_rep_funcs #undef mc_rep_func -extern const int8_t ff_filters_ssse3[3][15][4][16]; - -#define filter_8tap_2d_fn(op, sz, f, fname) \ -static void \ -op ## _8tap_ ## fname ## _ ## sz ## hv_ssse3(uint8_t *dst, \ - const uint8_t *src, \ - ptrdiff_t dst_stride, \ - ptrdiff_t src_stride, \ - int h, int mx, int my) \ -{ \ - LOCAL_ALIGNED_16(uint8_t, temp, [71 * 64]); \ - ff_vp9_put_8tap_1d_h_ ## sz ## _ssse3(temp, src - 3 * src_stride, \ - 64, src_stride, \ - h + 7, \ - ff_filters_ssse3[f][mx - 1]); \ - ff_vp9_ ## op ## _8tap_1d_v_ ## sz ## _ssse3(dst, temp + 3 * 64, \ - dst_stride, 64, \ - h, \ - ff_filters_ssse3[f][my - 1]); \ +extern const int8_t ff_filters_ssse3[3][15][4][32]; + +#define filter_8tap_2d_fn(op, sz, f, fname, align, opt) \ +static void \ +op ## _8tap_ ## fname ## _ ## sz ## hv_ ## opt(uint8_t *dst, \ + const uint8_t *src, \ + ptrdiff_t dst_stride, \ + ptrdiff_t src_stride, \ + int h, int mx, int my) \ +{ \ + LOCAL_ALIGNED_ ## align(uint8_t, temp, [71 * 64]); \ + ff_vp9_put_8tap_1d_h_ ## sz ## _ ## opt(temp, src - 3 * src_stride, \ + 64, src_stride, \ + h + 7, \ + ff_filters_ssse3[f][mx - 1]); \ + ff_vp9_ ## op ## _8tap_1d_v_ ## sz ## _ ## opt(dst, temp + 3 * 64, \ + dst_stride, 64, \ + h, \ + ff_filters_ssse3[f][my - 1]); \ } -#define filters_8tap_2d_fn(op, sz) \ - filter_8tap_2d_fn(op, sz, FILTER_8TAP_REGULAR, regular) \ - filter_8tap_2d_fn(op, sz, FILTER_8TAP_SHARP, sharp) \ - filter_8tap_2d_fn(op, sz, FILTER_8TAP_SMOOTH, smooth) - -#define filters_8tap_2d_fn2(op) \ - filters_8tap_2d_fn(op, 64) \ - filters_8tap_2d_fn(op, 32) \ - filters_8tap_2d_fn(op, 16) \ - filters_8tap_2d_fn(op, 8) \ - filters_8tap_2d_fn(op, 4) - -filters_8tap_2d_fn2(put) -filters_8tap_2d_fn2(avg) +#define filters_8tap_2d_fn(op, sz, align, opt) \ + filter_8tap_2d_fn(op, sz, FILTER_8TAP_REGULAR, regular, align, opt) \ + filter_8tap_2d_fn(op, sz, FILTER_8TAP_SHARP, sharp, align, opt) \ + filter_8tap_2d_fn(op, sz, FILTER_8TAP_SMOOTH, smooth, align, opt) + +#define filters_8tap_2d_fn2(op, align, opt) \ + filters_8tap_2d_fn(op, 64, align, opt) \ + filters_8tap_2d_fn(op, 32, align, opt) \ + filters_8tap_2d_fn(op, 16, align, opt) \ + filters_8tap_2d_fn(op, 8, align, opt) \ + filters_8tap_2d_fn(op, 4, align, opt) + +filters_8tap_2d_fn2(put, 16, ssse3) +filters_8tap_2d_fn2(avg, 16, ssse3) +#if ARCH_X86_64 && HAVE_AVX2_EXTERNAL +filters_8tap_2d_fn(put, 64, 32, avx2) +filters_8tap_2d_fn(put, 32, 32, avx2) +filters_8tap_2d_fn(avg, 64, 32, avx2) +filters_8tap_2d_fn(avg, 32, 32, avx2) +#endif #undef filters_8tap_2d_fn2 #undef filters_8tap_2d_fn #undef filter_8tap_2d_fn -#define filter_8tap_1d_fn(op, sz, f, fname, dir, dvar) \ -static void \ -op ## _8tap_ ## fname ## _ ## sz ## dir ## _ssse3(uint8_t *dst, \ - const uint8_t *src, \ - ptrdiff_t dst_stride, \ - ptrdiff_t src_stride, \ - int h, int mx, \ - int my) \ -{ \ - ff_vp9_ ## op ## _8tap_1d_ ## dir ## _ ## sz ## _ssse3(dst, src, \ - dst_stride, \ - src_stride, h,\ - ff_filters_ssse3[f][dvar - 1]); \ +#define filter_8tap_1d_fn(op, sz, f, fname, dir, dvar, opt) \ +static void \ +op ## _8tap_ ## fname ## _ ## sz ## 
dir ## _ ## opt(uint8_t *dst, \ + const uint8_t *src, \ + ptrdiff_t dst_stride, \ + ptrdiff_t src_stride, \ + int h, int mx, \ + int my) \ +{ \ + ff_vp9_ ## op ## _8tap_1d_ ## dir ## _ ## sz ## _ ## opt(dst, src, \ + dst_stride, \ + src_stride, h,\ + ff_filters_ssse3[f][dvar - 1]); \ } -#define filters_8tap_1d_fn(op, sz, dir, dvar) \ - filter_8tap_1d_fn(op, sz, FILTER_8TAP_REGULAR, regular, dir, dvar) \ - filter_8tap_1d_fn(op, sz, FILTER_8TAP_SHARP, sharp, dir, dvar) \ - filter_8tap_1d_fn(op, sz, FILTER_8TAP_SMOOTH, smooth, dir, dvar) - -#define filters_8tap_1d_fn2(op, sz) \ - filters_8tap_1d_fn(op, sz, h, mx) \ - filters_8tap_1d_fn(op, sz, v, my) - -#define filters_8tap_1d_fn3(op) \ - filters_8tap_1d_fn2(op, 64) \ - filters_8tap_1d_fn2(op, 32) \ - filters_8tap_1d_fn2(op, 16) \ - filters_8tap_1d_fn2(op, 8) \ - filters_8tap_1d_fn2(op, 4) - -filters_8tap_1d_fn3(put) -filters_8tap_1d_fn3(avg) +#define filters_8tap_1d_fn(op, sz, dir, dvar, opt) \ + filter_8tap_1d_fn(op, sz, FILTER_8TAP_REGULAR, regular, dir, dvar, opt) \ + filter_8tap_1d_fn(op, sz, FILTER_8TAP_SHARP, sharp, dir, dvar, opt) \ + filter_8tap_1d_fn(op, sz, FILTER_8TAP_SMOOTH, smooth, dir, dvar, opt) + +#define filters_8tap_1d_fn2(op, sz, opt) \ + filters_8tap_1d_fn(op, sz, h, mx, opt) \ + filters_8tap_1d_fn(op, sz, v, my, opt) + +#define filters_8tap_1d_fn3(op, opt) \ + filters_8tap_1d_fn2(op, 64, opt) \ + filters_8tap_1d_fn2(op, 32, opt) \ + filters_8tap_1d_fn2(op, 16, opt) \ + filters_8tap_1d_fn2(op, 8, opt) \ + filters_8tap_1d_fn2(op, 4, opt) + +filters_8tap_1d_fn3(put, ssse3) +filters_8tap_1d_fn3(avg, ssse3) +#if ARCH_X86_64 && HAVE_AVX2_EXTERNAL +filters_8tap_1d_fn2(put, 64, avx2) +filters_8tap_1d_fn2(put, 32, avx2) +filters_8tap_1d_fn2(avg, 64, avx2) +filters_8tap_1d_fn2(avg, 32, avx2) +#endif #undef filters_8tap_1d_fn #undef filters_8tap_1d_fn2 @@ -205,9 +225,12 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp) dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][idxh][idxv] = type ## _8tap_regular_ ## sz ## dir ## _ ## opt; \ dsp->mc[idx1][FILTER_8TAP_SHARP][idx2][idxh][idxv] = type ## _8tap_sharp_ ## sz ## dir ## _ ## opt +#define init_subpel2_32_64(idx, idxh, idxv, dir, type, opt) \ + init_subpel1(0, idx, idxh, idxv, 64, dir, type, opt); \ + init_subpel1(1, idx, idxh, idxv, 32, dir, type, opt) + #define init_subpel2(idx, idxh, idxv, dir, type, opt) \ - init_subpel1(0, idx, idxh, idxv, 64, dir, type, opt); \ - init_subpel1(1, idx, idxh, idxv, 32, dir, type, opt); \ + init_subpel2_32_64(idx, idxh, idxv, dir, type, opt); \ init_subpel1(2, idx, idxh, idxv, 16, dir, type, opt); \ init_subpel1(3, idx, idxh, idxv, 8, dir, type, opt); \ init_subpel1(4, idx, idxh, idxv, 4, dir, type, opt) @@ -244,6 +267,25 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp) init_subpel3(1, avg, ssse3); } + if (EXTERNAL_AVX(cpu_flags)) { + init_fpel(1, 0, 32, put, avx); + init_fpel(0, 0, 64, put, avx); + } + + if (EXTERNAL_AVX2(cpu_flags)) { + init_fpel(1, 1, 32, avg, avx2); + init_fpel(0, 1, 64, avg, avx2); + +#if ARCH_X86_64 && HAVE_AVX2_EXTERNAL + init_subpel2_32_64(0, 1, 1, hv, put, avx2); + init_subpel2_32_64(0, 0, 1, v, put, avx2); + init_subpel2_32_64(0, 1, 0, h, put, avx2); + init_subpel2_32_64(1, 1, 1, hv, avg, avx2); + init_subpel2_32_64(1, 0, 1, v, avg, avx2); + init_subpel2_32_64(1, 1, 0, h, avg, avx2); +#endif /* ARCH_X86_64 && HAVE_AVX2_EXTERNAL */ + } + #undef init_fpel #undef init_subpel1 #undef init_subpel2 diff --git a/libavcodec/x86/vp9mc.asm b/libavcodec/x86/vp9mc.asm index 41b22204c0..4f66ea1c92 100644 --- 
a/libavcodec/x86/vp9mc.asm +++ b/libavcodec/x86/vp9mc.asm @@ -22,17 +22,17 @@ %include "libavutil/x86/x86util.asm" -SECTION_RODATA +SECTION_RODATA 32 cextern pw_256 %macro F8_TAPS 8 -times 8 db %1, %2 -times 8 db %3, %4 -times 8 db %5, %6 -times 8 db %7, %8 +times 16 db %1, %2 +times 16 db %3, %4 +times 16 db %5, %6 +times 16 db %7, %8 %endmacro -; int8_t ff_filters_ssse3[3][15][4][16] +; int8_t ff_filters_ssse3[3][15][4][32] const filters_ssse3 ; smooth F8_TAPS -3, -1, 32, 64, 38, 1, -3, 0 F8_TAPS -2, -2, 29, 63, 41, 2, -3, 0 @@ -90,9 +90,9 @@ cglobal vp9_%1_8tap_1d_h_ %+ %%px, 6, 6, 11, dst, src, dstride, sstride, h, filt mova m6, [pw_256] mova m7, [filteryq+ 0] %if ARCH_X86_64 && mmsize > 8 - mova m8, [filteryq+16] - mova m9, [filteryq+32] - mova m10, [filteryq+48] + mova m8, [filteryq+32] + mova m9, [filteryq+64] + mova m10, [filteryq+96] %endif .loop: movh m0, [srcq-3] @@ -114,9 +114,9 @@ cglobal vp9_%1_8tap_1d_h_ %+ %%px, 6, 6, 11, dst, src, dstride, sstride, h, filt pmaddubsw m4, m9 pmaddubsw m1, m10 %else - pmaddubsw m2, [filteryq+16] - pmaddubsw m4, [filteryq+32] - pmaddubsw m1, [filteryq+48] + pmaddubsw m2, [filteryq+32] + pmaddubsw m4, [filteryq+64] + pmaddubsw m1, [filteryq+96] %endif paddw m0, m2 paddw m4, m1 @@ -150,9 +150,9 @@ filter_h_fn avg cglobal vp9_%1_8tap_1d_h_ %+ %%px, 6, 6, 14, dst, src, dstride, sstride, h, filtery mova m13, [pw_256] mova m8, [filteryq+ 0] - mova m9, [filteryq+16] - mova m10, [filteryq+32] - mova m11, [filteryq+48] + mova m9, [filteryq+32] + mova m10, [filteryq+64] + mova m11, [filteryq+96] .loop: movu m0, [srcq-3] movu m1, [srcq-2] @@ -198,6 +198,12 @@ INIT_XMM ssse3 filter_hx2_fn put filter_hx2_fn avg +%if HAVE_AVX2_EXTERNAL +INIT_YMM avx2 +filter_hx2_fn put +filter_hx2_fn avg +%endif + %endif ; ARCH_X86_64 %macro filter_v_fn 1 @@ -215,9 +221,9 @@ cglobal vp9_%1_8tap_1d_v_ %+ %%px, 4, 7, 11, dst, src, dstride, sstride, filtery sub srcq, sstride3q mova m7, [filteryq+ 0] %if ARCH_X86_64 && mmsize > 8 - mova m8, [filteryq+16] - mova m9, [filteryq+32] - mova m10, [filteryq+48] + mova m8, [filteryq+32] + mova m9, [filteryq+64] + mova m10, [filteryq+96] %endif .loop: ; FIXME maybe reuse loads from previous rows, or just more generally @@ -242,9 +248,9 @@ cglobal vp9_%1_8tap_1d_v_ %+ %%px, 4, 7, 11, dst, src, dstride, sstride, filtery pmaddubsw m4, m9 pmaddubsw m1, m10 %else - pmaddubsw m2, [filteryq+16] - pmaddubsw m4, [filteryq+32] - pmaddubsw m1, [filteryq+48] + pmaddubsw m2, [filteryq+32] + pmaddubsw m4, [filteryq+64] + pmaddubsw m1, [filteryq+96] %endif paddw m0, m2 paddw m4, m1 @@ -282,9 +288,9 @@ cglobal vp9_%1_8tap_1d_v_ %+ %%px, 6, 8, 14, dst, src, dstride, sstride, h, filt lea src4q, [srcq+sstrideq] sub srcq, sstride3q mova m8, [filteryq+ 0] - mova m9, [filteryq+16] - mova m10, [filteryq+32] - mova m11, [filteryq+48] + mova m9, [filteryq+32] + mova m10, [filteryq+64] + mova m11, [filteryq+96] .loop: ; FIXME maybe reuse loads from previous rows, or just ; more generally unroll this to prevent multiple loads of @@ -334,6 +340,12 @@ INIT_XMM ssse3 filter_vx2_fn put filter_vx2_fn avg +%if HAVE_AVX2_EXTERNAL +INIT_YMM avx2 +filter_vx2_fn put +filter_vx2_fn avg +%endif + %endif ; ARCH_X86_64 %macro fpel_fn 6 @@ -345,7 +357,7 @@ filter_vx2_fn avg %define %%dstfn mova %endif -%if %2 <= 16 +%if %2 <= mmsize cglobal vp9_%1%2, 5, 7, 4, dst, src, dstride, sstride, h, dstride3, sstride3 lea sstride3q, [sstrideq*3] lea dstride3q, [dstrideq*3] @@ -376,6 +388,8 @@ cglobal vp9_%1%2, 5, 5, 4, dst, src, dstride, sstride, h %define d16 16 %define s16 16 +%define 
d32 32 +%define s32 32 INIT_MMX mmx fpel_fn put, 4, strideq, strideq*2, stride3q, 4 fpel_fn put, 8, strideq, strideq*2, stride3q, 4 @@ -390,5 +404,15 @@ INIT_XMM sse2 fpel_fn avg, 16, strideq, strideq*2, stride3q, 4 fpel_fn avg, 32, mmsize, strideq, strideq+mmsize, 2 fpel_fn avg, 64, mmsize, mmsize*2, mmsize*3, 1 +INIT_YMM avx +fpel_fn put, 32, strideq, strideq*2, stride3q, 4 +fpel_fn put, 64, mmsize, strideq, strideq+mmsize, 2 +%if HAVE_AVX2_EXTERNAL +INIT_YMM avx2 +fpel_fn avg, 32, strideq, strideq*2, stride3q, 4 +fpel_fn avg, 64, mmsize, strideq, strideq+mmsize, 2 +%endif %undef s16 %undef d16 +%undef s32 +%undef d32 -- cgit v1.2.3 From 9790b44a89d191a07a9d8b361fb4d18ea15f51a1 Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Sun, 14 Dec 2014 20:13:24 -0500 Subject: vp9mc/x86: sse2 MC assembly. Also a slight change to the ssse3 code, which prevents a theoretical overflow in the sharp filter. Signed-off-by: Anton Khirnov --- libavcodec/x86/vp9dsp_init.c | 184 +++++++++++++++++--------------- libavcodec/x86/vp9mc.asm | 246 +++++++++++++++++++++++++++++++++++++++---- 2 files changed, 324 insertions(+), 106 deletions(-) (limited to 'libavcodec/x86/vp9mc.asm') diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c index 8c4af8368c..41fa35a4c3 100644 --- a/libavcodec/x86/vp9dsp_init.c +++ b/libavcodec/x86/vp9dsp_init.c @@ -51,39 +51,41 @@ fpel_func(avg, 32, avx2); fpel_func(avg, 64, avx2); #undef fpel_func -#define mc_func(avg, sz, dir, opt) \ +#define mc_func(avg, sz, dir, opt, type, f_sz) \ void \ ff_vp9_ ## avg ## _8tap_1d_ ## dir ## _ ## sz ## _ ## opt(uint8_t *dst, \ const uint8_t *src, \ ptrdiff_t dst_stride, \ ptrdiff_t src_stride, \ int h, \ - const int8_t (*filter)[32]) - -#define mc_funcs(sz, opt) \ - mc_func(put, sz, h, opt); \ - mc_func(avg, sz, h, opt); \ - mc_func(put, sz, v, opt); \ - mc_func(avg, sz, v, opt) - -mc_funcs(4, ssse3); -mc_funcs(8, ssse3); + const type (*filter)[f_sz]) + +#define mc_funcs(sz, opt, type, f_sz) \ + mc_func(put, sz, h, opt, type, f_sz); \ + mc_func(avg, sz, h, opt, type, f_sz); \ + mc_func(put, sz, v, opt, type, f_sz); \ + mc_func(avg, sz, v, opt, type, f_sz) + +mc_funcs(4, mmxext, int16_t, 8); +mc_funcs(8, sse2, int16_t, 8); +mc_funcs(4, ssse3, int8_t, 32); +mc_funcs(8, ssse3, int8_t, 32); #if ARCH_X86_64 -mc_funcs(16, ssse3); -mc_funcs(32, avx2); +mc_funcs(16, ssse3, int8_t, 32); +mc_funcs(32, avx2, int8_t, 32); #endif #undef mc_funcs #undef mc_func -#define mc_rep_func(avg, sz, hsz, dir, opt) \ +#define mc_rep_func(avg, sz, hsz, dir, opt, type, f_sz) \ static av_always_inline void \ ff_vp9_ ## avg ## _8tap_1d_ ## dir ## _ ## sz ## _ ## opt(uint8_t *dst, \ const uint8_t *src, \ ptrdiff_t dst_stride, \ ptrdiff_t src_stride, \ int h, \ - const int8_t (*filter)[32]) \ + const type (*filter)[f_sz]) \ { \ ff_vp9_ ## avg ## _8tap_1d_ ## dir ## _ ## hsz ## _ ## opt(dst, src, \ dst_stride, \ @@ -97,27 +99,31 @@ ff_vp9_ ## avg ## _8tap_1d_ ## dir ## _ ## sz ## _ ## opt(uint8_t *dst, \ h, filter); \ } -#define mc_rep_funcs(sz, hsz, opt) \ - mc_rep_func(put, sz, hsz, h, opt); \ - mc_rep_func(avg, sz, hsz, h, opt); \ - mc_rep_func(put, sz, hsz, v, opt); \ - mc_rep_func(avg, sz, hsz, v, opt) +#define mc_rep_funcs(sz, hsz, opt, type, f_sz) \ + mc_rep_func(put, sz, hsz, h, opt, type, f_sz); \ + mc_rep_func(avg, sz, hsz, h, opt, type, f_sz); \ + mc_rep_func(put, sz, hsz, v, opt, type, f_sz); \ + mc_rep_func(avg, sz, hsz, v, opt, type, f_sz) +mc_rep_funcs(16, 8, sse2, int16_t, 8); #if ARCH_X86_32 -mc_rep_funcs(16, 8, ssse3); +mc_rep_funcs(16, 
8, ssse3, int8_t, 32); #endif -mc_rep_funcs(32, 16, ssse3); -mc_rep_funcs(64, 32, ssse3); +mc_rep_funcs(32, 16, sse2, int16_t, 8); +mc_rep_funcs(32, 16, ssse3, int8_t, 32); +mc_rep_funcs(64, 32, sse2, int16_t, 8); +mc_rep_funcs(64, 32, ssse3, int8_t, 32); #if ARCH_X86_64 && HAVE_AVX2_EXTERNAL -mc_rep_funcs(64, 32, avx2); +mc_rep_funcs(64, 32, avx2, int8_t, 32); #endif #undef mc_rep_funcs #undef mc_rep_func extern const int8_t ff_filters_ssse3[3][15][4][32]; +extern const int16_t ff_filters_sse2[3][15][8][8]; -#define filter_8tap_2d_fn(op, sz, f, fname, align, opt) \ +#define filter_8tap_2d_fn(op, sz, f, f_opt, fname, align, opt) \ static void \ op ## _8tap_ ## fname ## _ ## sz ## hv_ ## opt(uint8_t *dst, \ const uint8_t *src, \ @@ -129,39 +135,42 @@ op ## _8tap_ ## fname ## _ ## sz ## hv_ ## opt(uint8_t *dst, ff_vp9_put_8tap_1d_h_ ## sz ## _ ## opt(temp, src - 3 * src_stride, \ 64, src_stride, \ h + 7, \ - ff_filters_ssse3[f][mx - 1]); \ + ff_filters_ ## f_opt[f][mx - 1]); \ ff_vp9_ ## op ## _8tap_1d_v_ ## sz ## _ ## opt(dst, temp + 3 * 64, \ dst_stride, 64, \ h, \ - ff_filters_ssse3[f][my - 1]); \ + ff_filters_ ## f_opt[f][my - 1]); \ } -#define filters_8tap_2d_fn(op, sz, align, opt) \ - filter_8tap_2d_fn(op, sz, FILTER_8TAP_REGULAR, regular, align, opt) \ - filter_8tap_2d_fn(op, sz, FILTER_8TAP_SHARP, sharp, align, opt) \ - filter_8tap_2d_fn(op, sz, FILTER_8TAP_SMOOTH, smooth, align, opt) +#define filters_8tap_2d_fn(op, sz, align, opt, f_opt) \ + filter_8tap_2d_fn(op, sz, FILTER_8TAP_REGULAR, f_opt, regular, align, opt) \ + filter_8tap_2d_fn(op, sz, FILTER_8TAP_SHARP, f_opt, sharp, align, opt) \ + filter_8tap_2d_fn(op, sz, FILTER_8TAP_SMOOTH, f_opt, smooth, align, opt) -#define filters_8tap_2d_fn2(op, align, opt) \ - filters_8tap_2d_fn(op, 64, align, opt) \ - filters_8tap_2d_fn(op, 32, align, opt) \ - filters_8tap_2d_fn(op, 16, align, opt) \ - filters_8tap_2d_fn(op, 8, align, opt) \ - filters_8tap_2d_fn(op, 4, align, opt) +#define filters_8tap_2d_fn2(op, align, opt4, opt8, f_opt) \ + filters_8tap_2d_fn(op, 64, align, opt8, f_opt) \ + filters_8tap_2d_fn(op, 32, align, opt8, f_opt) \ + filters_8tap_2d_fn(op, 16, align, opt8, f_opt) \ + filters_8tap_2d_fn(op, 8, align, opt8, f_opt) \ + filters_8tap_2d_fn(op, 4, align, opt4, f_opt) -filters_8tap_2d_fn2(put, 16, ssse3) -filters_8tap_2d_fn2(avg, 16, ssse3) + +filters_8tap_2d_fn2(put, 16, mmxext, sse2, sse2) +filters_8tap_2d_fn2(avg, 16, mmxext, sse2, sse2) +filters_8tap_2d_fn2(put, 16, ssse3, ssse3, ssse3) +filters_8tap_2d_fn2(avg, 16, ssse3, ssse3, ssse3) #if ARCH_X86_64 && HAVE_AVX2_EXTERNAL -filters_8tap_2d_fn(put, 64, 32, avx2) -filters_8tap_2d_fn(put, 32, 32, avx2) -filters_8tap_2d_fn(avg, 64, 32, avx2) -filters_8tap_2d_fn(avg, 32, 32, avx2) +filters_8tap_2d_fn(put, 64, 32, avx2, ssse3) +filters_8tap_2d_fn(put, 32, 32, avx2, ssse3) +filters_8tap_2d_fn(avg, 64, 32, avx2, ssse3) +filters_8tap_2d_fn(avg, 32, 32, avx2, ssse3) #endif #undef filters_8tap_2d_fn2 #undef filters_8tap_2d_fn #undef filter_8tap_2d_fn -#define filter_8tap_1d_fn(op, sz, f, fname, dir, dvar, opt) \ +#define filter_8tap_1d_fn(op, sz, f, f_opt, fname, dir, dvar, opt) \ static void \ op ## _8tap_ ## fname ## _ ## sz ## dir ## _ ## opt(uint8_t *dst, \ const uint8_t *src, \ @@ -173,32 +182,34 @@ op ## _8tap_ ## fname ## _ ## sz ## dir ## _ ## opt(uint8_t *dst, \ ff_vp9_ ## op ## _8tap_1d_ ## dir ## _ ## sz ## _ ## opt(dst, src, \ dst_stride, \ src_stride, h,\ - ff_filters_ssse3[f][dvar - 1]); \ + ff_filters_ ## f_opt[f][dvar - 1]); \ } -#define filters_8tap_1d_fn(op, sz, 
dir, dvar, opt) \ - filter_8tap_1d_fn(op, sz, FILTER_8TAP_REGULAR, regular, dir, dvar, opt) \ - filter_8tap_1d_fn(op, sz, FILTER_8TAP_SHARP, sharp, dir, dvar, opt) \ - filter_8tap_1d_fn(op, sz, FILTER_8TAP_SMOOTH, smooth, dir, dvar, opt) - -#define filters_8tap_1d_fn2(op, sz, opt) \ - filters_8tap_1d_fn(op, sz, h, mx, opt) \ - filters_8tap_1d_fn(op, sz, v, my, opt) - -#define filters_8tap_1d_fn3(op, opt) \ - filters_8tap_1d_fn2(op, 64, opt) \ - filters_8tap_1d_fn2(op, 32, opt) \ - filters_8tap_1d_fn2(op, 16, opt) \ - filters_8tap_1d_fn2(op, 8, opt) \ - filters_8tap_1d_fn2(op, 4, opt) - -filters_8tap_1d_fn3(put, ssse3) -filters_8tap_1d_fn3(avg, ssse3) +#define filters_8tap_1d_fn(op, sz, dir, dvar, opt, f_opt) \ + filter_8tap_1d_fn(op, sz, FILTER_8TAP_REGULAR, f_opt, regular, dir, dvar, opt) \ + filter_8tap_1d_fn(op, sz, FILTER_8TAP_SHARP, f_opt, sharp, dir, dvar, opt) \ + filter_8tap_1d_fn(op, sz, FILTER_8TAP_SMOOTH, f_opt, smooth, dir, dvar, opt) + +#define filters_8tap_1d_fn2(op, sz, opt, f_opt) \ + filters_8tap_1d_fn(op, sz, h, mx, opt, f_opt) \ + filters_8tap_1d_fn(op, sz, v, my, opt, f_opt) + +#define filters_8tap_1d_fn3(op, opt4, opt8, f_opt) \ + filters_8tap_1d_fn2(op, 64, opt8, f_opt) \ + filters_8tap_1d_fn2(op, 32, opt8, f_opt) \ + filters_8tap_1d_fn2(op, 16, opt8, f_opt) \ + filters_8tap_1d_fn2(op, 8, opt8, f_opt) \ + filters_8tap_1d_fn2(op, 4, opt4, f_opt) + +filters_8tap_1d_fn3(put, mmxext, sse2, sse2) +filters_8tap_1d_fn3(avg, mmxext, sse2, sse2) +filters_8tap_1d_fn3(put, ssse3, ssse3, ssse3) +filters_8tap_1d_fn3(avg, ssse3, ssse3, ssse3) #if ARCH_X86_64 && HAVE_AVX2_EXTERNAL -filters_8tap_1d_fn2(put, 64, avx2) -filters_8tap_1d_fn2(put, 32, avx2) -filters_8tap_1d_fn2(avg, 64, avx2) -filters_8tap_1d_fn2(avg, 32, avx2) +filters_8tap_1d_fn2(put, 64, avx2, ssse3) +filters_8tap_1d_fn2(put, 32, avx2, ssse3) +filters_8tap_1d_fn2(avg, 64, avx2, ssse3) +filters_8tap_1d_fn2(avg, 32, avx2, ssse3) #endif #undef filters_8tap_1d_fn @@ -225,20 +236,23 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp) dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][idxh][idxv] = type ## _8tap_regular_ ## sz ## dir ## _ ## opt; \ dsp->mc[idx1][FILTER_8TAP_SHARP][idx2][idxh][idxv] = type ## _8tap_sharp_ ## sz ## dir ## _ ## opt -#define init_subpel2_32_64(idx, idxh, idxv, dir, type, opt) \ - init_subpel1(0, idx, idxh, idxv, 64, dir, type, opt); \ - init_subpel1(1, idx, idxh, idxv, 32, dir, type, opt) +#define init_subpel2(idx1, idx2, sz, type, opt) \ + init_subpel1(idx1, idx2, 1, 1, sz, hv, type, opt); \ + init_subpel1(idx1, idx2, 0, 1, sz, v, type, opt); \ + init_subpel1(idx1, idx2, 1, 0, sz, h, type, opt) + +#define init_subpel3_32_64(idx, type, opt) \ + init_subpel2(0, idx, 64, type, opt); \ + init_subpel2(1, idx, 32, type, opt) -#define init_subpel2(idx, idxh, idxv, dir, type, opt) \ - init_subpel2_32_64(idx, idxh, idxv, dir, type, opt); \ - init_subpel1(2, idx, idxh, idxv, 16, dir, type, opt); \ - init_subpel1(3, idx, idxh, idxv, 8, dir, type, opt); \ - init_subpel1(4, idx, idxh, idxv, 4, dir, type, opt) +#define init_subpel3_8to64(idx, type, opt) \ + init_subpel3_32_64(idx, type, opt); \ + init_subpel2(2, idx, 16, type, opt); \ + init_subpel2(3, idx, 8, type, opt) -#define init_subpel3(idx, type, opt) \ - init_subpel2(idx, 1, 1, hv, type, opt); \ - init_subpel2(idx, 0, 1, v, type, opt); \ - init_subpel2(idx, 1, 0, h, type, opt) +#define init_subpel3(idx, type, opt) \ + init_subpel3_8to64(idx, type, opt); \ + init_subpel2(4, idx, 4, type, opt) if (EXTERNAL_MMX(cpu_flags)) { init_fpel(4, 0, 4, put, mmx); @@ 
-246,6 +260,8 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp) } if (EXTERNAL_MMXEXT(cpu_flags)) { + init_subpel2(4, 0, 4, put, mmxext); + init_subpel2(4, 1, 4, avg, mmxext); init_fpel(4, 1, 4, avg, mmxext); init_fpel(3, 1, 8, avg, mmxext); } @@ -257,6 +273,8 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp) } if (EXTERNAL_SSE2(cpu_flags)) { + init_subpel3_8to64(0, put, sse2); + init_subpel3_8to64(1, avg, sse2); init_fpel(2, 1, 16, avg, sse2); init_fpel(1, 1, 32, avg, sse2); init_fpel(0, 1, 64, avg, sse2); @@ -277,12 +295,8 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp) init_fpel(0, 1, 64, avg, avx2); #if ARCH_X86_64 && HAVE_AVX2_EXTERNAL - init_subpel2_32_64(0, 1, 1, hv, put, avx2); - init_subpel2_32_64(0, 0, 1, v, put, avx2); - init_subpel2_32_64(0, 1, 0, h, put, avx2); - init_subpel2_32_64(1, 1, 1, hv, avg, avx2); - init_subpel2_32_64(1, 0, 1, v, avg, avx2); - init_subpel2_32_64(1, 1, 0, h, avg, avx2); + init_subpel3_32_64(0, put, avx2); + init_subpel3_32_64(1, avg, avx2); #endif /* ARCH_X86_64 && HAVE_AVX2_EXTERNAL */ } diff --git a/libavcodec/x86/vp9mc.asm b/libavcodec/x86/vp9mc.asm index 4f66ea1c92..15e93ea6cb 100644 --- a/libavcodec/x86/vp9mc.asm +++ b/libavcodec/x86/vp9mc.asm @@ -25,15 +25,28 @@ SECTION_RODATA 32 cextern pw_256 +cextern pw_64 -%macro F8_TAPS 8 +%macro F8_SSSE3_TAPS 8 times 16 db %1, %2 times 16 db %3, %4 times 16 db %5, %6 times 16 db %7, %8 %endmacro -; int8_t ff_filters_ssse3[3][15][4][32] -const filters_ssse3 ; smooth + +%macro F8_SSE2_TAPS 8 +times 8 dw %1 +times 8 dw %2 +times 8 dw %3 +times 8 dw %4 +times 8 dw %5 +times 8 dw %6 +times 8 dw %7 +times 8 dw %8 +%endmacro + +%macro FILTER 1 +const filters_%1 ; smooth F8_TAPS -3, -1, 32, 64, 38, 1, -3, 0 F8_TAPS -2, -2, 29, 63, 41, 2, -3, 0 F8_TAPS -2, -2, 26, 63, 43, 4, -4, 0 @@ -81,9 +94,102 @@ const filters_ssse3 ; smooth F8_TAPS -2, 5, -10, 27, 121, -17, 7, -3 F8_TAPS -1, 3, -6, 17, 125, -13, 5, -2 F8_TAPS 0, 1, -3, 8, 127, -7, 3, -1 +%endmacro + +%define F8_TAPS F8_SSSE3_TAPS +; int8_t ff_filters_ssse3[3][15][4][32] +FILTER ssse3 +%define F8_TAPS F8_SSE2_TAPS +; int16_t ff_filters_sse2[3][15][8][8] +FILTER sse2 SECTION .text +%macro filter_sse2_h_fn 1 +%assign %%px mmsize/2 +cglobal vp9_%1_8tap_1d_h_ %+ %%px, 6, 6, 15, dst, src, dstride, sstride, h, filtery + pxor m5, m5 + mova m6, [pw_64] + mova m7, [filteryq+ 0] +%if ARCH_X86_64 && mmsize > 8 + mova m8, [filteryq+ 16] + mova m9, [filteryq+ 32] + mova m10, [filteryq+ 48] + mova m11, [filteryq+ 64] + mova m12, [filteryq+ 80] + mova m13, [filteryq+ 96] + mova m14, [filteryq+112] +%endif +.loop: + movh m0, [srcq-3] + movh m1, [srcq-2] + movh m2, [srcq-1] + movh m3, [srcq+0] + movh m4, [srcq+1] + punpcklbw m0, m5 + punpcklbw m1, m5 + punpcklbw m2, m5 + punpcklbw m3, m5 + punpcklbw m4, m5 + pmullw m0, m7 +%if ARCH_X86_64 && mmsize > 8 + pmullw m1, m8 + pmullw m2, m9 + pmullw m3, m10 + pmullw m4, m11 +%else + pmullw m1, [filteryq+ 16] + pmullw m2, [filteryq+ 32] + pmullw m3, [filteryq+ 48] + pmullw m4, [filteryq+ 64] +%endif + paddw m0, m1 + paddw m2, m3 + paddw m0, m4 + movh m1, [srcq+2] + movh m3, [srcq+3] + movh m4, [srcq+4] + add srcq, sstrideq + punpcklbw m1, m5 + punpcklbw m3, m5 + punpcklbw m4, m5 +%if ARCH_X86_64 && mmsize > 8 + pmullw m1, m12 + pmullw m3, m13 + pmullw m4, m14 +%else + pmullw m1, [filteryq+ 80] + pmullw m3, [filteryq+ 96] + pmullw m4, [filteryq+112] +%endif + paddw m0, m1 + paddw m3, m4 + paddw m0, m6 + paddw m2, m3 + paddsw m0, m2 + psraw m0, 7 +%ifidn %1, avg + movh m1, [dstq] +%endif + packuswb m0, m0 +%ifidn %1, avg 
+ pavgb m0, m1 +%endif + movh [dstq], m0 + add dstq, dstrideq + dec hd + jg .loop + RET +%endmacro + +INIT_MMX mmxext +filter_sse2_h_fn put +filter_sse2_h_fn avg + +INIT_XMM sse2 +filter_sse2_h_fn put +filter_sse2_h_fn avg + %macro filter_h_fn 1 %assign %%px mmsize/2 cglobal vp9_%1_8tap_1d_h_ %+ %%px, 6, 6, 11, dst, src, dstride, sstride, h, filtery @@ -118,9 +224,9 @@ cglobal vp9_%1_8tap_1d_h_ %+ %%px, 6, 6, 11, dst, src, dstride, sstride, h, filt pmaddubsw m4, [filteryq+64] pmaddubsw m1, [filteryq+96] %endif - paddw m0, m2 - paddw m4, m1 - paddsw m0, m4 + paddw m0, m4 + paddw m2, m1 + paddsw m0, m2 pmulhrsw m0, m6 %ifidn %1, avg movh m1, [dstq] @@ -175,12 +281,12 @@ cglobal vp9_%1_8tap_1d_h_ %+ %%px, 6, 6, 14, dst, src, dstride, sstride, h, filt pmaddubsw m5, m10 pmaddubsw m6, m11 pmaddubsw m7, m11 - paddw m0, m2 - paddw m1, m3 - paddw m4, m6 - paddw m5, m7 - paddsw m0, m4 - paddsw m1, m5 + paddw m0, m4 + paddw m1, m5 + paddw m2, m6 + paddw m3, m7 + paddsw m0, m2 + paddsw m1, m3 pmulhrsw m0, m13 pmulhrsw m1, m13 packuswb m0, m1 @@ -206,6 +312,104 @@ filter_hx2_fn avg %endif ; ARCH_X86_64 +%macro filter_sse2_v_fn 1 +%assign %%px mmsize/2 +%if ARCH_X86_64 +cglobal vp9_%1_8tap_1d_v_ %+ %%px, 6, 8, 15, dst, src, dstride, sstride, h, filtery, src4, sstride3 +%else +cglobal vp9_%1_8tap_1d_v_ %+ %%px, 4, 7, 15, dst, src, dstride, sstride, filtery, src4, sstride3 + mov filteryq, r5mp +%define hd r4mp +%endif + pxor m5, m5 + mova m6, [pw_64] + lea sstride3q, [sstrideq*3] + lea src4q, [srcq+sstrideq] + sub srcq, sstride3q + mova m7, [filteryq+ 0] +%if ARCH_X86_64 && mmsize > 8 + mova m8, [filteryq+ 16] + mova m9, [filteryq+ 32] + mova m10, [filteryq+ 48] + mova m11, [filteryq+ 64] + mova m12, [filteryq+ 80] + mova m13, [filteryq+ 96] + mova m14, [filteryq+112] +%endif +.loop: + ; FIXME maybe reuse loads from previous rows, or just + ; more generally unroll this to prevent multiple loads of + ; the same data? 
+ movh m0, [srcq] + movh m1, [srcq+sstrideq] + movh m2, [srcq+sstrideq*2] + movh m3, [srcq+sstride3q] + add srcq, sstrideq + movh m4, [src4q] + punpcklbw m0, m5 + punpcklbw m1, m5 + punpcklbw m2, m5 + punpcklbw m3, m5 + punpcklbw m4, m5 + pmullw m0, m7 +%if ARCH_X86_64 && mmsize > 8 + pmullw m1, m8 + pmullw m2, m9 + pmullw m3, m10 + pmullw m4, m11 +%else + pmullw m1, [filteryq+ 16] + pmullw m2, [filteryq+ 32] + pmullw m3, [filteryq+ 48] + pmullw m4, [filteryq+ 64] +%endif + paddw m0, m1 + paddw m2, m3 + paddw m0, m4 + movh m1, [src4q+sstrideq] + movh m3, [src4q+sstrideq*2] + movh m4, [src4q+sstride3q] + add src4q, sstrideq + punpcklbw m1, m5 + punpcklbw m3, m5 + punpcklbw m4, m5 +%if ARCH_X86_64 && mmsize > 8 + pmullw m1, m12 + pmullw m3, m13 + pmullw m4, m14 +%else + pmullw m1, [filteryq+ 80] + pmullw m3, [filteryq+ 96] + pmullw m4, [filteryq+112] +%endif + paddw m0, m1 + paddw m3, m4 + paddw m0, m6 + paddw m2, m3 + paddsw m0, m2 + psraw m0, 7 +%ifidn %1, avg + movh m1, [dstq] +%endif + packuswb m0, m0 +%ifidn %1, avg + pavgb m0, m1 +%endif + movh [dstq], m0 + add dstq, dstrideq + dec hd + jg .loop + RET +%endmacro + +INIT_MMX mmxext +filter_sse2_v_fn put +filter_sse2_v_fn avg + +INIT_XMM sse2 +filter_sse2_v_fn put +filter_sse2_v_fn avg + %macro filter_v_fn 1 %assign %%px mmsize/2 %if ARCH_X86_64 @@ -252,9 +456,9 @@ cglobal vp9_%1_8tap_1d_v_ %+ %%px, 4, 7, 11, dst, src, dstride, sstride, filtery pmaddubsw m4, [filteryq+64] pmaddubsw m1, [filteryq+96] %endif - paddw m0, m2 - paddw m4, m1 - paddsw m0, m4 + paddw m0, m4 + paddw m2, m1 + paddsw m0, m2 pmulhrsw m0, m6 %ifidn %1, avg movh m1, [dstq] @@ -317,12 +521,12 @@ cglobal vp9_%1_8tap_1d_v_ %+ %%px, 6, 8, 14, dst, src, dstride, sstride, h, filt pmaddubsw m5, m10 pmaddubsw m6, m11 pmaddubsw m7, m11 - paddw m0, m2 - paddw m1, m3 - paddw m4, m6 - paddw m5, m7 - paddsw m0, m4 - paddsw m1, m5 + paddw m0, m4 + paddw m1, m5 + paddw m2, m6 + paddw m3, m7 + paddsw m0, m2 + paddsw m1, m3 pmulhrsw m0, m13 pmulhrsw m1, m13 packuswb m0, m1 -- cgit v1.2.3
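
As a rough illustration of the "theoretical overflow in the sharp filter" mentioned in the last commit above (and of why its paddw pairing changed from (m0+m2, m4+m1) to (m0+m4, m2+m1)), the small C sketch below evaluates the worst-case intermediate sums of the ssse3 8-tap path for one of the large-centre rows of the filter table shown earlier, -1, 3, -6, 17, 125, -13, 5, -2. It is not part of the patches; the 255/0 pixel pattern is simply the adversarial input that maximizes each pmaddubsw pair.

    /* Worst-case intermediate sums of the ssse3 8-tap filter for the
     * table row {-1, 3, -6, 17, 125, -13, 5, -2}, comparing the old and
     * new paddw pairings from the last patch above. */
    #include <stdio.h>

    int main(void)
    {
        const int taps[8] = { -1, 3, -6, 17, 125, -13, 5, -2 };
        int pair_max[4];

        /* pmaddubsw adds two byte*byte products per word lane, so each
         * pair result already fits in int16.  Its maximum is reached
         * with 255 under every positive tap and 0 under every negative
         * one. */
        for (int i = 0; i < 4; i++) {
            int a = taps[2 * i], b = taps[2 * i + 1];
            pair_max[i] = (a > 0 ? 255 * a : 0) + (b > 0 ? 255 * b : 0);
        }

        /* old pairing: paddw m0,m2 / paddw m4,m1 -> (p0+p1), (p2+p3) */
        printf("old paddw sums: %d and %d\n",
               pair_max[0] + pair_max[1], pair_max[2] + pair_max[3]);
        /* new pairing: paddw m0,m4 / paddw m2,m1 -> (p0+p2), (p1+p3) */
        printf("new paddw sums: %d and %d\n",
               pair_max[0] + pair_max[2], pair_max[1] + pair_max[3]);
        return 0;
    }

Under the old pairing one of the non-saturating paddw additions can reach 33150 and wrap past the int16 limit of 32767; under the new pairing both intermediate sums (32640 and 5610) stay below it, and only the final paddsw has to saturate.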