Diffstat (limited to 'libavutil/x86')
-rw-r--r--  libavutil/x86/Makefile           |  10
-rw-r--r--  libavutil/x86/asm.h              |  55
-rw-r--r--  libavutil/x86/bswap.h            |  44
-rw-r--r--  libavutil/x86/cpu.c              |  24
-rw-r--r--  libavutil/x86/cpu.h              |  15
-rw-r--r--  libavutil/x86/cpuid.asm          |   8
-rw-r--r--  libavutil/x86/emms.asm           |   8
-rw-r--r--  libavutil/x86/emms.h             |  18
-rw-r--r--  libavutil/x86/fixed_dsp.asm      |  48
-rw-r--r--  libavutil/x86/fixed_dsp_init.c   |  35
-rw-r--r--  libavutil/x86/float_dsp.asm      | 167
-rw-r--r--  libavutil/x86/float_dsp_init.c   |  97
-rw-r--r--  libavutil/x86/intmath.h          | 136
-rw-r--r--  libavutil/x86/intreadwrite.h     |   8
-rw-r--r--  libavutil/x86/lls.asm            |  66
-rw-r--r--  libavutil/x86/lls_init.c         |  18
-rw-r--r--  libavutil/x86/pixelutils.asm     | 165
-rw-r--r--  libavutil/x86/pixelutils.h       |  26
-rw-r--r--  libavutil/x86/pixelutils_init.c  |  64
-rw-r--r--  libavutil/x86/timer.h            |   9
-rw-r--r--  libavutil/x86/w64xmmtest.h       |  13
-rw-r--r--  libavutil/x86/x86inc.asm         | 108
-rw-r--r--  libavutil/x86/x86util.asm        | 132
23 files changed, 1027 insertions(+), 247 deletions(-)
diff --git a/libavutil/x86/Makefile b/libavutil/x86/Makefile
index 1e19082233..94d8832062 100644
--- a/libavutil/x86/Makefile
+++ b/libavutil/x86/Makefile
@@ -1,8 +1,16 @@
OBJS += x86/cpu.o \
+ x86/fixed_dsp_init.o \
x86/float_dsp_init.o \
x86/lls_init.o \
+OBJS-$(CONFIG_PIXELUTILS) += x86/pixelutils_init.o \
+
+EMMS_OBJS_$(HAVE_MMX_INLINE)_$(HAVE_MMX_EXTERNAL)_$(HAVE_MM_EMPTY) = x86/emms.o
+
YASM-OBJS += x86/cpuid.o \
- x86/emms.o \
+ $(EMMS_OBJS__yes_) \
+ x86/fixed_dsp.o \
x86/float_dsp.o \
x86/lls.o \
+
+YASM-OBJS-$(CONFIG_PIXELUTILS) += x86/pixelutils.o \
diff --git a/libavutil/x86/asm.h b/libavutil/x86/asm.h
index db5f3d5ac5..109b65e542 100644
--- a/libavutil/x86/asm.h
+++ b/libavutil/x86/asm.h
@@ -1,20 +1,20 @@
/*
* copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -38,7 +38,8 @@ typedef struct ymm_reg { uint64_t a, b, c, d; } ymm_reg;
# define PTR_SIZE "8"
typedef int64_t x86_reg;
-# define REG_SP "rsp"
+/* REG_SP is defined in Solaris sys headers, so use REG_sp */
+# define REG_sp "rsp"
# define REG_BP "rbp"
# define REGBP rbp
# define REGa rax
@@ -59,7 +60,7 @@ typedef int64_t x86_reg;
# define PTR_SIZE "4"
typedef int32_t x86_reg;
-# define REG_SP "esp"
+# define REG_sp "esp"
# define REG_BP "ebp"
# define REGBP ebp
# define REGa eax
@@ -108,6 +109,46 @@ typedef int x86_reg;
# define LOCAL_MANGLE(a) #a
#endif
-#define MANGLE(a) EXTERN_PREFIX LOCAL_MANGLE(a)
+#if HAVE_INLINE_ASM_DIRECT_SYMBOL_REFS
+# define MANGLE(a) EXTERN_PREFIX LOCAL_MANGLE(a)
+# define NAMED_CONSTRAINTS_ADD(...)
+# define NAMED_CONSTRAINTS(...)
+# define NAMED_CONSTRAINTS_ARRAY_ADD(...)
+# define NAMED_CONSTRAINTS_ARRAY(...)
+#else
+ /* When direct symbol references are used in code passed to a compiler that
+ * does not support them, these references must be converted to named asm
+ * constraints instead. So instead of returning a direct symbol, MANGLE now
+ * returns a named constraint for that specific symbol. For this to work there
+ * must also be a corresponding entry in the asm interface: add it with the
+ * macro NAMED_CONSTRAINTS(), passing in each symbol referenced in the
+ * corresponding block of code (e.g. NAMED_CONSTRAINTS(var1,var2,var3)).
+ * If constraints already exist, use NAMED_CONSTRAINTS_ADD to append to them.
+ */
+# define MANGLE(a) "%["#a"]"
+ // Intel/MSVC does not correctly expand va-args, so a rather ugly hack is needed to make this work
+# define FE_0(P,X) P(X)
+# define FE_1(P,X,X1) P(X), FE_0(P,X1)
+# define FE_2(P,X,X1,X2) P(X), FE_1(P,X1,X2)
+# define FE_3(P,X,X1,X2,X3) P(X), FE_2(P,X1,X2,X3)
+# define FE_4(P,X,X1,X2,X3,X4) P(X), FE_3(P,X1,X2,X3,X4)
+# define FE_5(P,X,X1,X2,X3,X4,X5) P(X), FE_4(P,X1,X2,X3,X4,X5)
+# define FE_6(P,X,X1,X2,X3,X4,X5,X6) P(X), FE_5(P,X1,X2,X3,X4,X5,X6)
+# define FE_7(P,X,X1,X2,X3,X4,X5,X6,X7) P(X), FE_6(P,X1,X2,X3,X4,X5,X6,X7)
+# define FE_8(P,X,X1,X2,X3,X4,X5,X6,X7,X8) P(X), FE_7(P,X1,X2,X3,X4,X5,X6,X7,X8)
+# define FE_9(P,X,X1,X2,X3,X4,X5,X6,X7,X8,X9) P(X), FE_8(P,X1,X2,X3,X4,X5,X6,X7,X8,X9)
+# define GET_FE_IMPL(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,NAME,...) NAME
+# define GET_FE(A) GET_FE_IMPL A
+# define GET_FE_GLUE(x, y) x y
+# define FOR_EACH_VA(P,...) GET_FE_GLUE(GET_FE((__VA_ARGS__,FE_9,FE_8,FE_7,FE_6,FE_5,FE_4,FE_3,FE_2,FE_1,FE_0)), (P,__VA_ARGS__))
+# define NAME_CONSTRAINT(x) [x] "m"(x)
+ // Parameters are a list of each symbol reference required
+# define NAMED_CONSTRAINTS_ADD(...) , FOR_EACH_VA(NAME_CONSTRAINT,__VA_ARGS__)
+ // Same but without comma for when there are no previously defined constraints
+# define NAMED_CONSTRAINTS(...) FOR_EACH_VA(NAME_CONSTRAINT,__VA_ARGS__)
+ // Same as above NAMED_CONSTRAINTS except used for passing arrays/pointers instead of normal variables
+# define NAME_CONSTRAINT_ARRAY(x) [x] "m"(*x)
+# define NAMED_CONSTRAINTS_ARRAY_ADD(...) , FOR_EACH_VA(NAME_CONSTRAINT_ARRAY,__VA_ARGS__)
+# define NAMED_CONSTRAINTS_ARRAY(...) FOR_EACH_VA(NAME_CONSTRAINT_ARRAY,__VA_ARGS__)
+#endif
#endif /* AVUTIL_X86_ASM_H */
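For illustration, a usage sketch of the two MANGLE paths above. This is hypothetical code, not part of the patch: the constant ff_mask and the function are invented for the example. The point is that any symbol referenced via MANGLE() inside inline asm must also appear once in the operand list via NAMED_CONSTRAINTS()/NAMED_CONSTRAINTS_ADD(), so the fallback path can materialize it as a named "m" constraint.

#include <stdint.h>
#include "libavutil/x86/asm.h"

/* hypothetical constant; any global referenced from inline asm works the same */
static const uint32_t ff_mask = 0xFF00FF00;

static inline uint32_t mask_high_bytes(uint32_t v)
{
    /* NAMED_CONSTRAINTS(ff_mask) expands to [ff_mask] "m"(ff_mask) on the
     * fallback path, and to nothing when direct symbol refs are supported. */
    __asm__ ("andl "MANGLE(ff_mask)", %0"
             : "+r"(v)
             : NAMED_CONSTRAINTS(ff_mask));
    return v;
}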
diff --git a/libavutil/x86/bswap.h b/libavutil/x86/bswap.h
index c73be9af81..ffa59e4c82 100644
--- a/libavutil/x86/bswap.h
+++ b/libavutil/x86/bswap.h
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -25,21 +25,47 @@
#define AVUTIL_X86_BSWAP_H
#include <stdint.h>
+#if defined(_MSC_VER)
+#include <intrin.h>
+#endif
#include "config.h"
#include "libavutil/attributes.h"
-#if HAVE_INLINE_ASM
+#if defined(_MSC_VER)
+
+#define av_bswap16 av_bswap16
+static av_always_inline av_const uint16_t av_bswap16(uint16_t x)
+{
+ return _rotr16(x, 8);
+}
+
+#define av_bswap32 av_bswap32
+static av_always_inline av_const uint32_t av_bswap32(uint32_t x)
+{
+ return _byteswap_ulong(x);
+}
+
+#if ARCH_X86_64
+#define av_bswap64 av_bswap64
+static inline uint64_t av_const av_bswap64(uint64_t x)
+{
+ return _byteswap_uint64(x);
+}
+#endif
+
+
+#elif HAVE_INLINE_ASM
-#if !AV_GCC_VERSION_AT_LEAST(4,1)
+#if AV_GCC_VERSION_AT_MOST(4,0)
#define av_bswap16 av_bswap16
static av_always_inline av_const unsigned av_bswap16(unsigned x)
{
__asm__("rorw $8, %w0" : "+r"(x));
return x;
}
-#endif /* !AV_GCC_VERSION_AT_LEAST(4,1) */
+#endif /* AV_GCC_VERSION_AT_MOST(4,0) */
-#if !AV_GCC_VERSION_AT_LEAST(4,5)
+#if AV_GCC_VERSION_AT_MOST(4,4) || defined(__INTEL_COMPILER)
#define av_bswap32 av_bswap32
static av_always_inline av_const uint32_t av_bswap32(uint32_t x)
{
@@ -55,7 +81,7 @@ static inline uint64_t av_const av_bswap64(uint64_t x)
return x;
}
#endif
-#endif /* !AV_GCC_VERSION_AT_LEAST(4,5) */
+#endif /* AV_GCC_VERSION_AT_MOST(4,4) */
#endif /* HAVE_INLINE_ASM */
#endif /* AVUTIL_X86_BSWAP_H */
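As a reference point (not part of the patch), this is the portable C semantics that both the new MSVC intrinsic path and the existing inline-asm paths must compute:

#include <stdint.h>

static uint32_t bswap32_ref(uint32_t x)
{
    return ( x >> 24)               |
           ((x >>  8) & 0x0000FF00) |
           ((x <<  8) & 0x00FF0000) |
           ( x << 24);              /* bswap32_ref(0x11223344) == 0x44332211 */
}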
diff --git a/libavutil/x86/cpu.c b/libavutil/x86/cpu.c
index 098ccf7004..bb63daac3d 100644
--- a/libavutil/x86/cpu.c
+++ b/libavutil/x86/cpu.c
@@ -3,20 +3,20 @@
* (c)1997-99 by H. Dietz and R. Fisher
* Converted to C and improved by Fabrice Bellard.
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -45,7 +45,7 @@
"cpuid \n\t" \
"xchg %%"REG_b", %%"REG_S \
: "=a" (eax), "=S" (ebx), "=c" (ecx), "=d" (edx) \
- : "0" (index))
+ : "0" (index), "2"(0))
#define xgetbv(index, eax, edx) \
__asm__ (".byte 0x0f, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c" (index))
@@ -126,6 +126,8 @@ int ff_get_cpu_flags_x86(void)
rval |= AV_CPU_FLAG_SSE4;
if (ecx & 0x00100000 )
rval |= AV_CPU_FLAG_SSE42;
+ if (ecx & 0x01000000 )
+ rval |= AV_CPU_FLAG_AESNI;
#if HAVE_AVX
/* Check OSXSAVE and AVX bits */
if ((ecx & 0x18000000) == 0x18000000) {
@@ -143,7 +145,7 @@ int ff_get_cpu_flags_x86(void)
if (max_std_level >= 7) {
cpuid(7, eax, ebx, ecx, edx);
#if HAVE_AVX2
- if (ebx & 0x00000020)
+ if ((rval & AV_CPU_FLAG_AVX) && (ebx & 0x00000020))
rval |= AV_CPU_FLAG_AVX2;
#endif /* HAVE_AVX2 */
/* BMI1/2 don't need OS support */
@@ -180,13 +182,11 @@ int ff_get_cpu_flags_x86(void)
/* Similar to the above but for AVX functions on AMD processors.
This is necessary only for functions using YMM registers on Bulldozer
- based CPUs as they lack 256-bits execution units. SSE/AVX functions
- using XMM registers are always faster on them.
+ and Jaguar based CPUs as they lack 256-bit execution units. SSE/AVX
+ functions using XMM registers are always faster on them.
AV_CPU_FLAG_AVX and AV_CPU_FLAG_AVXSLOW are both set so that AVX is
- used unless explicitly disabled by checking AV_CPU_FLAG_AVXSLOW.
- TODO: Confirm if Excavator is affected or not by this once it's
- released, and update the check if necessary. Same for btver2. */
- if (family == 0x15 && (rval & AV_CPU_FLAG_AVX))
+ used unless explicitly disabled by checking AV_CPU_FLAG_AVXSLOW. */
+ if ((family == 0x15 || family == 0x16) && (rval & AV_CPU_FLAG_AVX))
rval |= AV_CPU_FLAG_AVXSLOW;
}
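Two notes on the cpu.c changes above. The added "2"(0) input operand pins ECX to zero before CPUID executes, which leaf 7 requires in order to select subleaf 0; without it the AVX2/BMI bits could be read from a garbage subleaf. And AV_CPU_FLAG_AVXSLOW is meant to be consumed roughly like this (an assumed usage sketch, not code from this patch):

#include "libavutil/cpu.h"

/* Prefer the 256-bit YMM code path only when AVX is present and not
 * flagged as slow, i.e. not a Bulldozer/Jaguar core whose 256-bit ops
 * are split across 128-bit execution units. */
static int prefer_ymm(void)
{
    int flags = av_get_cpu_flags();
    return (flags & AV_CPU_FLAG_AVX) && !(flags & AV_CPU_FLAG_AVXSLOW);
}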
diff --git a/libavutil/x86/cpu.h b/libavutil/x86/cpu.h
index 0695436548..f171037f1c 100644
--- a/libavutil/x86/cpu.h
+++ b/libavutil/x86/cpu.h
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -47,6 +47,7 @@
#define X86_FMA3(flags) CPUEXT(flags, FMA3)
#define X86_FMA4(flags) CPUEXT(flags, FMA4)
#define X86_AVX2(flags) CPUEXT(flags, AVX2)
+#define X86_AESNI(flags) CPUEXT(flags, AESNI)
#define EXTERNAL_AMD3DNOW(flags) CPUEXT_SUFFIX(flags, _EXTERNAL, AMD3DNOW)
#define EXTERNAL_AMD3DNOWEXT(flags) CPUEXT_SUFFIX(flags, _EXTERNAL, AMD3DNOWEXT)
@@ -67,8 +68,13 @@
#define EXTERNAL_AVX_SLOW(flags) CPUEXT_SUFFIX_SLOW(flags, _EXTERNAL, AVX)
#define EXTERNAL_XOP(flags) CPUEXT_SUFFIX(flags, _EXTERNAL, XOP)
#define EXTERNAL_FMA3(flags) CPUEXT_SUFFIX(flags, _EXTERNAL, FMA3)
+#define EXTERNAL_FMA3_FAST(flags) CPUEXT_SUFFIX_FAST2(flags, _EXTERNAL, FMA3, AVX)
+#define EXTERNAL_FMA3_SLOW(flags) CPUEXT_SUFFIX_SLOW2(flags, _EXTERNAL, FMA3, AVX)
#define EXTERNAL_FMA4(flags) CPUEXT_SUFFIX(flags, _EXTERNAL, FMA4)
#define EXTERNAL_AVX2(flags) CPUEXT_SUFFIX(flags, _EXTERNAL, AVX2)
+#define EXTERNAL_AVX2_FAST(flags) CPUEXT_SUFFIX_FAST2(flags, _EXTERNAL, AVX2, AVX)
+#define EXTERNAL_AVX2_SLOW(flags) CPUEXT_SUFFIX_SLOW2(flags, _EXTERNAL, AVX2, AVX)
+#define EXTERNAL_AESNI(flags) CPUEXT_SUFFIX(flags, _EXTERNAL, AESNI)
#define INLINE_AMD3DNOW(flags) CPUEXT_SUFFIX(flags, _INLINE, AMD3DNOW)
#define INLINE_AMD3DNOWEXT(flags) CPUEXT_SUFFIX(flags, _INLINE, AMD3DNOWEXT)
@@ -91,6 +97,7 @@
#define INLINE_FMA3(flags) CPUEXT_SUFFIX(flags, _INLINE, FMA3)
#define INLINE_FMA4(flags) CPUEXT_SUFFIX(flags, _INLINE, FMA4)
#define INLINE_AVX2(flags) CPUEXT_SUFFIX(flags, _INLINE, AVX2)
+#define INLINE_AESNI(flags) CPUEXT_SUFFIX(flags, _INLINE, AESNI)
void ff_cpu_cpuid(int index, int *eax, int *ebx, int *ecx, int *edx);
void ff_cpu_xgetbv(int op, int *eax, int *edx);
diff --git a/libavutil/x86/cpuid.asm b/libavutil/x86/cpuid.asm
index 1cb8e94ea3..c3f7866ec7 100644
--- a/libavutil/x86/cpuid.asm
+++ b/libavutil/x86/cpuid.asm
@@ -4,20 +4,20 @@
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;* Fiona Glaser <fiona@x264.com>
;*
-;* This file is part of Libav.
+;* This file is part of FFmpeg.
;*
-;* Libav is free software; you can redistribute it and/or
+;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
-;* Libav is distributed in the hope that it will be useful,
+;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
-;* License along with Libav; if not, write to the Free Software
+;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
diff --git a/libavutil/x86/emms.asm b/libavutil/x86/emms.asm
index a6851acc99..0aad34af3f 100644
--- a/libavutil/x86/emms.asm
+++ b/libavutil/x86/emms.asm
@@ -1,20 +1,20 @@
;*****************************************************************************
;* Copyright (C) 2013 Martin Storsjo
;*
-;* This file is part of Libav.
+;* This file is part of FFmpeg.
;*
-;* Libav is free software; you can redistribute it and/or
+;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
-;* Libav is distributed in the hope that it will be useful,
+;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
-;* License along with Libav; if not, write to the Free Software
+;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
diff --git a/libavutil/x86/emms.h b/libavutil/x86/emms.h
index 2ed9e5d09d..6fda6e2763 100644
--- a/libavutil/x86/emms.h
+++ b/libavutil/x86/emms.h
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -21,6 +21,7 @@
#include "config.h"
#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
void avpriv_emms_yasm(void);
@@ -33,7 +34,14 @@ void avpriv_emms_yasm(void);
*/
static av_always_inline void emms_c(void)
{
- __asm__ volatile ("emms" ::: "memory");
+/* Some inlined functions may also use mmx instructions regardless of
+ * runtime cpuflags. With that in mind, we unconditionally empty the
+ * mmx state if the target cpu chosen at configure time supports it.
+ */
+#if !defined(__MMX__)
+ if(av_get_cpu_flags() & AV_CPU_FLAG_MMX)
+#endif
+ __asm__ volatile ("emms" ::: "memory");
}
#elif HAVE_MMX && HAVE_MM_EMPTY
# include <mmintrin.h>
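A minimal caller-side sketch of the emms_c() contract (a hypothetical function, assuming the header is included directly): code that may have executed MMX instructions must empty the MMX state before any x87 floating point runs.

#include <stdint.h>
#include "libavutil/x86/emms.h"

static double finish_mmx_work(const int16_t *v, int n)
{
    /* ... MMX processing of v[0..n) would happen here ... */
    emms_c(); /* skipped at run time on CPUs without MMX, unless the
               * compile-time target already implies MMX (__MMX__) */
    return v[0] / (double)n;
}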
diff --git a/libavutil/x86/fixed_dsp.asm b/libavutil/x86/fixed_dsp.asm
new file mode 100644
index 0000000000..979dd5c334
--- /dev/null
+++ b/libavutil/x86/fixed_dsp.asm
@@ -0,0 +1,48 @@
+;*****************************************************************************
+;* x86-optimized Fixed Point DSP functions
+;*
+;* Copyright 2016 James Almer
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "x86util.asm"
+
+SECTION .text
+
+;-----------------------------------------------------------------------------
+; void ff_butterflies_fixed(int *src0, int *src1, int len);
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal butterflies_fixed, 3,3,3, src0, src1, len
+ shl lend, 2
+ add src0q, lenq
+ add src1q, lenq
+ neg lenq
+
+align 16
+.loop:
+ mova m0, [src0q + lenq]
+ mova m1, [src1q + lenq]
+ mova m2, m0
+ paddd m0, m1
+ psubd m2, m1
+ mova [src0q + lenq], m0
+ mova [src1q + lenq], m2
+ add lenq, mmsize
+ jl .loop
+ RET
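As a reference, a scalar sketch of what the SSE2 kernel above computes: per element, src0 becomes the sum and src1 the difference, as 32-bit fixed-point adds and subtracts (the canonical C fallback lives in libavutil/fixed_dsp.c).

static void butterflies_fixed_ref(int *src0, int *src1, int len)
{
    int i;
    for (i = 0; i < len; i++) {
        int sum  = src0[i] + src1[i];   /* paddd */
        int diff = src0[i] - src1[i];   /* psubd */
        src0[i] = sum;
        src1[i] = diff;
    }
}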
diff --git a/libavutil/x86/fixed_dsp_init.c b/libavutil/x86/fixed_dsp_init.c
new file mode 100644
index 0000000000..303a2eb922
--- /dev/null
+++ b/libavutil/x86/fixed_dsp_init.c
@@ -0,0 +1,35 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/fixed_dsp.h"
+#include "cpu.h"
+
+void ff_butterflies_fixed_sse2(int *src0, int *src1, int len);
+
+av_cold void ff_fixed_dsp_init_x86(AVFixedDSPContext *fdsp)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (EXTERNAL_SSE2(cpu_flags)) {
+ fdsp->butterflies_fixed = ff_butterflies_fixed_sse2;
+ }
+}
diff --git a/libavutil/x86/float_dsp.asm b/libavutil/x86/float_dsp.asm
index d96249978a..021ff03c87 100644
--- a/libavutil/x86/float_dsp.asm
+++ b/libavutil/x86/float_dsp.asm
@@ -1,20 +1,22 @@
;*****************************************************************************
;* x86-optimized Float DSP functions
;*
-;* This file is part of Libav.
+;* Copyright 2006 Loren Merritt
;*
-;* Libav is free software; you can redistribute it and/or
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
-;* Libav is distributed in the hope that it will be useful,
+;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
-;* License along with Libav; if not, write to the Free Software
+;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
@@ -48,8 +50,10 @@ ALIGN 16
INIT_XMM sse
VECTOR_FMUL
+%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMUL
+%endif
;------------------------------------------------------------------------------
; void ff_vector_fmac_scalar(float *dst, const float *src, float mul, int len)
@@ -57,33 +61,48 @@ VECTOR_FMUL
%macro VECTOR_FMAC_SCALAR 0
%if UNIX64
-cglobal vector_fmac_scalar, 3,3,3, dst, src, len
+cglobal vector_fmac_scalar, 3,3,5, dst, src, len
%else
-cglobal vector_fmac_scalar, 4,4,3, dst, src, mul, len
+cglobal vector_fmac_scalar, 4,4,5, dst, src, mul, len
%endif
%if ARCH_X86_32
VBROADCASTSS m0, mulm
%else
%if WIN64
- mova xmm0, xmm2
+ SWAP 0, 2
%endif
- shufps xmm0, xmm0, 0
+ shufps xm0, xm0, 0
%if cpuflag(avx)
- vinsertf128 m0, m0, xmm0, 1
+ vinsertf128 m0, m0, xm0, 1
%endif
%endif
lea lenq, [lend*4-64]
.loop:
-%assign a 0
-%rep 32/mmsize
- mulps m1, m0, [srcq+lenq+(a+0)*mmsize]
- mulps m2, m0, [srcq+lenq+(a+1)*mmsize]
- addps m1, m1, [dstq+lenq+(a+0)*mmsize]
- addps m2, m2, [dstq+lenq+(a+1)*mmsize]
- mova [dstq+lenq+(a+0)*mmsize], m1
- mova [dstq+lenq+(a+1)*mmsize], m2
-%assign a a+2
-%endrep
+%if cpuflag(fma3)
+ mova m1, [dstq+lenq]
+ mova m2, [dstq+lenq+1*mmsize]
+ fmaddps m1, m0, [srcq+lenq], m1
+ fmaddps m2, m0, [srcq+lenq+1*mmsize], m2
+%else ; cpuflag
+ mulps m1, m0, [srcq+lenq]
+ mulps m2, m0, [srcq+lenq+1*mmsize]
+%if mmsize < 32
+ mulps m3, m0, [srcq+lenq+2*mmsize]
+ mulps m4, m0, [srcq+lenq+3*mmsize]
+%endif ; mmsize
+ addps m1, m1, [dstq+lenq]
+ addps m2, m2, [dstq+lenq+1*mmsize]
+%if mmsize < 32
+ addps m3, m3, [dstq+lenq+2*mmsize]
+ addps m4, m4, [dstq+lenq+3*mmsize]
+%endif ; mmsize
+%endif ; cpuflag
+ mova [dstq+lenq], m1
+ mova [dstq+lenq+1*mmsize], m2
+%if mmsize < 32
+ mova [dstq+lenq+2*mmsize], m3
+ mova [dstq+lenq+3*mmsize], m4
+%endif ; mmsize
sub lenq, 64
jge .loop
REP_RET
@@ -91,8 +110,14 @@ cglobal vector_fmac_scalar, 4,4,3, dst, src, mul, len
INIT_XMM sse
VECTOR_FMAC_SCALAR
+%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMAC_SCALAR
+%endif
+%if HAVE_FMA3_EXTERNAL
+INIT_YMM fma3
+VECTOR_FMAC_SCALAR
+%endif
;------------------------------------------------------------------------------
; void ff_vector_fmul_scalar(float *dst, const float *src, float mul, int len)
@@ -141,16 +166,11 @@ cglobal vector_dmul_scalar, 4,4,3, dst, src, mul, len
VBROADCASTSD m0, mulm
%else
%if WIN64
- movlhps xmm2, xmm2
-%if cpuflag(avx)
- vinsertf128 ymm2, ymm2, xmm2, 1
-%endif
SWAP 0, 2
-%else
- movlhps xmm0, xmm0
-%if cpuflag(avx)
- vinsertf128 ymm0, ymm0, xmm0, 1
%endif
+ movlhps xm0, xm0
+%if cpuflag(avx)
+ vinsertf128 ym0, ym0, xm0, 1
%endif
%endif
lea lenq, [lend*8-2*mmsize]
@@ -172,20 +192,85 @@ VECTOR_DMUL_SCALAR
%endif
;-----------------------------------------------------------------------------
+; vector_fmul_window(float *dst, const float *src0,
+; const float *src1, const float *win, int len);
+;-----------------------------------------------------------------------------
+%macro VECTOR_FMUL_WINDOW 0
+cglobal vector_fmul_window, 5, 6, 6, dst, src0, src1, win, len, len1
+ shl lend, 2
+ lea len1q, [lenq - mmsize]
+ add src0q, lenq
+ add dstq, lenq
+ add winq, lenq
+ neg lenq
+.loop:
+ mova m0, [winq + lenq]
+ mova m4, [src0q + lenq]
+%if cpuflag(sse)
+ mova m1, [winq + len1q]
+ mova m5, [src1q + len1q]
+ shufps m1, m1, 0x1b
+ shufps m5, m5, 0x1b
+ mova m2, m0
+ mova m3, m1
+ mulps m2, m4
+ mulps m3, m5
+ mulps m1, m4
+ mulps m0, m5
+ addps m2, m3
+ subps m1, m0
+ shufps m2, m2, 0x1b
+%else
+ pswapd m1, [winq + len1q]
+ pswapd m5, [src1q + len1q]
+ mova m2, m0
+ mova m3, m1
+ pfmul m2, m4
+ pfmul m3, m5
+ pfmul m1, m4
+ pfmul m0, m5
+ pfadd m2, m3
+ pfsub m1, m0
+ pswapd m2, m2
+%endif
+ mova [dstq + lenq], m1
+ mova [dstq + len1q], m2
+ sub len1q, mmsize
+ add lenq, mmsize
+ jl .loop
+%if mmsize == 8
+ femms
+%endif
+ REP_RET
+%endmacro
+
+INIT_MMX 3dnowext
+VECTOR_FMUL_WINDOW
+INIT_XMM sse
+VECTOR_FMUL_WINDOW
+
+;-----------------------------------------------------------------------------
; vector_fmul_add(float *dst, const float *src0, const float *src1,
; const float *src2, int len)
;-----------------------------------------------------------------------------
%macro VECTOR_FMUL_ADD 0
-cglobal vector_fmul_add, 5,5,2, dst, src0, src1, src2, len
+cglobal vector_fmul_add, 5,5,4, dst, src0, src1, src2, len
lea lenq, [lend*4 - 2*mmsize]
ALIGN 16
.loop:
mova m0, [src0q + lenq]
mova m1, [src0q + lenq + mmsize]
+%if cpuflag(fma3)
+ mova m2, [src2q + lenq]
+ mova m3, [src2q + lenq + mmsize]
+ fmaddps m0, m0, [src1q + lenq], m2
+ fmaddps m1, m1, [src1q + lenq + mmsize], m3
+%else
mulps m0, m0, [src1q + lenq]
mulps m1, m1, [src1q + lenq + mmsize]
addps m0, m0, [src2q + lenq]
addps m1, m1, [src2q + lenq + mmsize]
+%endif
mova [dstq + lenq], m0
mova [dstq + lenq + mmsize], m1
@@ -196,8 +281,14 @@ ALIGN 16
INIT_XMM sse
VECTOR_FMUL_ADD
+%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMUL_ADD
+%endif
+%if HAVE_FMA3_EXTERNAL
+INIT_YMM fma3
+VECTOR_FMUL_ADD
+%endif
;-----------------------------------------------------------------------------
; void vector_fmul_reverse(float *dst, const float *src0, const float *src1,
@@ -233,16 +324,18 @@ ALIGN 16
INIT_XMM sse
VECTOR_FMUL_REVERSE
+%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMUL_REVERSE
+%endif
; float scalarproduct_float_sse(const float *v1, const float *v2, int len)
INIT_XMM sse
cglobal scalarproduct_float, 3,3,2, v1, v2, offset
+ shl offsetd, 2
+ add v1q, offsetq
+ add v2q, offsetq
neg offsetq
- shl offsetq, 2
- sub v1q, offsetq
- sub v2q, offsetq
xorps xmm0, xmm0
.loop:
movaps xmm1, [v1q+offsetq]
@@ -266,14 +359,9 @@ cglobal scalarproduct_float, 3,3,2, v1, v2, offset
;-----------------------------------------------------------------------------
INIT_XMM sse
cglobal butterflies_float, 3,3,3, src0, src1, len
-%if ARCH_X86_64
- movsxd lenq, lend
-%endif
- test lenq, lenq
- jz .end
- shl lenq, 2
- lea src0q, [src0q + lenq]
- lea src1q, [src1q + lenq]
+ shl lend, 2
+ add src0q, lenq
+ add src1q, lenq
neg lenq
.loop:
mova m0, [src0q + lenq]
@@ -284,5 +372,4 @@ cglobal butterflies_float, 3,3,3, src0, src1, len
mova [src0q + lenq], m0
add lenq, mmsize
jl .loop
-.end:
REP_RET
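For reference, scalar sketches of the three kernels this diff touches most, modeled on the C fallbacks in libavutil/float_dsp.c. The FMA3 variants fuse each mulps/addps pair into one fmaddps; the window function is the one migrated to yasm from the inline asm removed in float_dsp_init.c (next file).

/* vector_fmac_scalar: dst[i] += src[i] * mul */
static void fmac_scalar_ref(float *dst, const float *src, float mul, int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] += src[i] * mul;
}

/* vector_fmul_add: dst[i] = src0[i] * src1[i] + src2[i] */
static void fmul_add_ref(float *dst, const float *src0, const float *src1,
                         const float *src2, int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] = src0[i] * src1[i] + src2[i];
}

/* vector_fmul_window: overlap-add windowing around the buffer center,
 * applying the window forwards and backwards */
static void fmul_window_ref(float *dst, const float *src0, const float *src1,
                            const float *win, int len)
{
    int i, j;
    dst  += len;
    win  += len;
    src0 += len;
    for (i = -len, j = len - 1; i < 0; i++, j--) {
        float s0 = src0[i];
        float s1 = src1[j];
        float wi = win[i];
        float wj = win[j];
        dst[i] = s0 * wj - s1 * wi;
        dst[j] = s0 * wi + s1 * wj;
    }
}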
diff --git a/libavutil/x86/float_dsp_init.c b/libavutil/x86/float_dsp_init.c
index b70433031a..c836a78e1b 100644
--- a/libavutil/x86/float_dsp_init.c
+++ b/libavutil/x86/float_dsp_init.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -33,6 +33,8 @@ void ff_vector_fmac_scalar_sse(float *dst, const float *src, float mul,
int len);
void ff_vector_fmac_scalar_avx(float *dst, const float *src, float mul,
int len);
+void ff_vector_fmac_scalar_fma3(float *dst, const float *src, float mul,
+ int len);
void ff_vector_fmul_scalar_sse(float *dst, const float *src, float mul,
int len);
@@ -42,10 +44,17 @@ void ff_vector_dmul_scalar_sse2(double *dst, const double *src,
void ff_vector_dmul_scalar_avx(double *dst, const double *src,
double mul, int len);
+void ff_vector_fmul_window_3dnowext(float *dst, const float *src0,
+ const float *src1, const float *win, int len);
+void ff_vector_fmul_window_sse(float *dst, const float *src0,
+ const float *src1, const float *win, int len);
+
void ff_vector_fmul_add_sse(float *dst, const float *src0, const float *src1,
const float *src2, int len);
void ff_vector_fmul_add_avx(float *dst, const float *src0, const float *src1,
const float *src2, int len);
+void ff_vector_fmul_add_fma3(float *dst, const float *src0, const float *src1,
+ const float *src2, int len);
void ff_vector_fmul_reverse_sse(float *dst, const float *src0,
const float *src1, int len);
@@ -56,88 +65,18 @@ float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order);
void ff_butterflies_float_sse(float *src0, float *src1, int len);
-#if HAVE_6REGS && HAVE_INLINE_ASM
-static void vector_fmul_window_3dnowext(float *dst, const float *src0,
- const float *src1, const float *win,
- int len)
-{
- x86_reg i = -len * 4;
- x86_reg j = len * 4 - 8;
- __asm__ volatile (
- "1: \n"
- "pswapd (%5, %1), %%mm1 \n"
- "movq (%5, %0), %%mm0 \n"
- "pswapd (%4, %1), %%mm5 \n"
- "movq (%3, %0), %%mm4 \n"
- "movq %%mm0, %%mm2 \n"
- "movq %%mm1, %%mm3 \n"
- "pfmul %%mm4, %%mm2 \n" // src0[len + i] * win[len + i]
- "pfmul %%mm5, %%mm3 \n" // src1[j] * win[len + j]
- "pfmul %%mm4, %%mm1 \n" // src0[len + i] * win[len + j]
- "pfmul %%mm5, %%mm0 \n" // src1[j] * win[len + i]
- "pfadd %%mm3, %%mm2 \n"
- "pfsub %%mm0, %%mm1 \n"
- "pswapd %%mm2, %%mm2 \n"
- "movq %%mm1, (%2, %0) \n"
- "movq %%mm2, (%2, %1) \n"
- "sub $8, %1 \n"
- "add $8, %0 \n"
- "jl 1b \n"
- "femms \n"
- : "+r"(i), "+r"(j)
- : "r"(dst + len), "r"(src0 + len), "r"(src1), "r"(win + len)
- );
-}
-
-static void vector_fmul_window_sse(float *dst, const float *src0,
- const float *src1, const float *win, int len)
-{
- x86_reg i = -len * 4;
- x86_reg j = len * 4 - 16;
- __asm__ volatile (
- "1: \n"
- "movaps (%5, %1), %%xmm1 \n"
- "movaps (%5, %0), %%xmm0 \n"
- "movaps (%4, %1), %%xmm5 \n"
- "movaps (%3, %0), %%xmm4 \n"
- "shufps $0x1b, %%xmm1, %%xmm1 \n"
- "shufps $0x1b, %%xmm5, %%xmm5 \n"
- "movaps %%xmm0, %%xmm2 \n"
- "movaps %%xmm1, %%xmm3 \n"
- "mulps %%xmm4, %%xmm2 \n" // src0[len + i] * win[len + i]
- "mulps %%xmm5, %%xmm3 \n" // src1[j] * win[len + j]
- "mulps %%xmm4, %%xmm1 \n" // src0[len + i] * win[len + j]
- "mulps %%xmm5, %%xmm0 \n" // src1[j] * win[len + i]
- "addps %%xmm3, %%xmm2 \n"
- "subps %%xmm0, %%xmm1 \n"
- "shufps $0x1b, %%xmm2, %%xmm2 \n"
- "movaps %%xmm1, (%2, %0) \n"
- "movaps %%xmm2, (%2, %1) \n"
- "sub $16, %1 \n"
- "add $16, %0 \n"
- "jl 1b \n"
- : "+r"(i), "+r"(j)
- : "r"(dst + len), "r"(src0 + len), "r"(src1), "r"(win + len)
- );
-}
-#endif /* HAVE_6REGS && HAVE_INLINE_ASM */
-
av_cold void ff_float_dsp_init_x86(AVFloatDSPContext *fdsp)
{
int cpu_flags = av_get_cpu_flags();
-#if HAVE_6REGS && HAVE_INLINE_ASM
- if (INLINE_AMD3DNOWEXT(cpu_flags)) {
- fdsp->vector_fmul_window = vector_fmul_window_3dnowext;
+ if (EXTERNAL_AMD3DNOWEXT(cpu_flags)) {
+ fdsp->vector_fmul_window = ff_vector_fmul_window_3dnowext;
}
- if (INLINE_SSE(cpu_flags)) {
- fdsp->vector_fmul_window = vector_fmul_window_sse;
- }
-#endif
if (EXTERNAL_SSE(cpu_flags)) {
fdsp->vector_fmul = ff_vector_fmul_sse;
fdsp->vector_fmac_scalar = ff_vector_fmac_scalar_sse;
fdsp->vector_fmul_scalar = ff_vector_fmul_scalar_sse;
+ fdsp->vector_fmul_window = ff_vector_fmul_window_sse;
fdsp->vector_fmul_add = ff_vector_fmul_add_sse;
fdsp->vector_fmul_reverse = ff_vector_fmul_reverse_sse;
fdsp->scalarproduct_float = ff_scalarproduct_float_sse;
@@ -153,4 +92,8 @@ av_cold void ff_float_dsp_init_x86(AVFloatDSPContext *fdsp)
fdsp->vector_fmul_add = ff_vector_fmul_add_avx;
fdsp->vector_fmul_reverse = ff_vector_fmul_reverse_avx;
}
+ if (EXTERNAL_FMA3_FAST(cpu_flags)) {
+ fdsp->vector_fmac_scalar = ff_vector_fmac_scalar_fma3;
+ fdsp->vector_fmul_add = ff_vector_fmul_add_fma3;
+ }
}
diff --git a/libavutil/x86/intmath.h b/libavutil/x86/intmath.h
new file mode 100644
index 0000000000..f58b0d08da
--- /dev/null
+++ b/libavutil/x86/intmath.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2015 James Almer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_X86_INTMATH_H
+#define AVUTIL_X86_INTMATH_H
+
+#include <stdint.h>
+#include <stdlib.h>
+#if HAVE_FAST_CLZ
+#if defined(_MSC_VER)
+#include <intrin.h>
+#elif defined(__INTEL_COMPILER)
+#include <immintrin.h>
+#endif
+#endif
+#include "config.h"
+
+#if HAVE_FAST_CLZ
+#if (defined(__INTEL_COMPILER) && (__INTEL_COMPILER>=1216)) || defined(_MSC_VER)
+# if defined(__INTEL_COMPILER)
+# define ff_log2(x) (_bit_scan_reverse((x)|1))
+# else
+# define ff_log2 ff_log2_x86
+static av_always_inline av_const int ff_log2_x86(unsigned int v)
+{
+ unsigned long n;
+ _BitScanReverse(&n, v|1);
+ return n;
+}
+# endif
+# define ff_log2_16bit av_log2
+
+# define ff_ctz(v) _tzcnt_u32(v)
+
+# if ARCH_X86_64
+# define ff_ctzll(v) _tzcnt_u64(v)
+# else
+# define ff_ctzll ff_ctzll_x86
+static av_always_inline av_const int ff_ctzll_x86(long long v)
+{
+ return ((uint32_t)v == 0) ? _tzcnt_u32((uint32_t)(v >> 32)) + 32 : _tzcnt_u32((uint32_t)v);
+}
+# endif
+
+#endif /* __INTEL_COMPILER || _MSC_VER */
+
+#endif /* HAVE_FAST_CLZ */
+
+#if defined(__GNUC__)
+
+/* Our generic version of av_popcount is faster than GCC's built-in on
+ * CPUs that don't support the popcnt instruction.
+ */
+#if defined(__POPCNT__)
+ #define av_popcount __builtin_popcount
+#if ARCH_X86_64
+ #define av_popcount64 __builtin_popcountll
+#endif
+
+#endif /* __POPCNT__ */
+
+#if defined(__BMI2__)
+
+#if AV_GCC_VERSION_AT_LEAST(5,1)
+#define av_mod_uintp2 __builtin_ia32_bzhi_si
+#elif HAVE_INLINE_ASM
+/* GCC releases before 5.1.0 have a broken bzhi builtin, so for those we
+ * implement it using inline assembly
+ */
+#define av_mod_uintp2 av_mod_uintp2_bmi2
+static av_always_inline av_const unsigned av_mod_uintp2_bmi2(unsigned a, unsigned p)
+{
+ if (av_builtin_constant_p(p))
+ return a & ((1 << p) - 1);
+ else {
+ unsigned x;
+ __asm__ ("bzhi %2, %1, %0 \n\t" : "=r"(x) : "rm"(a), "r"(p));
+ return x;
+ }
+}
+#endif /* AV_GCC_VERSION_AT_LEAST */
+
+#endif /* __BMI2__ */
+
+#if defined(__SSE2__) && !defined(__INTEL_COMPILER)
+
+#define av_clipd av_clipd_sse2
+static av_always_inline av_const double av_clipd_sse2(double a, double amin, double amax)
+{
+#if defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+ if (amin > amax) abort();
+#endif
+ __asm__ ("minsd %2, %0 \n\t"
+ "maxsd %1, %0 \n\t"
+ : "+&x"(a) : "xm"(amin), "xm"(amax));
+ return a;
+}
+
+#endif /* __SSE2__ */
+
+#if defined(__SSE__) && !defined(__INTEL_COMPILER)
+
+#define av_clipf av_clipf_sse
+static av_always_inline av_const float av_clipf_sse(float a, float amin, float amax)
+{
+#if defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+ if (amin > amax) abort();
+#endif
+ __asm__ ("minss %2, %0 \n\t"
+ "maxss %1, %0 \n\t"
+ : "+&x"(a) : "xm"(amin), "xm"(amax));
+ return a;
+}
+
+#endif /* __SSE__ */
+
+#endif /* __GNUC__ */
+
+#endif /* AVUTIL_X86_INTMATH_H */
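Portable sketches of the semantics the intrinsics above map to, useful as a mental model (not part of the patch):

/* ff_log2: index of the highest set bit; the (x|1) in the macro guards
 * the otherwise-undefined x == 0 case. */
static int log2_ref(unsigned x)
{
    int n = 0;
    x |= 1;
    while (x >>= 1)
        n++;
    return n;
}

/* av_mod_uintp2(a, p) == a mod 2^p (for p < 32); bzhi computes this in
 * a single instruction on BMI2 CPUs. */
static unsigned mod_uintp2_ref(unsigned a, unsigned p)
{
    return a & ((1U << p) - 1);
}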
diff --git a/libavutil/x86/intreadwrite.h b/libavutil/x86/intreadwrite.h
index 635096e569..4061d19231 100644
--- a/libavutil/x86/intreadwrite.h
+++ b/libavutil/x86/intreadwrite.h
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2010 Alexander Strange <astrange@ithinksw.com>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavutil/x86/lls.asm b/libavutil/x86/lls.asm
index eab85ed050..317fba6fca 100644
--- a/libavutil/x86/lls.asm
+++ b/libavutil/x86/lls.asm
@@ -3,20 +3,20 @@
;*
;* Copyright (c) 2013 Loren Merritt
;*
-;* This file is part of Libav.
+;* This file is part of FFmpeg.
;*
-;* Libav is free software; you can redistribute it and/or
+;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
-;* Libav is distributed in the hope that it will be useful,
+;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
-;* License along with Libav; if not, write to the Free Software
+;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
@@ -125,7 +125,7 @@ cglobal update_lls, 2,5,8, ctx, var, i, j, covar2
.ret:
REP_RET
-INIT_YMM avx
+%macro UPDATE_LLS 0
cglobal update_lls, 3,6,8, ctx, var, count, i, j, count2
%define covarq ctxq
mov countd, [ctxq + LLSModel.indep_count]
@@ -139,6 +139,18 @@ cglobal update_lls, 3,6,8, ctx, var, count, i, j, count2
vbroadcastsd ymm6, [varq + iq*8 + 16]
vbroadcastsd ymm7, [varq + iq*8 + 24]
vextractf128 xmm3, ymm1, 1
+%if cpuflag(fma3)
+ mova ymm0, COVAR(iq ,0)
+ mova xmm2, COVAR(iq+2,2)
+ fmaddpd ymm0, ymm1, ymm4, ymm0
+ fmaddpd xmm2, xmm3, xmm6, xmm2
+ fmaddpd ymm1, ymm5, ymm1, COVAR(iq ,1)
+ fmaddpd xmm3, xmm7, xmm3, COVAR(iq+2,3)
+ mova COVAR(iq ,0), ymm0
+ mova COVAR(iq ,1), ymm1
+ mova COVAR(iq+2,2), xmm2
+ mova COVAR(iq+2,3), xmm3
+%else
vmulpd ymm0, ymm1, ymm4
vmulpd ymm1, ymm1, ymm5
vmulpd xmm2, xmm3, xmm6
@@ -147,12 +159,26 @@ cglobal update_lls, 3,6,8, ctx, var, count, i, j, count2
ADDPD_MEM COVAR(iq ,1), ymm1
ADDPD_MEM COVAR(iq+2,2), xmm2
ADDPD_MEM COVAR(iq+2,3), xmm3
+%endif ; cpuflag(fma3)
lea jd, [iq + 4]
cmp jd, count2d
jg .skip4x4
.loop4x4:
; Compute all 16 pairwise products of a 4x4 block
mova ymm3, [varq + jq*8]
+%if cpuflag(fma3)
+ mova ymm0, COVAR(jq, 0)
+ mova ymm1, COVAR(jq, 1)
+ mova ymm2, COVAR(jq, 2)
+ fmaddpd ymm0, ymm3, ymm4, ymm0
+ fmaddpd ymm1, ymm3, ymm5, ymm1
+ fmaddpd ymm2, ymm3, ymm6, ymm2
+ fmaddpd ymm3, ymm7, ymm3, COVAR(jq,3)
+ mova COVAR(jq, 0), ymm0
+ mova COVAR(jq, 1), ymm1
+ mova COVAR(jq, 2), ymm2
+ mova COVAR(jq, 3), ymm3
+%else
vmulpd ymm0, ymm3, ymm4
vmulpd ymm1, ymm3, ymm5
vmulpd ymm2, ymm3, ymm6
@@ -161,6 +187,7 @@ cglobal update_lls, 3,6,8, ctx, var, count, i, j, count2
ADDPD_MEM COVAR(jq,1), ymm1
ADDPD_MEM COVAR(jq,2), ymm2
ADDPD_MEM COVAR(jq,3), ymm3
+%endif ; cpuflag(fma3)
add jd, 4
cmp jd, count2d
jle .loop4x4
@@ -168,6 +195,19 @@ cglobal update_lls, 3,6,8, ctx, var, count, i, j, count2
cmp jd, countd
jg .skip2x4
mova xmm3, [varq + jq*8]
+%if cpuflag(fma3)
+ mova xmm0, COVAR(jq, 0)
+ mova xmm1, COVAR(jq, 1)
+ mova xmm2, COVAR(jq, 2)
+ fmaddpd xmm0, xmm3, xmm4, xmm0
+ fmaddpd xmm1, xmm3, xmm5, xmm1
+ fmaddpd xmm2, xmm3, xmm6, xmm2
+ fmaddpd xmm3, xmm7, xmm3, COVAR(jq,3)
+ mova COVAR(jq, 0), xmm0
+ mova COVAR(jq, 1), xmm1
+ mova COVAR(jq, 2), xmm2
+ mova COVAR(jq, 3), xmm3
+%else
vmulpd xmm0, xmm3, xmm4
vmulpd xmm1, xmm3, xmm5
vmulpd xmm2, xmm3, xmm6
@@ -176,6 +216,7 @@ cglobal update_lls, 3,6,8, ctx, var, count, i, j, count2
ADDPD_MEM COVAR(jq,1), xmm1
ADDPD_MEM COVAR(jq,2), xmm2
ADDPD_MEM COVAR(jq,3), xmm3
+%endif ; cpuflag(fma3)
.skip2x4:
add id, 4
add covarq, 4*COVAR_STRIDE
@@ -186,15 +227,30 @@ cglobal update_lls, 3,6,8, ctx, var, count, i, j, count2
mov jd, id
.loop2x1:
vmovddup xmm0, [varq + iq*8]
+%if cpuflag(fma3)
+ mova xmm1, [varq + jq*8]
+ fmaddpd xmm0, xmm1, xmm0, COVAR(jq,0)
+ mova COVAR(jq,0), xmm0
+%else
vmulpd xmm0, [varq + jq*8]
ADDPD_MEM COVAR(jq,0), xmm0
+%endif ; cpuflag(fma3)
inc id
add covarq, COVAR_STRIDE
cmp id, countd
jle .loop2x1
.ret:
REP_RET
+%endmacro ; UPDATE_LLS
+%if HAVE_AVX_EXTERNAL
+INIT_YMM avx
+UPDATE_LLS
+%endif
+%if HAVE_FMA3_EXTERNAL
+INIT_YMM fma3
+UPDATE_LLS
+%endif
INIT_XMM sse2
cglobal evaluate_lls, 3,4,2, ctx, var, order, i
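In scalar terms, update_lls accumulates the outer product of the sample vector into the triangular covariance matrix; the new FMA3 variant merely fuses each vmulpd/ADDPD_MEM pair into a fmaddpd. A sketch modeled on the C fallback in libavutil/lls.c, with stride standing in for the real matrix layout:

static void update_lls_ref(double *covar, int stride,
                           const double *var, int count)
{
    int i, j;
    for (i = 0; i <= count; i++)
        for (j = i; j <= count; j++)
            covar[i * stride + j] += var[i] * var[j];
}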
diff --git a/libavutil/x86/lls_init.c b/libavutil/x86/lls_init.c
index 80cda29139..1c5dca42dc 100644
--- a/libavutil/x86/lls_init.c
+++ b/libavutil/x86/lls_init.c
@@ -3,29 +3,30 @@
*
* Copyright (c) 2013 Loren Merritt
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/lls.h"
#include "libavutil/x86/cpu.h"
-void ff_update_lls_sse2(LLSModel *m, double *var);
-void ff_update_lls_avx(LLSModel *m, double *var);
-double ff_evaluate_lls_sse2(LLSModel *m, double *var, int order);
+void ff_update_lls_sse2(LLSModel *m, const double *var);
+void ff_update_lls_avx(LLSModel *m, const double *var);
+void ff_update_lls_fma3(LLSModel *m, const double *var);
+double ff_evaluate_lls_sse2(LLSModel *m, const double *var, int order);
av_cold void ff_init_lls_x86(LLSModel *m)
{
@@ -38,4 +39,7 @@ av_cold void ff_init_lls_x86(LLSModel *m)
if (EXTERNAL_AVX_FAST(cpu_flags)) {
m->update_lls = ff_update_lls_avx;
}
+ if (EXTERNAL_FMA3_FAST(cpu_flags)) {
+ m->update_lls = ff_update_lls_fma3;
+ }
}
diff --git a/libavutil/x86/pixelutils.asm b/libavutil/x86/pixelutils.asm
new file mode 100644
index 0000000000..7af3007d0c
--- /dev/null
+++ b/libavutil/x86/pixelutils.asm
@@ -0,0 +1,165 @@
+;******************************************************************************
+;* Pixel utilities SIMD
+;*
+;* Copyright (C) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+;* Copyright (C) 2014 Clément Bœsch <u pkh me>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "x86util.asm"
+
+SECTION .text
+
+;-------------------------------------------------------------------------------
+; int ff_pixelutils_sad_8x8_mmx(const uint8_t *src1, ptrdiff_t stride1,
+; const uint8_t *src2, ptrdiff_t stride2);
+;-------------------------------------------------------------------------------
+INIT_MMX mmx
+cglobal pixelutils_sad_8x8, 4,4,0, src1, stride1, src2, stride2
+ pxor m7, m7
+ pxor m6, m6
+%rep 4
+ mova m0, [src1q]
+ mova m2, [src1q + stride1q]
+ mova m1, [src2q]
+ mova m3, [src2q + stride2q]
+ psubusb m4, m0, m1
+ psubusb m5, m2, m3
+ psubusb m1, m0
+ psubusb m3, m2
+ por m1, m4
+ por m3, m5
+ punpcklbw m0, m1, m7
+ punpcklbw m2, m3, m7
+ punpckhbw m1, m7
+ punpckhbw m3, m7
+ paddw m0, m1
+ paddw m2, m3
+ paddw m0, m2
+ paddw m6, m0
+ lea src1q, [src1q + 2*stride1q]
+ lea src2q, [src2q + 2*stride2q]
+%endrep
+ psrlq m0, m6, 32
+ paddw m6, m0
+ psrlq m0, m6, 16
+ paddw m6, m0
+ movd eax, m6
+ movzx eax, ax
+ RET
+
+;-------------------------------------------------------------------------------
+; int ff_pixelutils_sad_8x8_mmxext(const uint8_t *src1, ptrdiff_t stride1,
+; const uint8_t *src2, ptrdiff_t stride2);
+;-------------------------------------------------------------------------------
+INIT_MMX mmxext
+cglobal pixelutils_sad_8x8, 4,4,0, src1, stride1, src2, stride2
+ pxor m2, m2
+%rep 4
+ mova m0, [src1q]
+ mova m1, [src1q + stride1q]
+ psadbw m0, [src2q]
+ psadbw m1, [src2q + stride2q]
+ paddw m2, m0
+ paddw m2, m1
+ lea src1q, [src1q + 2*stride1q]
+ lea src2q, [src2q + 2*stride2q]
+%endrep
+ movd eax, m2
+ RET
+
+;-------------------------------------------------------------------------------
+; int ff_pixelutils_sad_16x16_mmxext(const uint8_t *src1, ptrdiff_t stride1,
+; const uint8_t *src2, ptrdiff_t stride2);
+;-------------------------------------------------------------------------------
+INIT_MMX mmxext
+cglobal pixelutils_sad_16x16, 4,4,0, src1, stride1, src2, stride2
+ pxor m2, m2
+%rep 16
+ mova m0, [src1q]
+ mova m1, [src1q + 8]
+ psadbw m0, [src2q]
+ psadbw m1, [src2q + 8]
+ paddw m2, m0
+ paddw m2, m1
+ add src1q, stride1q
+ add src2q, stride2q
+%endrep
+ movd eax, m2
+ RET
+
+;-------------------------------------------------------------------------------
+; int ff_pixelutils_sad_16x16_sse2(const uint8_t *src1, ptrdiff_t stride1,
+; const uint8_t *src2, ptrdiff_t stride2);
+;-------------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal pixelutils_sad_16x16, 4,4,5, src1, stride1, src2, stride2
+ movu m4, [src1q]
+ movu m2, [src2q]
+ movu m1, [src1q + stride1q]
+ movu m3, [src2q + stride2q]
+ psadbw m4, m2
+ psadbw m1, m3
+ paddw m4, m1
+%rep 7
+ lea src1q, [src1q + 2*stride1q]
+ lea src2q, [src2q + 2*stride2q]
+ movu m0, [src1q]
+ movu m2, [src2q]
+ movu m1, [src1q + stride1q]
+ movu m3, [src2q + stride2q]
+ psadbw m0, m2
+ psadbw m1, m3
+ paddw m4, m0
+ paddw m4, m1
+%endrep
+ movhlps m0, m4
+ paddw m4, m0
+ movd eax, m4
+ RET
+
+;-------------------------------------------------------------------------------
+; int ff_pixelutils_sad_[au]_16x16_sse2(const uint8_t *src1, ptrdiff_t stride1,
+; const uint8_t *src2, ptrdiff_t stride2);
+;-------------------------------------------------------------------------------
+%macro SAD_XMM_16x16 1
+INIT_XMM sse2
+cglobal pixelutils_sad_%1_16x16, 4,4,3, src1, stride1, src2, stride2
+ mov%1 m2, [src2q]
+ psadbw m2, [src1q]
+ mov%1 m1, [src2q + stride2q]
+ psadbw m1, [src1q + stride1q]
+ paddw m2, m1
+%rep 7
+ lea src1q, [src1q + 2*stride1q]
+ lea src2q, [src2q + 2*stride2q]
+ mov%1 m0, [src2q]
+ psadbw m0, [src1q]
+ mov%1 m1, [src2q + stride2q]
+ psadbw m1, [src1q + stride1q]
+ paddw m2, m0
+ paddw m2, m1
+%endrep
+ movhlps m0, m2
+ paddw m2, m0
+ movd eax, m2
+ RET
+%endmacro
+
+SAD_XMM_16x16 a
+SAD_XMM_16x16 u
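A scalar reference sketch for the SAD kernels above: the sum of absolute byte differences over a w x h block, which psadbw computes 8 bytes at a time.

#include <stdint.h>
#include <stddef.h>
#include <stdlib.h>

static int sad_wxh_ref(const uint8_t *src1, ptrdiff_t stride1,
                       const uint8_t *src2, ptrdiff_t stride2, int w, int h)
{
    int x, y, sum = 0;
    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++)
            sum += abs(src1[x] - src2[x]);
        src1 += stride1;
        src2 += stride2;
    }
    return sum;
}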
diff --git a/libavutil/x86/pixelutils.h b/libavutil/x86/pixelutils.h
new file mode 100644
index 0000000000..876cf46053
--- /dev/null
+++ b/libavutil/x86/pixelutils.h
@@ -0,0 +1,26 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_X86_PIXELUTILS_H
+#define AVUTIL_X86_PIXELUTILS_H
+
+#include "libavutil/pixelutils.h"
+
+void ff_pixelutils_sad_init_x86(av_pixelutils_sad_fn *sad, int aligned);
+
+#endif /* AVUTIL_X86_PIXELUTILS_H */
diff --git a/libavutil/x86/pixelutils_init.c b/libavutil/x86/pixelutils_init.c
new file mode 100644
index 0000000000..c24a533aea
--- /dev/null
+++ b/libavutil/x86/pixelutils_init.c
@@ -0,0 +1,64 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+
+#include "pixelutils.h"
+#include "cpu.h"
+
+int ff_pixelutils_sad_8x8_mmx(const uint8_t *src1, ptrdiff_t stride1,
+ const uint8_t *src2, ptrdiff_t stride2);
+int ff_pixelutils_sad_8x8_mmxext(const uint8_t *src1, ptrdiff_t stride1,
+ const uint8_t *src2, ptrdiff_t stride2);
+
+int ff_pixelutils_sad_16x16_mmxext(const uint8_t *src1, ptrdiff_t stride1,
+ const uint8_t *src2, ptrdiff_t stride2);
+int ff_pixelutils_sad_16x16_sse2(const uint8_t *src1, ptrdiff_t stride1,
+ const uint8_t *src2, ptrdiff_t stride2);
+int ff_pixelutils_sad_a_16x16_sse2(const uint8_t *src1, ptrdiff_t stride1,
+ const uint8_t *src2, ptrdiff_t stride2);
+int ff_pixelutils_sad_u_16x16_sse2(const uint8_t *src1, ptrdiff_t stride1,
+ const uint8_t *src2, ptrdiff_t stride2);
+
+void ff_pixelutils_sad_init_x86(av_pixelutils_sad_fn *sad, int aligned)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (EXTERNAL_MMX(cpu_flags)) {
+ sad[2] = ff_pixelutils_sad_8x8_mmx;
+ }
+
+ // The best way to use SSE2 would be to do 2 SADs in parallel,
+ // but we'd have to modify the pixelutils API to return SIMD functions.
+
+ // It's probably not faster to shuffle data around
+ // to get two lines of 8 pixels into a single 16-byte register,
+ // so just use the MMX 8x8 version even when SSE2 is available.
+ if (EXTERNAL_MMXEXT(cpu_flags)) {
+ sad[2] = ff_pixelutils_sad_8x8_mmxext;
+ sad[3] = ff_pixelutils_sad_16x16_mmxext;
+ }
+
+ if (EXTERNAL_SSE2(cpu_flags)) {
+ switch (aligned) {
+ case 0: sad[3] = ff_pixelutils_sad_16x16_sse2; break; // src1 unaligned, src2 unaligned
+ case 1: sad[3] = ff_pixelutils_sad_u_16x16_sse2; break; // src1 aligned, src2 unaligned
+ case 2: sad[3] = ff_pixelutils_sad_a_16x16_sse2; break; // src1 aligned, src2 aligned
+ }
+ }
+}
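A caller-side sketch, assuming the public API in libavutil/pixelutils.h: block sizes are requested as log2, and the aligned argument matches the 0/1/2 levels dispatched above.

#include <stdint.h>
#include <stddef.h>
#include "libavutil/pixelutils.h"

static int score_blocks(const uint8_t *a, ptrdiff_t stride_a,
                        const uint8_t *b, ptrdiff_t stride_b)
{
    /* 3 == log2(16): request a 16x16 SAD; aligned == 2 promises both
     * sources are aligned, selecting the mova-based SSE2 version. */
    av_pixelutils_sad_fn sad = av_pixelutils_get_sad_fn(3, 3, 2, NULL);
    return sad(a, stride_a, b, stride_b);
}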
diff --git a/libavutil/x86/timer.h b/libavutil/x86/timer.h
index bb7c341341..4d1e88def0 100644
--- a/libavutil/x86/timer.h
+++ b/libavutil/x86/timer.h
@@ -1,20 +1,20 @@
/*
* copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -25,6 +25,7 @@
#if HAVE_INLINE_ASM
+#define FF_TIMER_UNITS "decicycles"
#define AV_READ_TIME read_time
static inline uint64_t read_time(void)
diff --git a/libavutil/x86/w64xmmtest.h b/libavutil/x86/w64xmmtest.h
index b4ce7d3daf..a4a05b0419 100644
--- a/libavutil/x86/w64xmmtest.h
+++ b/libavutil/x86/w64xmmtest.h
@@ -2,23 +2,26 @@
* check XMM registers for clobbers on Win64
* Copyright (c) 2008 Ramiro Polla <ramiro.polla@gmail.com>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#ifndef AVUTIL_X86_W64XMMTEST_H
+#define AVUTIL_X86_W64XMMTEST_H
+
#include <inttypes.h>
#include <stdint.h>
#include <stdlib.h>
@@ -71,3 +74,5 @@
int __real_ ## func; \
int __wrap_ ## func; \
int __wrap_ ## func
+
+#endif /* AVUTIL_X86_W64XMMTEST_H */
diff --git a/libavutil/x86/x86inc.asm b/libavutil/x86/x86inc.asm
index 20ef7b8a19..b2e9c60195 100644
--- a/libavutil/x86/x86inc.asm
+++ b/libavutil/x86/x86inc.asm
@@ -798,6 +798,7 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae,
%assign cpuflags_atom (1<<21)
%assign cpuflags_bmi1 (1<<22)|cpuflags_lzcnt
%assign cpuflags_bmi2 (1<<23)|cpuflags_bmi1
+%assign cpuflags_aesni (1<<24)|cpuflags_sse42
; Returns a boolean value expressing whether or not the specified cpuflag is enabled.
%define cpuflag(x) (((((cpuflags & (cpuflags_ %+ x)) ^ (cpuflags_ %+ x)) - 1) >> 31) & 1)
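; Worked expansion: if every bit of cpuflags_ %+ x is present in cpuflags,
; the AND reproduces the mask, the XOR yields 0, and 0 - 1 wraps to a value
; with bit 31 set, so ((...) >> 31) & 1 evaluates to 1. If any required bit
; is missing, the XOR result is nonzero but below 1<<31 (the flags stop at
; bit 24), so subtracting 1 leaves bit 31 clear and the whole expression
; evaluates to 0.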
@@ -1096,7 +1097,7 @@ INIT_XMM
;%1 == instruction
;%2 == minimal instruction set
;%3 == 1 if float, 0 if int
-;%4 == 1 if non-destructive or 4-operand (xmm, xmm, xmm, imm), 0 otherwise
+;%4 == 1 if 4-operand emulation, 0 if 3-operand emulation, 255 otherwise (no emulation)
;%5 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
;%6+: operands
%macro RUN_AVX_INSTR 6-9+
@@ -1129,14 +1130,12 @@ INIT_XMM
%if __emulate_avx
%xdefine __src1 %7
%xdefine __src2 %8
- %ifnidn %6, %7
- %if %0 >= 9
- CHECK_AVX_INSTR_EMU {%1 %6, %7, %8, %9}, %6, %8, %9
- %else
- CHECK_AVX_INSTR_EMU {%1 %6, %7, %8}, %6, %8
- %endif
- %if %5 && %4 == 0
- %ifnid %8
+ %if %5 && %4 == 0
+ %ifnidn %6, %7
+ %ifidn %6, %8
+ %xdefine __src1 %8
+ %xdefine __src2 %7
+ %elifnnum sizeof%8
; 3-operand AVX instructions with a memory arg can only have it in src2,
; whereas SSE emulation prefers to have it in src1 (i.e. the mov).
; So, if the instruction is commutative with a memory arg, swap them.
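; For example, "addps m0, m1, [r2]" in a non-AVX build expands (roughly) to
;     movaps m0, [r2]
;     addps  m0, m1
; after the swap, rather than a register-to-register mov followed by an
; addps with a memory operand.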
@@ -1144,6 +1143,13 @@ INIT_XMM
%xdefine __src2 %7
%endif
%endif
+ %endif
+ %ifnidn %6, __src1
+ %if %0 >= 9
+ CHECK_AVX_INSTR_EMU {%1 %6, %7, %8, %9}, %6, __src2, %9
+ %else
+ CHECK_AVX_INSTR_EMU {%1 %6, %7, %8}, %6, __src2
+ %endif
%if __sizeofreg == 8
MOVQ %6, __src1
%elif %3
@@ -1171,9 +1177,9 @@ INIT_XMM
;%1 == instruction
;%2 == minimal instruction set
;%3 == 1 if float, 0 if int
-;%4 == 1 if non-destructive or 4-operand (xmm, xmm, xmm, imm), 0 otherwise
+;%4 == 1 if 4-operand emulation, 0 if 3-operand emulation, 255 otherwise (no emulation)
;%5 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
-%macro AVX_INSTR 1-5 fnord, 0, 1, 0
+%macro AVX_INSTR 1-5 fnord, 0, 255, 0
%macro %1 1-10 fnord, fnord, fnord, fnord, %1, %2, %3, %4, %5
%ifidn %2, fnord
RUN_AVX_INSTR %6, %7, %8, %9, %10, %1
@@ -1193,24 +1199,24 @@ INIT_XMM
; Non-destructive instructions are written without parameters
AVX_INSTR addpd, sse2, 1, 0, 1
AVX_INSTR addps, sse, 1, 0, 1
-AVX_INSTR addsd, sse2, 1, 0, 1
-AVX_INSTR addss, sse, 1, 0, 1
+AVX_INSTR addsd, sse2, 1, 0, 0
+AVX_INSTR addss, sse, 1, 0, 0
AVX_INSTR addsubpd, sse3, 1, 0, 0
AVX_INSTR addsubps, sse3, 1, 0, 0
-AVX_INSTR aesdec, fnord, 0, 0, 0
-AVX_INSTR aesdeclast, fnord, 0, 0, 0
-AVX_INSTR aesenc, fnord, 0, 0, 0
-AVX_INSTR aesenclast, fnord, 0, 0, 0
-AVX_INSTR aesimc
-AVX_INSTR aeskeygenassist
+AVX_INSTR aesdec, aesni, 0, 0, 0
+AVX_INSTR aesdeclast, aesni, 0, 0, 0
+AVX_INSTR aesenc, aesni, 0, 0, 0
+AVX_INSTR aesenclast, aesni, 0, 0, 0
+AVX_INSTR aesimc, aesni
+AVX_INSTR aeskeygenassist, aesni
AVX_INSTR andnpd, sse2, 1, 0, 0
AVX_INSTR andnps, sse, 1, 0, 0
AVX_INSTR andpd, sse2, 1, 0, 1
AVX_INSTR andps, sse, 1, 0, 1
-AVX_INSTR blendpd, sse4, 1, 0, 0
-AVX_INSTR blendps, sse4, 1, 0, 0
-AVX_INSTR blendvpd, sse4, 1, 0, 0
-AVX_INSTR blendvps, sse4, 1, 0, 0
+AVX_INSTR blendpd, sse4, 1, 1, 0
+AVX_INSTR blendps, sse4, 1, 1, 0
+AVX_INSTR blendvpd, sse4 ; can't be emulated
+AVX_INSTR blendvps, sse4 ; can't be emulated
AVX_INSTR cmppd, sse2, 1, 1, 0
AVX_INSTR cmpps, sse, 1, 1, 0
AVX_INSTR cmpsd, sse2, 1, 1, 0
@@ -1224,10 +1230,10 @@ AVX_INSTR cvtpd2ps, sse2
AVX_INSTR cvtps2dq, sse2
AVX_INSTR cvtps2pd, sse2
AVX_INSTR cvtsd2si, sse2
-AVX_INSTR cvtsd2ss, sse2
-AVX_INSTR cvtsi2sd, sse2
-AVX_INSTR cvtsi2ss, sse
-AVX_INSTR cvtss2sd, sse2
+AVX_INSTR cvtsd2ss, sse2, 1, 0, 0
+AVX_INSTR cvtsi2sd, sse2, 1, 0, 0
+AVX_INSTR cvtsi2ss, sse, 1, 0, 0
+AVX_INSTR cvtss2sd, sse2, 1, 0, 0
AVX_INSTR cvtss2si, sse
AVX_INSTR cvttpd2dq, sse2
AVX_INSTR cvttps2dq, sse2
@@ -1250,12 +1256,12 @@ AVX_INSTR ldmxcsr, sse
AVX_INSTR maskmovdqu, sse2
AVX_INSTR maxpd, sse2, 1, 0, 1
AVX_INSTR maxps, sse, 1, 0, 1
-AVX_INSTR maxsd, sse2, 1, 0, 1
-AVX_INSTR maxss, sse, 1, 0, 1
+AVX_INSTR maxsd, sse2, 1, 0, 0
+AVX_INSTR maxss, sse, 1, 0, 0
AVX_INSTR minpd, sse2, 1, 0, 1
AVX_INSTR minps, sse, 1, 0, 1
-AVX_INSTR minsd, sse2, 1, 0, 1
-AVX_INSTR minss, sse, 1, 0, 1
+AVX_INSTR minsd, sse2, 1, 0, 0
+AVX_INSTR minss, sse, 1, 0, 0
AVX_INSTR movapd, sse2
AVX_INSTR movaps, sse
AVX_INSTR movd, mmx
@@ -1281,11 +1287,11 @@ AVX_INSTR movsldup, sse3
AVX_INSTR movss, sse, 1, 0, 0
AVX_INSTR movupd, sse2
AVX_INSTR movups, sse
-AVX_INSTR mpsadbw, sse4
+AVX_INSTR mpsadbw, sse4, 0, 1, 0
AVX_INSTR mulpd, sse2, 1, 0, 1
AVX_INSTR mulps, sse, 1, 0, 1
-AVX_INSTR mulsd, sse2, 1, 0, 1
-AVX_INSTR mulss, sse, 1, 0, 1
+AVX_INSTR mulsd, sse2, 1, 0, 0
+AVX_INSTR mulss, sse, 1, 0, 0
AVX_INSTR orpd, sse2, 1, 0, 1
AVX_INSTR orps, sse, 1, 0, 1
AVX_INSTR pabsb, ssse3
@@ -1303,14 +1309,18 @@ AVX_INSTR paddsb, mmx, 0, 0, 1
AVX_INSTR paddsw, mmx, 0, 0, 1
AVX_INSTR paddusb, mmx, 0, 0, 1
AVX_INSTR paddusw, mmx, 0, 0, 1
-AVX_INSTR palignr, ssse3
+AVX_INSTR palignr, ssse3, 0, 1, 0
AVX_INSTR pand, mmx, 0, 0, 1
AVX_INSTR pandn, mmx, 0, 0, 0
AVX_INSTR pavgb, mmx2, 0, 0, 1
AVX_INSTR pavgw, mmx2, 0, 0, 1
-AVX_INSTR pblendvb, sse4, 0, 0, 0
-AVX_INSTR pblendw, sse4
-AVX_INSTR pclmulqdq
+AVX_INSTR pblendvb, sse4 ; can't be emulated
+AVX_INSTR pblendw, sse4, 0, 1, 0
+AVX_INSTR pclmulqdq, fnord, 0, 1, 0
+AVX_INSTR pclmulhqhqdq, fnord, 0, 0, 0
+AVX_INSTR pclmulhqlqdq, fnord, 0, 0, 0
+AVX_INSTR pclmullqhqdq, fnord, 0, 0, 0
+AVX_INSTR pclmullqlqdq, fnord, 0, 0, 0
AVX_INSTR pcmpestri, sse42
AVX_INSTR pcmpestrm, sse42
AVX_INSTR pcmpistri, sse42
@@ -1334,10 +1344,10 @@ AVX_INSTR phminposuw, sse4
AVX_INSTR phsubw, ssse3, 0, 0, 0
AVX_INSTR phsubd, ssse3, 0, 0, 0
AVX_INSTR phsubsw, ssse3, 0, 0, 0
-AVX_INSTR pinsrb, sse4
-AVX_INSTR pinsrd, sse4
-AVX_INSTR pinsrq, sse4
-AVX_INSTR pinsrw, mmx2
+AVX_INSTR pinsrb, sse4, 0, 1, 0
+AVX_INSTR pinsrd, sse4, 0, 1, 0
+AVX_INSTR pinsrq, sse4, 0, 1, 0
+AVX_INSTR pinsrw, mmx2, 0, 1, 0
AVX_INSTR pmaddwd, mmx, 0, 0, 1
AVX_INSTR pmaddubsw, ssse3, 0, 0, 0
AVX_INSTR pmaxsb, sse4, 0, 0, 1
@@ -1409,18 +1419,18 @@ AVX_INSTR punpcklwd, mmx, 0, 0, 0
AVX_INSTR punpckldq, mmx, 0, 0, 0
AVX_INSTR punpcklqdq, sse2, 0, 0, 0
AVX_INSTR pxor, mmx, 0, 0, 1
-AVX_INSTR rcpps, sse, 1, 0, 0
+AVX_INSTR rcpps, sse
AVX_INSTR rcpss, sse, 1, 0, 0
AVX_INSTR roundpd, sse4
AVX_INSTR roundps, sse4
-AVX_INSTR roundsd, sse4
-AVX_INSTR roundss, sse4
-AVX_INSTR rsqrtps, sse, 1, 0, 0
+AVX_INSTR roundsd, sse4, 1, 1, 0
+AVX_INSTR roundss, sse4, 1, 1, 0
+AVX_INSTR rsqrtps, sse
AVX_INSTR rsqrtss, sse, 1, 0, 0
AVX_INSTR shufpd, sse2, 1, 1, 0
AVX_INSTR shufps, sse, 1, 1, 0
-AVX_INSTR sqrtpd, sse2, 1, 0, 0
-AVX_INSTR sqrtps, sse, 1, 0, 0
+AVX_INSTR sqrtpd, sse2
+AVX_INSTR sqrtps, sse
AVX_INSTR sqrtsd, sse2, 1, 0, 0
AVX_INSTR sqrtss, sse, 1, 0, 0
AVX_INSTR stmxcsr, sse
@@ -1496,7 +1506,7 @@ FMA_INSTR pmadcswd, pmaddwd, paddd
v%5%6 %1, %2, %3, %4
%elifidn %1, %2
; If %3 or %4 is a memory operand it needs to be encoded as the last operand.
- %ifid %3
+ %ifnum sizeof%3
v%{5}213%6 %2, %3, %4
%else
v%{5}132%6 %2, %4, %3
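; To make the form selection concrete (assuming the fmaddps alias that the
; FMA4_INSTR block defines): "fmaddps m0, m0, m1, [r2]" can take the 213
; form, vfmadd213ps m0, m1, [r2] (m0 = m1*m0 + [r2]), whereas
; "fmaddps m0, m0, [r2], m1" must use the 132 form,
; vfmadd132ps m0, m1, [r2] (m0 = m0*[r2] + m1), because the memory operand
; has to sit in the last encoding slot.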
diff --git a/libavutil/x86/x86util.asm b/libavutil/x86/x86util.asm
index 9f64dd13e1..b09fa813e2 100644
--- a/libavutil/x86/x86util.asm
+++ b/libavutil/x86/x86util.asm
@@ -6,20 +6,20 @@
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;* Holger Lubitz <holger@lubitz.org>
;*
-;* This file is part of Libav.
+;* This file is part of FFmpeg.
;*
-;* Libav is free software; you can redistribute it and/or
+;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
-;* Libav is distributed in the hope that it will be useful,
+;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
-;* License along with Libav; if not, write to the Free Software
+;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
@@ -69,6 +69,15 @@
SWAP %2, %3
%endmacro
+%macro TRANSPOSE2x4x4B 5
+ SBUTTERFLY bw, %1, %2, %5
+ SBUTTERFLY bw, %3, %4, %5
+ SBUTTERFLY wd, %1, %3, %5
+ SBUTTERFLY wd, %2, %4, %5
+ SBUTTERFLY dq, %1, %2, %5
+ SBUTTERFLY dq, %3, %4, %5
+%endmacro
+
%macro TRANSPOSE2x4x4W 5
SBUTTERFLY wd, %1, %2, %5
SBUTTERFLY wd, %3, %4, %5
@@ -98,6 +107,43 @@
SWAP %5, %2, %3
%endmacro
+%macro TRANSPOSE8x4D 9-11
+%if ARCH_X86_64
+ SBUTTERFLY dq, %1, %2, %9
+ SBUTTERFLY dq, %3, %4, %9
+ SBUTTERFLY dq, %5, %6, %9
+ SBUTTERFLY dq, %7, %8, %9
+ SBUTTERFLY qdq, %1, %3, %9
+ SBUTTERFLY qdq, %2, %4, %9
+ SBUTTERFLY qdq, %5, %7, %9
+ SBUTTERFLY qdq, %6, %8, %9
+ SWAP %2, %5
+ SWAP %4, %7
+%else
+; in: m0..m7
+; out: m0..m7, unless %11 is given, in which case m2 is left in %9
+; spills into %9 and %10
+ movdqa %9, m%7
+ SBUTTERFLY dq, %1, %2, %7
+ movdqa %10, m%2
+ movdqa m%7, %9
+ SBUTTERFLY dq, %3, %4, %2
+ SBUTTERFLY dq, %5, %6, %2
+ SBUTTERFLY dq, %7, %8, %2
+ SBUTTERFLY qdq, %1, %3, %2
+ movdqa %9, m%3
+ movdqa m%2, %10
+ SBUTTERFLY qdq, %2, %4, %3
+ SBUTTERFLY qdq, %5, %7, %3
+ SBUTTERFLY qdq, %6, %8, %3
+ SWAP %2, %5
+ SWAP %4, %7
+%if %0<11
+ movdqa m%3, %9
+%endif
+%endif
+%endmacro
+
%macro TRANSPOSE8x8W 9-11
%if ARCH_X86_64
SBUTTERFLY wd, %1, %2, %9
@@ -164,13 +210,13 @@
%endif
%endmacro
-%macro PSIGNW_MMX 2
+%macro PSIGNW 2
+%if cpuflag(ssse3)
+ psignw %1, %2
+%else
pxor %1, %2
psubw %1, %2
-%endmacro
-
-%macro PSIGNW_SSSE3 2
- psignw %1, %2
+%endif
%endmacro
%macro ABS1 2
@@ -273,6 +319,44 @@
%endif
%endmacro
+%macro HADDD 2 ; sum junk
+%if sizeof%1 == 32
+%define %2 xmm%2
+ vextracti128 %2, %1, 1
+%define %1 xmm%1
+ paddd %1, %2
+%endif
+%if mmsize >= 16
+%if cpuflag(xop) && sizeof%1 == 16
+ vphadddq %1, %1
+%endif
+ movhlps %2, %1
+ paddd %1, %2
+%endif
+%if notcpuflag(xop) || sizeof%1 != 16
+%if cpuflag(mmxext)
+ PSHUFLW %2, %1, q0032
+%else ; mmx
+ mova %2, %1
+ psrlq %2, 32
+%endif
+ paddd %1, %2
+%endif
+%undef %1
+%undef %2
+%endmacro
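; HADDD reduces every dword lane of %1 into its low dword, clobbering %2
; (hence the "sum junk" argument names). A usage sketch, assuming an
; INIT_XMM sse2 context:
;     pmaddwd m0, m1  ; per-lane word products accumulated into dwords
;     HADDD   m0, m2  ; low dword of m0 now holds the total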
+
+%macro HADDW 2 ; reg, tmp
+%if cpuflag(xop) && sizeof%1 == 16
+ vphaddwq %1, %1
+ movhlps %2, %1
+ paddd %1, %2
+%else
+ pmaddwd %1, [pw_1]
+ HADDD %1, %2
+%endif
+%endmacro
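; HADDW performs the same reduction on word lanes: pmaddwd against pw_1 (a
; constant of ones assumed to be declared by the including file) widens and
; pairwise-adds the words into dwords, and HADDD finishes the job.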
+
%macro PALIGNR 4-5
%if cpuflag(ssse3)
%if %0==5
@@ -302,11 +386,19 @@
%endif
%endmacro
-%macro PAVGB 2
+%macro PAVGB 2-4
%if cpuflag(mmxext)
pavgb %1, %2
%elif cpuflag(3dnow)
pavgusb %1, %2
+%elif cpuflag(mmx)
+ movu %3, %2
+ por %3, %1
+ pxor %1, %2
+ pand %1, %4
+ psrlq %1, 1
+ psubb %3, %1
+ SWAP %1, %3
%endif
%endmacro
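; The new plain-MMX branch reproduces pavgb's rounded-up byte average via
; the identity avg(a, b) = (a|b) - ((a^b) >> 1): %3 ends up holding a|b,
; %1 becomes a^b, and the pand with %4 (which the caller is assumed to
; preload with 0xFE in every byte) clears each byte's low bit so that the
; quadword psrlq behaves like a per-byte shift.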
@@ -552,7 +644,9 @@
%endmacro
%macro SPLATW 2-3 0
-%if mmsize == 16
+%if cpuflag(avx2) && %3 == 0
+ vpbroadcastw %1, %2
+%elif mmsize == 16
pshuflw %1, %2, (%3)*0x55
punpcklqdq %1, %1
%elif cpuflag(mmxext)
@@ -683,3 +777,19 @@
addps %1, %4
%endif
%endmacro
+
+%macro LSHIFT 2
+%if mmsize > 8
+ pslldq %1, %2
+%else
+ psllq %1, 8*(%2)
+%endif
+%endmacro
+
+%macro RSHIFT 2
+%if mmsize > 8
+ psrldq %1, %2
+%else
+ psrlq %1, 8*(%2)
+%endif
+%endmacro
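; LSHIFT/RSHIFT shift a register by %2 bytes regardless of its size: XMM
; and wider registers use the byte-granular pslldq/psrldq, while the 8-byte
; MMX case emulates the same thing with psllq/psrlq by scaling the count to
; bits.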