From f7cafb5d02aa3f26c185f6f9851413ad77a73872 Mon Sep 17 00:00:00 2001 From: James Almer Date: Tue, 26 May 2015 14:29:06 -0300 Subject: x86: add AV_CPU_FLAG_AVXSLOW flag Signed-off-by: James Almer Signed-off-by: Luca Barbato --- libavutil/x86/cpu.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) (limited to 'libavutil/x86') diff --git a/libavutil/x86/cpu.c b/libavutil/x86/cpu.c index 8be6d94742..098ccf7004 100644 --- a/libavutil/x86/cpu.c +++ b/libavutil/x86/cpu.c @@ -167,6 +167,7 @@ int ff_get_cpu_flags_x86(void) if (ext_caps & (1 << 22)) rval |= AV_CPU_FLAG_MMXEXT; + if (!strncmp(vendor.c, "AuthenticAMD", 12)) { /* Allow for selectively disabling SSE2 functions on AMD processors with SSE2 support but not SSE4a. This includes Athlon64, some Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are faster @@ -174,9 +175,19 @@ int ff_get_cpu_flags_x86(void) AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this case so that SSE2 is used unless explicitly disabled by checking AV_CPU_FLAG_SSE2SLOW. */ - if (!strncmp(vendor.c, "AuthenticAMD", 12) && - rval & AV_CPU_FLAG_SSE2 && !(ecx & 0x00000040)) { - rval |= AV_CPU_FLAG_SSE2SLOW; + if (rval & AV_CPU_FLAG_SSE2 && !(ecx & 0x00000040)) + rval |= AV_CPU_FLAG_SSE2SLOW; + + /* Similar to the above but for AVX functions on AMD processors. + This is necessary only for functions using YMM registers on Bulldozer + based CPUs as they lack 256-bit execution units. SSE/AVX functions + using XMM registers are always faster on them. + AV_CPU_FLAG_AVX and AV_CPU_FLAG_AVXSLOW are both set so that AVX is + used unless explicitly disabled by checking AV_CPU_FLAG_AVXSLOW. + TODO: Confirm if Excavator is affected or not by this once it's + released, and update the check if necessary. Same for btver2. */ + if (family == 0x15 && (rval & AV_CPU_FLAG_AVX)) + rval |= AV_CPU_FLAG_AVXSLOW; } /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be -- cgit v1.2.3