author    James Almer <jamrial@gmail.com>    2017-01-07 19:04:39 -0300
committer James Almer <jamrial@gmail.com>    2017-01-12 22:53:04 -0300
commit    5ac1dd8e231987c022a860c6b1961b038a84b613 (patch)
tree      594a5e75a7b36a95985fef7779071900a763266a /libavcodec/x86
parent    3222786c5ad9f6ca94ca4cd80a4329a276d65aaa (diff)
lossless_videodsp: move shared functions from huffyuvdsp
Several codecs other than huffyuv use them.

Signed-off-by: James Almer <jamrial@gmail.com>
Diffstat (limited to 'libavcodec/x86')
-rw-r--r--  libavcodec/x86/huffyuvdsp.asm            | 188
-rw-r--r--  libavcodec/x86/huffyuvdsp_init.c         |  73
-rw-r--r--  libavcodec/x86/lossless_videodsp.asm     | 190
-rw-r--r--  libavcodec/x86/lossless_videodsp_init.c  |  76
4 files changed, 265 insertions(+), 262 deletions(-)
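
For context: the routines being moved implement the byte-wise median predictor used by the huffyuv family, which the SSE2/MMXEXT code below vectorizes with pmaxub/pminub and a byte-shift carry chain. A minimal scalar sketch of what add_median_pred computes (illustrative only, not part of this patch; FFmpeg's actual C fallback lives in lossless_videodsp.c):

#include <stdint.h>

/* Median of three values. */
static int mid_pred(int a, int b, int c)
{
    int mx = a > b ? a : b;
    int mn = a > b ? b : a;
    if (c > mx) return mx;
    if (c < mn) return mn;
    return c;
}

/* dst[i] = diff[i] + median(l, t, l + t - tl), carrying l/tl across calls. */
static void add_median_pred_ref(uint8_t *dst, const uint8_t *top,
                                const uint8_t *diff, intptr_t w,
                                int *left, int *left_top)
{
    int l = *left & 0xFF, tl = *left_top & 0xFF;
    for (intptr_t i = 0; i < w; i++) {
        l      = (mid_pred(l, top[i], (l + top[i] - tl) & 0xFF) + diff[i]) & 0xFF;
        tl     = top[i];
        dst[i] = l;
    }
    *left     = l;
    *left_top = tl;
}
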
diff --git a/libavcodec/x86/huffyuvdsp.asm b/libavcodec/x86/huffyuvdsp.asm
index 0dbe598421..0befd3baa8 100644
--- a/libavcodec/x86/huffyuvdsp.asm
+++ b/libavcodec/x86/huffyuvdsp.asm
@@ -22,196 +22,8 @@
%include "libavutil/x86/x86util.asm"
-SECTION_RODATA
-cextern pb_15
-pb_zzzzzzzz77777777: times 8 db -1
-pb_7: times 8 db 7
-pb_zzzz3333zzzzbbbb: db -1,-1,-1,-1,3,3,3,3,-1,-1,-1,-1,11,11,11,11
-pb_zz11zz55zz99zzdd: db -1,-1,1,1,-1,-1,5,5,-1,-1,9,9,-1,-1,13,13
-
SECTION .text
-; void ff_add_hfyu_median_pred_mmxext(uint8_t *dst, const uint8_t *top,
-; const uint8_t *diff, int w,
-; int *left, int *left_top)
-%macro HFYU_MEDIAN 0
-cglobal add_hfyu_median_pred, 6,6,8, dst, top, diff, w, left, left_top
- movu m0, [topq]
- mova m2, m0
- movd m4, [left_topq]
- LSHIFT m2, 1
- mova m1, m0
- por m4, m2
- movd m3, [leftq]
- psubb m0, m4 ; t-tl
- add dstq, wq
- add topq, wq
- add diffq, wq
- neg wq
- jmp .skip
-.loop:
- movu m4, [topq+wq]
- mova m0, m4
- LSHIFT m4, 1
- por m4, m1
- mova m1, m0 ; t
- psubb m0, m4 ; t-tl
-.skip:
- movu m2, [diffq+wq]
-%assign i 0
-%rep mmsize
- mova m4, m0
- paddb m4, m3 ; t-tl+l
- mova m5, m3
- pmaxub m3, m1
- pminub m5, m1
- pminub m3, m4
- pmaxub m3, m5 ; median
- paddb m3, m2 ; +residual
-%if i==0
- mova m7, m3
- LSHIFT m7, mmsize-1
-%else
- mova m6, m3
- RSHIFT m7, 1
- LSHIFT m6, mmsize-1
- por m7, m6
-%endif
-%if i<mmsize-1
- RSHIFT m0, 1
- RSHIFT m1, 1
- RSHIFT m2, 1
-%endif
-%assign i i+1
-%endrep
- movu [dstq+wq], m7
- add wq, mmsize
- jl .loop
- movzx r2d, byte [dstq-1]
- mov [leftq], r2d
- movzx r2d, byte [topq-1]
- mov [left_topq], r2d
- RET
-%endmacro
-
-%if ARCH_X86_32
-INIT_MMX mmxext
-HFYU_MEDIAN
-%endif
-INIT_XMM sse2
-HFYU_MEDIAN
-
-
-%macro ADD_HFYU_LEFT_LOOP 2 ; %1 = dst_is_aligned, %2 = src_is_aligned
- add srcq, wq
- add dstq, wq
- neg wq
-%%.loop:
-%if %2
- mova m1, [srcq+wq]
-%else
- movu m1, [srcq+wq]
-%endif
- mova m2, m1
- psllw m1, 8
- paddb m1, m2
- mova m2, m1
- pshufb m1, m3
- paddb m1, m2
- pshufb m0, m5
- mova m2, m1
- pshufb m1, m4
- paddb m1, m2
-%if mmsize == 16
- mova m2, m1
- pshufb m1, m6
- paddb m1, m2
-%endif
- paddb m0, m1
-%if %1
- mova [dstq+wq], m0
-%else
- movq [dstq+wq], m0
- movhps [dstq+wq+8], m0
-%endif
- add wq, mmsize
- jl %%.loop
- mov eax, mmsize-1
- sub eax, wd
- movd m1, eax
- pshufb m0, m1
- movd eax, m0
- RET
-%endmacro
-
-; int ff_add_hfyu_left_pred(uint8_t *dst, const uint8_t *src, int w, int left)
-INIT_MMX ssse3
-cglobal add_hfyu_left_pred, 3,3,7, dst, src, w, left
-.skip_prologue:
- mova m5, [pb_7]
- mova m4, [pb_zzzz3333zzzzbbbb]
- mova m3, [pb_zz11zz55zz99zzdd]
- movd m0, leftm
- psllq m0, 56
- ADD_HFYU_LEFT_LOOP 1, 1
-
-INIT_XMM sse4
-cglobal add_hfyu_left_pred, 3,3,7, dst, src, w, left
- mova m5, [pb_15]
- mova m6, [pb_zzzzzzzz77777777]
- mova m4, [pb_zzzz3333zzzzbbbb]
- mova m3, [pb_zz11zz55zz99zzdd]
- movd m0, leftm
- pslldq m0, 15
- test srcq, 15
- jnz .src_unaligned
- test dstq, 15
- jnz .dst_unaligned
- ADD_HFYU_LEFT_LOOP 1, 1
-.dst_unaligned:
- ADD_HFYU_LEFT_LOOP 0, 1
-.src_unaligned:
- ADD_HFYU_LEFT_LOOP 0, 0
-
-%macro ADD_BYTES 0
-cglobal add_bytes, 3,4,2, dst, src, w, size
- mov sizeq, wq
- and sizeq, -2*mmsize
- jz .2
- add dstq, sizeq
- add srcq, sizeq
- neg sizeq
-.1:
- mova m0, [srcq + sizeq]
- mova m1, [srcq + sizeq + mmsize]
- paddb m0, [dstq + sizeq]
- paddb m1, [dstq + sizeq + mmsize]
- mova [dstq + sizeq], m0
- mova [dstq + sizeq + mmsize], m1
- add sizeq, 2*mmsize
- jl .1
-.2:
- and wq, 2*mmsize-1
- jz .end
- add dstq, wq
- add srcq, wq
- neg wq
-.3:
- mov sizeb, [srcq + wq]
- add [dstq + wq], sizeb
- inc wq
- jl .3
-.end:
- REP_RET
-%endmacro
-
-%if ARCH_X86_32
-INIT_MMX mmx
-ADD_BYTES
-%endif
-INIT_XMM sse2
-ADD_BYTES
-
; void add_hfyu_left_pred_bgr32(uint8_t *dst, const uint8_t *src,
; intptr_t w, uint8_t *left)
%macro LEFT_BGR32 0
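
The add_left_pred routine removed above (and re-added in lossless_videodsp.asm below) is a byte-wise prefix sum; the psllw/pshufb cascade computes it as a log2(mmsize)-step parallel scan, with pb_7/pb_15 broadcasting the previous vector's carry. Its scalar semantics, as a hypothetical sketch:

#include <stdint.h>

/* dst[i] = (dst[i-1] + src[i]) & 0xFF, seeded with `left`;
 * returns the final accumulator so the caller can chain rows. */
static int add_left_pred_ref(uint8_t *dst, const uint8_t *src,
                             intptr_t w, int left)
{
    int l = left & 0xFF;
    for (intptr_t i = 0; i < w; i++) {
        l      = (l + src[i]) & 0xFF;
        dst[i] = l;
    }
    return l;
}
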
diff --git a/libavcodec/x86/huffyuvdsp_init.c b/libavcodec/x86/huffyuvdsp_init.c
index 3ced3c0a1c..fc87c3844b 100644
--- a/libavcodec/x86/huffyuvdsp_init.c
+++ b/libavcodec/x86/huffyuvdsp_init.c
@@ -25,93 +25,20 @@
#include "libavutil/x86/cpu.h"
#include "libavcodec/huffyuvdsp.h"
-void ff_add_bytes_mmx(uint8_t *dst, uint8_t *src, intptr_t w);
-void ff_add_bytes_sse2(uint8_t *dst, uint8_t *src, intptr_t w);
-
-void ff_add_hfyu_median_pred_mmxext(uint8_t *dst, const uint8_t *top,
- const uint8_t *diff, intptr_t w,
- int *left, int *left_top);
-void ff_add_hfyu_median_pred_sse2(uint8_t *dst, const uint8_t *top,
- const uint8_t *diff, intptr_t w,
- int *left, int *left_top);
-
-int ff_add_hfyu_left_pred_ssse3(uint8_t *dst, const uint8_t *src,
- intptr_t w, int left);
-int ff_add_hfyu_left_pred_sse4(uint8_t *dst, const uint8_t *src,
- intptr_t w, int left);
-
void ff_add_hfyu_left_pred_bgr32_mmx(uint8_t *dst, const uint8_t *src,
intptr_t w, uint8_t *left);
void ff_add_hfyu_left_pred_bgr32_sse2(uint8_t *dst, const uint8_t *src,
intptr_t w, uint8_t *left);
-#if HAVE_INLINE_ASM && HAVE_7REGS && ARCH_X86_32
-static void add_hfyu_median_pred_cmov(uint8_t *dst, const uint8_t *top,
- const uint8_t *diff, intptr_t w,
- int *left, int *left_top)
-{
- x86_reg w2 = -w;
- x86_reg x;
- int l = *left & 0xff;
- int tl = *left_top & 0xff;
- int t;
- __asm__ volatile (
- "mov %7, %3 \n"
- "1: \n"
- "movzbl (%3, %4), %2 \n"
- "mov %2, %k3 \n"
- "sub %b1, %b3 \n"
- "add %b0, %b3 \n"
- "mov %2, %1 \n"
- "cmp %0, %2 \n"
- "cmovg %0, %2 \n"
- "cmovg %1, %0 \n"
- "cmp %k3, %0 \n"
- "cmovg %k3, %0 \n"
- "mov %7, %3 \n"
- "cmp %2, %0 \n"
- "cmovl %2, %0 \n"
- "add (%6, %4), %b0 \n"
- "mov %b0, (%5, %4) \n"
- "inc %4 \n"
- "jl 1b \n"
- : "+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
- : "r"(dst + w), "r"(diff + w), "rm"(top + w)
- );
- *left = l;
- *left_top = tl;
-}
-#endif
-
av_cold void ff_huffyuvdsp_init_x86(HuffYUVDSPContext *c)
{
int cpu_flags = av_get_cpu_flags();
-#if HAVE_INLINE_ASM && HAVE_7REGS && ARCH_X86_32
- if (cpu_flags & AV_CPU_FLAG_CMOV)
- c->add_hfyu_median_pred = add_hfyu_median_pred_cmov;
-#endif
-
if (ARCH_X86_32 && EXTERNAL_MMX(cpu_flags)) {
- c->add_bytes = ff_add_bytes_mmx;
c->add_hfyu_left_pred_bgr32 = ff_add_hfyu_left_pred_bgr32_mmx;
}
- if (ARCH_X86_32 && EXTERNAL_MMXEXT(cpu_flags)) {
- /* slower than cmov version on AMD */
- if (!(cpu_flags & AV_CPU_FLAG_3DNOW))
- c->add_hfyu_median_pred = ff_add_hfyu_median_pred_mmxext;
- }
-
if (EXTERNAL_SSE2(cpu_flags)) {
- c->add_bytes = ff_add_bytes_sse2;
- c->add_hfyu_median_pred = ff_add_hfyu_median_pred_sse2;
c->add_hfyu_left_pred_bgr32 = ff_add_hfyu_left_pred_bgr32_sse2;
}
-
- if (EXTERNAL_SSSE3(cpu_flags)) {
- c->add_hfyu_left_pred = ff_add_hfyu_left_pred_ssse3;
- if (cpu_flags & AV_CPU_FLAG_SSE4) // not really SSE4, just slow on Conroe
- c->add_hfyu_left_pred = ff_add_hfyu_left_pred_sse4;
- }
}
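
What remains of ff_huffyuvdsp_init_x86 follows FFmpeg's usual dispatch idiom: probe the CPU flags once, then overwrite each function pointer from weakest to strongest ISA so the last matching assignment wins. A self-contained sketch of the pattern, with hypothetical flag names and stand-in kernels:

#include <stdint.h>

#define FLAG_MMX  (1 << 0)  /* hypothetical CPU-flag bits */
#define FLAG_SSE2 (1 << 1)

typedef void (*add_bytes_fn)(uint8_t *dst, uint8_t *src, intptr_t w);

static void add_bytes_c(uint8_t *dst, uint8_t *src, intptr_t w)
{
    for (intptr_t i = 0; i < w; i++)
        dst[i] += src[i];
}

/* Stand-ins for the asm kernels; the real init declares them extern. */
static void add_bytes_mmx(uint8_t *d, uint8_t *s, intptr_t w)  { add_bytes_c(d, s, w); }
static void add_bytes_sse2(uint8_t *d, uint8_t *s, intptr_t w) { add_bytes_c(d, s, w); }

static add_bytes_fn select_add_bytes(int cpu_flags)
{
    add_bytes_fn f = add_bytes_c;   /* portable fallback first */
    if (cpu_flags & FLAG_MMX)
        f = add_bytes_mmx;
    if (cpu_flags & FLAG_SSE2)
        f = add_bytes_sse2;         /* strongest match wins    */
    return f;
}
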
diff --git a/libavcodec/x86/lossless_videodsp.asm b/libavcodec/x86/lossless_videodsp.asm
index f06fcdf7cf..a6ce5fe62b 100644
--- a/libavcodec/x86/lossless_videodsp.asm
+++ b/libavcodec/x86/lossless_videodsp.asm
@@ -24,13 +24,199 @@
SECTION_RODATA
+cextern pb_15
+pb_zzzzzzzz77777777: times 8 db -1
+pb_7: times 8 db 7
pb_ef: times 8 db 14,15
pb_67: times 8 db 6, 7
+pb_zzzz3333zzzzbbbb: db -1,-1,-1,-1,3,3,3,3,-1,-1,-1,-1,11,11,11,11
+pb_zz11zz55zz99zzdd: db -1,-1,1,1,-1,-1,5,5,-1,-1,9,9,-1,-1,13,13
pb_zzzz2323zzzzabab: db -1,-1,-1,-1, 2, 3, 2, 3,-1,-1,-1,-1,10,11,10,11
pb_zzzzzzzz67676767: db -1,-1,-1,-1,-1,-1,-1,-1, 6, 7, 6, 7, 6, 7, 6, 7
SECTION .text
+; void ff_add_median_pred_mmxext(uint8_t *dst, const uint8_t *top,
+; const uint8_t *diff, int w,
+; int *left, int *left_top)
+%macro MEDIAN_PRED 0
+cglobal add_median_pred, 6,6,8, dst, top, diff, w, left, left_top
+ movu m0, [topq]
+ mova m2, m0
+ movd m4, [left_topq]
+ LSHIFT m2, 1
+ mova m1, m0
+ por m4, m2
+ movd m3, [leftq]
+ psubb m0, m4 ; t-tl
+ add dstq, wq
+ add topq, wq
+ add diffq, wq
+ neg wq
+ jmp .skip
+.loop:
+ movu m4, [topq+wq]
+ mova m0, m4
+ LSHIFT m4, 1
+ por m4, m1
+ mova m1, m0 ; t
+ psubb m0, m4 ; t-tl
+.skip:
+ movu m2, [diffq+wq]
+%assign i 0
+%rep mmsize
+ mova m4, m0
+ paddb m4, m3 ; t-tl+l
+ mova m5, m3
+ pmaxub m3, m1
+ pminub m5, m1
+ pminub m3, m4
+ pmaxub m3, m5 ; median
+ paddb m3, m2 ; +residual
+%if i==0
+ mova m7, m3
+ LSHIFT m7, mmsize-1
+%else
+ mova m6, m3
+ RSHIFT m7, 1
+ LSHIFT m6, mmsize-1
+ por m7, m6
+%endif
+%if i<mmsize-1
+ RSHIFT m0, 1
+ RSHIFT m1, 1
+ RSHIFT m2, 1
+%endif
+%assign i i+1
+%endrep
+ movu [dstq+wq], m7
+ add wq, mmsize
+ jl .loop
+ movzx r2d, byte [dstq-1]
+ mov [leftq], r2d
+ movzx r2d, byte [topq-1]
+ mov [left_topq], r2d
+ RET
+%endmacro
+
+%if ARCH_X86_32
+INIT_MMX mmxext
+MEDIAN_PRED
+%endif
+INIT_XMM sse2
+MEDIAN_PRED
+
+
+%macro ADD_LEFT_LOOP 2 ; %1 = dst_is_aligned, %2 = src_is_aligned
+ add srcq, wq
+ add dstq, wq
+ neg wq
+%%.loop:
+%if %2
+ mova m1, [srcq+wq]
+%else
+ movu m1, [srcq+wq]
+%endif
+ mova m2, m1
+ psllw m1, 8
+ paddb m1, m2
+ mova m2, m1
+ pshufb m1, m3
+ paddb m1, m2
+ pshufb m0, m5
+ mova m2, m1
+ pshufb m1, m4
+ paddb m1, m2
+%if mmsize == 16
+ mova m2, m1
+ pshufb m1, m6
+ paddb m1, m2
+%endif
+ paddb m0, m1
+%if %1
+ mova [dstq+wq], m0
+%else
+ movq [dstq+wq], m0
+ movhps [dstq+wq+8], m0
+%endif
+ add wq, mmsize
+ jl %%.loop
+ mov eax, mmsize-1
+ sub eax, wd
+ movd m1, eax
+ pshufb m0, m1
+ movd eax, m0
+ RET
+%endmacro
+
+; int ff_add_left_pred(uint8_t *dst, const uint8_t *src, int w, int left)
+INIT_MMX ssse3
+cglobal add_left_pred, 3,3,7, dst, src, w, left
+.skip_prologue:
+ mova m5, [pb_7]
+ mova m4, [pb_zzzz3333zzzzbbbb]
+ mova m3, [pb_zz11zz55zz99zzdd]
+ movd m0, leftm
+ psllq m0, 56
+ ADD_LEFT_LOOP 1, 1
+
+INIT_XMM sse4
+cglobal add_left_pred, 3,3,7, dst, src, w, left
+ mova m5, [pb_15]
+ mova m6, [pb_zzzzzzzz77777777]
+ mova m4, [pb_zzzz3333zzzzbbbb]
+ mova m3, [pb_zz11zz55zz99zzdd]
+ movd m0, leftm
+ pslldq m0, 15
+ test srcq, 15
+ jnz .src_unaligned
+ test dstq, 15
+ jnz .dst_unaligned
+ ADD_LEFT_LOOP 1, 1
+.dst_unaligned:
+ ADD_LEFT_LOOP 0, 1
+.src_unaligned:
+ ADD_LEFT_LOOP 0, 0
+
+%macro ADD_BYTES 0
+cglobal add_bytes, 3,4,2, dst, src, w, size
+ mov sizeq, wq
+ and sizeq, -2*mmsize
+ jz .2
+ add dstq, sizeq
+ add srcq, sizeq
+ neg sizeq
+.1:
+ mova m0, [srcq + sizeq]
+ mova m1, [srcq + sizeq + mmsize]
+ paddb m0, [dstq + sizeq]
+ paddb m1, [dstq + sizeq + mmsize]
+ mova [dstq + sizeq], m0
+ mova [dstq + sizeq + mmsize], m1
+ add sizeq, 2*mmsize
+ jl .1
+.2:
+ and wq, 2*mmsize-1
+ jz .end
+ add dstq, wq
+ add srcq, wq
+ neg wq
+.3:
+ mov sizeb, [srcq + wq]
+ add [dstq + wq], sizeb
+ inc wq
+ jl .3
+.end:
+ REP_RET
+%endmacro
+
+%if ARCH_X86_32
+INIT_MMX mmx
+ADD_BYTES
+%endif
+INIT_XMM sse2
+ADD_BYTES
+
%macro INT16_LOOP 2 ; %1 = a/u (aligned/unaligned), %2 = add/sub
movd m4, maskd
SPLATW m4, m4
@@ -86,9 +272,11 @@ SECTION .text
RET
%endmacro
+%if ARCH_X86_32
INIT_MMX mmx
cglobal add_int16, 4,4,5, dst, src, mask, w, tmp
INT16_LOOP a, add
+%endif
INIT_XMM sse2
cglobal add_int16, 4,4,5, dst, src, mask, w, tmp
@@ -100,9 +288,11 @@ cglobal add_int16, 4,4,5, dst, src, mask, w, tmp
.unaligned:
INT16_LOOP u, add
+%if ARCH_X86_32
INIT_MMX mmx
cglobal diff_int16, 5,5,5, dst, src1, src2, mask, w, tmp
INT16_LOOP a, sub
+%endif
INIT_XMM sse2
cglobal diff_int16, 5,5,5, dst, src1, src2, mask, w, tmp
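
The int16 paths at the bottom of this file (their MMX variants now built for x86_32 only) apply the same add/subtract idea to 16-bit samples, masking each result to the stream's bit depth; INT16_LOOP broadcasts `mask` with SPLATW and processes whole vectors. Scalar semantics, sketched for illustration:

#include <stdint.h>

/* dst[i] = (dst[i] + src[i]) & mask, where mask = (1 << depth) - 1 */
static void add_int16_ref(uint16_t *dst, const uint16_t *src,
                          unsigned mask, int w)
{
    for (int i = 0; i < w; i++)
        dst[i] = (dst[i] + src[i]) & mask;
}

/* dst[i] = (src1[i] - src2[i]) & mask */
static void diff_int16_ref(uint16_t *dst, const uint16_t *src1,
                           const uint16_t *src2, unsigned mask, int w)
{
    for (int i = 0; i < w; i++)
        dst[i] = (src1[i] - src2[i]) & mask;
}
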
diff --git a/libavcodec/x86/lossless_videodsp_init.c b/libavcodec/x86/lossless_videodsp_init.c
index 548d0433e1..465feef81b 100644
--- a/libavcodec/x86/lossless_videodsp_init.c
+++ b/libavcodec/x86/lossless_videodsp_init.c
@@ -18,10 +18,27 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "config.h"
+#include "libavutil/x86/asm.h"
#include "../lossless_videodsp.h"
#include "libavutil/pixdesc.h"
#include "libavutil/x86/cpu.h"
+void ff_add_bytes_mmx(uint8_t *dst, uint8_t *src, intptr_t w);
+void ff_add_bytes_sse2(uint8_t *dst, uint8_t *src, intptr_t w);
+
+void ff_add_median_pred_mmxext(uint8_t *dst, const uint8_t *top,
+ const uint8_t *diff, intptr_t w,
+ int *left, int *left_top);
+void ff_add_median_pred_sse2(uint8_t *dst, const uint8_t *top,
+ const uint8_t *diff, intptr_t w,
+ int *left, int *left_top);
+
+int ff_add_left_pred_ssse3(uint8_t *dst, const uint8_t *src,
+ intptr_t w, int left);
+int ff_add_left_pred_sse4(uint8_t *dst, const uint8_t *src,
+ intptr_t w, int left);
+
void ff_add_int16_mmx(uint16_t *dst, const uint16_t *src, unsigned mask, int w);
void ff_add_int16_sse2(uint16_t *dst, const uint16_t *src, unsigned mask, int w);
void ff_diff_int16_mmx (uint16_t *dst, const uint16_t *src1, const uint16_t *src2, unsigned mask, int w);
@@ -31,28 +48,85 @@ int ff_add_hfyu_left_pred_int16_sse4(uint16_t *dst, const uint16_t *src, unsigne
void ff_add_hfyu_median_pred_int16_mmxext(uint16_t *dst, const uint16_t *top, const uint16_t *diff, unsigned mask, int w, int *left, int *left_top);
void ff_sub_hfyu_median_pred_int16_mmxext(uint16_t *dst, const uint16_t *src1, const uint16_t *src2, unsigned mask, int w, int *left, int *left_top);
+#if HAVE_INLINE_ASM && HAVE_7REGS && ARCH_X86_32
+static void add_median_pred_cmov(uint8_t *dst, const uint8_t *top,
+ const uint8_t *diff, intptr_t w,
+ int *left, int *left_top)
+{
+ x86_reg w2 = -w;
+ x86_reg x;
+ int l = *left & 0xff;
+ int tl = *left_top & 0xff;
+ int t;
+ __asm__ volatile (
+ "mov %7, %3 \n"
+ "1: \n"
+ "movzbl (%3, %4), %2 \n"
+ "mov %2, %k3 \n"
+ "sub %b1, %b3 \n"
+ "add %b0, %b3 \n"
+ "mov %2, %1 \n"
+ "cmp %0, %2 \n"
+ "cmovg %0, %2 \n"
+ "cmovg %1, %0 \n"
+ "cmp %k3, %0 \n"
+ "cmovg %k3, %0 \n"
+ "mov %7, %3 \n"
+ "cmp %2, %0 \n"
+ "cmovl %2, %0 \n"
+ "add (%6, %4), %b0 \n"
+ "mov %b0, (%5, %4) \n"
+ "inc %4 \n"
+ "jl 1b \n"
+ : "+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
+ : "r"(dst + w), "r"(diff + w), "rm"(top + w)
+ );
+ *left = l;
+ *left_top = tl;
+}
+#endif
void ff_llviddsp_init_x86(LLVidDSPContext *c, AVCodecContext *avctx)
{
int cpu_flags = av_get_cpu_flags();
const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(avctx->pix_fmt);
- if (EXTERNAL_MMX(cpu_flags)) {
+#if HAVE_INLINE_ASM && HAVE_7REGS && ARCH_X86_32
+ if (cpu_flags & AV_CPU_FLAG_CMOV)
+ c->add_median_pred = add_median_pred_cmov;
+#endif
+
+ if (ARCH_X86_32 && EXTERNAL_MMX(cpu_flags)) {
+ c->add_bytes = ff_add_bytes_mmx;
+
c->add_int16 = ff_add_int16_mmx;
c->diff_int16 = ff_diff_int16_mmx;
}
+ if (ARCH_X86_32 && EXTERNAL_MMXEXT(cpu_flags)) {
+ /* slower than cmov version on AMD */
+ if (!(cpu_flags & AV_CPU_FLAG_3DNOW))
+ c->add_median_pred = ff_add_median_pred_mmxext;
+ }
+
if (EXTERNAL_MMXEXT(cpu_flags) && pix_desc && pix_desc->comp[0].depth<16) {
c->add_hfyu_median_pred_int16 = ff_add_hfyu_median_pred_int16_mmxext;
c->sub_hfyu_median_pred_int16 = ff_sub_hfyu_median_pred_int16_mmxext;
}
if (EXTERNAL_SSE2(cpu_flags)) {
+ c->add_bytes = ff_add_bytes_sse2;
+ c->add_median_pred = ff_add_median_pred_sse2;
+
c->add_int16 = ff_add_int16_sse2;
c->diff_int16 = ff_diff_int16_sse2;
}
if (EXTERNAL_SSSE3(cpu_flags)) {
+ c->add_left_pred = ff_add_left_pred_ssse3;
+ if (cpu_flags & AV_CPU_FLAG_SSE4) // not really SSE4, just slow on Conroe
+ c->add_left_pred = ff_add_left_pred_sse4;
+
c->add_hfyu_left_pred_int16 = ff_add_hfyu_left_pred_int16_ssse3;
}