author    Michael Niedermayer <michaelni@gmx.at>  2011-10-22 01:03:27 +0200
committer Michael Niedermayer <michaelni@gmx.at>  2011-10-22 01:16:41 +0200
commit    aedc908601de7396751a9a4504e064782d9f6a0b (patch)
tree      8f04b899142439893bac426ac83d05c4068b099c /libavcodec/x86
parent    1a7090bfafe986d4470ba8059c815939171ddb74 (diff)
parent    f4b51d061f0f34e36be876b562b8abe47f4b9c1c (diff)
Merge remote-tracking branch 'qatar/master'
* qatar/master: (35 commits)
  flvdec: Do not call parse_keyframes_index with a NULL stream
  libspeexdec: include system headers before local headers
  libspeexdec: return meaningful error codes
  libspeexdec: cosmetics: reindent
  libspeexdec: decode one frame at a time.
  swscale: fix signed shift overflows in ff_yuv2rgb_c_init_tables()
  Move timefilter code from lavf to lavd.
  mov: add support for hdvd and pgap metadata atoms
  mov: rename function _stik, some indentation cosmetics
  mov: rename function _int8 to remove ambiguity, some indentation cosmetics
  mov: parse the gnre atom
  mp3on4: check for allocation failures in decode_init_mp3on4()
  mp3on4: create a separate flush function for MP3onMP4.
  mp3on4: ensure that the frame channel count does not exceed the codec channel count.
  mp3on4: set channel layout
  mp3on4: fix the output channel order
  mp3on4: allocate temp buffer with av_malloc() instead of on the stack.
  mp3on4: copy MPADSPContext from first context to all contexts.
  fmtconvert: port float_to_int16_interleave() 2-channel x86 inline asm to yasm
  fmtconvert: port int32_to_float_fmul_scalar() x86 inline asm to yasm
  ...

Conflicts:
    libavcodec/arm/h264dsp_init_arm.c
    libavcodec/h264.c
    libavcodec/h264.h
    libavcodec/h264_cabac.c
    libavcodec/h264_cavlc.c
    libavcodec/h264_ps.c
    libavcodec/h264dsp_template.c
    libavcodec/h264idct_template.c
    libavcodec/h264pred.c
    libavcodec/h264pred_template.c
    libavcodec/x86/h264dsp_mmx.c
    libavdevice/Makefile
    libavdevice/jack_audio.c
    libavformat/Makefile
    libavformat/flvdec.c
    libavformat/flvenc.c
    libavutil/pixfmt.h
    libswscale/utils.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Diffstat (limited to 'libavcodec/x86')
-rw-r--r--  libavcodec/x86/dsputil_yasm.asm         8
-rw-r--r--  libavcodec/x86/fmtconvert.asm         140
-rw-r--r--  libavcodec/x86/fmtconvert_mmx.c       213
-rw-r--r--  libavcodec/x86/h264_weight.asm        210
-rw-r--r--  libavcodec/x86/h264_weight_10bit.asm  145
-rw-r--r--  libavcodec/x86/h264dsp_mmx.c          177
6 files changed, 360 insertions, 533 deletions
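
For orientation before the diffs: the int32_to_float_fmul_scalar() kernel being ported is, in scalar terms, a convert-and-scale loop. A minimal C sketch of its contract (essentially the lavc C fallback; shown for reference, not part of the commit):

    /* Convert len 32-bit ints to float and scale by mul; the SSE/SSE2
     * versions below do the same 8 ints (32 bytes) per loop iteration. */
    static void int32_to_float_fmul_scalar_c(float *dst, const int *src,
                                             float mul, int len)
    {
        int i;
        for (i = 0; i < len; i++)
            dst[i] = src[i] * mul;
    }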
diff --git a/libavcodec/x86/dsputil_yasm.asm b/libavcodec/x86/dsputil_yasm.asm
index 70f1b86c44..6627d21bd8 100644
--- a/libavcodec/x86/dsputil_yasm.asm
+++ b/libavcodec/x86/dsputil_yasm.asm
@@ -1055,14 +1055,6 @@ emu_edge mmx
; int32_t max, unsigned int len)
;-----------------------------------------------------------------------------
-%macro SPLATD_MMX 1
- punpckldq %1, %1
-%endmacro
-
-%macro SPLATD_SSE2 1
- pshufd %1, %1, 0
-%endmacro
-
%macro VECTOR_CLIP_INT32 4
cglobal vector_clip_int32_%1, 5,5,%2, dst, src, min, max, len
%ifidn %1, sse2
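
The SPLATD_MMX/SPLATD_SSE2 macros removed here broadcast the low 32-bit lane across a register; they were presumably hoisted into the shared x86util macros, since fmtconvert.asm below still uses SPLATD. An intrinsics model of the SSE2 flavor (a sketch, not the committed code):

    #include <emmintrin.h>

    /* SPLATD_SSE2 is "pshufd x, x, 0": {a,b,c,d} -> {a,a,a,a}. */
    static __m128i splatd_sse2(__m128i v)
    {
        return _mm_shuffle_epi32(v, 0);
    }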
diff --git a/libavcodec/x86/fmtconvert.asm b/libavcodec/x86/fmtconvert.asm
index 2deb577ca6..37e7a094ce 100644
--- a/libavcodec/x86/fmtconvert.asm
+++ b/libavcodec/x86/fmtconvert.asm
@@ -24,6 +24,146 @@
SECTION_TEXT
+;---------------------------------------------------------------------------------
+; void int32_to_float_fmul_scalar(float *dst, const int *src, float mul, int len);
+;---------------------------------------------------------------------------------
+%macro INT32_TO_FLOAT_FMUL_SCALAR 2
+%ifdef ARCH_X86_64
+cglobal int32_to_float_fmul_scalar_%1, 3,3,%2, dst, src, len
+%else
+cglobal int32_to_float_fmul_scalar_%1, 4,4,%2, dst, src, mul, len
+ movss m0, mulm
+%endif
+ SPLATD m0
+ shl lenq, 2
+ add srcq, lenq
+ add dstq, lenq
+ neg lenq
+.loop:
+%ifidn %1, sse2
+ cvtdq2ps m1, [srcq+lenq ]
+ cvtdq2ps m2, [srcq+lenq+16]
+%else
+ cvtpi2ps m1, [srcq+lenq ]
+ cvtpi2ps m3, [srcq+lenq+ 8]
+ cvtpi2ps m2, [srcq+lenq+16]
+ cvtpi2ps m4, [srcq+lenq+24]
+ movlhps m1, m3
+ movlhps m2, m4
+%endif
+ mulps m1, m0
+ mulps m2, m0
+ mova [dstq+lenq ], m1
+ mova [dstq+lenq+16], m2
+ add lenq, 32
+ jl .loop
+ REP_RET
+%endmacro
+
+INIT_XMM
+%define SPLATD SPLATD_SSE
+%define movdqa movaps
+INT32_TO_FLOAT_FMUL_SCALAR sse, 5
+%undef movdqa
+%define SPLATD SPLATD_SSE2
+INT32_TO_FLOAT_FMUL_SCALAR sse2, 3
+%undef SPLATD
+
+
+;------------------------------------------------------------------------------
+; void ff_float_to_int16(int16_t *dst, const float *src, long len);
+;------------------------------------------------------------------------------
+%macro FLOAT_TO_INT16 2
+cglobal float_to_int16_%1, 3,3,%2, dst, src, len
+ add lenq, lenq
+ lea srcq, [srcq+2*lenq]
+ add dstq, lenq
+ neg lenq
+.loop:
+%ifidn %1, sse2
+ cvtps2dq m0, [srcq+2*lenq ]
+ cvtps2dq m1, [srcq+2*lenq+16]
+ packssdw m0, m1
+ mova [dstq+lenq], m0
+%else
+ cvtps2pi m0, [srcq+2*lenq ]
+ cvtps2pi m1, [srcq+2*lenq+ 8]
+ cvtps2pi m2, [srcq+2*lenq+16]
+ cvtps2pi m3, [srcq+2*lenq+24]
+ packssdw m0, m1
+ packssdw m2, m3
+ mova [dstq+lenq ], m0
+ mova [dstq+lenq+8], m2
+%endif
+ add lenq, 16
+ js .loop
+%ifnidn %1, sse2
+ emms
+%endif
+ REP_RET
+%endmacro
+
+INIT_XMM
+FLOAT_TO_INT16 sse2, 2
+INIT_MMX
+FLOAT_TO_INT16 sse, 0
+%define cvtps2pi pf2id
+FLOAT_TO_INT16 3dnow, 0
+%undef cvtps2pi
+
+
+;-------------------------------------------------------------------------------
+; void ff_float_to_int16_interleave2(int16_t *dst, const float **src, long len);
+;-------------------------------------------------------------------------------
+%macro FLOAT_TO_INT16_INTERLEAVE2 1
+cglobal float_to_int16_interleave2_%1, 3,4,2, dst, src0, src1, len
+ lea lenq, [4*r2q]
+ mov src1q, [src0q+gprsize]
+ mov src0q, [src0q]
+ add dstq, lenq
+ add src0q, lenq
+ add src1q, lenq
+ neg lenq
+.loop:
+%ifidn %1, sse2
+ cvtps2dq m0, [src0q+lenq]
+ cvtps2dq m1, [src1q+lenq]
+ packssdw m0, m1
+ movhlps m1, m0
+ punpcklwd m0, m1
+ mova [dstq+lenq], m0
+%else
+ cvtps2pi m0, [src0q+lenq ]
+ cvtps2pi m1, [src0q+lenq+8]
+ cvtps2pi m2, [src1q+lenq ]
+ cvtps2pi m3, [src1q+lenq+8]
+ packssdw m0, m1
+ packssdw m2, m3
+ mova m1, m0
+ punpcklwd m0, m2
+ punpckhwd m1, m2
+ mova [dstq+lenq ], m0
+ mova [dstq+lenq+8], m1
+%endif
+ add lenq, 16
+ js .loop
+%ifnidn %1, sse2
+ emms
+%endif
+ REP_RET
+%endmacro
+
+INIT_MMX
+%define cvtps2pi pf2id
+FLOAT_TO_INT16_INTERLEAVE2 3dnow
+%undef cvtps2pi
+%define movdqa movaps
+FLOAT_TO_INT16_INTERLEAVE2 sse
+%undef movdqa
+INIT_XMM
+FLOAT_TO_INT16_INTERLEAVE2 sse2
+
+
%macro PSWAPD_SSE 2
pshufw %1, %2, 0x4e
%endmacro
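
The float_to_int16 kernels above rely on packssdw for saturation, so values outside the int16 range clip rather than wrap. A scalar model (a sketch; lrintf matches cvtps2dq's round-to-nearest under the default MXCSR, whereas the 3DNow! pf2id path truncates, which is why that path is not bit-exact):

    #include <math.h>
    #include <stdint.h>

    static void float_to_int16_ref(int16_t *dst, const float *src, long len)
    {
        long i;
        for (i = 0; i < len; i++) {
            int v = (int)lrintf(src[i]);
            dst[i] = v < -32768 ? -32768 : v > 32767 ? 32767 : v; /* packssdw */
        }
    }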
diff --git a/libavcodec/x86/fmtconvert_mmx.c b/libavcodec/x86/fmtconvert_mmx.c
index ba2c2c9bd5..a3d8f89816 100644
--- a/libavcodec/x86/fmtconvert_mmx.c
+++ b/libavcodec/x86/fmtconvert_mmx.c
@@ -26,133 +26,32 @@
#include "libavutil/x86_cpu.h"
#include "libavcodec/fmtconvert.h"
-static void int32_to_float_fmul_scalar_sse(float *dst, const int *src, float mul, int len)
-{
- x86_reg i = -4*len;
- __asm__ volatile(
- "movss %3, %%xmm4 \n"
- "shufps $0, %%xmm4, %%xmm4 \n"
- "1: \n"
- "cvtpi2ps (%2,%0), %%xmm0 \n"
- "cvtpi2ps 8(%2,%0), %%xmm1 \n"
- "cvtpi2ps 16(%2,%0), %%xmm2 \n"
- "cvtpi2ps 24(%2,%0), %%xmm3 \n"
- "movlhps %%xmm1, %%xmm0 \n"
- "movlhps %%xmm3, %%xmm2 \n"
- "mulps %%xmm4, %%xmm0 \n"
- "mulps %%xmm4, %%xmm2 \n"
- "movaps %%xmm0, (%1,%0) \n"
- "movaps %%xmm2, 16(%1,%0) \n"
- "add $32, %0 \n"
- "jl 1b \n"
- :"+r"(i)
- :"r"(dst+len), "r"(src+len), "m"(mul)
- );
-}
-
-static void int32_to_float_fmul_scalar_sse2(float *dst, const int *src, float mul, int len)
-{
- x86_reg i = -4*len;
- __asm__ volatile(
- "movss %3, %%xmm4 \n"
- "shufps $0, %%xmm4, %%xmm4 \n"
- "1: \n"
- "cvtdq2ps (%2,%0), %%xmm0 \n"
- "cvtdq2ps 16(%2,%0), %%xmm1 \n"
- "mulps %%xmm4, %%xmm0 \n"
- "mulps %%xmm4, %%xmm1 \n"
- "movaps %%xmm0, (%1,%0) \n"
- "movaps %%xmm1, 16(%1,%0) \n"
- "add $32, %0 \n"
- "jl 1b \n"
- :"+r"(i)
- :"r"(dst+len), "r"(src+len), "m"(mul)
- );
-}
+#if HAVE_YASM
-static void float_to_int16_3dnow(int16_t *dst, const float *src, long len){
- x86_reg reglen = len;
- // not bit-exact: pf2id uses different rounding than C and SSE
- __asm__ volatile(
- "add %0 , %0 \n\t"
- "lea (%2,%0,2) , %2 \n\t"
- "add %0 , %1 \n\t"
- "neg %0 \n\t"
- "1: \n\t"
- "pf2id (%2,%0,2) , %%mm0 \n\t"
- "pf2id 8(%2,%0,2) , %%mm1 \n\t"
- "pf2id 16(%2,%0,2) , %%mm2 \n\t"
- "pf2id 24(%2,%0,2) , %%mm3 \n\t"
- "packssdw %%mm1 , %%mm0 \n\t"
- "packssdw %%mm3 , %%mm2 \n\t"
- "movq %%mm0 , (%1,%0) \n\t"
- "movq %%mm2 , 8(%1,%0) \n\t"
- "add $16 , %0 \n\t"
- " js 1b \n\t"
- "femms \n\t"
- :"+r"(reglen), "+r"(dst), "+r"(src)
- );
-}
+void ff_int32_to_float_fmul_scalar_sse (float *dst, const int *src, float mul, int len);
+void ff_int32_to_float_fmul_scalar_sse2(float *dst, const int *src, float mul, int len);
-static void float_to_int16_sse(int16_t *dst, const float *src, long len){
- x86_reg reglen = len;
- __asm__ volatile(
- "add %0 , %0 \n\t"
- "lea (%2,%0,2) , %2 \n\t"
- "add %0 , %1 \n\t"
- "neg %0 \n\t"
- "1: \n\t"
- "cvtps2pi (%2,%0,2) , %%mm0 \n\t"
- "cvtps2pi 8(%2,%0,2) , %%mm1 \n\t"
- "cvtps2pi 16(%2,%0,2) , %%mm2 \n\t"
- "cvtps2pi 24(%2,%0,2) , %%mm3 \n\t"
- "packssdw %%mm1 , %%mm0 \n\t"
- "packssdw %%mm3 , %%mm2 \n\t"
- "movq %%mm0 , (%1,%0) \n\t"
- "movq %%mm2 , 8(%1,%0) \n\t"
- "add $16 , %0 \n\t"
- " js 1b \n\t"
- "emms \n\t"
- :"+r"(reglen), "+r"(dst), "+r"(src)
- );
-}
+void ff_float_to_int16_3dnow(int16_t *dst, const float *src, long len);
+void ff_float_to_int16_sse (int16_t *dst, const float *src, long len);
+void ff_float_to_int16_sse2 (int16_t *dst, const float *src, long len);
-static void float_to_int16_sse2(int16_t *dst, const float *src, long len){
- x86_reg reglen = len;
- __asm__ volatile(
- "add %0 , %0 \n\t"
- "lea (%2,%0,2) , %2 \n\t"
- "add %0 , %1 \n\t"
- "neg %0 \n\t"
- "1: \n\t"
- "cvtps2dq (%2,%0,2) , %%xmm0 \n\t"
- "cvtps2dq 16(%2,%0,2) , %%xmm1 \n\t"
- "packssdw %%xmm1 , %%xmm0 \n\t"
- "movdqa %%xmm0 , (%1,%0) \n\t"
- "add $16 , %0 \n\t"
- " js 1b \n\t"
- :"+r"(reglen), "+r"(dst), "+r"(src)
- );
-}
+void ff_float_to_int16_interleave2_3dnow(int16_t *dst, const float **src, long len);
+void ff_float_to_int16_interleave2_sse (int16_t *dst, const float **src, long len);
+void ff_float_to_int16_interleave2_sse2 (int16_t *dst, const float **src, long len);
void ff_float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len);
void ff_float_to_int16_interleave6_3dnow(int16_t *dst, const float **src, int len);
void ff_float_to_int16_interleave6_3dn2(int16_t *dst, const float **src, int len);
-#if !HAVE_YASM
-#define ff_float_to_int16_interleave6_sse(a,b,c) float_to_int16_interleave_misc_sse(a,b,c,6)
-#define ff_float_to_int16_interleave6_3dnow(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6)
-#define ff_float_to_int16_interleave6_3dn2(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6)
-#endif
#define ff_float_to_int16_interleave6_sse2 ff_float_to_int16_interleave6_sse
-#define FLOAT_TO_INT16_INTERLEAVE(cpu, body) \
+#define FLOAT_TO_INT16_INTERLEAVE(cpu) \
/* gcc pessimizes register allocation if this is in the same function as float_to_int16_interleave_sse2*/\
static av_noinline void float_to_int16_interleave_misc_##cpu(int16_t *dst, const float **src, long len, int channels){\
DECLARE_ALIGNED(16, int16_t, tmp)[len];\
int i,j,c;\
for(c=0; c<channels; c++){\
- float_to_int16_##cpu(tmp, src[c], len);\
+ ff_float_to_int16_##cpu(tmp, src[c], len);\
for(i=0, j=c; i<len; i++, j+=channels)\
dst[j] = tmp[i];\
}\
@@ -160,73 +59,18 @@ static av_noinline void float_to_int16_interleave_misc_##cpu(int16_t *dst, const
\
static void float_to_int16_interleave_##cpu(int16_t *dst, const float **src, long len, int channels){\
if(channels==1)\
- float_to_int16_##cpu(dst, src[0], len);\
+ ff_float_to_int16_##cpu(dst, src[0], len);\
else if(channels==2){\
- x86_reg reglen = len; \
- const float *src0 = src[0];\
- const float *src1 = src[1];\
- __asm__ volatile(\
- "shl $2, %0 \n"\
- "add %0, %1 \n"\
- "add %0, %2 \n"\
- "add %0, %3 \n"\
- "neg %0 \n"\
- body\
- :"+r"(reglen), "+r"(dst), "+r"(src0), "+r"(src1)\
- );\
+ ff_float_to_int16_interleave2_##cpu(dst, src, len);\
}else if(channels==6){\
ff_float_to_int16_interleave6_##cpu(dst, src, len);\
}else\
float_to_int16_interleave_misc_##cpu(dst, src, len, channels);\
}
-FLOAT_TO_INT16_INTERLEAVE(3dnow,
- "1: \n"
- "pf2id (%2,%0), %%mm0 \n"
- "pf2id 8(%2,%0), %%mm1 \n"
- "pf2id (%3,%0), %%mm2 \n"
- "pf2id 8(%3,%0), %%mm3 \n"
- "packssdw %%mm1, %%mm0 \n"
- "packssdw %%mm3, %%mm2 \n"
- "movq %%mm0, %%mm1 \n"
- "punpcklwd %%mm2, %%mm0 \n"
- "punpckhwd %%mm2, %%mm1 \n"
- "movq %%mm0, (%1,%0)\n"
- "movq %%mm1, 8(%1,%0)\n"
- "add $16, %0 \n"
- "js 1b \n"
- "femms \n"
-)
-
-FLOAT_TO_INT16_INTERLEAVE(sse,
- "1: \n"
- "cvtps2pi (%2,%0), %%mm0 \n"
- "cvtps2pi 8(%2,%0), %%mm1 \n"
- "cvtps2pi (%3,%0), %%mm2 \n"
- "cvtps2pi 8(%3,%0), %%mm3 \n"
- "packssdw %%mm1, %%mm0 \n"
- "packssdw %%mm3, %%mm2 \n"
- "movq %%mm0, %%mm1 \n"
- "punpcklwd %%mm2, %%mm0 \n"
- "punpckhwd %%mm2, %%mm1 \n"
- "movq %%mm0, (%1,%0)\n"
- "movq %%mm1, 8(%1,%0)\n"
- "add $16, %0 \n"
- "js 1b \n"
- "emms \n"
-)
-
-FLOAT_TO_INT16_INTERLEAVE(sse2,
- "1: \n"
- "cvtps2dq (%2,%0), %%xmm0 \n"
- "cvtps2dq (%3,%0), %%xmm1 \n"
- "packssdw %%xmm1, %%xmm0 \n"
- "movhlps %%xmm0, %%xmm1 \n"
- "punpcklwd %%xmm1, %%xmm0 \n"
- "movdqa %%xmm0, (%1,%0) \n"
- "add $16, %0 \n"
- "js 1b \n"
-)
+FLOAT_TO_INT16_INTERLEAVE(3dnow)
+FLOAT_TO_INT16_INTERLEAVE(sse)
+FLOAT_TO_INT16_INTERLEAVE(sse2)
static void float_to_int16_interleave_3dn2(int16_t *dst, const float **src, long len, int channels){
if(channels==6)
@@ -235,7 +79,6 @@ static void float_to_int16_interleave_3dn2(int16_t *dst, const float **src, long
float_to_int16_interleave_3dnow(dst, src, len, channels);
}
-#if HAVE_YASM
void ff_float_interleave2_mmx(float *dst, const float **src, unsigned int len);
void ff_float_interleave2_sse(float *dst, const float **src, unsigned int len);
@@ -269,34 +112,32 @@ void ff_fmt_convert_init_x86(FmtConvertContext *c, AVCodecContext *avctx)
{
int mm_flags = av_get_cpu_flags();
- if (mm_flags & AV_CPU_FLAG_MMX) {
#if HAVE_YASM
+ if (mm_flags & AV_CPU_FLAG_MMX) {
c->float_interleave = float_interleave_mmx;
-#endif
- if(mm_flags & AV_CPU_FLAG_3DNOW){
+ if (HAVE_AMD3DNOW && mm_flags & AV_CPU_FLAG_3DNOW) {
if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
- c->float_to_int16 = float_to_int16_3dnow;
+ c->float_to_int16 = ff_float_to_int16_3dnow;
c->float_to_int16_interleave = float_to_int16_interleave_3dnow;
}
}
- if(mm_flags & AV_CPU_FLAG_3DNOWEXT){
+ if (HAVE_AMD3DNOWEXT && mm_flags & AV_CPU_FLAG_3DNOWEXT) {
if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
c->float_to_int16_interleave = float_to_int16_interleave_3dn2;
}
}
- if(mm_flags & AV_CPU_FLAG_SSE){
- c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse;
- c->float_to_int16 = float_to_int16_sse;
+ if (HAVE_SSE && mm_flags & AV_CPU_FLAG_SSE) {
+ c->int32_to_float_fmul_scalar = ff_int32_to_float_fmul_scalar_sse;
+ c->float_to_int16 = ff_float_to_int16_sse;
c->float_to_int16_interleave = float_to_int16_interleave_sse;
-#if HAVE_YASM
c->float_interleave = float_interleave_sse;
-#endif
}
- if(mm_flags & AV_CPU_FLAG_SSE2){
- c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse2;
- c->float_to_int16 = float_to_int16_sse2;
+ if (HAVE_SSE && mm_flags & AV_CPU_FLAG_SSE2) {
+ c->int32_to_float_fmul_scalar = ff_int32_to_float_fmul_scalar_sse2;
+ c->float_to_int16 = ff_float_to_int16_sse2;
c->float_to_int16_interleave = float_to_int16_interleave_sse2;
}
}
+#endif
}
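
The channels==2 case above now delegates to the yasm interleave2 kernels. Their scalar behaviour, for reference (a sketch with the same clipping as float_to_int16; helper name hypothetical):

    #include <math.h>
    #include <stdint.h>

    /* Zip two mono float planes into one interleaved int16 stereo buffer. */
    static void float_to_int16_interleave2_ref(int16_t *dst, const float **src,
                                               long len)
    {
        long i;
        for (i = 0; i < len; i++) {
            int l = (int)lrintf(src[0][i]);
            int r = (int)lrintf(src[1][i]);
            dst[2*i  ] = l < -32768 ? -32768 : l > 32767 ? 32767 : l;
            dst[2*i+1] = r < -32768 ? -32768 : r > 32767 ? 32767 : r;
        }
    }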
diff --git a/libavcodec/x86/h264_weight.asm b/libavcodec/x86/h264_weight.asm
index bb0af86097..cc96cb1f3b 100644
--- a/libavcodec/x86/h264_weight.asm
+++ b/libavcodec/x86/h264_weight.asm
@@ -28,21 +28,20 @@ SECTION .text
;-----------------------------------------------------------------------------
; biweight pred:
;
-; void h264_biweight_16x16_sse2(uint8_t *dst, uint8_t *src, int stride,
-; int log2_denom, int weightd, int weights,
-; int offset);
+; void h264_biweight_16_sse2(uint8_t *dst, uint8_t *src, int stride,
+; int height, int log2_denom, int weightd,
+; int weights, int offset);
; and
-; void h264_weight_16x16_sse2(uint8_t *dst, int stride,
-; int log2_denom, int weight,
-; int offset);
+; void h264_weight_16_sse2(uint8_t *dst, int stride, int height,
+; int log2_denom, int weight, int offset);
;-----------------------------------------------------------------------------
%macro WEIGHT_SETUP 0
- add r4, r4
- inc r4
- movd m3, r3d
- movd m5, r4d
- movd m6, r2d
+ add r5, r5
+ inc r5
+ movd m3, r4d
+ movd m5, r5d
+ movd m6, r3d
pslld m5, m6
psrld m5, 1
%if mmsize == 16
@@ -71,60 +70,41 @@ SECTION .text
packuswb m0, m1
%endmacro
-%macro WEIGHT_FUNC_DBL_MM 1
-cglobal h264_weight_16x%1_mmx2, 5, 5, 0
+INIT_MMX
+cglobal h264_weight_16_mmx2, 6, 6, 0
WEIGHT_SETUP
- mov r2, %1
-%if %1 == 16
.nextrow
WEIGHT_OP 0, 4
mova [r0 ], m0
WEIGHT_OP 8, 12
mova [r0+8], m0
add r0, r1
- dec r2
+ dec r2d
jnz .nextrow
REP_RET
-%else
- jmp mangle(ff_h264_weight_16x16_mmx2.nextrow)
-%endif
-%endmacro
-INIT_MMX
-WEIGHT_FUNC_DBL_MM 16
-WEIGHT_FUNC_DBL_MM 8
-
-%macro WEIGHT_FUNC_MM 4
-cglobal h264_weight_%1x%2_%4, 7, 7, %3
+%macro WEIGHT_FUNC_MM 3
+cglobal h264_weight_%1_%3, 6, 6, %2
WEIGHT_SETUP
- mov r2, %2
-%if %2 == 16
.nextrow
WEIGHT_OP 0, mmsize/2
mova [r0], m0
add r0, r1
- dec r2
+ dec r2d
jnz .nextrow
REP_RET
-%else
- jmp mangle(ff_h264_weight_%1x16_%4.nextrow)
-%endif
%endmacro
INIT_MMX
-WEIGHT_FUNC_MM 8, 16, 0, mmx2
-WEIGHT_FUNC_MM 8, 8, 0, mmx2
-WEIGHT_FUNC_MM 8, 4, 0, mmx2
+WEIGHT_FUNC_MM 8, 0, mmx2
INIT_XMM
-WEIGHT_FUNC_MM 16, 16, 8, sse2
-WEIGHT_FUNC_MM 16, 8, 8, sse2
+WEIGHT_FUNC_MM 16, 8, sse2
-%macro WEIGHT_FUNC_HALF_MM 5
-cglobal h264_weight_%1x%2_%5, 5, 5, %4
+%macro WEIGHT_FUNC_HALF_MM 3
+cglobal h264_weight_%1_%3, 6, 6, %2
WEIGHT_SETUP
- mov r2, %2/2
+ sar r2d, 1
lea r3, [r1*2]
-%if %2 == mmsize
.nextrow
WEIGHT_OP 0, r1
movh [r0], m0
@@ -135,31 +115,34 @@ cglobal h264_weight_%1x%2_%5, 5, 5, %4
movh [r0+r1], m0
%endif
add r0, r3
- dec r2
+ dec r2d
jnz .nextrow
REP_RET
-%else
- jmp mangle(ff_h264_weight_%1x%3_%5.nextrow)
-%endif
%endmacro
INIT_MMX
-WEIGHT_FUNC_HALF_MM 4, 8, 8, 0, mmx2
-WEIGHT_FUNC_HALF_MM 4, 4, 8, 0, mmx2
-WEIGHT_FUNC_HALF_MM 4, 2, 8, 0, mmx2
+WEIGHT_FUNC_HALF_MM 4, 0, mmx2
INIT_XMM
-WEIGHT_FUNC_HALF_MM 8, 16, 16, 8, sse2
-WEIGHT_FUNC_HALF_MM 8, 8, 16, 8, sse2
-WEIGHT_FUNC_HALF_MM 8, 4, 16, 8, sse2
+WEIGHT_FUNC_HALF_MM 8, 8, sse2
%macro BIWEIGHT_SETUP 0
- add r6, 1
- or r6, 1
- add r3, 1
- movd m3, r4d
- movd m4, r5d
- movd m5, r6d
- movd m6, r3d
+%ifdef ARCH_X86_64
+%define off_regd r11d
+%else
+%define off_regd r3d
+%endif
+ mov off_regd, r7m
+ add off_regd, 1
+ or off_regd, 1
+ add r4, 1
+ movd m3, r5d
+ movd m4, r6d
+ movd m5, off_regd
+ movd m6, r4d
pslld m5, m6
psrld m5, 1
%if mmsize == 16
@@ -195,11 +178,10 @@ WEIGHT_FUNC_HALF_MM 8, 4, 16, 8, sse2
packuswb m0, m1
%endmacro
-%macro BIWEIGHT_FUNC_DBL_MM 1
-cglobal h264_biweight_16x%1_mmx2, 7, 7, 0
+INIT_MMX
+cglobal h264_biweight_16_mmx2, 7, 7, 0
BIWEIGHT_SETUP
- mov r3, %1
-%if %1 == 16
+ movifnidn r3d, r3m
.nextrow
BIWEIGHT_STEPA 0, 1, 0
BIWEIGHT_STEPA 1, 2, 4
@@ -211,23 +193,14 @@ cglobal h264_biweight_16x%1_mmx2, 7, 7, 0
mova [r0+8], m0
add r0, r2
add r1, r2
- dec r3
+ dec r3d
jnz .nextrow
REP_RET
-%else
- jmp mangle(ff_h264_biweight_16x16_mmx2.nextrow)
-%endif
-%endmacro
-INIT_MMX
-BIWEIGHT_FUNC_DBL_MM 16
-BIWEIGHT_FUNC_DBL_MM 8
-
-%macro BIWEIGHT_FUNC_MM 4
-cglobal h264_biweight_%1x%2_%4, 7, 7, %3
+%macro BIWEIGHT_FUNC_MM 3
+cglobal h264_biweight_%1_%3, 7, 7, %2
BIWEIGHT_SETUP
- mov r3, %2
-%if %2 == 16
+ movifnidn r3d, r3m
.nextrow
BIWEIGHT_STEPA 0, 1, 0
BIWEIGHT_STEPA 1, 2, mmsize/2
@@ -235,28 +208,22 @@ cglobal h264_biweight_%1x%2_%4, 7, 7, %3
mova [r0], m0
add r0, r2
add r1, r2
- dec r3
+ dec r3d
jnz .nextrow
REP_RET
-%else
- jmp mangle(ff_h264_biweight_%1x16_%4.nextrow)
-%endif
%endmacro
INIT_MMX
-BIWEIGHT_FUNC_MM 8, 16, 0, mmx2
-BIWEIGHT_FUNC_MM 8, 8, 0, mmx2
-BIWEIGHT_FUNC_MM 8, 4, 0, mmx2
+BIWEIGHT_FUNC_MM 8, 0, mmx2
INIT_XMM
-BIWEIGHT_FUNC_MM 16, 16, 8, sse2
-BIWEIGHT_FUNC_MM 16, 8, 8, sse2
+BIWEIGHT_FUNC_MM 16, 8, sse2
-%macro BIWEIGHT_FUNC_HALF_MM 5
-cglobal h264_biweight_%1x%2_%5, 7, 7, %4
+%macro BIWEIGHT_FUNC_HALF_MM 3
+cglobal h264_biweight_%1_%3, 7, 7, %2
BIWEIGHT_SETUP
- mov r3, %2/2
+ movifnidn r3d, r3m
+ sar r3, 1
lea r4, [r2*2]
-%if %2 == mmsize
.nextrow
BIWEIGHT_STEPA 0, 1, 0
BIWEIGHT_STEPA 1, 2, r2
@@ -270,31 +237,30 @@ cglobal h264_biweight_%1x%2_%5, 7, 7, %4
%endif
add r0, r4
add r1, r4
- dec r3
+ dec r3d
jnz .nextrow
REP_RET
-%else
- jmp mangle(ff_h264_biweight_%1x%3_%5.nextrow)
-%endif
%endmacro
INIT_MMX
-BIWEIGHT_FUNC_HALF_MM 4, 8, 8, 0, mmx2
-BIWEIGHT_FUNC_HALF_MM 4, 4, 8, 0, mmx2
-BIWEIGHT_FUNC_HALF_MM 4, 2, 8, 0, mmx2
+BIWEIGHT_FUNC_HALF_MM 4, 0, mmx2
INIT_XMM
-BIWEIGHT_FUNC_HALF_MM 8, 16, 16, 8, sse2
-BIWEIGHT_FUNC_HALF_MM 8, 8, 16, 8, sse2
-BIWEIGHT_FUNC_HALF_MM 8, 4, 16, 8, sse2
+BIWEIGHT_FUNC_HALF_MM 8, 8, sse2
%macro BIWEIGHT_SSSE3_SETUP 0
- add r6, 1
- or r6, 1
- add r3, 1
- movd m4, r4d
- movd m0, r5d
- movd m5, r6d
- movd m6, r3d
+%ifdef ARCH_X86_64
+%define off_regd r11d
+%else
+%define off_regd r3d
+%endif
+ mov off_regd, r7m
+ add off_regd, 1
+ or off_regd, 1
+ add r4, 1
+ movd m4, r5d
+ movd m0, r6d
+ movd m5, off_regd
+ movd m6, r4d
pslld m5, m6
psrld m5, 1
punpcklbw m4, m0
@@ -314,12 +280,11 @@ BIWEIGHT_FUNC_HALF_MM 8, 4, 16, 8, sse2
packuswb m0, m2
%endmacro
-%macro BIWEIGHT_SSSE3_16 1
-cglobal h264_biweight_16x%1_ssse3, 7, 7, 8
+INIT_XMM
+cglobal h264_biweight_16_ssse3, 7, 7, 8
BIWEIGHT_SSSE3_SETUP
- mov r3, %1
+ movifnidn r3d, r3m
-%if %1 == 16
.nextrow
movh m0, [r0]
movh m2, [r0+8]
@@ -330,25 +295,17 @@ cglobal h264_biweight_16x%1_ssse3, 7, 7, 8
mova [r0], m0
add r0, r2
add r1, r2
- dec r3
+ dec r3d
jnz .nextrow
REP_RET
-%else
- jmp mangle(ff_h264_biweight_16x16_ssse3.nextrow)
-%endif
-%endmacro
INIT_XMM
-BIWEIGHT_SSSE3_16 16
-BIWEIGHT_SSSE3_16 8
-
-%macro BIWEIGHT_SSSE3_8 1
-cglobal h264_biweight_8x%1_ssse3, 7, 7, 8
+cglobal h264_biweight_8_ssse3, 7, 7, 8
BIWEIGHT_SSSE3_SETUP
- mov r3, %1/2
+ movifnidn r3d, r3m
+ sar r3, 1
lea r4, [r2*2]
-%if %1 == 16
.nextrow
movh m0, [r0]
movh m1, [r1]
@@ -361,15 +318,6 @@ cglobal h264_biweight_8x%1_ssse3, 7, 7, 8
movhps [r0+r2], m0
add r0, r4
add r1, r4
- dec r3
+ dec r3d
jnz .nextrow
REP_RET
-%else
- jmp mangle(ff_h264_biweight_8x16_ssse3.nextrow)
-%endif
-%endmacro
-
-INIT_XMM
-BIWEIGHT_SSSE3_8 16
-BIWEIGHT_SSSE3_8 8
-BIWEIGHT_SSSE3_8 4
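
All the 8-bit kernels above implement H.264 explicit weighted prediction; WEIGHT_SETUP precomputes ((2*offset+1) << log2_denom) >> 1 so that a single add before the shift applies both the offset and round-to-nearest. Per pixel that amounts to the following (a sketch of the formula as read from the setup code, not FFmpeg's C fallback):

    #include <stdint.h>

    static uint8_t weight_pixel(uint8_t pix, int weight, int offset,
                                int log2_denom)
    {
        int bias = ((2 * offset + 1) << log2_denom) >> 1; /* offset + rounding */
        int v    = (pix * weight + bias) >> log2_denom;
        return v < 0 ? 0 : v > 255 ? 255 : v;             /* packuswb clamp */
    }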
diff --git a/libavcodec/x86/h264_weight_10bit.asm b/libavcodec/x86/h264_weight_10bit.asm
index 1c58d72d94..20df6fbab5 100644
--- a/libavcodec/x86/h264_weight_10bit.asm
+++ b/libavcodec/x86/h264_weight_10bit.asm
@@ -36,33 +36,26 @@ cextern pw_1
SECTION .text
;-----------------------------------------------------------------------------
-; void h264_weight(uint8_t *dst, int stride, int log2_denom,
+; void h264_weight(uint8_t *dst, int stride, int height, int log2_denom,
; int weight, int offset);
;-----------------------------------------------------------------------------
-%ifdef ARCH_X86_32
-DECLARE_REG_TMP 2
-%else
-DECLARE_REG_TMP 10
-%endif
-
-%macro WEIGHT_PROLOGUE 1
- mov t0, %1
+%macro WEIGHT_PROLOGUE 0
.prologue
- PROLOGUE 0,5,8
+ PROLOGUE 0,6,8
movifnidn r0, r0mp
movifnidn r1d, r1m
- movifnidn r3d, r3m
movifnidn r4d, r4m
+ movifnidn r5d, r5m
%endmacro
%macro WEIGHT_SETUP 1
mova m0, [pw_1]
- movd m2, r2m
+ movd m2, r3m
pslld m0, m2 ; 1<<log2_denom
SPLATW m0, m0
- shl r4, 19 ; *8, move to upper half of dword
- lea r4, [r4+r3*2+0x10000]
- movd m3, r4d ; weight<<1 | 1+(offset<<(3))
+ shl r5, 19 ; *8, move to upper half of dword
+ lea r5, [r5+r4*2+0x10000]
+ movd m3, r5d ; weight<<1 | 1+(offset<<(3))
pshufd m3, m3, 0
mova m4, [pw_pixel_max]
paddw m2, [sq_1] ; log2_denom+1
@@ -96,8 +89,8 @@ DECLARE_REG_TMP 10
%endmacro
%macro WEIGHT_FUNC_DBL 1
-cglobal h264_weight_16x16_10_%1
- WEIGHT_PROLOGUE 16
+cglobal h264_weight_16_10_%1
+ WEIGHT_PROLOGUE
WEIGHT_SETUP %1
.nextrow
WEIGHT_OP %1, 0
@@ -105,13 +98,9 @@ cglobal h264_weight_16x16_10_%1
WEIGHT_OP %1, 16
mova [r0+16], m5
add r0, r1
- dec t0
+ dec r2d
jnz .nextrow
REP_RET
-
-cglobal h264_weight_16x8_10_%1
- mov t0, 8
- jmp mangle(ff_h264_weight_16x16_10_%1.prologue)
%endmacro
INIT_XMM
@@ -120,24 +109,16 @@ WEIGHT_FUNC_DBL sse4
%macro WEIGHT_FUNC_MM 1
-cglobal h264_weight_8x16_10_%1
- WEIGHT_PROLOGUE 16
+cglobal h264_weight_8_10_%1
+ WEIGHT_PROLOGUE
WEIGHT_SETUP %1
.nextrow
WEIGHT_OP %1, 0
mova [r0], m5
add r0, r1
- dec t0
+ dec r2d
jnz .nextrow
REP_RET
-
-cglobal h264_weight_8x8_10_%1
- mov t0, 8
- jmp mangle(ff_h264_weight_8x16_10_%1.prologue)
-
-cglobal h264_weight_8x4_10_%1
- mov t0, 4
- jmp mangle(ff_h264_weight_8x16_10_%1.prologue)
%endmacro
INIT_XMM
@@ -146,8 +127,9 @@ WEIGHT_FUNC_MM sse4
%macro WEIGHT_FUNC_HALF_MM 1
-cglobal h264_weight_4x8_10_%1
- WEIGHT_PROLOGUE 4
+cglobal h264_weight_4_10_%1
+ WEIGHT_PROLOGUE
+ sar r2d, 1
WEIGHT_SETUP %1
lea r3, [r1*2]
.nextrow
@@ -155,17 +137,9 @@ cglobal h264_weight_4x8_10_%1
movh [r0], m5
movhps [r0+r1], m5
add r0, r3
- dec t0
+ dec r2d
jnz .nextrow
REP_RET
-
-cglobal h264_weight_4x4_10_%1
- mov t0, 2
- jmp mangle(ff_h264_weight_4x8_10_%1.prologue)
-
-cglobal h264_weight_4x2_10_%1
- mov t0, 1
- jmp mangle(ff_h264_weight_4x8_10_%1.prologue)
%endmacro
INIT_XMM
@@ -174,40 +148,40 @@ WEIGHT_FUNC_HALF_MM sse4
;-----------------------------------------------------------------------------
-; void h264_biweight(uint8_t *dst, uint8_t *src, int stride, int log2_denom,
-; int weightd, int weights, int offset);
+; void h264_biweight(uint8_t *dst, uint8_t *src, int stride, int height,
+; int log2_denom, int weightd, int weights, int offset);
;-----------------------------------------------------------------------------
%ifdef ARCH_X86_32
-DECLARE_REG_TMP 2,3
+DECLARE_REG_TMP 3
%else
-DECLARE_REG_TMP 10,2
+DECLARE_REG_TMP 10
%endif
-%macro BIWEIGHT_PROLOGUE 1
- mov t0, %1
+%macro BIWEIGHT_PROLOGUE 0
.prologue
PROLOGUE 0,7,8
movifnidn r0, r0mp
movifnidn r1, r1mp
- movifnidn t1d, r2m
- movifnidn r4d, r4m
+ movifnidn r2d, r2m
movifnidn r5d, r5m
movifnidn r6d, r6m
+ movifnidn t0d, r7m
%endmacro
%macro BIWEIGHT_SETUP 1
- lea r6, [r6*4+1] ; (offset<<2)+1
- or r6, 1
- shl r5, 16
- or r4, r5
- movd m4, r4d ; weightd | weights
- movd m5, r6d ; (offset+1)|1
- movd m6, r3m ; log2_denom
+ lea t0, [t0*4+1] ; (offset<<2)+1
+ or t0, 1
+ shl r6, 16
+ or r5, r6
+ movd m4, r5d ; weightd | weights
+ movd m5, t0d ; (offset+1)|1
+ movd m6, r4m ; log2_denom
pslld m5, m6 ; (((offset<<2)+1)|1)<<log2_denom
paddd m6, [sq_1]
pshufd m4, m4, 0
pshufd m5, m5, 0
mova m3, [pw_pixel_max]
+ movifnidn r3d, r3m
%ifnidn %1, sse4
pxor m7, m7
%endif
@@ -243,23 +217,19 @@ DECLARE_REG_TMP 10,2
%endmacro
%macro BIWEIGHT_FUNC_DBL 1
-cglobal h264_biweight_16x16_10_%1
- BIWEIGHT_PROLOGUE 16
+cglobal h264_biweight_16_10_%1
+ BIWEIGHT_PROLOGUE
BIWEIGHT_SETUP %1
.nextrow
BIWEIGHT %1, 0
mova [r0 ], m0
BIWEIGHT %1, 16
mova [r0+16], m0
- add r0, t1
- add r1, t1
- dec t0
+ add r0, r2
+ add r1, r2
+ dec r3d
jnz .nextrow
REP_RET
-
-cglobal h264_biweight_16x8_10_%1
- mov t0, 8
- jmp mangle(ff_h264_biweight_16x16_10_%1.prologue)
%endmacro
INIT_XMM
@@ -267,25 +237,17 @@ BIWEIGHT_FUNC_DBL sse2
BIWEIGHT_FUNC_DBL sse4
%macro BIWEIGHT_FUNC 1
-cglobal h264_biweight_8x16_10_%1
- BIWEIGHT_PROLOGUE 16
+cglobal h264_biweight_8_10_%1
+ BIWEIGHT_PROLOGUE
BIWEIGHT_SETUP %1
.nextrow
BIWEIGHT %1, 0
mova [r0], m0
- add r0, t1
- add r1, t1
- dec t0
+ add r0, r2
+ add r1, r2
+ dec r3d
jnz .nextrow
REP_RET
-
-cglobal h264_biweight_8x8_10_%1
- mov t0, 8
- jmp mangle(ff_h264_biweight_8x16_10_%1.prologue)
-
-cglobal h264_biweight_8x4_10_%1
- mov t0, 4
- jmp mangle(ff_h264_biweight_8x16_10_%1.prologue)
%endmacro
INIT_XMM
@@ -293,27 +255,20 @@ BIWEIGHT_FUNC sse2
BIWEIGHT_FUNC sse4
%macro BIWEIGHT_FUNC_HALF 1
-cglobal h264_biweight_4x8_10_%1
- BIWEIGHT_PROLOGUE 4
+cglobal h264_biweight_4_10_%1
+ BIWEIGHT_PROLOGUE
BIWEIGHT_SETUP %1
- lea r4, [t1*2]
+ sar r3d, 1
+ lea r4, [r2*2]
.nextrow
- BIWEIGHT %1, 0, t1
+ BIWEIGHT %1, 0, r2
movh [r0 ], m0
- movhps [r0+t1], m0
+ movhps [r0+r2], m0
add r0, r4
add r1, r4
- dec t0
+ dec r3d
jnz .nextrow
REP_RET
-
-cglobal h264_biweight_4x4_10_%1
- mov t0, 2
- jmp mangle(ff_h264_biweight_4x8_10_%1.prologue)
-
-cglobal h264_biweight_4x2_10_%1
- mov t0, 1
- jmp mangle(ff_h264_biweight_4x8_10_%1.prologue)
%endmacro
INIT_XMM
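
The 10-bit WEIGHT_SETUP packs weight<<1 into the low word and (offset<<3)+1 into the high word of one dword (the shl/lea pair above), so a single pmaddwd against the pair (pixel, 1<<log2_denom) yields pix*2*weight + ((offset<<3)+1)<<log2_denom, and the final arithmetic shift by log2_denom+1 restores the scale. Per pixel (a sketch of that reading; 10-bit offsets arrive pre-scaled by 4):

    #include <stdint.h>

    static uint16_t weight_pixel_10(uint16_t pix, int weight, int offset,
                                    int log2_denom)
    {
        /* pmaddwd pairs (pix, 1<<log2_denom) with (2*weight, 8*offset+1) */
        int v = (2 * pix * weight + ((8 * offset + 1) << log2_denom))
                >> (log2_denom + 1);
        return v < 0 ? 0 : v > 1023 ? 1023 : v;  /* pw_pixel_max clamp */
    }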
diff --git a/libavcodec/x86/h264dsp_mmx.c b/libavcodec/x86/h264dsp_mmx.c
index 71beb262c9..b337462aec 100644
--- a/libavcodec/x86/h264dsp_mmx.c
+++ b/libavcodec/x86/h264dsp_mmx.c
@@ -298,63 +298,53 @@ LF_IFUNC(v, luma_intra, 10, mmxext)
/***********************************/
/* weighted prediction */
-#define H264_WEIGHT(W, H, OPT) \
-void ff_h264_weight_ ## W ## x ## H ## _ ## OPT(uint8_t *dst, \
- int stride, int log2_denom, int weight, int offset);
+#define H264_WEIGHT(W, OPT) \
+void ff_h264_weight_ ## W ## _ ## OPT(uint8_t *dst, \
+ int stride, int height, int log2_denom, int weight, int offset);
-#define H264_BIWEIGHT(W, H, OPT) \
-void ff_h264_biweight_ ## W ## x ## H ## _ ## OPT(uint8_t *dst, \
- uint8_t *src, int stride, int log2_denom, int weightd, \
+#define H264_BIWEIGHT(W, OPT) \
+void ff_h264_biweight_ ## W ## _ ## OPT(uint8_t *dst, \
+ uint8_t *src, int stride, int height, int log2_denom, int weightd, \
int weights, int offset);
-#define H264_BIWEIGHT_MMX(W,H) \
-H264_WEIGHT (W, H, mmx2) \
-H264_BIWEIGHT(W, H, mmx2)
-
-#define H264_BIWEIGHT_MMX_SSE(W,H) \
-H264_BIWEIGHT_MMX(W, H) \
-H264_WEIGHT (W, H, sse2) \
-H264_BIWEIGHT (W, H, sse2) \
-H264_BIWEIGHT (W, H, ssse3)
-
-H264_BIWEIGHT_MMX_SSE(16, 16)
-H264_BIWEIGHT_MMX_SSE(16, 8)
-H264_BIWEIGHT_MMX_SSE( 8, 16)
-H264_BIWEIGHT_MMX_SSE( 8, 8)
-H264_BIWEIGHT_MMX_SSE( 8, 4)
-H264_BIWEIGHT_MMX ( 4, 8)
-H264_BIWEIGHT_MMX ( 4, 4)
-H264_BIWEIGHT_MMX ( 4, 2)
-
-#define H264_WEIGHT_10(W, H, DEPTH, OPT) \
-void ff_h264_weight_ ## W ## x ## H ## _ ## DEPTH ## _ ## OPT(uint8_t *dst, \
- int stride, int log2_denom, int weight, int offset);
-
-#define H264_BIWEIGHT_10(W, H, DEPTH, OPT) \
-void ff_h264_biweight_ ## W ## x ## H ## _ ## DEPTH ## _ ## OPT \
- (uint8_t *dst, uint8_t *src, int stride, int log2_denom, \
+#define H264_BIWEIGHT_MMX(W) \
+H264_WEIGHT (W, mmx2) \
+H264_BIWEIGHT(W, mmx2)
+
+#define H264_BIWEIGHT_MMX_SSE(W) \
+H264_BIWEIGHT_MMX(W) \
+H264_WEIGHT (W, sse2) \
+H264_BIWEIGHT (W, sse2) \
+H264_BIWEIGHT (W, ssse3)
+
+H264_BIWEIGHT_MMX_SSE(16)
+H264_BIWEIGHT_MMX_SSE( 8)
+H264_BIWEIGHT_MMX ( 4)
+
+#define H264_WEIGHT_10(W, DEPTH, OPT) \
+void ff_h264_weight_ ## W ## _ ## DEPTH ## _ ## OPT(uint8_t *dst, \
+ int stride, int height, int log2_denom, int weight, int offset);
+
+#define H264_BIWEIGHT_10(W, DEPTH, OPT) \
+void ff_h264_biweight_ ## W ## _ ## DEPTH ## _ ## OPT \
+ (uint8_t *dst, uint8_t *src, int stride, int height, int log2_denom, \
int weightd, int weights, int offset);
-#define H264_BIWEIGHT_10_SSE(W, H, DEPTH) \
-H264_WEIGHT_10 (W, H, DEPTH, sse2) \
-H264_WEIGHT_10 (W, H, DEPTH, sse4) \
-H264_BIWEIGHT_10(W, H, DEPTH, sse2) \
-H264_BIWEIGHT_10(W, H, DEPTH, sse4)
-
-H264_BIWEIGHT_10_SSE(16, 16, 10)
-H264_BIWEIGHT_10_SSE(16, 8, 10)
-H264_BIWEIGHT_10_SSE( 8, 16, 10)
-H264_BIWEIGHT_10_SSE( 8, 8, 10)
-H264_BIWEIGHT_10_SSE( 8, 4, 10)
-H264_BIWEIGHT_10_SSE( 4, 8, 10)
-H264_BIWEIGHT_10_SSE( 4, 4, 10)
-H264_BIWEIGHT_10_SSE( 4, 2, 10)
+#define H264_BIWEIGHT_10_SSE(W, DEPTH) \
+H264_WEIGHT_10 (W, DEPTH, sse2) \
+H264_WEIGHT_10 (W, DEPTH, sse4) \
+H264_BIWEIGHT_10(W, DEPTH, sse2) \
+H264_BIWEIGHT_10(W, DEPTH, sse4)
+
+H264_BIWEIGHT_10_SSE(16, 10)
+H264_BIWEIGHT_10_SSE( 8, 10)
+H264_BIWEIGHT_10_SSE( 4, 10)
void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
{
int mm_flags = av_get_cpu_flags();
- if (mm_flags & AV_CPU_FLAG_MMX2) {
+ if (chroma_format_idc == 1 && mm_flags & AV_CPU_FLAG_MMX2) {
c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2;
}
@@ -394,23 +384,13 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth, const int chrom
c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_mmxext;
c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_mmxext;
#endif
- c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
- c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
- c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
- c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
- c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
- c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
- c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
- c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;
-
- c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
- c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
- c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
- c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
- c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
- c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
- c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
- c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;
+ c->weight_h264_pixels_tab[0]= ff_h264_weight_16_mmx2;
+ c->weight_h264_pixels_tab[1]= ff_h264_weight_8_mmx2;
+ c->weight_h264_pixels_tab[2]= ff_h264_weight_4_mmx2;
+
+ c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16_mmx2;
+ c->biweight_h264_pixels_tab[1]= ff_h264_biweight_8_mmx2;
+ c->biweight_h264_pixels_tab[2]= ff_h264_biweight_4_mmx2;
if (mm_flags&AV_CPU_FLAG_SSE2) {
c->h264_idct8_add = ff_h264_idct8_add_8_sse2;
@@ -422,17 +402,11 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth, const int chrom
c->h264_idct_add16intra = ff_h264_idct_add16intra_8_sse2;
c->h264_luma_dc_dequant_idct= ff_h264_luma_dc_dequant_idct_sse2;
- c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_sse2;
- c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_sse2;
- c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_sse2;
- c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_sse2;
- c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_sse2;
+ c->weight_h264_pixels_tab[0]= ff_h264_weight_16_sse2;
+ c->weight_h264_pixels_tab[1]= ff_h264_weight_8_sse2;
- c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_sse2;
- c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_sse2;
- c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_sse2;
- c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_sse2;
- c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_sse2;
+ c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16_sse2;
+ c->biweight_h264_pixels_tab[1]= ff_h264_biweight_8_sse2;
#if HAVE_ALIGNED_STACK
c->h264_v_loop_filter_luma = ff_deblock_v_luma_8_sse2;
@@ -442,11 +416,8 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth, const int chrom
#endif
}
if (mm_flags&AV_CPU_FLAG_SSSE3) {
- c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_ssse3;
- c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_ssse3;
- c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_ssse3;
- c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_ssse3;
- c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_ssse3;
+ c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16_ssse3;
+ c->biweight_h264_pixels_tab[1]= ff_h264_biweight_8_ssse3;
}
if (HAVE_AVX && mm_flags&AV_CPU_FLAG_AVX) {
#if HAVE_ALIGNED_STACK
@@ -485,23 +456,13 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth, const int chrom
c->h264_idct8_add4 = ff_h264_idct8_add4_10_sse2;
#endif
- c->weight_h264_pixels_tab[0] = ff_h264_weight_16x16_10_sse2;
- c->weight_h264_pixels_tab[1] = ff_h264_weight_16x8_10_sse2;
- c->weight_h264_pixels_tab[2] = ff_h264_weight_8x16_10_sse2;
- c->weight_h264_pixels_tab[3] = ff_h264_weight_8x8_10_sse2;
- c->weight_h264_pixels_tab[4] = ff_h264_weight_8x4_10_sse2;
- c->weight_h264_pixels_tab[5] = ff_h264_weight_4x8_10_sse2;
- c->weight_h264_pixels_tab[6] = ff_h264_weight_4x4_10_sse2;
- c->weight_h264_pixels_tab[7] = ff_h264_weight_4x2_10_sse2;
-
- c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16x16_10_sse2;
- c->biweight_h264_pixels_tab[1] = ff_h264_biweight_16x8_10_sse2;
- c->biweight_h264_pixels_tab[2] = ff_h264_biweight_8x16_10_sse2;
- c->biweight_h264_pixels_tab[3] = ff_h264_biweight_8x8_10_sse2;
- c->biweight_h264_pixels_tab[4] = ff_h264_biweight_8x4_10_sse2;
- c->biweight_h264_pixels_tab[5] = ff_h264_biweight_4x8_10_sse2;
- c->biweight_h264_pixels_tab[6] = ff_h264_biweight_4x4_10_sse2;
- c->biweight_h264_pixels_tab[7] = ff_h264_biweight_4x2_10_sse2;
+ c->weight_h264_pixels_tab[0] = ff_h264_weight_16_10_sse2;
+ c->weight_h264_pixels_tab[1] = ff_h264_weight_8_10_sse2;
+ c->weight_h264_pixels_tab[2] = ff_h264_weight_4_10_sse2;
+
+ c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16_10_sse2;
+ c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_10_sse2;
+ c->biweight_h264_pixels_tab[2] = ff_h264_biweight_4_10_sse2;
c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_10_sse2;
c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_10_sse2;
@@ -513,23 +474,13 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth, const int chrom
#endif
}
if (mm_flags&AV_CPU_FLAG_SSE4) {
- c->weight_h264_pixels_tab[0] = ff_h264_weight_16x16_10_sse4;
- c->weight_h264_pixels_tab[1] = ff_h264_weight_16x8_10_sse4;
- c->weight_h264_pixels_tab[2] = ff_h264_weight_8x16_10_sse4;
- c->weight_h264_pixels_tab[3] = ff_h264_weight_8x8_10_sse4;
- c->weight_h264_pixels_tab[4] = ff_h264_weight_8x4_10_sse4;
- c->weight_h264_pixels_tab[5] = ff_h264_weight_4x8_10_sse4;
- c->weight_h264_pixels_tab[6] = ff_h264_weight_4x4_10_sse4;
- c->weight_h264_pixels_tab[7] = ff_h264_weight_4x2_10_sse4;
-
- c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16x16_10_sse4;
- c->biweight_h264_pixels_tab[1] = ff_h264_biweight_16x8_10_sse4;
- c->biweight_h264_pixels_tab[2] = ff_h264_biweight_8x16_10_sse4;
- c->biweight_h264_pixels_tab[3] = ff_h264_biweight_8x8_10_sse4;
- c->biweight_h264_pixels_tab[4] = ff_h264_biweight_8x4_10_sse4;
- c->biweight_h264_pixels_tab[5] = ff_h264_biweight_4x8_10_sse4;
- c->biweight_h264_pixels_tab[6] = ff_h264_biweight_4x4_10_sse4;
- c->biweight_h264_pixels_tab[7] = ff_h264_biweight_4x2_10_sse4;
+ c->weight_h264_pixels_tab[0] = ff_h264_weight_16_10_sse4;
+ c->weight_h264_pixels_tab[1] = ff_h264_weight_8_10_sse4;
+ c->weight_h264_pixels_tab[2] = ff_h264_weight_4_10_sse4;
+
+ c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16_10_sse4;
+ c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_10_sse4;
+ c->biweight_h264_pixels_tab[2] = ff_h264_biweight_4_10_sse4;
}
#if HAVE_AVX
if (mm_flags&AV_CPU_FLAG_AVX) {
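
Net effect for callers: weight_h264_pixels_tab and biweight_h264_pixels_tab shrink from eight WxH-specific entries to three width-indexed ones (16/8/4), with the block height now a runtime argument. A hypothetical call site under the new convention:

    #include "libavcodec/h264dsp.h"

    /* tab[0] = 16-wide, tab[1] = 8-wide, tab[2] = 4-wide; the height that
     * used to select the function variant is now the third argument. */
    static void apply_weight_8x8(H264DSPContext *dsp, uint8_t *dst, int stride,
                                 int log2_denom, int weight, int offset)
    {
        dsp->weight_h264_pixels_tab[1](dst, stride, 8 /* height */,
                                       log2_denom, weight, offset);
    }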