author    Ronald S. Bultje <rsbultje@gmail.com>	2012-03-01 20:39:49 -0800
committer Ronald S. Bultje <rsbultje@gmail.com>	2012-03-02 10:32:05 -0800
commit    45549339bc072734015eaf6c907e5a5e68a1ccee (patch)
tree      f686e171cc3b4b8a2cd3cc58d71721432cf67224 /libavcodec/x86
parent    bd66f073fe7286bd3c03e608f923577e4768445a (diff)
vp8: disable mmx functions with sse/sse2 counterparts on x86-64.
x86-64 is guaranteed to have at least SSE2, therefore the MMX/MMX2 functions will never be used in practice.
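
The change is purely a compile-time one: each MMX/MMXEXT kernel that has an SSE/SSE2 replacement is wrapped in #if ARCH_X86_32 in the C init code (and %if ARCH_X86_32 in the yasm source), so on x86-64 it is neither assembled nor assigned into the function-pointer tables. A condensed sketch of the pattern in ff_vp8dsp_init_x86() follows; the real function fills in far more table entries, and the SSE assignment shown here is only illustrative:

/* Sketch of the ARCH_X86_32 guard pattern from vp8dsp-init.c.
 * The MMX/SSE kernel prototypes are declared earlier in the real file;
 * only the shape of the guard is shown here. */
#include "config.h"               /* defines ARCH_X86_32 (0 on x86-64) */
#include "libavutil/attributes.h" /* av_cold */
#include "libavutil/cpu.h"        /* av_get_cpu_flags(), AV_CPU_FLAG_* */
#include "libavcodec/vp8dsp.h"    /* VP8DSPContext */

av_cold void ff_vp8dsp_init_x86(VP8DSPContext *c)
{
    int mm_flags = av_get_cpu_flags();

    if (mm_flags & AV_CPU_FLAG_MMX) {
#if ARCH_X86_32
        /* Compiled and reachable only on 32-bit x86: on x86-64 the SSE
         * assignment below always overrides it, so the MMX kernel would
         * be dead code there. */
        c->vp8_idct_add = ff_vp8_idct_add_mmx;
#endif
    }
    if (mm_flags & AV_CPU_FLAG_SSE) {
        c->vp8_idct_add = ff_vp8_idct_add_sse; /* illustrative; see the diff below */
    }
}
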
Diffstat (limited to 'libavcodec/x86')
-rw-r--r--  libavcodec/x86/vp8dsp-init.c  24
-rw-r--r--  libavcodec/x86/vp8dsp.asm     15
2 files changed, 35 insertions, 4 deletions
diff --git a/libavcodec/x86/vp8dsp-init.c b/libavcodec/x86/vp8dsp-init.c
index f6589da8c4..3e05bb2fb9 100644
--- a/libavcodec/x86/vp8dsp-init.c
+++ b/libavcodec/x86/vp8dsp-init.c
@@ -138,6 +138,7 @@ static void ff_put_vp8_ ## FILTERTYPE ## 8_ ## TAPTYPE ## _ ## OPT( \
dst + 4, dststride, src + 4, srcstride, height, mx, my); \
}
+#if ARCH_X86_32
TAP_W8 (mmxext, epel, h4)
TAP_W8 (mmxext, epel, h6)
TAP_W16(mmxext, epel, h6)
@@ -148,6 +149,7 @@ TAP_W8 (mmxext, bilinear, h)
TAP_W16(mmxext, bilinear, h)
TAP_W8 (mmxext, bilinear, v)
TAP_W16(mmxext, bilinear, v)
+#endif
TAP_W16(sse2, epel, h6)
TAP_W16(sse2, epel, v6)
@@ -173,15 +175,21 @@ static void ff_put_vp8_epel ## SIZE ## _h ## TAPNUMX ## v ## TAPNUMY ## _ ## OPT
dst, dststride, tmpptr, SIZE, height, mx, my); \
}
+#if ARCH_X86_32
#define HVTAPMMX(x, y) \
HVTAP(mmxext, 8, x, y, 4, 8) \
HVTAP(mmxext, 8, x, y, 8, 16)
+HVTAP(mmxext, 8, 6, 6, 16, 16)
+#else
+#define HVTAPMMX(x, y) \
+HVTAP(mmxext, 8, x, y, 4, 8)
+#endif
+
HVTAPMMX(4, 4)
HVTAPMMX(4, 6)
HVTAPMMX(6, 4)
HVTAPMMX(6, 6)
-HVTAP(mmxext, 8, 6, 6, 16, 16)
#define HVTAPSSE2(x, y, w) \
HVTAP(sse2, 16, x, y, w, 16) \
@@ -211,8 +219,10 @@ static void ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT( \
}
HVBILIN(mmxext, 8, 4, 8)
+#if ARCH_X86_32
HVBILIN(mmxext, 8, 8, 16)
HVBILIN(mmxext, 8, 16, 16)
+#endif
HVBILIN(sse2, 8, 8, 16)
HVBILIN(sse2, 8, 16, 16)
HVBILIN(ssse3, 8, 4, 8)
@@ -311,15 +321,18 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c)
if (mm_flags & AV_CPU_FLAG_MMX) {
c->vp8_idct_dc_add = ff_vp8_idct_dc_add_mmx;
- c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_mmx;
c->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_mmx;
+#if ARCH_X86_32
+ c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_mmx;
c->vp8_idct_add = ff_vp8_idct_add_mmx;
c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_mmx;
c->put_vp8_epel_pixels_tab[0][0][0] =
c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_mmx;
+#endif
c->put_vp8_epel_pixels_tab[1][0][0] =
c->put_vp8_bilinear_pixels_tab[1][0][0] = ff_put_vp8_pixels8_mmx;
+#if ARCH_X86_32
c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmx;
c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmx;
@@ -332,17 +345,19 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c)
c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmx;
c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmx;
c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmx;
+#endif
}
/* note that 4-tap width=16 functions are missing because w=16
* is only used for luma, and luma is always a copy or sixtap. */
if (mm_flags & AV_CPU_FLAG_MMX2) {
+ VP8_MC_FUNC(2, 4, mmxext);
+ VP8_BILINEAR_MC_FUNC(2, 4, mmxext);
+#if ARCH_X86_32
VP8_LUMA_MC_FUNC(0, 16, mmxext);
VP8_MC_FUNC(1, 8, mmxext);
- VP8_MC_FUNC(2, 4, mmxext);
VP8_BILINEAR_MC_FUNC(0, 16, mmxext);
VP8_BILINEAR_MC_FUNC(1, 8, mmxext);
- VP8_BILINEAR_MC_FUNC(2, 4, mmxext);
c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmxext;
c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmxext;
@@ -356,6 +371,7 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c)
c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmxext;
c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmxext;
c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmxext;
+#endif
}
if (mm_flags & AV_CPU_FLAG_SSE) {
diff --git a/libavcodec/x86/vp8dsp.asm b/libavcodec/x86/vp8dsp.asm
index 7d9ebc9463..a7b83797ea 100644
--- a/libavcodec/x86/vp8dsp.asm
+++ b/libavcodec/x86/vp8dsp.asm
@@ -865,6 +865,7 @@ cglobal put_vp8_pixels8_mmx, 5,5
jg .nextrow
REP_RET
+%if ARCH_X86_32
cglobal put_vp8_pixels16_mmx, 5,5
.nextrow:
movq mm0, [r2+r3*0+0]
@@ -880,6 +881,7 @@ cglobal put_vp8_pixels16_mmx, 5,5
sub r4d, 2
jg .nextrow
REP_RET
+%endif
cglobal put_vp8_pixels16_sse, 5,5,2
.nextrow:
@@ -973,6 +975,7 @@ cglobal vp8_idct_dc_add_sse4, 3, 3, 6
; void vp8_idct_dc_add4y_<opt>(uint8_t *dst, DCTELEM block[4][16], int stride);
;-----------------------------------------------------------------------------
+%if ARCH_X86_32
INIT_MMX
cglobal vp8_idct_dc_add4y_mmx, 3, 3
; load data
@@ -1007,6 +1010,7 @@ cglobal vp8_idct_dc_add4y_mmx, 3, 3
ADD_DC m0, m6, 0, mova
ADD_DC m1, m7, 8, mova
RET
+%endif
INIT_XMM
cglobal vp8_idct_dc_add4y_sse2, 3, 3, 6
@@ -1152,7 +1156,9 @@ cglobal vp8_idct_add_%1, 3, 3
RET
%endmacro
+%if ARCH_X86_32
VP8_IDCT_ADD mmx
+%endif
VP8_IDCT_ADD sse
;-----------------------------------------------------------------------------
@@ -1217,7 +1223,9 @@ cglobal vp8_luma_dc_wht_%1, 2,3
%endmacro
INIT_MMX
+%if ARCH_X86_32
VP8_DC_WHT mmx
+%endif
VP8_DC_WHT sse
;-----------------------------------------------------------------------------
@@ -1610,6 +1618,7 @@ cglobal vp8_%2_loop_filter_simple_%1, 3, %3, %4
%endif
%endmacro
+%if ARCH_X86_32
INIT_MMX
%define SPLATB_REG SPLATB_REG_MMX
SIMPLE_LOOPFILTER mmx, v, 4, 0
@@ -1617,6 +1626,8 @@ SIMPLE_LOOPFILTER mmx, h, 5, 0
%define SPLATB_REG SPLATB_REG_MMXEXT
SIMPLE_LOOPFILTER mmxext, v, 4, 0
SIMPLE_LOOPFILTER mmxext, h, 5, 0
+%endif
+
INIT_XMM
%define SPLATB_REG SPLATB_REG_SSE2
%define WRITE_8W WRITE_8W_SSE2
@@ -2118,6 +2129,7 @@ cglobal vp8_%2_loop_filter16y_inner_%1, 5, %3, %5
RET
%endmacro
+%if ARCH_X86_32
INIT_MMX
%define SPLATB_REG SPLATB_REG_MMX
INNER_LOOPFILTER mmx, v, 6, 16, 0
@@ -2130,6 +2142,7 @@ INNER_LOOPFILTER mmxext, v, 6, 16, 0
INNER_LOOPFILTER mmxext, h, 6, 16, 0
INNER_LOOPFILTER mmxext, v, 6, 8, 0
INNER_LOOPFILTER mmxext, h, 6, 8, 0
+%endif
INIT_XMM
%define SPLATB_REG SPLATB_REG_SSE2
@@ -2814,6 +2827,7 @@ cglobal vp8_%2_loop_filter16y_mbedge_%1, 5, %3, %5
RET
%endmacro
+%if ARCH_X86_32
INIT_MMX
%define SPLATB_REG SPLATB_REG_MMX
MBEDGE_LOOPFILTER mmx, v, 6, 16, 0
@@ -2826,6 +2840,7 @@ MBEDGE_LOOPFILTER mmxext, v, 6, 16, 0
MBEDGE_LOOPFILTER mmxext, h, 6, 16, 0
MBEDGE_LOOPFILTER mmxext, v, 6, 8, 0
MBEDGE_LOOPFILTER mmxext, h, 6, 8, 0
+%endif
INIT_XMM
%define SPLATB_REG SPLATB_REG_SSE2