Diffstat (limited to 'libavcodec/x86/dsputil_mmx.c')
-rw-r--r--  libavcodec/x86/dsputil_mmx.c  286
1 file changed, 241 insertions(+), 45 deletions(-)
diff --git a/libavcodec/x86/dsputil_mmx.c b/libavcodec/x86/dsputil_mmx.c
index 86a08cb06c..85c88f88b8 100644
--- a/libavcodec/x86/dsputil_mmx.c
+++ b/libavcodec/x86/dsputil_mmx.c
@@ -3,23 +3,23 @@
* Copyright (c) 2000, 2001 Fabrice Bellard
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
- * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
- *
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
*/
#include "libavutil/cpu.h"
@@ -30,6 +30,7 @@
#include "libavcodec/simple_idct.h"
#include "dsputil_mmx.h"
#include "idct_xvid.h"
+#include "diracdsp_mmx.h"
//#undef NDEBUG
//#include <assert.h>
@@ -835,7 +836,7 @@ static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height,
: "+r"(ptr)
: "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
);
- } else {
+ } else if(w==16){
__asm__ volatile (
"1: \n\t"
"movd (%0), %%mm0 \n\t"
@@ -856,6 +857,25 @@ static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height,
: "+r"(ptr)
: "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
);
+ } else {
+ av_assert1(w == 4);
+ __asm__ volatile (
+ "1: \n\t"
+ "movd (%0), %%mm0 \n\t"
+ "punpcklbw %%mm0, %%mm0 \n\t"
+ "punpcklwd %%mm0, %%mm0 \n\t"
+ "movd %%mm0, -4(%0) \n\t"
+ "movd -4(%0, %2), %%mm1 \n\t"
+ "punpcklbw %%mm1, %%mm1 \n\t"
+ "punpckhwd %%mm1, %%mm1 \n\t"
+ "punpckhdq %%mm1, %%mm1 \n\t"
+ "movd %%mm1, (%0, %2) \n\t"
+ "add %1, %0 \n\t"
+ "cmp %3, %0 \n\t"
+ "jb 1b \n\t"
+ : "+r"(ptr)
+ : "r"((x86_reg)wrap), "r"((x86_reg)width), "r"(ptr + wrap * height)
+ );
}
/* top and bottom (and hopefully also the corners) */
@@ -1876,8 +1896,8 @@ static av_always_inline void emulated_edge_mc(uint8_t *buf, const uint8_t *src,
start_x = FFMAX(0, -src_x);
end_y = FFMIN(block_h, h-src_y);
end_x = FFMIN(block_w, w-src_x);
- assert(start_x < end_x && block_w > 0);
- assert(start_y < end_y && block_h > 0);
+ av_assert2(start_x < end_x && block_w > 0);
+ av_assert2(start_y < end_y && block_h > 0);
// fill in the to-be-copied part plus all above/below
src += (src_y_add + start_y) * linesize + start_x;
@@ -1909,10 +1929,15 @@ static av_noinline void emulated_edge_mc_sse(uint8_t *buf, const uint8_t *src,
#if HAVE_INLINE_ASM
-static void gmc_mmx(uint8_t *dst, uint8_t *src,
- int stride, int h, int ox, int oy,
- int dxx, int dxy, int dyx, int dyy,
- int shift, int r, int width, int height)
+typedef void emulated_edge_mc_func(uint8_t *dst, const uint8_t *src,
+ int linesize, int block_w, int block_h,
+ int src_x, int src_y, int w, int h);
+
+static av_always_inline void gmc(uint8_t *dst, uint8_t *src,
+ int stride, int h, int ox, int oy,
+ int dxx, int dxy, int dyx, int dyy,
+ int shift, int r, int width, int height,
+ emulated_edge_mc_func *emu_edge_fn)
{
const int w = 8;
const int ix = ox >> (16 + shift);
@@ -1927,19 +1952,24 @@ static void gmc_mmx(uint8_t *dst, uint8_t *src,
const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys };
const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys };
const uint64_t shift2 = 2 * shift;
+#define MAX_STRIDE 4096U
+#define MAX_H 8U
+ uint8_t edge_buf[(MAX_H + 1) * MAX_STRIDE];
int x, y;
const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
const int dxh = dxy * (h - 1);
const int dyw = dyx * (w - 1);
+ int need_emu = (unsigned)ix >= width - w ||
+ (unsigned)iy >= height - h;
+
if ( // non-constant fullpel offset (3% of blocks)
((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
(oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift)
// uses more than 16 bits of subpel mv (only at huge resolution)
- || (dxx | dxy | dyx | dyy) & 15 ||
- (unsigned)ix >= width - w ||
- (unsigned)iy >= height - h) {
+ || (dxx | dxy | dyx | dyy) & 15
+ || (need_emu && (h > MAX_H || stride > MAX_STRIDE))) {
// FIXME could still use mmx for some of the rows
ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
shift, r, width, height);
@@ -1947,6 +1977,10 @@ static void gmc_mmx(uint8_t *dst, uint8_t *src,
}
src += ix + iy * stride;
+ if (need_emu) {
+ emu_edge_fn(edge_buf, src, stride, w + 1, h + 1, ix, iy, width, height);
+ src = edge_buf;
+ }
__asm__ volatile (
"movd %0, %%mm6 \n\t"
@@ -2025,6 +2059,36 @@ static void gmc_mmx(uint8_t *dst, uint8_t *src,
}
}
+#if HAVE_YASM
+#if ARCH_X86_32
+static void gmc_mmx(uint8_t *dst, uint8_t *src,
+ int stride, int h, int ox, int oy,
+ int dxx, int dxy, int dyx, int dyy,
+ int shift, int r, int width, int height)
+{
+ gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
+ width, height, &emulated_edge_mc_mmx);
+}
+#endif
+static void gmc_sse(uint8_t *dst, uint8_t *src,
+ int stride, int h, int ox, int oy,
+ int dxx, int dxy, int dyx, int dyy,
+ int shift, int r, int width, int height)
+{
+ gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
+ width, height, &emulated_edge_mc_sse);
+}
+#else
+static void gmc_mmx(uint8_t *dst, uint8_t *src,
+ int stride, int h, int ox, int oy,
+ int dxx, int dxy, int dyx, int dyy,
+ int shift, int r, int width, int height)
+{
+ gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
+ width, height, &ff_emulated_edge_mc_8);
+}
+#endif
+
#define PREFETCH(name, op) \
static void name(void *mem, int stride, int h) \
{ \
@@ -2122,6 +2186,116 @@ void ff_avg_vc1_mspel_mc00_mmx2(uint8_t *dst, const uint8_t *src,
avg_pixels8_mmx2(dst, src, stride, 8);
}
+/* only used in VP3/5/6 */
+static void put_vp_no_rnd_pixels8_l2_mmx(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h)
+{
+// START_TIMER
+ MOVQ_BFE(mm6);
+ __asm__ volatile(
+ "1: \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq (%2), %%mm1 \n\t"
+ "movq (%1,%4), %%mm2 \n\t"
+ "movq (%2,%4), %%mm3 \n\t"
+ PAVGBP_MMX_NO_RND(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
+ "movq %%mm4, (%3) \n\t"
+ "movq %%mm5, (%3,%4) \n\t"
+
+ "movq (%1,%4,2), %%mm0 \n\t"
+ "movq (%2,%4,2), %%mm1 \n\t"
+ "movq (%1,%5), %%mm2 \n\t"
+ "movq (%2,%5), %%mm3 \n\t"
+ "lea (%1,%4,4), %1 \n\t"
+ "lea (%2,%4,4), %2 \n\t"
+ PAVGBP_MMX_NO_RND(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
+ "movq %%mm4, (%3,%4,2) \n\t"
+ "movq %%mm5, (%3,%5) \n\t"
+ "lea (%3,%4,4), %3 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+r"(h), "+r"(a), "+r"(b), "+r"(dst)
+ :"r"((x86_reg)stride), "r"((x86_reg)3L*stride)
+ :"memory");
+// STOP_TIMER("put_vp_no_rnd_pixels8_l2_mmx")
+}
+static void put_vp_no_rnd_pixels16_l2_mmx(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h)
+{
+ put_vp_no_rnd_pixels8_l2_mmx(dst, a, b, stride, h);
+ put_vp_no_rnd_pixels8_l2_mmx(dst+8, a+8, b+8, stride, h);
+}
+
+#if CONFIG_DIRAC_DECODER
+#define DIRAC_PIXOP(OPNAME, EXT)\
+void ff_ ## OPNAME ## _dirac_pixels8_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
+{\
+ OPNAME ## _pixels8_ ## EXT(dst, src[0], stride, h);\
+}\
+void ff_ ## OPNAME ## _dirac_pixels16_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
+{\
+ OPNAME ## _pixels16_ ## EXT(dst, src[0], stride, h);\
+}\
+void ff_ ## OPNAME ## _dirac_pixels32_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
+{\
+ OPNAME ## _pixels16_ ## EXT(dst , src[0] , stride, h);\
+ OPNAME ## _pixels16_ ## EXT(dst+16, src[0]+16, stride, h);\
+}
+
+DIRAC_PIXOP(put, mmx)
+DIRAC_PIXOP(avg, mmx)
+DIRAC_PIXOP(avg, mmx2)
+
+void ff_put_dirac_pixels16_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
+{
+ put_pixels16_sse2(dst, src[0], stride, h);
+}
+void ff_avg_dirac_pixels16_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
+{
+ avg_pixels16_sse2(dst, src[0], stride, h);
+}
+void ff_put_dirac_pixels32_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
+{
+ put_pixels16_sse2(dst , src[0] , stride, h);
+ put_pixels16_sse2(dst+16, src[0]+16, stride, h);
+}
+void ff_avg_dirac_pixels32_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
+{
+ avg_pixels16_sse2(dst , src[0] , stride, h);
+ avg_pixels16_sse2(dst+16, src[0]+16, stride, h);
+}
+#endif
+
+/* XXX: Those functions should be suppressed ASAP when all IDCTs are
+ * converted. */
+#if CONFIG_GPL
+static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size,
+ DCTELEM *block)
+{
+ ff_mmx_idct(block);
+ ff_put_pixels_clamped_mmx(block, dest, line_size);
+}
+
+static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size,
+ DCTELEM *block)
+{
+ ff_mmx_idct(block);
+ ff_add_pixels_clamped_mmx(block, dest, line_size);
+}
+
+static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size,
+ DCTELEM *block)
+{
+ ff_mmxext_idct(block);
+ ff_put_pixels_clamped_mmx(block, dest, line_size);
+}
+
+static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size,
+ DCTELEM *block)
+{
+ ff_mmxext_idct(block);
+ ff_add_pixels_clamped_mmx(block, dest, line_size);
+}
+#endif
+
static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
{
int i;
@@ -2413,27 +2587,17 @@ static void dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx, int mm_flags)
SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
SET_HPEL_FUNCS(avg, 1, 8, mmx);
SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);
-
- switch (avctx->idct_algo) {
- case FF_IDCT_AUTO:
- case FF_IDCT_SIMPLEMMX:
- c->idct_put = ff_simple_idct_put_mmx;
- c->idct_add = ff_simple_idct_add_mmx;
- c->idct = ff_simple_idct_mmx;
- c->idct_permutation_type = FF_SIMPLE_IDCT_PERM;
- break;
- case FF_IDCT_XVIDMMX:
- c->idct_put = ff_idct_xvid_mmx_put;
- c->idct_add = ff_idct_xvid_mmx_add;
- c->idct = ff_idct_xvid_mmx;
- break;
- }
}
+#if ARCH_X86_32 || !HAVE_YASM
c->gmc = gmc_mmx;
+#endif
c->add_bytes = add_bytes_mmx;
+ c->put_no_rnd_pixels_l2[0]= put_vp_no_rnd_pixels16_l2_mmx;
+ c->put_no_rnd_pixels_l2[1]= put_vp_no_rnd_pixels8_l2_mmx;
+
if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
c->h263_v_loop_filter = h263_v_loop_filter_mmx;
c->h263_h_loop_filter = h263_h_loop_filter_mmx;
@@ -2493,12 +2657,6 @@ static void dsputil_init_mmx2(DSPContext *c, AVCodecContext *avctx,
}
}
- if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) {
- c->idct_put = ff_idct_xvid_mmx2_put;
- c->idct_add = ff_idct_xvid_mmx2_add;
- c->idct = ff_idct_xvid_mmx2;
- }
-
if (CONFIG_VP3_DECODER && (avctx->codec_id == AV_CODEC_ID_VP3 ||
avctx->codec_id == AV_CODEC_ID_THEORA)) {
c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_mmx2;
@@ -2688,6 +2846,9 @@ static void dsputil_init_sse(DSPContext *c, AVCodecContext *avctx, int mm_flags)
if (!high_bit_depth)
c->emulated_edge_mc = emulated_edge_mc_sse;
+#if HAVE_INLINE_ASM
+ c->gmc = gmc_sse;
+#endif
#endif /* HAVE_YASM */
}
@@ -2724,13 +2885,6 @@ static void dsputil_init_sse2(DSPContext *c, AVCodecContext *avctx,
H264_QPEL_FUNCS(3, 2, sse2);
H264_QPEL_FUNCS(3, 3, sse2);
}
-
- if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) {
- c->idct_put = ff_idct_xvid_sse2_put;
- c->idct_add = ff_idct_xvid_sse2_add;
- c->idct = ff_idct_xvid_sse2;
- c->idct_permutation_type = FF_SSE2_IDCT_PERM;
- }
#endif /* HAVE_INLINE_ASM */
#if HAVE_YASM
@@ -2857,8 +3011,50 @@ void ff_dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx)
c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
#endif
- if (mm_flags & AV_CPU_FLAG_MMX)
+ if (mm_flags & AV_CPU_FLAG_MMX) {
+#if HAVE_INLINE_ASM
+ const int idct_algo = avctx->idct_algo;
+
+ if (avctx->lowres == 0 && avctx->bits_per_raw_sample <= 8) {
+ if (idct_algo == FF_IDCT_AUTO || idct_algo == FF_IDCT_SIMPLEMMX) {
+ c->idct_put = ff_simple_idct_put_mmx;
+ c->idct_add = ff_simple_idct_add_mmx;
+ c->idct = ff_simple_idct_mmx;
+ c->idct_permutation_type = FF_SIMPLE_IDCT_PERM;
+#if CONFIG_GPL
+ } else if (idct_algo == FF_IDCT_LIBMPEG2MMX) {
+ if (mm_flags & AV_CPU_FLAG_MMX2) {
+ c->idct_put = ff_libmpeg2mmx2_idct_put;
+ c->idct_add = ff_libmpeg2mmx2_idct_add;
+ c->idct = ff_mmxext_idct;
+ } else {
+ c->idct_put = ff_libmpeg2mmx_idct_put;
+ c->idct_add = ff_libmpeg2mmx_idct_add;
+ c->idct = ff_mmx_idct;
+ }
+ c->idct_permutation_type = FF_LIBMPEG2_IDCT_PERM;
+#endif
+ } else if (idct_algo == FF_IDCT_XVIDMMX) {
+ if (mm_flags & AV_CPU_FLAG_SSE2) {
+ c->idct_put = ff_idct_xvid_sse2_put;
+ c->idct_add = ff_idct_xvid_sse2_add;
+ c->idct = ff_idct_xvid_sse2;
+ c->idct_permutation_type = FF_SSE2_IDCT_PERM;
+ } else if (mm_flags & AV_CPU_FLAG_MMXEXT) {
+ c->idct_put = ff_idct_xvid_mmx2_put;
+ c->idct_add = ff_idct_xvid_mmx2_add;
+ c->idct = ff_idct_xvid_mmx2;
+ } else {
+ c->idct_put = ff_idct_xvid_mmx_put;
+ c->idct_add = ff_idct_xvid_mmx_add;
+ c->idct = ff_idct_xvid_mmx;
+ }
+ }
+ }
+#endif /* HAVE_INLINE_ASM */
+
dsputil_init_mmx(c, avctx, mm_flags);
+ }
if (mm_flags & AV_CPU_FLAG_MMXEXT)
dsputil_init_mmx2(c, avctx, mm_flags);