From 5fe64d88f67637af6037fa864b1c66e41148597c Mon Sep 17 00:00:00 2001
From: Mans Rullgard
Date: Sun, 2 Sep 2012 16:05:56 +0100
Subject: x86: allow using add_hfyu_median_prediction_cmov on any cpu with cmov

For some reason add_hfyu_median_prediction_cmov is only selected on
3Dnow-capable CPUs, even though it uses no 3Dnow instructions. This
patch allows it to be selected on any CPU with CMOV, with the
possibility of being overridden by the mmxext version.

Signed-off-by: Mans Rullgard
---
 libavcodec/x86/dsputil_mmx.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/libavcodec/x86/dsputil_mmx.c b/libavcodec/x86/dsputil_mmx.c
index 9a3cb4931e..0876ceac9b 100644
--- a/libavcodec/x86/dsputil_mmx.c
+++ b/libavcodec/x86/dsputil_mmx.c
@@ -2717,7 +2717,9 @@ static void dsputil_init_mmx2(DSPContext *c, AVCodecContext *avctx,
         c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_10_mmx2;
     }

-    c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
+    /* slower than cmov version on AMD */
+    if (!(mm_flags & AV_CPU_FLAG_3DNOW))
+        c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;

     c->scalarproduct_int16          = ff_scalarproduct_int16_mmx2;
     c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmx2;
@@ -2794,11 +2796,6 @@ static void dsputil_init_3dnow(DSPContext *c, AVCodecContext *avctx,
     }

     c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
-
-#if HAVE_7REGS
-    if (mm_flags & AV_CPU_FLAG_CMOV)
-        c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
-#endif
 #endif /* HAVE_INLINE_ASM */

 #if HAVE_YASM
@@ -3009,6 +3006,11 @@ void ff_dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx)
 {
     int mm_flags = av_get_cpu_flags();

+#if HAVE_7REGS && HAVE_INLINE_ASM
+    if (mm_flags & AV_CPU_FLAG_CMOV)
+        c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
+#endif
+
     if (mm_flags & AV_CPU_FLAG_MMX) {
 #if HAVE_INLINE_ASM
         const int idct_algo = avctx->idct_algo;
--
cgit v1.2.3
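The dispatch pattern this patch rearranges is ordinary function-pointer selection on runtime CPU feature flags. The standalone sketch below uses hypothetical flag constants and stub functions, not the real libavutil AV_CPU_FLAG_* values or dsputil signatures. It shows only the ordering the patch establishes: the cmov version becomes the baseline for every CMOV-capable CPU, and the later mmxext override is skipped on 3Dnow-capable (AMD) CPUs, where the cmov version is faster.

    #include <stdio.h>

    /* Hypothetical stand-ins for the feature bits returned by
     * av_get_cpu_flags(); the real codec uses AV_CPU_FLAG_*. */
    #define CPU_FLAG_CMOV   (1 << 0)
    #define CPU_FLAG_MMXEXT (1 << 1)
    #define CPU_FLAG_3DNOW  (1 << 2)

    static void add_median_c(void)      { puts("C version"); }
    static void add_median_cmov(void)   { puts("cmov version"); }
    static void add_median_mmxext(void) { puts("mmxext version"); }

    /* Mirrors the selection order after the patch: cmov is installed
     * first for any CMOV-capable CPU, then the mmxext override runs,
     * except on 3Dnow CPUs where the cmov version stays in place. */
    static void (*pick_add_median(int flags))(void)
    {
        void (*fn)(void) = add_median_c;

        if (flags & CPU_FLAG_CMOV)
            fn = add_median_cmov;
        if ((flags & CPU_FLAG_MMXEXT) && !(flags & CPU_FLAG_3DNOW))
            fn = add_median_mmxext;
        return fn;
    }

    int main(void)
    {
        pick_add_median(CPU_FLAG_CMOV)();                  /* cmov   */
        pick_add_median(CPU_FLAG_CMOV | CPU_FLAG_MMXEXT)();/* mmxext */
        pick_add_median(CPU_FLAG_CMOV | CPU_FLAG_MMXEXT |
                        CPU_FLAG_3DNOW)();                 /* cmov   */
        return 0;
    }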
From 6efb698883507b13e90eb9ca03813f928066e5dd Mon Sep 17 00:00:00 2001
From: Mans Rullgard
Date: Tue, 4 Sep 2012 13:52:01 +0100
Subject: cavsdsp: set idct permutation independently of dsputil

CAVS uses its own idct, so using dsputil to set the permutation is
fragile.

Signed-off-by: Mans Rullgard
---
 libavcodec/cavsdec.c         | 3 ++-
 libavcodec/cavsdsp.c         | 1 +
 libavcodec/cavsdsp.h         | 1 +
 libavcodec/x86/cavsdsp.c     | 2 ++
 libavcodec/x86/dsputil_mmx.c | 2 --
 5 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/libavcodec/cavsdec.c b/libavcodec/cavsdec.c
index e70dad038a..33e639b9ae 100644
--- a/libavcodec/cavsdec.c
+++ b/libavcodec/cavsdec.c
@@ -915,9 +915,10 @@ static int decode_pic(AVSContext *h) {
     enum cavs_mb mb_type;

     if (!s->context_initialized) {
-        s->avctx->idct_algo = FF_IDCT_CAVS;
         if (ff_MPV_common_init(s) < 0)
             return -1;
+        ff_init_scantable_permutation(s->dsp.idct_permutation,
+                                      h->cdsp.idct_perm);
         ff_init_scantable(s->dsp.idct_permutation,&h->scantable,ff_zigzag_direct);
     }
     skip_bits(&s->gb,16);//bbv_dwlay
diff --git a/libavcodec/cavsdsp.c b/libavcodec/cavsdsp.c
index 983d9d7f22..bdb8d6d064 100644
--- a/libavcodec/cavsdsp.c
+++ b/libavcodec/cavsdsp.c
@@ -544,6 +544,7 @@ av_cold void ff_cavsdsp_init(CAVSDSPContext* c, AVCodecContext *avctx) {
     c->cavs_filter_cv = cavs_filter_cv_c;
     c->cavs_filter_ch = cavs_filter_ch_c;
     c->cavs_idct8_add = cavs_idct8_add_c;
+    c->idct_perm = FF_NO_IDCT_PERM;

     if (HAVE_MMX) ff_cavsdsp_init_mmx(c, avctx);
 }
diff --git a/libavcodec/cavsdsp.h b/libavcodec/cavsdsp.h
index b1133b7264..99b0ea3774 100644
--- a/libavcodec/cavsdsp.h
+++ b/libavcodec/cavsdsp.h
@@ -33,6 +33,7 @@ typedef struct CAVSDSPContext {
     void (*cavs_filter_cv)(uint8_t *pix, int stride, int alpha, int beta, int tc, int bs1, int bs2);
     void (*cavs_filter_ch)(uint8_t *pix, int stride, int alpha, int beta, int tc, int bs1, int bs2);
     void (*cavs_idct8_add)(uint8_t *dst, DCTELEM *block, int stride);
+    int idct_perm;
 } CAVSDSPContext;

 void ff_cavsdsp_init(CAVSDSPContext* c, AVCodecContext *avctx);
diff --git a/libavcodec/x86/cavsdsp.c b/libavcodec/x86/cavsdsp.c
index e94003956f..aef74c4d36 100644
--- a/libavcodec/x86/cavsdsp.c
+++ b/libavcodec/x86/cavsdsp.c
@@ -461,6 +461,7 @@ static void ff_cavsdsp_init_mmx2(CAVSDSPContext* c, AVCodecContext *avctx) {
     dspfunc(avg_cavs_qpel, 1, 8);
 #undef dspfunc
     c->cavs_idct8_add = cavs_idct8_add_mmx;
+    c->idct_perm = FF_TRANSPOSE_IDCT_PERM;
 }

 static void ff_cavsdsp_init_3dnow(CAVSDSPContext* c, AVCodecContext *avctx) {
@@ -477,6 +478,7 @@ static void ff_cavsdsp_init_3dnow(CAVSDSPContext* c, AVCodecContext *avctx) {
     dspfunc(avg_cavs_qpel, 1, 8);
 #undef dspfunc
     c->cavs_idct8_add = cavs_idct8_add_mmx;
+    c->idct_perm = FF_TRANSPOSE_IDCT_PERM;
 }

 #endif /* HAVE_INLINE_ASM */
diff --git a/libavcodec/x86/dsputil_mmx.c b/libavcodec/x86/dsputil_mmx.c
index 0876ceac9b..af21765e65 100644
--- a/libavcodec/x86/dsputil_mmx.c
+++ b/libavcodec/x86/dsputil_mmx.c
@@ -3021,8 +3021,6 @@ void ff_dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx)
         c->idct_add              = ff_simple_idct_add_mmx;
         c->idct                  = ff_simple_idct_mmx;
         c->idct_permutation_type = FF_SIMPLE_IDCT_PERM;
-    } else if (idct_algo == FF_IDCT_CAVS) {
-        c->idct_permutation_type = FF_TRANSPOSE_IDCT_PERM;
     } else if (idct_algo == FF_IDCT_XVIDMMX) {
         if (mm_flags & AV_CPU_FLAG_SSE2) {
             c->idct_put              = ff_idct_xvid_sse2_put;
--
cgit v1.2.3
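The decoupling introduced here, a permutation field owned by the codec's own DSP context and written by the same init function that selects the idct, can be sketched in a few lines. The names below are illustrative stand-ins (NO_PERM/TRANSPOSE_PERM rather than the real FF_NO_IDCT_PERM/FF_TRANSPOSE_IDCT_PERM), assuming a decoder that then feeds idct_perm to its scantable setup, as decode_pic() does above via ff_init_scantable_permutation().

    #include <stdio.h>

    /* Hypothetical permutation identifiers; the real codec uses the
     * FF_*_IDCT_PERM constants from dsputil. */
    enum { NO_PERM, TRANSPOSE_PERM };

    typedef struct CavsDSP {
        void (*idct8_add)(void);
        int idct_perm;          /* owned by the codec's DSP context */
    } CavsDSP;

    static void idct8_add_c(void)   { puts("C idct8_add"); }
    static void idct8_add_mmx(void) { puts("MMX idct8_add"); }

    static void cavsdsp_init(CavsDSP *c, int have_mmx)
    {
        c->idct8_add = idct8_add_c;
        c->idct_perm = NO_PERM;
        if (have_mmx) {
            /* The arch-specific init overrides the idct and its
             * permutation together, so the two cannot fall out of
             * sync the way an external idct_algo switch could. */
            c->idct8_add = idct8_add_mmx;
            c->idct_perm = TRANSPOSE_PERM;
        }
    }

    int main(void)
    {
        CavsDSP c;

        cavsdsp_init(&c, 1);
        c.idct8_add();
        printf("scantable permutation: %s\n",
               c.idct_perm == TRANSPOSE_PERM ? "transpose" : "none");
        return 0;
    }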
From 74c8414462126881fab4b14011be4997eed78862 Mon Sep 17 00:00:00 2001
From: Diego Biurrun
Date: Sun, 26 Aug 2012 13:02:40 +0200
Subject: fate: Drop redundant setting of FUZZ to 1

---
 tests/fate/amrnb.mak | 8 --------
 tests/fate/amrwb.mak | 7 -------
 tests/fate/mpc.mak   | 1 -
 3 files changed, 16 deletions(-)

diff --git a/tests/fate/amrnb.mak b/tests/fate/amrnb.mak
index 1fe138136c..d163dc4e96 100644
--- a/tests/fate/amrnb.mak
+++ b/tests/fate/amrnb.mak
@@ -2,49 +2,41 @@ FATE_AMRNB += fate-amrnb-4k75
 fate-amrnb-4k75: CMD = pcm -i $(SAMPLES)/amrnb/4.75k.amr
 fate-amrnb-4k75: CMP = stddev
 fate-amrnb-4k75: REF = $(SAMPLES)/amrnb/4.75k.pcm
-fate-amrnb-4k75: FUZZ = 1

 FATE_AMRNB += fate-amrnb-5k15
 fate-amrnb-5k15: CMD = pcm -i $(SAMPLES)/amrnb/5.15k.amr
 fate-amrnb-5k15: CMP = stddev
 fate-amrnb-5k15: REF = $(SAMPLES)/amrnb/5.15k.pcm
-fate-amrnb-5k15: FUZZ = 1

 FATE_AMRNB += fate-amrnb-5k9
 fate-amrnb-5k9: CMD = pcm -i $(SAMPLES)/amrnb/5.9k.amr
 fate-amrnb-5k9: CMP = stddev
 fate-amrnb-5k9: REF = $(SAMPLES)/amrnb/5.9k.pcm
-fate-amrnb-5k9: FUZZ = 1

 FATE_AMRNB += fate-amrnb-6k7
 fate-amrnb-6k7: CMD = pcm -i $(SAMPLES)/amrnb/6.7k.amr
 fate-amrnb-6k7: CMP = stddev
 fate-amrnb-6k7: REF = $(SAMPLES)/amrnb/6.7k.pcm
-fate-amrnb-6k7: FUZZ = 1

 FATE_AMRNB += fate-amrnb-7k4
 fate-amrnb-7k4: CMD = pcm -i $(SAMPLES)/amrnb/7.4k.amr
 fate-amrnb-7k4: CMP = stddev
 fate-amrnb-7k4: REF = $(SAMPLES)/amrnb/7.4k.pcm
-fate-amrnb-7k4: FUZZ = 1

 FATE_AMRNB += fate-amrnb-7k95
 fate-amrnb-7k95: CMD = pcm -i $(SAMPLES)/amrnb/7.95k.amr
 fate-amrnb-7k95: CMP = stddev
 fate-amrnb-7k95: REF = $(SAMPLES)/amrnb/7.95k.pcm
-fate-amrnb-7k95: FUZZ = 1

 FATE_AMRNB += fate-amrnb-10k2
 fate-amrnb-10k2: CMD = pcm -i $(SAMPLES)/amrnb/10.2k.amr
 fate-amrnb-10k2: CMP = stddev
 fate-amrnb-10k2: REF = $(SAMPLES)/amrnb/10.2k.pcm
-fate-amrnb-10k2: FUZZ = 1

 FATE_AMRNB += fate-amrnb-12k2
 fate-amrnb-12k2: CMD = pcm -i $(SAMPLES)/amrnb/12.2k.amr
 fate-amrnb-12k2: CMP = stddev
 fate-amrnb-12k2: REF = $(SAMPLES)/amrnb/12.2k.pcm
-fate-amrnb-12k2: FUZZ = 1

 FATE_SAMPLES_AVCONV += $(FATE_AMRNB)
 fate-amrnb: $(FATE_AMRNB)
diff --git a/tests/fate/amrwb.mak b/tests/fate/amrwb.mak
index 571fac0b35..cdbe2acd63 100644
--- a/tests/fate/amrwb.mak
+++ b/tests/fate/amrwb.mak
@@ -2,19 +2,16 @@ FATE_AMRWB += fate-amrwb-6k60
 fate-amrwb-6k60: CMD = pcm -i $(SAMPLES)/amrwb/seed-6k60.awb
 fate-amrwb-6k60: CMP = stddev
 fate-amrwb-6k60: REF = $(SAMPLES)/amrwb/seed-6k60.pcm
-fate-amrwb-6k60: FUZZ = 1

 FATE_AMRWB += fate-amrwb-8k85
 fate-amrwb-8k85: CMD = pcm -i $(SAMPLES)/amrwb/seed-8k85.awb
 fate-amrwb-8k85: CMP = stddev
 fate-amrwb-8k85: REF = $(SAMPLES)/amrwb/seed-8k85.pcm
-fate-amrwb-8k85: FUZZ = 1

 FATE_AMRWB += fate-amrwb-12k65
 fate-amrwb-12k65: CMD = pcm -i $(SAMPLES)/amrwb/seed-12k65.awb
 fate-amrwb-12k65: CMP = stddev
 fate-amrwb-12k65: REF = $(SAMPLES)/amrwb/seed-12k65.pcm
-fate-amrwb-12k65: FUZZ = 1

 FATE_AMRWB += fate-amrwb-14k25
 fate-amrwb-14k25: CMD = pcm -i $(SAMPLES)/amrwb/seed-14k25.awb
@@ -26,19 +23,16 @@ FATE_AMRWB += fate-amrwb-15k85
 fate-amrwb-15k85: CMD = pcm -i $(SAMPLES)/amrwb/seed-15k85.awb
 fate-amrwb-15k85: CMP = stddev
 fate-amrwb-15k85: REF = $(SAMPLES)/amrwb/seed-15k85.pcm
-fate-amrwb-15k85: FUZZ = 1

 FATE_AMRWB += fate-amrwb-18k25
 fate-amrwb-18k25: CMD = pcm -i $(SAMPLES)/amrwb/seed-18k25.awb
 fate-amrwb-18k25: CMP = stddev
 fate-amrwb-18k25: REF = $(SAMPLES)/amrwb/seed-18k25.pcm
-fate-amrwb-18k25: FUZZ = 1

 FATE_AMRWB += fate-amrwb-19k85
 fate-amrwb-19k85: CMD = pcm -i $(SAMPLES)/amrwb/seed-19k85.awb
 fate-amrwb-19k85: CMP = stddev
 fate-amrwb-19k85: REF = $(SAMPLES)/amrwb/seed-19k85.pcm
-fate-amrwb-19k85: FUZZ = 1

 FATE_AMRWB += fate-amrwb-23k05
 fate-amrwb-23k05: CMD = pcm -i $(SAMPLES)/amrwb/seed-23k05.awb
@@ -56,7 +50,6 @@ FATE_AMRWB += fate-amrwb-23k85-2
 fate-amrwb-23k85-2: CMD = pcm -i $(SAMPLES)/amrwb/deus-23k85.awb
 fate-amrwb-23k85-2: CMP = stddev
 fate-amrwb-23k85-2: REF = $(SAMPLES)/amrwb/deus-23k85.pcm
-fate-amrwb-23k85-2: FUZZ = 1

 FATE_SAMPLES_AVCONV += $(FATE_AMRWB)
 fate-amrwb: $(FATE_AMRWB)
diff --git a/tests/fate/mpc.mak b/tests/fate/mpc.mak
index 6f429d3b95..f30ba604ef 100644
--- a/tests/fate/mpc.mak
+++ b/tests/fate/mpc.mak
@@ -8,7 +8,6 @@ FATE_MPC += fate-musepack7
 fate-musepack7: CMD = pcm -i $(SAMPLES)/musepack/inside-mp7.mpc
 fate-musepack7: CMP = oneoff
 fate-musepack7: REF = $(SAMPLES)/musepack/inside-mp7.pcm
-fate-musepack7: FUZZ = 1

 FATE_SAMPLES_AVCONV += $(FATE_MPC)
 fate-mpc: $(FATE_MPC)
--
cgit v1.2.3

From 8cb7ed5562c438388b1dd7dc7d10c26f54c740b5 Mon Sep 17 00:00:00 2001
From: Diego Biurrun
Date: Sat, 11 Aug 2012 22:35:27 +0200
Subject: x86: avcodec: Drop silly "_mmx" suffix from dsputil template names

---
 libavcodec/x86/dsputil_avg_template.c     | 977 ++++++++++++++++++++++++++++++
 libavcodec/x86/dsputil_mmx.c              |   8 +-
 libavcodec/x86/dsputil_mmx_avg_template.c | 977 ------------------------------
 libavcodec/x86/dsputil_mmx_qns_template.c | 101 ---
 libavcodec/x86/dsputil_mmx_rnd_template.c | 590 ------------------
 libavcodec/x86/dsputil_qns_template.c     | 101 +++
 libavcodec/x86/dsputil_rnd_template.c     | 590 ++++++++++++++++++
 libavcodec/x86/dsputilenc_mmx.c           |   6 +-
 8 files changed, 1675 insertions(+), 1675 deletions(-)
 create mode 100644 libavcodec/x86/dsputil_avg_template.c
 delete mode 100644 libavcodec/x86/dsputil_mmx_avg_template.c
 delete mode 100644 libavcodec/x86/dsputil_mmx_qns_template.c
 delete mode 100644 libavcodec/x86/dsputil_mmx_rnd_template.c
 create mode 100644 libavcodec/x86/dsputil_qns_template.c
 create mode 100644 libavcodec/x86/dsputil_rnd_template.c

diff --git a/libavcodec/x86/dsputil_avg_template.c b/libavcodec/x86/dsputil_avg_template.c
new file mode 100644
index 0000000000..8b116b74e2
--- /dev/null
+++ b/libavcodec/x86/dsputil_avg_template.c
@@ -0,0 +1,977 @@
+/*
+ * DSP utils : average functions are compiled twice for 3dnow/mmx2
+ * Copyright (c) 2000, 2001 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer
+ *
+ * MMX optimization by Nick Kurshev
+ * mostly rewritten by Michael Niedermayer
+ * and improved by Zdenek Kabelac
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/* XXX: we use explicit registers to avoid a gcc 2.95.2 register asm + clobber bug - now it will work with 2.95.2 and also with -fPIC + */ +static void DEF(put_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + __asm__ volatile( + "lea (%3, %3), %%"REG_a" \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%1, %3), %%mm1 \n\t" + PAVGB" 1(%1), %%mm0 \n\t" + PAVGB" 1(%1, %3), %%mm1 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm1, (%2, %3) \n\t" + "add %%"REG_a", %1 \n\t" + "add %%"REG_a", %2 \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%1, %3), %%mm1 \n\t" + PAVGB" 1(%1), %%mm0 \n\t" + PAVGB" 1(%1, %3), %%mm1 \n\t" + "add %%"REG_a", %1 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm1, (%2, %3) \n\t" + "add %%"REG_a", %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels), "+D"(block) + :"r" ((x86_reg)line_size) + :"%"REG_a, "memory"); +} + +static void DEF(put_pixels4_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) +{ + __asm__ volatile( + "testl $1, %0 \n\t" + " jz 1f \n\t" + "movd (%1), %%mm0 \n\t" + "movd (%2), %%mm1 \n\t" + "add %4, %1 \n\t" + "add $4, %2 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + "movd %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + "decl %0 \n\t" + "1: \n\t" + "movd (%1), %%mm0 \n\t" + "add %4, %1 \n\t" + "movd (%1), %%mm1 \n\t" + "movd (%2), %%mm2 \n\t" + "movd 4(%2), %%mm3 \n\t" + "add %4, %1 \n\t" + PAVGB" %%mm2, %%mm0 \n\t" + PAVGB" %%mm3, %%mm1 \n\t" + "movd %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + "movd %%mm1, (%3) \n\t" + "add %5, %3 \n\t" + "movd (%1), %%mm0 \n\t" + "add %4, %1 \n\t" + "movd (%1), %%mm1 \n\t" + "movd 8(%2), %%mm2 \n\t" + "movd 12(%2), %%mm3 \n\t" + "add %4, %1 \n\t" + PAVGB" %%mm2, %%mm0 \n\t" + PAVGB" %%mm3, %%mm1 \n\t" + "movd %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + "movd %%mm1, (%3) \n\t" + "add %5, %3 \n\t" + "add $16, %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" +#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used + :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#else + :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#endif + :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) + :"memory"); +} + + +static void DEF(put_pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) +{ + __asm__ volatile( + "testl $1, %0 \n\t" + " jz 1f \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%2), %%mm1 \n\t" + "add %4, %1 \n\t" + "add $8, %2 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + "movq %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + "decl %0 \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "add %4, %1 \n\t" + "movq (%1), %%mm1 \n\t" + "add %4, %1 \n\t" + PAVGB" (%2), %%mm0 \n\t" + PAVGB" 8(%2), %%mm1 \n\t" + "movq %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + "movq %%mm1, (%3) \n\t" + "add %5, %3 \n\t" + "movq (%1), %%mm0 \n\t" + "add %4, %1 \n\t" + "movq (%1), %%mm1 \n\t" + "add %4, %1 \n\t" + PAVGB" 16(%2), %%mm0 \n\t" + PAVGB" 24(%2), %%mm1 \n\t" + "movq %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + "movq %%mm1, (%3) \n\t" + "add %5, %3 \n\t" + "add $32, %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" +#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used + :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#else + :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#endif + 
:"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) + :"memory"); +//the following should be used, though better not with gcc ... +/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) + :"r"(src1Stride), "r"(dstStride) + :"memory");*/ +} + +static void DEF(put_no_rnd_pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) +{ + __asm__ volatile( + "pcmpeqb %%mm6, %%mm6 \n\t" + "testl $1, %0 \n\t" + " jz 1f \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%2), %%mm1 \n\t" + "add %4, %1 \n\t" + "add $8, %2 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm1 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "movq %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + "decl %0 \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "add %4, %1 \n\t" + "movq (%1), %%mm1 \n\t" + "add %4, %1 \n\t" + "movq (%2), %%mm2 \n\t" + "movq 8(%2), %%mm3 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "pxor %%mm6, %%mm2 \n\t" + "pxor %%mm6, %%mm3 \n\t" + PAVGB" %%mm2, %%mm0 \n\t" + PAVGB" %%mm3, %%mm1 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "movq %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + "movq %%mm1, (%3) \n\t" + "add %5, %3 \n\t" + "movq (%1), %%mm0 \n\t" + "add %4, %1 \n\t" + "movq (%1), %%mm1 \n\t" + "add %4, %1 \n\t" + "movq 16(%2), %%mm2 \n\t" + "movq 24(%2), %%mm3 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "pxor %%mm6, %%mm2 \n\t" + "pxor %%mm6, %%mm3 \n\t" + PAVGB" %%mm2, %%mm0 \n\t" + PAVGB" %%mm3, %%mm1 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "movq %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + "movq %%mm1, (%3) \n\t" + "add %5, %3 \n\t" + "add $32, %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" +#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used + :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#else + :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#endif + :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) + :"memory"); +//the following should be used, though better not with gcc ... 
+/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) + :"r"(src1Stride), "r"(dstStride) + :"memory");*/ +} + +static void DEF(avg_pixels4_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) +{ + __asm__ volatile( + "testl $1, %0 \n\t" + " jz 1f \n\t" + "movd (%1), %%mm0 \n\t" + "movd (%2), %%mm1 \n\t" + "add %4, %1 \n\t" + "add $4, %2 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + PAVGB" (%3), %%mm0 \n\t" + "movd %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + "decl %0 \n\t" + "1: \n\t" + "movd (%1), %%mm0 \n\t" + "add %4, %1 \n\t" + "movd (%1), %%mm1 \n\t" + "add %4, %1 \n\t" + PAVGB" (%2), %%mm0 \n\t" + PAVGB" 4(%2), %%mm1 \n\t" + PAVGB" (%3), %%mm0 \n\t" + "movd %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + PAVGB" (%3), %%mm1 \n\t" + "movd %%mm1, (%3) \n\t" + "add %5, %3 \n\t" + "movd (%1), %%mm0 \n\t" + "add %4, %1 \n\t" + "movd (%1), %%mm1 \n\t" + "add %4, %1 \n\t" + PAVGB" 8(%2), %%mm0 \n\t" + PAVGB" 12(%2), %%mm1 \n\t" + PAVGB" (%3), %%mm0 \n\t" + "movd %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + PAVGB" (%3), %%mm1 \n\t" + "movd %%mm1, (%3) \n\t" + "add %5, %3 \n\t" + "add $16, %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" +#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used + :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#else + :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#endif + :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) + :"memory"); +} + + +static void DEF(avg_pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) +{ + __asm__ volatile( + "testl $1, %0 \n\t" + " jz 1f \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%2), %%mm1 \n\t" + "add %4, %1 \n\t" + "add $8, %2 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + PAVGB" (%3), %%mm0 \n\t" + "movq %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + "decl %0 \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "add %4, %1 \n\t" + "movq (%1), %%mm1 \n\t" + "add %4, %1 \n\t" + PAVGB" (%2), %%mm0 \n\t" + PAVGB" 8(%2), %%mm1 \n\t" + PAVGB" (%3), %%mm0 \n\t" + "movq %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + PAVGB" (%3), %%mm1 \n\t" + "movq %%mm1, (%3) \n\t" + "add %5, %3 \n\t" + "movq (%1), %%mm0 \n\t" + "add %4, %1 \n\t" + "movq (%1), %%mm1 \n\t" + "add %4, %1 \n\t" + PAVGB" 16(%2), %%mm0 \n\t" + PAVGB" 24(%2), %%mm1 \n\t" + PAVGB" (%3), %%mm0 \n\t" + "movq %%mm0, (%3) \n\t" + "add %5, %3 \n\t" + PAVGB" (%3), %%mm1 \n\t" + "movq %%mm1, (%3) \n\t" + "add %5, %3 \n\t" + "add $32, %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" +#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used + :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#else + :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#endif + :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) + :"memory"); +//the following should be used, though better not with gcc ... 
+/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) + :"r"(src1Stride), "r"(dstStride) + :"memory");*/ +} + +static void DEF(put_pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + __asm__ volatile( + "lea (%3, %3), %%"REG_a" \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%1, %3), %%mm1 \n\t" + "movq 8(%1), %%mm2 \n\t" + "movq 8(%1, %3), %%mm3 \n\t" + PAVGB" 1(%1), %%mm0 \n\t" + PAVGB" 1(%1, %3), %%mm1 \n\t" + PAVGB" 9(%1), %%mm2 \n\t" + PAVGB" 9(%1, %3), %%mm3 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm1, (%2, %3) \n\t" + "movq %%mm2, 8(%2) \n\t" + "movq %%mm3, 8(%2, %3) \n\t" + "add %%"REG_a", %1 \n\t" + "add %%"REG_a", %2 \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%1, %3), %%mm1 \n\t" + "movq 8(%1), %%mm2 \n\t" + "movq 8(%1, %3), %%mm3 \n\t" + PAVGB" 1(%1), %%mm0 \n\t" + PAVGB" 1(%1, %3), %%mm1 \n\t" + PAVGB" 9(%1), %%mm2 \n\t" + PAVGB" 9(%1, %3), %%mm3 \n\t" + "add %%"REG_a", %1 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm1, (%2, %3) \n\t" + "movq %%mm2, 8(%2) \n\t" + "movq %%mm3, 8(%2, %3) \n\t" + "add %%"REG_a", %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels), "+D"(block) + :"r" ((x86_reg)line_size) + :"%"REG_a, "memory"); +} + +static void DEF(put_pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) +{ + __asm__ volatile( + "testl $1, %0 \n\t" + " jz 1f \n\t" + "movq (%1), %%mm0 \n\t" + "movq 8(%1), %%mm1 \n\t" + PAVGB" (%2), %%mm0 \n\t" + PAVGB" 8(%2), %%mm1 \n\t" + "add %4, %1 \n\t" + "add $16, %2 \n\t" + "movq %%mm0, (%3) \n\t" + "movq %%mm1, 8(%3) \n\t" + "add %5, %3 \n\t" + "decl %0 \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "movq 8(%1), %%mm1 \n\t" + "add %4, %1 \n\t" + PAVGB" (%2), %%mm0 \n\t" + PAVGB" 8(%2), %%mm1 \n\t" + "movq %%mm0, (%3) \n\t" + "movq %%mm1, 8(%3) \n\t" + "add %5, %3 \n\t" + "movq (%1), %%mm0 \n\t" + "movq 8(%1), %%mm1 \n\t" + "add %4, %1 \n\t" + PAVGB" 16(%2), %%mm0 \n\t" + PAVGB" 24(%2), %%mm1 \n\t" + "movq %%mm0, (%3) \n\t" + "movq %%mm1, 8(%3) \n\t" + "add %5, %3 \n\t" + "add $32, %2 \n\t" + "subl $2, %0 \n\t" + "jnz 1b \n\t" +#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used + :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#else + :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#endif + :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) + :"memory"); +//the following should be used, though better not with gcc ... 
+/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) + :"r"(src1Stride), "r"(dstStride) + :"memory");*/ +} + +static void DEF(avg_pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) +{ + __asm__ volatile( + "testl $1, %0 \n\t" + " jz 1f \n\t" + "movq (%1), %%mm0 \n\t" + "movq 8(%1), %%mm1 \n\t" + PAVGB" (%2), %%mm0 \n\t" + PAVGB" 8(%2), %%mm1 \n\t" + "add %4, %1 \n\t" + "add $16, %2 \n\t" + PAVGB" (%3), %%mm0 \n\t" + PAVGB" 8(%3), %%mm1 \n\t" + "movq %%mm0, (%3) \n\t" + "movq %%mm1, 8(%3) \n\t" + "add %5, %3 \n\t" + "decl %0 \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "movq 8(%1), %%mm1 \n\t" + "add %4, %1 \n\t" + PAVGB" (%2), %%mm0 \n\t" + PAVGB" 8(%2), %%mm1 \n\t" + PAVGB" (%3), %%mm0 \n\t" + PAVGB" 8(%3), %%mm1 \n\t" + "movq %%mm0, (%3) \n\t" + "movq %%mm1, 8(%3) \n\t" + "add %5, %3 \n\t" + "movq (%1), %%mm0 \n\t" + "movq 8(%1), %%mm1 \n\t" + "add %4, %1 \n\t" + PAVGB" 16(%2), %%mm0 \n\t" + PAVGB" 24(%2), %%mm1 \n\t" + PAVGB" (%3), %%mm0 \n\t" + PAVGB" 8(%3), %%mm1 \n\t" + "movq %%mm0, (%3) \n\t" + "movq %%mm1, 8(%3) \n\t" + "add %5, %3 \n\t" + "add $32, %2 \n\t" + "subl $2, %0 \n\t" + "jnz 1b \n\t" +#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used + :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#else + :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#endif + :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) + :"memory"); +//the following should be used, though better not with gcc ... +/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) + :"r"(src1Stride), "r"(dstStride) + :"memory");*/ +} + +static void DEF(put_no_rnd_pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) +{ + __asm__ volatile( + "pcmpeqb %%mm6, %%mm6 \n\t" + "testl $1, %0 \n\t" + " jz 1f \n\t" + "movq (%1), %%mm0 \n\t" + "movq 8(%1), %%mm1 \n\t" + "movq (%2), %%mm2 \n\t" + "movq 8(%2), %%mm3 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "pxor %%mm6, %%mm2 \n\t" + "pxor %%mm6, %%mm3 \n\t" + PAVGB" %%mm2, %%mm0 \n\t" + PAVGB" %%mm3, %%mm1 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "add %4, %1 \n\t" + "add $16, %2 \n\t" + "movq %%mm0, (%3) \n\t" + "movq %%mm1, 8(%3) \n\t" + "add %5, %3 \n\t" + "decl %0 \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "movq 8(%1), %%mm1 \n\t" + "add %4, %1 \n\t" + "movq (%2), %%mm2 \n\t" + "movq 8(%2), %%mm3 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "pxor %%mm6, %%mm2 \n\t" + "pxor %%mm6, %%mm3 \n\t" + PAVGB" %%mm2, %%mm0 \n\t" + PAVGB" %%mm3, %%mm1 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "movq %%mm0, (%3) \n\t" + "movq %%mm1, 8(%3) \n\t" + "add %5, %3 \n\t" + "movq (%1), %%mm0 \n\t" + "movq 8(%1), %%mm1 \n\t" + "add %4, %1 \n\t" + "movq 16(%2), %%mm2 \n\t" + "movq 24(%2), %%mm3 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "pxor %%mm6, %%mm2 \n\t" + "pxor %%mm6, %%mm3 \n\t" + PAVGB" %%mm2, %%mm0 \n\t" + PAVGB" %%mm3, %%mm1 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "movq %%mm0, (%3) \n\t" + "movq %%mm1, 8(%3) \n\t" + "add %5, %3 \n\t" + "add $32, %2 \n\t" + "subl $2, %0 \n\t" + "jnz 1b \n\t" +#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used + :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#else + :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#endif + :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) + :"memory"); +//the following should be used, though better not with gcc ... 
+/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) + :"r"(src1Stride), "r"(dstStride) + :"memory");*/ +} + +/* GL: this function does incorrect rounding if overflow */ +static void DEF(put_no_rnd_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + MOVQ_BONE(mm6); + __asm__ volatile( + "lea (%3, %3), %%"REG_a" \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%1, %3), %%mm2 \n\t" + "movq 1(%1), %%mm1 \n\t" + "movq 1(%1, %3), %%mm3 \n\t" + "add %%"REG_a", %1 \n\t" + "psubusb %%mm6, %%mm0 \n\t" + "psubusb %%mm6, %%mm2 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + PAVGB" %%mm3, %%mm2 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm2, (%2, %3) \n\t" + "movq (%1), %%mm0 \n\t" + "movq 1(%1), %%mm1 \n\t" + "movq (%1, %3), %%mm2 \n\t" + "movq 1(%1, %3), %%mm3 \n\t" + "add %%"REG_a", %2 \n\t" + "add %%"REG_a", %1 \n\t" + "psubusb %%mm6, %%mm0 \n\t" + "psubusb %%mm6, %%mm2 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + PAVGB" %%mm3, %%mm2 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm2, (%2, %3) \n\t" + "add %%"REG_a", %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels), "+D"(block) + :"r" ((x86_reg)line_size) + :"%"REG_a, "memory"); +} + +static void DEF(put_no_rnd_pixels8_x2_exact)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + __asm__ volatile ( + "pcmpeqb %%mm6, %%mm6 \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%1, %3), %%mm2 \n\t" + "movq 1(%1), %%mm1 \n\t" + "movq 1(%1, %3), %%mm3 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm2 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "pxor %%mm6, %%mm3 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + PAVGB" %%mm3, %%mm2 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm2 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm2, (%2, %3) \n\t" + "movq (%1, %3,2), %%mm0 \n\t" + "movq 1(%1, %3,2), %%mm1 \n\t" + "movq (%1, %4), %%mm2 \n\t" + "movq 1(%1, %4), %%mm3 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "pxor %%mm6, %%mm2 \n\t" + "pxor %%mm6, %%mm3 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + PAVGB" %%mm3, %%mm2 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm2 \n\t" + "movq %%mm0, (%2, %3,2) \n\t" + "movq %%mm2, (%2, %4) \n\t" + "lea (%1, %3,4), %1 \n\t" + "lea (%2, %3,4), %2 \n\t" + "subl $4, %0 \n\t" + "jg 1b \n\t" + : "+g"(h), "+r"(pixels), "+r"(block) + : "r" ((x86_reg)line_size), "r"((x86_reg)3*line_size) + : "memory" + ); +} + +static void DEF(put_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + __asm__ volatile( + "lea (%3, %3), %%"REG_a" \n\t" + "movq (%1), %%mm0 \n\t" + "sub %3, %2 \n\t" + "1: \n\t" + "movq (%1, %3), %%mm1 \n\t" + "movq (%1, %%"REG_a"), %%mm2 \n\t" + "add %%"REG_a", %1 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + PAVGB" %%mm2, %%mm1 \n\t" + "movq %%mm0, (%2, %3) \n\t" + "movq %%mm1, (%2, %%"REG_a") \n\t" + "movq (%1, %3), %%mm1 \n\t" + "movq (%1, %%"REG_a"), %%mm0 \n\t" + "add %%"REG_a", %2 \n\t" + "add %%"REG_a", %1 \n\t" + PAVGB" %%mm1, %%mm2 \n\t" + PAVGB" %%mm0, %%mm1 \n\t" + "movq %%mm2, (%2, %3) \n\t" + "movq %%mm1, (%2, %%"REG_a") \n\t" + "add %%"REG_a", %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels), "+D" (block) + :"r" ((x86_reg)line_size) + :"%"REG_a, "memory"); +} + +/* GL: this function does incorrect rounding if overflow */ +static void DEF(put_no_rnd_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + MOVQ_BONE(mm6); + __asm__ volatile( + "lea (%3, %3), %%"REG_a" \n\t" + "movq (%1), %%mm0 \n\t" + "sub %3, %2 \n\t" + "1: \n\t" + "movq (%1, %3), %%mm1 \n\t" + "movq (%1, %%"REG_a"), %%mm2 \n\t" + "add %%"REG_a", %1 
\n\t" + "psubusb %%mm6, %%mm1 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + PAVGB" %%mm2, %%mm1 \n\t" + "movq %%mm0, (%2, %3) \n\t" + "movq %%mm1, (%2, %%"REG_a") \n\t" + "movq (%1, %3), %%mm1 \n\t" + "movq (%1, %%"REG_a"), %%mm0 \n\t" + "add %%"REG_a", %2 \n\t" + "add %%"REG_a", %1 \n\t" + "psubusb %%mm6, %%mm1 \n\t" + PAVGB" %%mm1, %%mm2 \n\t" + PAVGB" %%mm0, %%mm1 \n\t" + "movq %%mm2, (%2, %3) \n\t" + "movq %%mm1, (%2, %%"REG_a") \n\t" + "add %%"REG_a", %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels), "+D" (block) + :"r" ((x86_reg)line_size) + :"%"REG_a, "memory"); +} + +static void DEF(put_no_rnd_pixels8_y2_exact)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + __asm__ volatile ( + "movq (%1), %%mm0 \n\t" + "pcmpeqb %%mm6, %%mm6 \n\t" + "add %3, %1 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "1: \n\t" + "movq (%1), %%mm1 \n\t" + "movq (%1, %3), %%mm2 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "pxor %%mm6, %%mm2 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + PAVGB" %%mm2, %%mm1 \n\t" + "pxor %%mm6, %%mm0 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm1, (%2, %3) \n\t" + "movq (%1, %3,2), %%mm1 \n\t" + "movq (%1, %4), %%mm0 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "pxor %%mm6, %%mm0 \n\t" + PAVGB" %%mm1, %%mm2 \n\t" + PAVGB" %%mm0, %%mm1 \n\t" + "pxor %%mm6, %%mm2 \n\t" + "pxor %%mm6, %%mm1 \n\t" + "movq %%mm2, (%2, %3,2) \n\t" + "movq %%mm1, (%2, %4) \n\t" + "lea (%1, %3,4), %1 \n\t" + "lea (%2, %3,4), %2 \n\t" + "subl $4, %0 \n\t" + "jg 1b \n\t" + :"+g"(h), "+r"(pixels), "+r" (block) + :"r" ((x86_reg)line_size), "r"((x86_reg)3*line_size) + :"memory" + ); +} + +static void DEF(avg_pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + __asm__ volatile( + "lea (%3, %3), %%"REG_a" \n\t" + "1: \n\t" + "movq (%2), %%mm0 \n\t" + "movq (%2, %3), %%mm1 \n\t" + PAVGB" (%1), %%mm0 \n\t" + PAVGB" (%1, %3), %%mm1 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm1, (%2, %3) \n\t" + "add %%"REG_a", %1 \n\t" + "add %%"REG_a", %2 \n\t" + "movq (%2), %%mm0 \n\t" + "movq (%2, %3), %%mm1 \n\t" + PAVGB" (%1), %%mm0 \n\t" + PAVGB" (%1, %3), %%mm1 \n\t" + "add %%"REG_a", %1 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm1, (%2, %3) \n\t" + "add %%"REG_a", %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels), "+D"(block) + :"r" ((x86_reg)line_size) + :"%"REG_a, "memory"); +} + +static void DEF(avg_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + __asm__ volatile( + "lea (%3, %3), %%"REG_a" \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%1, %3), %%mm2 \n\t" + PAVGB" 1(%1), %%mm0 \n\t" + PAVGB" 1(%1, %3), %%mm2 \n\t" + PAVGB" (%2), %%mm0 \n\t" + PAVGB" (%2, %3), %%mm2 \n\t" + "add %%"REG_a", %1 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm2, (%2, %3) \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%1, %3), %%mm2 \n\t" + PAVGB" 1(%1), %%mm0 \n\t" + PAVGB" 1(%1, %3), %%mm2 \n\t" + "add %%"REG_a", %2 \n\t" + "add %%"REG_a", %1 \n\t" + PAVGB" (%2), %%mm0 \n\t" + PAVGB" (%2, %3), %%mm2 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm2, (%2, %3) \n\t" + "add %%"REG_a", %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels), "+D"(block) + :"r" ((x86_reg)line_size) + :"%"REG_a, "memory"); +} + +static void DEF(avg_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + __asm__ volatile( + "lea (%3, %3), %%"REG_a" \n\t" + "movq (%1), %%mm0 \n\t" + "sub %3, %2 \n\t" + "1: \n\t" + "movq (%1, %3), %%mm1 \n\t" + "movq (%1, %%"REG_a"), %%mm2 \n\t" + "add %%"REG_a", %1 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + PAVGB" 
%%mm2, %%mm1 \n\t" + "movq (%2, %3), %%mm3 \n\t" + "movq (%2, %%"REG_a"), %%mm4 \n\t" + PAVGB" %%mm3, %%mm0 \n\t" + PAVGB" %%mm4, %%mm1 \n\t" + "movq %%mm0, (%2, %3) \n\t" + "movq %%mm1, (%2, %%"REG_a") \n\t" + "movq (%1, %3), %%mm1 \n\t" + "movq (%1, %%"REG_a"), %%mm0 \n\t" + PAVGB" %%mm1, %%mm2 \n\t" + PAVGB" %%mm0, %%mm1 \n\t" + "add %%"REG_a", %2 \n\t" + "add %%"REG_a", %1 \n\t" + "movq (%2, %3), %%mm3 \n\t" + "movq (%2, %%"REG_a"), %%mm4 \n\t" + PAVGB" %%mm3, %%mm2 \n\t" + PAVGB" %%mm4, %%mm1 \n\t" + "movq %%mm2, (%2, %3) \n\t" + "movq %%mm1, (%2, %%"REG_a") \n\t" + "add %%"REG_a", %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels), "+D"(block) + :"r" ((x86_reg)line_size) + :"%"REG_a, "memory"); +} + +/* Note this is not correctly rounded, but this function is only + * used for B-frames so it does not matter. */ +static void DEF(avg_pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + MOVQ_BONE(mm6); + __asm__ volatile( + "lea (%3, %3), %%"REG_a" \n\t" + "movq (%1), %%mm0 \n\t" + PAVGB" 1(%1), %%mm0 \n\t" + ".p2align 3 \n\t" + "1: \n\t" + "movq (%1, %%"REG_a"), %%mm2 \n\t" + "movq (%1, %3), %%mm1 \n\t" + "psubusb %%mm6, %%mm2 \n\t" + PAVGB" 1(%1, %3), %%mm1 \n\t" + PAVGB" 1(%1, %%"REG_a"), %%mm2 \n\t" + "add %%"REG_a", %1 \n\t" + PAVGB" %%mm1, %%mm0 \n\t" + PAVGB" %%mm2, %%mm1 \n\t" + PAVGB" (%2), %%mm0 \n\t" + PAVGB" (%2, %3), %%mm1 \n\t" + "movq %%mm0, (%2) \n\t" + "movq %%mm1, (%2, %3) \n\t" + "movq (%1, %3), %%mm1 \n\t" + "movq (%1, %%"REG_a"), %%mm0 \n\t" + PAVGB" 1(%1, %3), %%mm1 \n\t" + PAVGB" 1(%1, %%"REG_a"), %%mm0 \n\t" + "add %%"REG_a", %2 \n\t" + "add %%"REG_a", %1 \n\t" + PAVGB" %%mm1, %%mm2 \n\t" + PAVGB" %%mm0, %%mm1 \n\t" + PAVGB" (%2), %%mm2 \n\t" + PAVGB" (%2, %3), %%mm1 \n\t" + "movq %%mm2, (%2) \n\t" + "movq %%mm1, (%2, %3) \n\t" + "add %%"REG_a", %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels), "+D"(block) + :"r" ((x86_reg)line_size) + :"%"REG_a, "memory"); +} + +static void DEF(avg_pixels4)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + do { + __asm__ volatile( + "movd (%1), %%mm0 \n\t" + "movd (%1, %2), %%mm1 \n\t" + "movd (%1, %2, 2), %%mm2 \n\t" + "movd (%1, %3), %%mm3 \n\t" + PAVGB" (%0), %%mm0 \n\t" + PAVGB" (%0, %2), %%mm1 \n\t" + PAVGB" (%0, %2, 2), %%mm2 \n\t" + PAVGB" (%0, %3), %%mm3 \n\t" + "movd %%mm0, (%1) \n\t" + "movd %%mm1, (%1, %2) \n\t" + "movd %%mm2, (%1, %2, 2) \n\t" + "movd %%mm3, (%1, %3) \n\t" + ::"S"(pixels), "D"(block), + "r" ((x86_reg)line_size), "r"((x86_reg)3L*line_size) + :"memory"); + block += 4*line_size; + pixels += 4*line_size; + h -= 4; + } while(h > 0); +} + +//FIXME the following could be optimized too ... 
+static void DEF(put_no_rnd_pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ + DEF(put_no_rnd_pixels8_x2)(block , pixels , line_size, h); + DEF(put_no_rnd_pixels8_x2)(block+8, pixels+8, line_size, h); +} +static void DEF(put_pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ + DEF(put_pixels8_y2)(block , pixels , line_size, h); + DEF(put_pixels8_y2)(block+8, pixels+8, line_size, h); +} +static void DEF(put_no_rnd_pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ + DEF(put_no_rnd_pixels8_y2)(block , pixels , line_size, h); + DEF(put_no_rnd_pixels8_y2)(block+8, pixels+8, line_size, h); +} +static void DEF(avg_pixels16)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ + DEF(avg_pixels8)(block , pixels , line_size, h); + DEF(avg_pixels8)(block+8, pixels+8, line_size, h); +} +static void DEF(avg_pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ + DEF(avg_pixels8_x2)(block , pixels , line_size, h); + DEF(avg_pixels8_x2)(block+8, pixels+8, line_size, h); +} +static void DEF(avg_pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ + DEF(avg_pixels8_y2)(block , pixels , line_size, h); + DEF(avg_pixels8_y2)(block+8, pixels+8, line_size, h); +} +static void DEF(avg_pixels16_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ + DEF(avg_pixels8_xy2)(block , pixels , line_size, h); + DEF(avg_pixels8_xy2)(block+8, pixels+8, line_size, h); +} + +#define QPEL_2TAP_L3(OPNAME) \ +static void DEF(OPNAME ## 2tap_qpel16_l3)(uint8_t *dst, uint8_t *src, int stride, int h, int off1, int off2){\ + __asm__ volatile(\ + "1: \n\t"\ + "movq (%1,%2), %%mm0 \n\t"\ + "movq 8(%1,%2), %%mm1 \n\t"\ + PAVGB" (%1,%3), %%mm0 \n\t"\ + PAVGB" 8(%1,%3), %%mm1 \n\t"\ + PAVGB" (%1), %%mm0 \n\t"\ + PAVGB" 8(%1), %%mm1 \n\t"\ + STORE_OP( (%1,%4),%%mm0)\ + STORE_OP(8(%1,%4),%%mm1)\ + "movq %%mm0, (%1,%4) \n\t"\ + "movq %%mm1, 8(%1,%4) \n\t"\ + "add %5, %1 \n\t"\ + "decl %0 \n\t"\ + "jnz 1b \n\t"\ + :"+g"(h), "+r"(src)\ + :"r"((x86_reg)off1), "r"((x86_reg)off2),\ + "r"((x86_reg)(dst-src)), "r"((x86_reg)stride)\ + :"memory"\ + );\ +}\ +static void DEF(OPNAME ## 2tap_qpel8_l3)(uint8_t *dst, uint8_t *src, int stride, int h, int off1, int off2){\ + __asm__ volatile(\ + "1: \n\t"\ + "movq (%1,%2), %%mm0 \n\t"\ + PAVGB" (%1,%3), %%mm0 \n\t"\ + PAVGB" (%1), %%mm0 \n\t"\ + STORE_OP((%1,%4),%%mm0)\ + "movq %%mm0, (%1,%4) \n\t"\ + "add %5, %1 \n\t"\ + "decl %0 \n\t"\ + "jnz 1b \n\t"\ + :"+g"(h), "+r"(src)\ + :"r"((x86_reg)off1), "r"((x86_reg)off2),\ + "r"((x86_reg)(dst-src)), "r"((x86_reg)stride)\ + :"memory"\ + );\ +} + +#define STORE_OP(a,b) PAVGB" "#a","#b" \n\t" +QPEL_2TAP_L3(avg_) +#undef STORE_OP +#define STORE_OP(a,b) +QPEL_2TAP_L3(put_) +#undef STORE_OP +#undef QPEL_2TAP_L3 diff --git a/libavcodec/x86/dsputil_mmx.c b/libavcodec/x86/dsputil_mmx.c index af21765e65..f3ee342d72 100644 --- a/libavcodec/x86/dsputil_mmx.c +++ b/libavcodec/x86/dsputil_mmx.c @@ -170,7 +170,7 @@ DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 }; #define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e) #define OP_AVG(a, b, c, e) PAVGB_MMX(a, b, c, e) -#include "dsputil_mmx_rnd_template.c" +#include "dsputil_rnd_template.c" #undef DEF #undef SET_RND @@ -184,7 +184,7 @@ DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 }; #define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f) #define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e) -#include "dsputil_mmx_rnd_template.c" +#include "dsputil_rnd_template.c" 
#undef DEF #undef SET_RND @@ -199,7 +199,7 @@ DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 }; #define PAVGB "pavgusb" #define OP_AVG PAVGB -#include "dsputil_mmx_avg_template.c" +#include "dsputil_avg_template.c" #undef DEF #undef PAVGB @@ -214,7 +214,7 @@ DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 }; #define PAVGB "pavgb" #define OP_AVG PAVGB -#include "dsputil_mmx_avg_template.c" +#include "dsputil_avg_template.c" #undef DEF #undef PAVGB diff --git a/libavcodec/x86/dsputil_mmx_avg_template.c b/libavcodec/x86/dsputil_mmx_avg_template.c deleted file mode 100644 index 8b116b74e2..0000000000 --- a/libavcodec/x86/dsputil_mmx_avg_template.c +++ /dev/null @@ -1,977 +0,0 @@ -/* - * DSP utils : average functions are compiled twice for 3dnow/mmx2 - * Copyright (c) 2000, 2001 Fabrice Bellard - * Copyright (c) 2002-2004 Michael Niedermayer - * - * MMX optimization by Nick Kurshev - * mostly rewritten by Michael Niedermayer - * and improved by Zdenek Kabelac - * - * This file is part of Libav. - * - * Libav is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * Libav is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/* XXX: we use explicit registers to avoid a gcc 2.95.2 register asm - clobber bug - now it will work with 2.95.2 and also with -fPIC - */ -static void DEF(put_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%1, %3), %%mm1 \n\t" - PAVGB" 1(%1), %%mm0 \n\t" - PAVGB" 1(%1, %3), %%mm1 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%1, %3), %%mm1 \n\t" - PAVGB" 1(%1), %%mm0 \n\t" - PAVGB" 1(%1, %3), %%mm1 \n\t" - "add %%"REG_a", %1 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r" ((x86_reg)line_size) - :"%"REG_a, "memory"); -} - -static void DEF(put_pixels4_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - __asm__ volatile( - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movd (%1), %%mm0 \n\t" - "movd (%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "add $4, %2 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - "movd %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - "1: \n\t" - "movd (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movd (%1), %%mm1 \n\t" - "movd (%2), %%mm2 \n\t" - "movd 4(%2), %%mm3 \n\t" - "add %4, %1 \n\t" - PAVGB" %%mm2, %%mm0 \n\t" - PAVGB" %%mm3, %%mm1 \n\t" - "movd %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "movd %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "movd (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movd (%1), %%mm1 \n\t" - "movd 8(%2), %%mm2 \n\t" - "movd 12(%2), %%mm3 \n\t" - "add %4, %1 \n\t" - PAVGB" %%mm2, %%mm0 \n\t" - PAVGB" %%mm3, %%mm1 \n\t" - "movd %%mm0, (%3) \n\t" - "add 
%5, %3 \n\t" - "movd %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "add $16, %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" -#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) - :"memory"); -} - - -static void DEF(put_pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - __asm__ volatile( - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "add $8, %2 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - "movq %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movq (%1), %%mm1 \n\t" - "add %4, %1 \n\t" - PAVGB" (%2), %%mm0 \n\t" - PAVGB" 8(%2), %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "movq %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "movq (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movq (%1), %%mm1 \n\t" - "add %4, %1 \n\t" - PAVGB" 16(%2), %%mm0 \n\t" - PAVGB" 24(%2), %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "movq %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "add $32, %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" -#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) - :"memory"); -//the following should be used, though better not with gcc ... -/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) - :"r"(src1Stride), "r"(dstStride) - :"memory");*/ -} - -static void DEF(put_no_rnd_pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - __asm__ volatile( - "pcmpeqb %%mm6, %%mm6 \n\t" - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "add $8, %2 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "movq %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movq (%1), %%mm1 \n\t" - "add %4, %1 \n\t" - "movq (%2), %%mm2 \n\t" - "movq 8(%2), %%mm3 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "pxor %%mm6, %%mm2 \n\t" - "pxor %%mm6, %%mm3 \n\t" - PAVGB" %%mm2, %%mm0 \n\t" - PAVGB" %%mm3, %%mm1 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "movq %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "movq (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movq (%1), %%mm1 \n\t" - "add %4, %1 \n\t" - "movq 16(%2), %%mm2 \n\t" - "movq 24(%2), %%mm3 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "pxor %%mm6, %%mm2 \n\t" - "pxor %%mm6, %%mm3 \n\t" - PAVGB" %%mm2, %%mm0 \n\t" - PAVGB" %%mm3, %%mm1 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "movq %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "add $32, %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" -#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) - :"memory"); -//the following should be used, though better 
not with gcc ... -/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) - :"r"(src1Stride), "r"(dstStride) - :"memory");*/ -} - -static void DEF(avg_pixels4_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - __asm__ volatile( - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movd (%1), %%mm0 \n\t" - "movd (%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "add $4, %2 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" (%3), %%mm0 \n\t" - "movd %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - "1: \n\t" - "movd (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movd (%1), %%mm1 \n\t" - "add %4, %1 \n\t" - PAVGB" (%2), %%mm0 \n\t" - PAVGB" 4(%2), %%mm1 \n\t" - PAVGB" (%3), %%mm0 \n\t" - "movd %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - PAVGB" (%3), %%mm1 \n\t" - "movd %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "movd (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movd (%1), %%mm1 \n\t" - "add %4, %1 \n\t" - PAVGB" 8(%2), %%mm0 \n\t" - PAVGB" 12(%2), %%mm1 \n\t" - PAVGB" (%3), %%mm0 \n\t" - "movd %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - PAVGB" (%3), %%mm1 \n\t" - "movd %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "add $16, %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" -#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) - :"memory"); -} - - -static void DEF(avg_pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - __asm__ volatile( - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "add $8, %2 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" (%3), %%mm0 \n\t" - "movq %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movq (%1), %%mm1 \n\t" - "add %4, %1 \n\t" - PAVGB" (%2), %%mm0 \n\t" - PAVGB" 8(%2), %%mm1 \n\t" - PAVGB" (%3), %%mm0 \n\t" - "movq %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - PAVGB" (%3), %%mm1 \n\t" - "movq %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "movq (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movq (%1), %%mm1 \n\t" - "add %4, %1 \n\t" - PAVGB" 16(%2), %%mm0 \n\t" - PAVGB" 24(%2), %%mm1 \n\t" - PAVGB" (%3), %%mm0 \n\t" - "movq %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - PAVGB" (%3), %%mm1 \n\t" - "movq %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "add $32, %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" -#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) - :"memory"); -//the following should be used, though better not with gcc ... 
-/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) - :"r"(src1Stride), "r"(dstStride) - :"memory");*/ -} - -static void DEF(put_pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq 8(%1), %%mm2 \n\t" - "movq 8(%1, %3), %%mm3 \n\t" - PAVGB" 1(%1), %%mm0 \n\t" - PAVGB" 1(%1, %3), %%mm1 \n\t" - PAVGB" 9(%1), %%mm2 \n\t" - PAVGB" 9(%1, %3), %%mm3 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "movq %%mm2, 8(%2) \n\t" - "movq %%mm3, 8(%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq 8(%1), %%mm2 \n\t" - "movq 8(%1, %3), %%mm3 \n\t" - PAVGB" 1(%1), %%mm0 \n\t" - PAVGB" 1(%1, %3), %%mm1 \n\t" - PAVGB" 9(%1), %%mm2 \n\t" - PAVGB" 9(%1, %3), %%mm3 \n\t" - "add %%"REG_a", %1 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "movq %%mm2, 8(%2) \n\t" - "movq %%mm3, 8(%2, %3) \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r" ((x86_reg)line_size) - :"%"REG_a, "memory"); -} - -static void DEF(put_pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - __asm__ volatile( - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm1 \n\t" - PAVGB" (%2), %%mm0 \n\t" - PAVGB" 8(%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "add $16, %2 \n\t" - "movq %%mm0, (%3) \n\t" - "movq %%mm1, 8(%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm1 \n\t" - "add %4, %1 \n\t" - PAVGB" (%2), %%mm0 \n\t" - PAVGB" 8(%2), %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "movq %%mm1, 8(%3) \n\t" - "add %5, %3 \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm1 \n\t" - "add %4, %1 \n\t" - PAVGB" 16(%2), %%mm0 \n\t" - PAVGB" 24(%2), %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "movq %%mm1, 8(%3) \n\t" - "add %5, %3 \n\t" - "add $32, %2 \n\t" - "subl $2, %0 \n\t" - "jnz 1b \n\t" -#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) - :"memory"); -//the following should be used, though better not with gcc ... 
-/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) - :"r"(src1Stride), "r"(dstStride) - :"memory");*/ -} - -static void DEF(avg_pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - __asm__ volatile( - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm1 \n\t" - PAVGB" (%2), %%mm0 \n\t" - PAVGB" 8(%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "add $16, %2 \n\t" - PAVGB" (%3), %%mm0 \n\t" - PAVGB" 8(%3), %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "movq %%mm1, 8(%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm1 \n\t" - "add %4, %1 \n\t" - PAVGB" (%2), %%mm0 \n\t" - PAVGB" 8(%2), %%mm1 \n\t" - PAVGB" (%3), %%mm0 \n\t" - PAVGB" 8(%3), %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "movq %%mm1, 8(%3) \n\t" - "add %5, %3 \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm1 \n\t" - "add %4, %1 \n\t" - PAVGB" 16(%2), %%mm0 \n\t" - PAVGB" 24(%2), %%mm1 \n\t" - PAVGB" (%3), %%mm0 \n\t" - PAVGB" 8(%3), %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "movq %%mm1, 8(%3) \n\t" - "add %5, %3 \n\t" - "add $32, %2 \n\t" - "subl $2, %0 \n\t" - "jnz 1b \n\t" -#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) - :"memory"); -//the following should be used, though better not with gcc ... -/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) - :"r"(src1Stride), "r"(dstStride) - :"memory");*/ -} - -static void DEF(put_no_rnd_pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - __asm__ volatile( - "pcmpeqb %%mm6, %%mm6 \n\t" - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm1 \n\t" - "movq (%2), %%mm2 \n\t" - "movq 8(%2), %%mm3 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "pxor %%mm6, %%mm2 \n\t" - "pxor %%mm6, %%mm3 \n\t" - PAVGB" %%mm2, %%mm0 \n\t" - PAVGB" %%mm3, %%mm1 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "add %4, %1 \n\t" - "add $16, %2 \n\t" - "movq %%mm0, (%3) \n\t" - "movq %%mm1, 8(%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm1 \n\t" - "add %4, %1 \n\t" - "movq (%2), %%mm2 \n\t" - "movq 8(%2), %%mm3 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "pxor %%mm6, %%mm2 \n\t" - "pxor %%mm6, %%mm3 \n\t" - PAVGB" %%mm2, %%mm0 \n\t" - PAVGB" %%mm3, %%mm1 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "movq %%mm1, 8(%3) \n\t" - "add %5, %3 \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm1 \n\t" - "add %4, %1 \n\t" - "movq 16(%2), %%mm2 \n\t" - "movq 24(%2), %%mm3 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "pxor %%mm6, %%mm2 \n\t" - "pxor %%mm6, %%mm3 \n\t" - PAVGB" %%mm2, %%mm0 \n\t" - PAVGB" %%mm3, %%mm1 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "movq %%mm1, 8(%3) \n\t" - "add %5, %3 \n\t" - "add $32, %2 \n\t" - "subl $2, %0 \n\t" - "jnz 1b \n\t" -#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) - :"memory"); -//the following should be used, though better not with gcc ... 
-/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) - :"r"(src1Stride), "r"(dstStride) - :"memory");*/ -} - -/* GL: this function does incorrect rounding if overflow */ -static void DEF(put_no_rnd_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BONE(mm6); - __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%1, %3), %%mm2 \n\t" - "movq 1(%1), %%mm1 \n\t" - "movq 1(%1, %3), %%mm3 \n\t" - "add %%"REG_a", %1 \n\t" - "psubusb %%mm6, %%mm0 \n\t" - "psubusb %%mm6, %%mm2 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" %%mm3, %%mm2 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm2, (%2, %3) \n\t" - "movq (%1), %%mm0 \n\t" - "movq 1(%1), %%mm1 \n\t" - "movq (%1, %3), %%mm2 \n\t" - "movq 1(%1, %3), %%mm3 \n\t" - "add %%"REG_a", %2 \n\t" - "add %%"REG_a", %1 \n\t" - "psubusb %%mm6, %%mm0 \n\t" - "psubusb %%mm6, %%mm2 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" %%mm3, %%mm2 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm2, (%2, %3) \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r" ((x86_reg)line_size) - :"%"REG_a, "memory"); -} - -static void DEF(put_no_rnd_pixels8_x2_exact)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - __asm__ volatile ( - "pcmpeqb %%mm6, %%mm6 \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%1, %3), %%mm2 \n\t" - "movq 1(%1), %%mm1 \n\t" - "movq 1(%1, %3), %%mm3 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm2 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "pxor %%mm6, %%mm3 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" %%mm3, %%mm2 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm2 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm2, (%2, %3) \n\t" - "movq (%1, %3,2), %%mm0 \n\t" - "movq 1(%1, %3,2), %%mm1 \n\t" - "movq (%1, %4), %%mm2 \n\t" - "movq 1(%1, %4), %%mm3 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "pxor %%mm6, %%mm2 \n\t" - "pxor %%mm6, %%mm3 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" %%mm3, %%mm2 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm2 \n\t" - "movq %%mm0, (%2, %3,2) \n\t" - "movq %%mm2, (%2, %4) \n\t" - "lea (%1, %3,4), %1 \n\t" - "lea (%2, %3,4), %2 \n\t" - "subl $4, %0 \n\t" - "jg 1b \n\t" - : "+g"(h), "+r"(pixels), "+r"(block) - : "r" ((x86_reg)line_size), "r"((x86_reg)3*line_size) - : "memory" - ); -} - -static void DEF(put_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "movq (%1), %%mm0 \n\t" - "sub %3, %2 \n\t" - "1: \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm2 \n\t" - "add %%"REG_a", %1 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" %%mm2, %%mm1 \n\t" - "movq %%mm0, (%2, %3) \n\t" - "movq %%mm1, (%2, %%"REG_a") \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - "add %%"REG_a", %2 \n\t" - "add %%"REG_a", %1 \n\t" - PAVGB" %%mm1, %%mm2 \n\t" - PAVGB" %%mm0, %%mm1 \n\t" - "movq %%mm2, (%2, %3) \n\t" - "movq %%mm1, (%2, %%"REG_a") \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D" (block) - :"r" ((x86_reg)line_size) - :"%"REG_a, "memory"); -} - -/* GL: this function does incorrect rounding if overflow */ -static void DEF(put_no_rnd_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BONE(mm6); - __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "movq (%1), %%mm0 \n\t" - "sub %3, %2 \n\t" - "1: \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm2 \n\t" - "add %%"REG_a", %1 
\n\t" - "psubusb %%mm6, %%mm1 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" %%mm2, %%mm1 \n\t" - "movq %%mm0, (%2, %3) \n\t" - "movq %%mm1, (%2, %%"REG_a") \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - "add %%"REG_a", %2 \n\t" - "add %%"REG_a", %1 \n\t" - "psubusb %%mm6, %%mm1 \n\t" - PAVGB" %%mm1, %%mm2 \n\t" - PAVGB" %%mm0, %%mm1 \n\t" - "movq %%mm2, (%2, %3) \n\t" - "movq %%mm1, (%2, %%"REG_a") \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D" (block) - :"r" ((x86_reg)line_size) - :"%"REG_a, "memory"); -} - -static void DEF(put_no_rnd_pixels8_y2_exact)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - __asm__ volatile ( - "movq (%1), %%mm0 \n\t" - "pcmpeqb %%mm6, %%mm6 \n\t" - "add %3, %1 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "1: \n\t" - "movq (%1), %%mm1 \n\t" - "movq (%1, %3), %%mm2 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "pxor %%mm6, %%mm2 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" %%mm2, %%mm1 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "movq (%1, %3,2), %%mm1 \n\t" - "movq (%1, %4), %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "pxor %%mm6, %%mm0 \n\t" - PAVGB" %%mm1, %%mm2 \n\t" - PAVGB" %%mm0, %%mm1 \n\t" - "pxor %%mm6, %%mm2 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "movq %%mm2, (%2, %3,2) \n\t" - "movq %%mm1, (%2, %4) \n\t" - "lea (%1, %3,4), %1 \n\t" - "lea (%2, %3,4), %2 \n\t" - "subl $4, %0 \n\t" - "jg 1b \n\t" - :"+g"(h), "+r"(pixels), "+r" (block) - :"r" ((x86_reg)line_size), "r"((x86_reg)3*line_size) - :"memory" - ); -} - -static void DEF(avg_pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "1: \n\t" - "movq (%2), %%mm0 \n\t" - "movq (%2, %3), %%mm1 \n\t" - PAVGB" (%1), %%mm0 \n\t" - PAVGB" (%1, %3), %%mm1 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "movq (%2), %%mm0 \n\t" - "movq (%2, %3), %%mm1 \n\t" - PAVGB" (%1), %%mm0 \n\t" - PAVGB" (%1, %3), %%mm1 \n\t" - "add %%"REG_a", %1 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r" ((x86_reg)line_size) - :"%"REG_a, "memory"); -} - -static void DEF(avg_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%1, %3), %%mm2 \n\t" - PAVGB" 1(%1), %%mm0 \n\t" - PAVGB" 1(%1, %3), %%mm2 \n\t" - PAVGB" (%2), %%mm0 \n\t" - PAVGB" (%2, %3), %%mm2 \n\t" - "add %%"REG_a", %1 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm2, (%2, %3) \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%1, %3), %%mm2 \n\t" - PAVGB" 1(%1), %%mm0 \n\t" - PAVGB" 1(%1, %3), %%mm2 \n\t" - "add %%"REG_a", %2 \n\t" - "add %%"REG_a", %1 \n\t" - PAVGB" (%2), %%mm0 \n\t" - PAVGB" (%2, %3), %%mm2 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm2, (%2, %3) \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r" ((x86_reg)line_size) - :"%"REG_a, "memory"); -} - -static void DEF(avg_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "movq (%1), %%mm0 \n\t" - "sub %3, %2 \n\t" - "1: \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm2 \n\t" - "add %%"REG_a", %1 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" 
%%mm2, %%mm1 \n\t" - "movq (%2, %3), %%mm3 \n\t" - "movq (%2, %%"REG_a"), %%mm4 \n\t" - PAVGB" %%mm3, %%mm0 \n\t" - PAVGB" %%mm4, %%mm1 \n\t" - "movq %%mm0, (%2, %3) \n\t" - "movq %%mm1, (%2, %%"REG_a") \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - PAVGB" %%mm1, %%mm2 \n\t" - PAVGB" %%mm0, %%mm1 \n\t" - "add %%"REG_a", %2 \n\t" - "add %%"REG_a", %1 \n\t" - "movq (%2, %3), %%mm3 \n\t" - "movq (%2, %%"REG_a"), %%mm4 \n\t" - PAVGB" %%mm3, %%mm2 \n\t" - PAVGB" %%mm4, %%mm1 \n\t" - "movq %%mm2, (%2, %3) \n\t" - "movq %%mm1, (%2, %%"REG_a") \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r" ((x86_reg)line_size) - :"%"REG_a, "memory"); -} - -/* Note this is not correctly rounded, but this function is only - * used for B-frames so it does not matter. */ -static void DEF(avg_pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BONE(mm6); - __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "movq (%1), %%mm0 \n\t" - PAVGB" 1(%1), %%mm0 \n\t" - ".p2align 3 \n\t" - "1: \n\t" - "movq (%1, %%"REG_a"), %%mm2 \n\t" - "movq (%1, %3), %%mm1 \n\t" - "psubusb %%mm6, %%mm2 \n\t" - PAVGB" 1(%1, %3), %%mm1 \n\t" - PAVGB" 1(%1, %%"REG_a"), %%mm2 \n\t" - "add %%"REG_a", %1 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" %%mm2, %%mm1 \n\t" - PAVGB" (%2), %%mm0 \n\t" - PAVGB" (%2, %3), %%mm1 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - PAVGB" 1(%1, %3), %%mm1 \n\t" - PAVGB" 1(%1, %%"REG_a"), %%mm0 \n\t" - "add %%"REG_a", %2 \n\t" - "add %%"REG_a", %1 \n\t" - PAVGB" %%mm1, %%mm2 \n\t" - PAVGB" %%mm0, %%mm1 \n\t" - PAVGB" (%2), %%mm2 \n\t" - PAVGB" (%2, %3), %%mm1 \n\t" - "movq %%mm2, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r" ((x86_reg)line_size) - :"%"REG_a, "memory"); -} - -static void DEF(avg_pixels4)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - do { - __asm__ volatile( - "movd (%1), %%mm0 \n\t" - "movd (%1, %2), %%mm1 \n\t" - "movd (%1, %2, 2), %%mm2 \n\t" - "movd (%1, %3), %%mm3 \n\t" - PAVGB" (%0), %%mm0 \n\t" - PAVGB" (%0, %2), %%mm1 \n\t" - PAVGB" (%0, %2, 2), %%mm2 \n\t" - PAVGB" (%0, %3), %%mm3 \n\t" - "movd %%mm0, (%1) \n\t" - "movd %%mm1, (%1, %2) \n\t" - "movd %%mm2, (%1, %2, 2) \n\t" - "movd %%mm3, (%1, %3) \n\t" - ::"S"(pixels), "D"(block), - "r" ((x86_reg)line_size), "r"((x86_reg)3L*line_size) - :"memory"); - block += 4*line_size; - pixels += 4*line_size; - h -= 4; - } while(h > 0); -} - -//FIXME the following could be optimized too ... 
-static void DEF(put_no_rnd_pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(put_no_rnd_pixels8_x2)(block , pixels , line_size, h); - DEF(put_no_rnd_pixels8_x2)(block+8, pixels+8, line_size, h); -} -static void DEF(put_pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(put_pixels8_y2)(block , pixels , line_size, h); - DEF(put_pixels8_y2)(block+8, pixels+8, line_size, h); -} -static void DEF(put_no_rnd_pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(put_no_rnd_pixels8_y2)(block , pixels , line_size, h); - DEF(put_no_rnd_pixels8_y2)(block+8, pixels+8, line_size, h); -} -static void DEF(avg_pixels16)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(avg_pixels8)(block , pixels , line_size, h); - DEF(avg_pixels8)(block+8, pixels+8, line_size, h); -} -static void DEF(avg_pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(avg_pixels8_x2)(block , pixels , line_size, h); - DEF(avg_pixels8_x2)(block+8, pixels+8, line_size, h); -} -static void DEF(avg_pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(avg_pixels8_y2)(block , pixels , line_size, h); - DEF(avg_pixels8_y2)(block+8, pixels+8, line_size, h); -} -static void DEF(avg_pixels16_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(avg_pixels8_xy2)(block , pixels , line_size, h); - DEF(avg_pixels8_xy2)(block+8, pixels+8, line_size, h); -} - -#define QPEL_2TAP_L3(OPNAME) \ -static void DEF(OPNAME ## 2tap_qpel16_l3)(uint8_t *dst, uint8_t *src, int stride, int h, int off1, int off2){\ - __asm__ volatile(\ - "1: \n\t"\ - "movq (%1,%2), %%mm0 \n\t"\ - "movq 8(%1,%2), %%mm1 \n\t"\ - PAVGB" (%1,%3), %%mm0 \n\t"\ - PAVGB" 8(%1,%3), %%mm1 \n\t"\ - PAVGB" (%1), %%mm0 \n\t"\ - PAVGB" 8(%1), %%mm1 \n\t"\ - STORE_OP( (%1,%4),%%mm0)\ - STORE_OP(8(%1,%4),%%mm1)\ - "movq %%mm0, (%1,%4) \n\t"\ - "movq %%mm1, 8(%1,%4) \n\t"\ - "add %5, %1 \n\t"\ - "decl %0 \n\t"\ - "jnz 1b \n\t"\ - :"+g"(h), "+r"(src)\ - :"r"((x86_reg)off1), "r"((x86_reg)off2),\ - "r"((x86_reg)(dst-src)), "r"((x86_reg)stride)\ - :"memory"\ - );\ -}\ -static void DEF(OPNAME ## 2tap_qpel8_l3)(uint8_t *dst, uint8_t *src, int stride, int h, int off1, int off2){\ - __asm__ volatile(\ - "1: \n\t"\ - "movq (%1,%2), %%mm0 \n\t"\ - PAVGB" (%1,%3), %%mm0 \n\t"\ - PAVGB" (%1), %%mm0 \n\t"\ - STORE_OP((%1,%4),%%mm0)\ - "movq %%mm0, (%1,%4) \n\t"\ - "add %5, %1 \n\t"\ - "decl %0 \n\t"\ - "jnz 1b \n\t"\ - :"+g"(h), "+r"(src)\ - :"r"((x86_reg)off1), "r"((x86_reg)off2),\ - "r"((x86_reg)(dst-src)), "r"((x86_reg)stride)\ - :"memory"\ - );\ -} - -#define STORE_OP(a,b) PAVGB" "#a","#b" \n\t" -QPEL_2TAP_L3(avg_) -#undef STORE_OP -#define STORE_OP(a,b) -QPEL_2TAP_L3(put_) -#undef STORE_OP -#undef QPEL_2TAP_L3 diff --git a/libavcodec/x86/dsputil_mmx_qns_template.c b/libavcodec/x86/dsputil_mmx_qns_template.c deleted file mode 100644 index 20a40a175e..0000000000 --- a/libavcodec/x86/dsputil_mmx_qns_template.c +++ /dev/null @@ -1,101 +0,0 @@ -/* - * DSP utils : QNS functions are compiled 3 times for mmx/3dnow/ssse3 - * Copyright (c) 2004 Michael Niedermayer - * - * MMX optimization by Michael Niedermayer - * 3DNow! and SSSE3 optimization by Zuxy Meng - * - * This file is part of Libav. 
- * - * Libav is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * Libav is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#define MAX_ABS (512 >> (SCALE_OFFSET>0 ? SCALE_OFFSET : 0)) - -static int DEF(try_8x8basis)(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale) -{ - x86_reg i=0; - - assert(FFABS(scale) < MAX_ABS); - scale<<= 16 + SCALE_OFFSET - BASIS_SHIFT + RECON_SHIFT; - - SET_RND(mm6); - __asm__ volatile( - "pxor %%mm7, %%mm7 \n\t" - "movd %4, %%mm5 \n\t" - "punpcklwd %%mm5, %%mm5 \n\t" - "punpcklwd %%mm5, %%mm5 \n\t" - ".p2align 4 \n\t" - "1: \n\t" - "movq (%1, %0), %%mm0 \n\t" - "movq 8(%1, %0), %%mm1 \n\t" - PMULHRW(%%mm0, %%mm1, %%mm5, %%mm6) - "paddw (%2, %0), %%mm0 \n\t" - "paddw 8(%2, %0), %%mm1 \n\t" - "psraw $6, %%mm0 \n\t" - "psraw $6, %%mm1 \n\t" - "pmullw (%3, %0), %%mm0 \n\t" - "pmullw 8(%3, %0), %%mm1 \n\t" - "pmaddwd %%mm0, %%mm0 \n\t" - "pmaddwd %%mm1, %%mm1 \n\t" - "paddd %%mm1, %%mm0 \n\t" - "psrld $4, %%mm0 \n\t" - "paddd %%mm0, %%mm7 \n\t" - "add $16, %0 \n\t" - "cmp $128, %0 \n\t" //FIXME optimize & bench - " jb 1b \n\t" - PHADDD(%%mm7, %%mm6) - "psrld $2, %%mm7 \n\t" - "movd %%mm7, %0 \n\t" - - : "+r" (i) - : "r"(basis), "r"(rem), "r"(weight), "g"(scale) - ); - return i; -} - -static void DEF(add_8x8basis)(int16_t rem[64], int16_t basis[64], int scale) -{ - x86_reg i=0; - - if(FFABS(scale) < MAX_ABS){ - scale<<= 16 + SCALE_OFFSET - BASIS_SHIFT + RECON_SHIFT; - SET_RND(mm6); - __asm__ volatile( - "movd %3, %%mm5 \n\t" - "punpcklwd %%mm5, %%mm5 \n\t" - "punpcklwd %%mm5, %%mm5 \n\t" - ".p2align 4 \n\t" - "1: \n\t" - "movq (%1, %0), %%mm0 \n\t" - "movq 8(%1, %0), %%mm1 \n\t" - PMULHRW(%%mm0, %%mm1, %%mm5, %%mm6) - "paddw (%2, %0), %%mm0 \n\t" - "paddw 8(%2, %0), %%mm1 \n\t" - "movq %%mm0, (%2, %0) \n\t" - "movq %%mm1, 8(%2, %0) \n\t" - "add $16, %0 \n\t" - "cmp $128, %0 \n\t" // FIXME optimize & bench - " jb 1b \n\t" - - : "+r" (i) - : "r"(basis), "r"(rem), "g"(scale) - ); - }else{ - for(i=0; i<8*8; i++){ - rem[i] += (basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT); - } - } -} diff --git a/libavcodec/x86/dsputil_mmx_rnd_template.c b/libavcodec/x86/dsputil_mmx_rnd_template.c deleted file mode 100644 index 34a2c0bca8..0000000000 --- a/libavcodec/x86/dsputil_mmx_rnd_template.c +++ /dev/null @@ -1,590 +0,0 @@ -/* - * DSP utils mmx functions are compiled twice for rnd/no_rnd - * Copyright (c) 2000, 2001 Fabrice Bellard - * Copyright (c) 2003-2004 Michael Niedermayer - * - * MMX optimization by Nick Kurshev - * mostly rewritten by Michael Niedermayer - * and improved by Zdenek Kabelac - * - * This file is part of Libav. - * - * Libav is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. 
- * - * Libav is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -// put_pixels -static void DEF(put, pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BFE(mm6); - __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" - ".p2align 3 \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq 1(%1), %%mm1 \n\t" - "movq (%1, %3), %%mm2 \n\t" - "movq 1(%1, %3), %%mm3 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, (%2) \n\t" - "movq %%mm5, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "movq (%1), %%mm0 \n\t" - "movq 1(%1), %%mm1 \n\t" - "movq (%1, %3), %%mm2 \n\t" - "movq 1(%1, %3), %%mm3 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, (%2) \n\t" - "movq %%mm5, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r"((x86_reg)line_size) - :REG_a, "memory"); -} - -static void av_unused DEF(put, pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - MOVQ_BFE(mm6); - __asm__ volatile( - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "add $8, %2 \n\t" - PAVGB(%%mm0, %%mm1, %%mm4, %%mm6) - "movq %%mm4, (%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - ".p2align 3 \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "movq (%1), %%mm2 \n\t" - "movq 8(%2), %%mm3 \n\t" - "add %4, %1 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, (%3) \n\t" - "add %5, %3 \n\t" - "movq %%mm5, (%3) \n\t" - "add %5, %3 \n\t" - "movq (%1), %%mm0 \n\t" - "movq 16(%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "movq (%1), %%mm2 \n\t" - "movq 24(%2), %%mm3 \n\t" - "add %4, %1 \n\t" - "add $32, %2 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, (%3) \n\t" - "add %5, %3 \n\t" - "movq %%mm5, (%3) \n\t" - "add %5, %3 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" -#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) - :"memory"); -} - -static void DEF(put, pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BFE(mm6); - __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" - ".p2align 3 \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq 1(%1), %%mm1 \n\t" - "movq (%1, %3), %%mm2 \n\t" - "movq 1(%1, %3), %%mm3 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, (%2) \n\t" - "movq %%mm5, (%2, %3) \n\t" - "movq 8(%1), %%mm0 \n\t" - "movq 9(%1), %%mm1 \n\t" - "movq 8(%1, %3), %%mm2 \n\t" - "movq 9(%1, %3), %%mm3 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, 8(%2) \n\t" - "movq %%mm5, 8(%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "movq (%1), %%mm0 \n\t" - "movq 1(%1), %%mm1 \n\t" - "movq (%1, %3), %%mm2 \n\t" - "movq 1(%1, %3), %%mm3 \n\t" - PAVGBP(%%mm0, 
%%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, (%2) \n\t" - "movq %%mm5, (%2, %3) \n\t" - "movq 8(%1), %%mm0 \n\t" - "movq 9(%1), %%mm1 \n\t" - "movq 8(%1, %3), %%mm2 \n\t" - "movq 9(%1, %3), %%mm3 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, 8(%2) \n\t" - "movq %%mm5, 8(%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r"((x86_reg)line_size) - :REG_a, "memory"); -} - -static void av_unused DEF(put, pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - MOVQ_BFE(mm6); - __asm__ volatile( - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%2), %%mm1 \n\t" - "movq 8(%1), %%mm2 \n\t" - "movq 8(%2), %%mm3 \n\t" - "add %4, %1 \n\t" - "add $16, %2 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, (%3) \n\t" - "movq %%mm5, 8(%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - ".p2align 3 \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%2), %%mm1 \n\t" - "movq 8(%1), %%mm2 \n\t" - "movq 8(%2), %%mm3 \n\t" - "add %4, %1 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, (%3) \n\t" - "movq %%mm5, 8(%3) \n\t" - "add %5, %3 \n\t" - "movq (%1), %%mm0 \n\t" - "movq 16(%2), %%mm1 \n\t" - "movq 8(%1), %%mm2 \n\t" - "movq 24(%2), %%mm3 \n\t" - "add %4, %1 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, (%3) \n\t" - "movq %%mm5, 8(%3) \n\t" - "add %5, %3 \n\t" - "add $32, %2 \n\t" - "subl $2, %0 \n\t" - "jnz 1b \n\t" -#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) - :"memory"); -} - -static void DEF(put, pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BFE(mm6); - __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "movq (%1), %%mm0 \n\t" - ".p2align 3 \n\t" - "1: \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"),%%mm2 \n\t" - PAVGBP(%%mm1, %%mm0, %%mm4, %%mm2, %%mm1, %%mm5) - "movq %%mm4, (%2) \n\t" - "movq %%mm5, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"),%%mm0 \n\t" - PAVGBP(%%mm1, %%mm2, %%mm4, %%mm0, %%mm1, %%mm5) - "movq %%mm4, (%2) \n\t" - "movq %%mm5, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r"((x86_reg)line_size) - :REG_a, "memory"); -} - -static void DEF(put, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_ZERO(mm7); - SET_RND(mm6); // =2 for rnd and =1 for no_rnd version - __asm__ volatile( - "movq (%1), %%mm0 \n\t" - "movq 1(%1), %%mm4 \n\t" - "movq %%mm0, %%mm1 \n\t" - "movq %%mm4, %%mm5 \n\t" - "punpcklbw %%mm7, %%mm0 \n\t" - "punpcklbw %%mm7, %%mm4 \n\t" - "punpckhbw %%mm7, %%mm1 \n\t" - "punpckhbw %%mm7, %%mm5 \n\t" - "paddusw %%mm0, %%mm4 \n\t" - "paddusw %%mm1, %%mm5 \n\t" - "xor %%"REG_a", %%"REG_a" \n\t" - "add %3, %1 \n\t" - ".p2align 3 \n\t" - "1: \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - "movq 1(%1, %%"REG_a"), %%mm2 \n\t" - "movq %%mm0, %%mm1 \n\t" - "movq %%mm2, %%mm3 \n\t" - "punpcklbw %%mm7, %%mm0 \n\t" - "punpcklbw %%mm7, %%mm2 \n\t" - "punpckhbw %%mm7, %%mm1 \n\t" - "punpckhbw %%mm7, %%mm3 \n\t" - "paddusw %%mm2, %%mm0 \n\t" - "paddusw 
%%mm3, %%mm1 \n\t" - "paddusw %%mm6, %%mm4 \n\t" - "paddusw %%mm6, %%mm5 \n\t" - "paddusw %%mm0, %%mm4 \n\t" - "paddusw %%mm1, %%mm5 \n\t" - "psrlw $2, %%mm4 \n\t" - "psrlw $2, %%mm5 \n\t" - "packuswb %%mm5, %%mm4 \n\t" - "movq %%mm4, (%2, %%"REG_a") \n\t" - "add %3, %%"REG_a" \n\t" - - "movq (%1, %%"REG_a"), %%mm2 \n\t" // 0 <-> 2 1 <-> 3 - "movq 1(%1, %%"REG_a"), %%mm4 \n\t" - "movq %%mm2, %%mm3 \n\t" - "movq %%mm4, %%mm5 \n\t" - "punpcklbw %%mm7, %%mm2 \n\t" - "punpcklbw %%mm7, %%mm4 \n\t" - "punpckhbw %%mm7, %%mm3 \n\t" - "punpckhbw %%mm7, %%mm5 \n\t" - "paddusw %%mm2, %%mm4 \n\t" - "paddusw %%mm3, %%mm5 \n\t" - "paddusw %%mm6, %%mm0 \n\t" - "paddusw %%mm6, %%mm1 \n\t" - "paddusw %%mm4, %%mm0 \n\t" - "paddusw %%mm5, %%mm1 \n\t" - "psrlw $2, %%mm0 \n\t" - "psrlw $2, %%mm1 \n\t" - "packuswb %%mm1, %%mm0 \n\t" - "movq %%mm0, (%2, %%"REG_a") \n\t" - "add %3, %%"REG_a" \n\t" - - "subl $2, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels) - :"D"(block), "r"((x86_reg)line_size) - :REG_a, "memory"); -} - -// avg_pixels -static void av_unused DEF(avg, pixels4)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BFE(mm6); - JUMPALIGN(); - do { - __asm__ volatile( - "movd %0, %%mm0 \n\t" - "movd %1, %%mm1 \n\t" - OP_AVG(%%mm0, %%mm1, %%mm2, %%mm6) - "movd %%mm2, %0 \n\t" - :"+m"(*block) - :"m"(*pixels) - :"memory"); - pixels += line_size; - block += line_size; - } - while (--h); -} - -// in case more speed is needed - unroling would certainly help -static void DEF(avg, pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BFE(mm6); - JUMPALIGN(); - do { - __asm__ volatile( - "movq %0, %%mm0 \n\t" - "movq %1, %%mm1 \n\t" - OP_AVG(%%mm0, %%mm1, %%mm2, %%mm6) - "movq %%mm2, %0 \n\t" - :"+m"(*block) - :"m"(*pixels) - :"memory"); - pixels += line_size; - block += line_size; - } - while (--h); -} - -static void DEF(avg, pixels16)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BFE(mm6); - JUMPALIGN(); - do { - __asm__ volatile( - "movq %0, %%mm0 \n\t" - "movq %1, %%mm1 \n\t" - OP_AVG(%%mm0, %%mm1, %%mm2, %%mm6) - "movq %%mm2, %0 \n\t" - "movq 8%0, %%mm0 \n\t" - "movq 8%1, %%mm1 \n\t" - OP_AVG(%%mm0, %%mm1, %%mm2, %%mm6) - "movq %%mm2, 8%0 \n\t" - :"+m"(*block) - :"m"(*pixels) - :"memory"); - pixels += line_size; - block += line_size; - } - while (--h); -} - -static void DEF(avg, pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BFE(mm6); - JUMPALIGN(); - do { - __asm__ volatile( - "movq %1, %%mm0 \n\t" - "movq 1%1, %%mm1 \n\t" - "movq %0, %%mm3 \n\t" - PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) - OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) - "movq %%mm0, %0 \n\t" - :"+m"(*block) - :"m"(*pixels) - :"memory"); - pixels += line_size; - block += line_size; - } while (--h); -} - -static av_unused void DEF(avg, pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - MOVQ_BFE(mm6); - JUMPALIGN(); - do { - __asm__ volatile( - "movq %1, %%mm0 \n\t" - "movq %2, %%mm1 \n\t" - "movq %0, %%mm3 \n\t" - PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) - OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) - "movq %%mm0, %0 \n\t" - :"+m"(*dst) - :"m"(*src1), "m"(*src2) - :"memory"); - dst += dstStride; - src1 += src1Stride; - src2 += 8; - } while (--h); -} - -static void DEF(avg, pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BFE(mm6); - JUMPALIGN(); - do { - __asm__ volatile( - "movq %1, %%mm0 \n\t" - "movq 1%1, %%mm1 \n\t" - "movq %0, %%mm3 \n\t" - PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) - 
OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) - "movq %%mm0, %0 \n\t" - "movq 8%1, %%mm0 \n\t" - "movq 9%1, %%mm1 \n\t" - "movq 8%0, %%mm3 \n\t" - PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) - OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) - "movq %%mm0, 8%0 \n\t" - :"+m"(*block) - :"m"(*pixels) - :"memory"); - pixels += line_size; - block += line_size; - } while (--h); -} - -static av_unused void DEF(avg, pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - MOVQ_BFE(mm6); - JUMPALIGN(); - do { - __asm__ volatile( - "movq %1, %%mm0 \n\t" - "movq %2, %%mm1 \n\t" - "movq %0, %%mm3 \n\t" - PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) - OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) - "movq %%mm0, %0 \n\t" - "movq 8%1, %%mm0 \n\t" - "movq 8%2, %%mm1 \n\t" - "movq 8%0, %%mm3 \n\t" - PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) - OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) - "movq %%mm0, 8%0 \n\t" - :"+m"(*dst) - :"m"(*src1), "m"(*src2) - :"memory"); - dst += dstStride; - src1 += src1Stride; - src2 += 16; - } while (--h); -} - -static void DEF(avg, pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BFE(mm6); - __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "movq (%1), %%mm0 \n\t" - ".p2align 3 \n\t" - "1: \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm2 \n\t" - PAVGBP(%%mm1, %%mm0, %%mm4, %%mm2, %%mm1, %%mm5) - "movq (%2), %%mm3 \n\t" - OP_AVG(%%mm3, %%mm4, %%mm0, %%mm6) - "movq (%2, %3), %%mm3 \n\t" - OP_AVG(%%mm3, %%mm5, %%mm1, %%mm6) - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - PAVGBP(%%mm1, %%mm2, %%mm4, %%mm0, %%mm1, %%mm5) - "movq (%2), %%mm3 \n\t" - OP_AVG(%%mm3, %%mm4, %%mm2, %%mm6) - "movq (%2, %3), %%mm3 \n\t" - OP_AVG(%%mm3, %%mm5, %%mm1, %%mm6) - "movq %%mm2, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r"((x86_reg)line_size) - :REG_a, "memory"); -} - -// this routine is 'slightly' suboptimal but mostly unused -static void DEF(avg, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_ZERO(mm7); - SET_RND(mm6); // =2 for rnd and =1 for no_rnd version - __asm__ volatile( - "movq (%1), %%mm0 \n\t" - "movq 1(%1), %%mm4 \n\t" - "movq %%mm0, %%mm1 \n\t" - "movq %%mm4, %%mm5 \n\t" - "punpcklbw %%mm7, %%mm0 \n\t" - "punpcklbw %%mm7, %%mm4 \n\t" - "punpckhbw %%mm7, %%mm1 \n\t" - "punpckhbw %%mm7, %%mm5 \n\t" - "paddusw %%mm0, %%mm4 \n\t" - "paddusw %%mm1, %%mm5 \n\t" - "xor %%"REG_a", %%"REG_a" \n\t" - "add %3, %1 \n\t" - ".p2align 3 \n\t" - "1: \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - "movq 1(%1, %%"REG_a"), %%mm2 \n\t" - "movq %%mm0, %%mm1 \n\t" - "movq %%mm2, %%mm3 \n\t" - "punpcklbw %%mm7, %%mm0 \n\t" - "punpcklbw %%mm7, %%mm2 \n\t" - "punpckhbw %%mm7, %%mm1 \n\t" - "punpckhbw %%mm7, %%mm3 \n\t" - "paddusw %%mm2, %%mm0 \n\t" - "paddusw %%mm3, %%mm1 \n\t" - "paddusw %%mm6, %%mm4 \n\t" - "paddusw %%mm6, %%mm5 \n\t" - "paddusw %%mm0, %%mm4 \n\t" - "paddusw %%mm1, %%mm5 \n\t" - "psrlw $2, %%mm4 \n\t" - "psrlw $2, %%mm5 \n\t" - "movq (%2, %%"REG_a"), %%mm3 \n\t" - "packuswb %%mm5, %%mm4 \n\t" - "pcmpeqd %%mm2, %%mm2 \n\t" - "paddb %%mm2, %%mm2 \n\t" - OP_AVG(%%mm3, %%mm4, %%mm5, %%mm2) - "movq %%mm5, (%2, %%"REG_a") \n\t" - "add %3, %%"REG_a" \n\t" - - "movq (%1, %%"REG_a"), %%mm2 \n\t" // 0 <-> 2 1 <-> 3 - "movq 1(%1, %%"REG_a"), %%mm4 \n\t" - "movq %%mm2, %%mm3 \n\t" - 
"movq %%mm4, %%mm5 \n\t" - "punpcklbw %%mm7, %%mm2 \n\t" - "punpcklbw %%mm7, %%mm4 \n\t" - "punpckhbw %%mm7, %%mm3 \n\t" - "punpckhbw %%mm7, %%mm5 \n\t" - "paddusw %%mm2, %%mm4 \n\t" - "paddusw %%mm3, %%mm5 \n\t" - "paddusw %%mm6, %%mm0 \n\t" - "paddusw %%mm6, %%mm1 \n\t" - "paddusw %%mm4, %%mm0 \n\t" - "paddusw %%mm5, %%mm1 \n\t" - "psrlw $2, %%mm0 \n\t" - "psrlw $2, %%mm1 \n\t" - "movq (%2, %%"REG_a"), %%mm3 \n\t" - "packuswb %%mm1, %%mm0 \n\t" - "pcmpeqd %%mm2, %%mm2 \n\t" - "paddb %%mm2, %%mm2 \n\t" - OP_AVG(%%mm3, %%mm0, %%mm1, %%mm2) - "movq %%mm1, (%2, %%"REG_a") \n\t" - "add %3, %%"REG_a" \n\t" - - "subl $2, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels) - :"D"(block), "r"((x86_reg)line_size) - :REG_a, "memory"); -} - -//FIXME optimize -static void DEF(put, pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(put, pixels8_y2)(block , pixels , line_size, h); - DEF(put, pixels8_y2)(block+8, pixels+8, line_size, h); -} - -static void DEF(put, pixels16_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(put, pixels8_xy2)(block , pixels , line_size, h); - DEF(put, pixels8_xy2)(block+8, pixels+8, line_size, h); -} - -static void DEF(avg, pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(avg, pixels8_y2)(block , pixels , line_size, h); - DEF(avg, pixels8_y2)(block+8, pixels+8, line_size, h); -} - -static void DEF(avg, pixels16_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(avg, pixels8_xy2)(block , pixels , line_size, h); - DEF(avg, pixels8_xy2)(block+8, pixels+8, line_size, h); -} diff --git a/libavcodec/x86/dsputil_qns_template.c b/libavcodec/x86/dsputil_qns_template.c new file mode 100644 index 0000000000..20a40a175e --- /dev/null +++ b/libavcodec/x86/dsputil_qns_template.c @@ -0,0 +1,101 @@ +/* + * DSP utils : QNS functions are compiled 3 times for mmx/3dnow/ssse3 + * Copyright (c) 2004 Michael Niedermayer + * + * MMX optimization by Michael Niedermayer + * 3DNow! and SSSE3 optimization by Zuxy Meng + * + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#define MAX_ABS (512 >> (SCALE_OFFSET>0 ? 
SCALE_OFFSET : 0)) + +static int DEF(try_8x8basis)(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale) +{ + x86_reg i=0; + + assert(FFABS(scale) < MAX_ABS); + scale<<= 16 + SCALE_OFFSET - BASIS_SHIFT + RECON_SHIFT; + + SET_RND(mm6); + __asm__ volatile( + "pxor %%mm7, %%mm7 \n\t" + "movd %4, %%mm5 \n\t" + "punpcklwd %%mm5, %%mm5 \n\t" + "punpcklwd %%mm5, %%mm5 \n\t" + ".p2align 4 \n\t" + "1: \n\t" + "movq (%1, %0), %%mm0 \n\t" + "movq 8(%1, %0), %%mm1 \n\t" + PMULHRW(%%mm0, %%mm1, %%mm5, %%mm6) + "paddw (%2, %0), %%mm0 \n\t" + "paddw 8(%2, %0), %%mm1 \n\t" + "psraw $6, %%mm0 \n\t" + "psraw $6, %%mm1 \n\t" + "pmullw (%3, %0), %%mm0 \n\t" + "pmullw 8(%3, %0), %%mm1 \n\t" + "pmaddwd %%mm0, %%mm0 \n\t" + "pmaddwd %%mm1, %%mm1 \n\t" + "paddd %%mm1, %%mm0 \n\t" + "psrld $4, %%mm0 \n\t" + "paddd %%mm0, %%mm7 \n\t" + "add $16, %0 \n\t" + "cmp $128, %0 \n\t" //FIXME optimize & bench + " jb 1b \n\t" + PHADDD(%%mm7, %%mm6) + "psrld $2, %%mm7 \n\t" + "movd %%mm7, %0 \n\t" + + : "+r" (i) + : "r"(basis), "r"(rem), "r"(weight), "g"(scale) + ); + return i; +} + +static void DEF(add_8x8basis)(int16_t rem[64], int16_t basis[64], int scale) +{ + x86_reg i=0; + + if(FFABS(scale) < MAX_ABS){ + scale<<= 16 + SCALE_OFFSET - BASIS_SHIFT + RECON_SHIFT; + SET_RND(mm6); + __asm__ volatile( + "movd %3, %%mm5 \n\t" + "punpcklwd %%mm5, %%mm5 \n\t" + "punpcklwd %%mm5, %%mm5 \n\t" + ".p2align 4 \n\t" + "1: \n\t" + "movq (%1, %0), %%mm0 \n\t" + "movq 8(%1, %0), %%mm1 \n\t" + PMULHRW(%%mm0, %%mm1, %%mm5, %%mm6) + "paddw (%2, %0), %%mm0 \n\t" + "paddw 8(%2, %0), %%mm1 \n\t" + "movq %%mm0, (%2, %0) \n\t" + "movq %%mm1, 8(%2, %0) \n\t" + "add $16, %0 \n\t" + "cmp $128, %0 \n\t" // FIXME optimize & bench + " jb 1b \n\t" + + : "+r" (i) + : "r"(basis), "r"(rem), "g"(scale) + ); + }else{ + for(i=0; i<8*8; i++){ + rem[i] += (basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT); + } + } +} diff --git a/libavcodec/x86/dsputil_rnd_template.c b/libavcodec/x86/dsputil_rnd_template.c new file mode 100644 index 0000000000..34a2c0bca8 --- /dev/null +++ b/libavcodec/x86/dsputil_rnd_template.c @@ -0,0 +1,590 @@ +/* + * DSP utils mmx functions are compiled twice for rnd/no_rnd + * Copyright (c) 2000, 2001 Fabrice Bellard + * Copyright (c) 2003-2004 Michael Niedermayer + * + * MMX optimization by Nick Kurshev + * mostly rewritten by Michael Niedermayer + * and improved by Zdenek Kabelac + * + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +// put_pixels +static void DEF(put, pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + MOVQ_BFE(mm6); + __asm__ volatile( + "lea (%3, %3), %%"REG_a" \n\t" + ".p2align 3 \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "movq 1(%1), %%mm1 \n\t" + "movq (%1, %3), %%mm2 \n\t" + "movq 1(%1, %3), %%mm3 \n\t" + PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) + "movq %%mm4, (%2) \n\t" + "movq %%mm5, (%2, %3) \n\t" + "add %%"REG_a", %1 \n\t" + "add %%"REG_a", %2 \n\t" + "movq (%1), %%mm0 \n\t" + "movq 1(%1), %%mm1 \n\t" + "movq (%1, %3), %%mm2 \n\t" + "movq 1(%1, %3), %%mm3 \n\t" + PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) + "movq %%mm4, (%2) \n\t" + "movq %%mm5, (%2, %3) \n\t" + "add %%"REG_a", %1 \n\t" + "add %%"REG_a", %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels), "+D"(block) + :"r"((x86_reg)line_size) + :REG_a, "memory"); +} + +static void av_unused DEF(put, pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) +{ + MOVQ_BFE(mm6); + __asm__ volatile( + "testl $1, %0 \n\t" + " jz 1f \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%2), %%mm1 \n\t" + "add %4, %1 \n\t" + "add $8, %2 \n\t" + PAVGB(%%mm0, %%mm1, %%mm4, %%mm6) + "movq %%mm4, (%3) \n\t" + "add %5, %3 \n\t" + "decl %0 \n\t" + ".p2align 3 \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%2), %%mm1 \n\t" + "add %4, %1 \n\t" + "movq (%1), %%mm2 \n\t" + "movq 8(%2), %%mm3 \n\t" + "add %4, %1 \n\t" + PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) + "movq %%mm4, (%3) \n\t" + "add %5, %3 \n\t" + "movq %%mm5, (%3) \n\t" + "add %5, %3 \n\t" + "movq (%1), %%mm0 \n\t" + "movq 16(%2), %%mm1 \n\t" + "add %4, %1 \n\t" + "movq (%1), %%mm2 \n\t" + "movq 24(%2), %%mm3 \n\t" + "add %4, %1 \n\t" + "add $32, %2 \n\t" + PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) + "movq %%mm4, (%3) \n\t" + "add %5, %3 \n\t" + "movq %%mm5, (%3) \n\t" + "add %5, %3 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" +#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used + :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#else + :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#endif + :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) + :"memory"); +} + +static void DEF(put, pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + MOVQ_BFE(mm6); + __asm__ volatile( + "lea (%3, %3), %%"REG_a" \n\t" + ".p2align 3 \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "movq 1(%1), %%mm1 \n\t" + "movq (%1, %3), %%mm2 \n\t" + "movq 1(%1, %3), %%mm3 \n\t" + PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) + "movq %%mm4, (%2) \n\t" + "movq %%mm5, (%2, %3) \n\t" + "movq 8(%1), %%mm0 \n\t" + "movq 9(%1), %%mm1 \n\t" + "movq 8(%1, %3), %%mm2 \n\t" + "movq 9(%1, %3), %%mm3 \n\t" + PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) + "movq %%mm4, 8(%2) \n\t" + "movq %%mm5, 8(%2, %3) \n\t" + "add %%"REG_a", %1 \n\t" + "add %%"REG_a", %2 \n\t" + "movq (%1), %%mm0 \n\t" + "movq 1(%1), %%mm1 \n\t" + "movq (%1, %3), %%mm2 \n\t" + "movq 1(%1, %3), %%mm3 \n\t" + PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) + "movq %%mm4, (%2) \n\t" + "movq %%mm5, (%2, %3) \n\t" + "movq 8(%1), %%mm0 \n\t" + "movq 9(%1), %%mm1 \n\t" + "movq 8(%1, %3), %%mm2 \n\t" + "movq 9(%1, %3), %%mm3 \n\t" + PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, 
%%mm5) + "movq %%mm4, 8(%2) \n\t" + "movq %%mm5, 8(%2, %3) \n\t" + "add %%"REG_a", %1 \n\t" + "add %%"REG_a", %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels), "+D"(block) + :"r"((x86_reg)line_size) + :REG_a, "memory"); +} + +static void av_unused DEF(put, pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) +{ + MOVQ_BFE(mm6); + __asm__ volatile( + "testl $1, %0 \n\t" + " jz 1f \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%2), %%mm1 \n\t" + "movq 8(%1), %%mm2 \n\t" + "movq 8(%2), %%mm3 \n\t" + "add %4, %1 \n\t" + "add $16, %2 \n\t" + PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) + "movq %%mm4, (%3) \n\t" + "movq %%mm5, 8(%3) \n\t" + "add %5, %3 \n\t" + "decl %0 \n\t" + ".p2align 3 \n\t" + "1: \n\t" + "movq (%1), %%mm0 \n\t" + "movq (%2), %%mm1 \n\t" + "movq 8(%1), %%mm2 \n\t" + "movq 8(%2), %%mm3 \n\t" + "add %4, %1 \n\t" + PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) + "movq %%mm4, (%3) \n\t" + "movq %%mm5, 8(%3) \n\t" + "add %5, %3 \n\t" + "movq (%1), %%mm0 \n\t" + "movq 16(%2), %%mm1 \n\t" + "movq 8(%1), %%mm2 \n\t" + "movq 24(%2), %%mm3 \n\t" + "add %4, %1 \n\t" + PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) + "movq %%mm4, (%3) \n\t" + "movq %%mm5, 8(%3) \n\t" + "add %5, %3 \n\t" + "add $32, %2 \n\t" + "subl $2, %0 \n\t" + "jnz 1b \n\t" +#if !HAVE_EBX_AVAILABLE //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cannot be used + :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#else + :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) +#endif + :"S"((x86_reg)src1Stride), "D"((x86_reg)dstStride) + :"memory"); +} + +static void DEF(put, pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + MOVQ_BFE(mm6); + __asm__ volatile( + "lea (%3, %3), %%"REG_a" \n\t" + "movq (%1), %%mm0 \n\t" + ".p2align 3 \n\t" + "1: \n\t" + "movq (%1, %3), %%mm1 \n\t" + "movq (%1, %%"REG_a"),%%mm2 \n\t" + PAVGBP(%%mm1, %%mm0, %%mm4, %%mm2, %%mm1, %%mm5) + "movq %%mm4, (%2) \n\t" + "movq %%mm5, (%2, %3) \n\t" + "add %%"REG_a", %1 \n\t" + "add %%"REG_a", %2 \n\t" + "movq (%1, %3), %%mm1 \n\t" + "movq (%1, %%"REG_a"),%%mm0 \n\t" + PAVGBP(%%mm1, %%mm2, %%mm4, %%mm0, %%mm1, %%mm5) + "movq %%mm4, (%2) \n\t" + "movq %%mm5, (%2, %3) \n\t" + "add %%"REG_a", %1 \n\t" + "add %%"REG_a", %2 \n\t" + "subl $4, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels), "+D"(block) + :"r"((x86_reg)line_size) + :REG_a, "memory"); +} + +static void DEF(put, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + MOVQ_ZERO(mm7); + SET_RND(mm6); // =2 for rnd and =1 for no_rnd version + __asm__ volatile( + "movq (%1), %%mm0 \n\t" + "movq 1(%1), %%mm4 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm4, %%mm5 \n\t" + "punpcklbw %%mm7, %%mm0 \n\t" + "punpcklbw %%mm7, %%mm4 \n\t" + "punpckhbw %%mm7, %%mm1 \n\t" + "punpckhbw %%mm7, %%mm5 \n\t" + "paddusw %%mm0, %%mm4 \n\t" + "paddusw %%mm1, %%mm5 \n\t" + "xor %%"REG_a", %%"REG_a" \n\t" + "add %3, %1 \n\t" + ".p2align 3 \n\t" + "1: \n\t" + "movq (%1, %%"REG_a"), %%mm0 \n\t" + "movq 1(%1, %%"REG_a"), %%mm2 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm2, %%mm3 \n\t" + "punpcklbw %%mm7, %%mm0 \n\t" + "punpcklbw %%mm7, %%mm2 \n\t" + "punpckhbw %%mm7, %%mm1 \n\t" + "punpckhbw %%mm7, %%mm3 \n\t" + "paddusw %%mm2, %%mm0 \n\t" + "paddusw %%mm3, %%mm1 \n\t" + "paddusw %%mm6, %%mm4 \n\t" + "paddusw %%mm6, %%mm5 \n\t" + "paddusw %%mm0, %%mm4 \n\t" + "paddusw %%mm1, %%mm5 \n\t" + "psrlw $2, %%mm4 \n\t" + "psrlw $2, %%mm5 \n\t" + "packuswb %%mm5, %%mm4 \n\t" + "movq %%mm4, (%2, %%"REG_a") 
\n\t" + "add %3, %%"REG_a" \n\t" + + "movq (%1, %%"REG_a"), %%mm2 \n\t" // 0 <-> 2 1 <-> 3 + "movq 1(%1, %%"REG_a"), %%mm4 \n\t" + "movq %%mm2, %%mm3 \n\t" + "movq %%mm4, %%mm5 \n\t" + "punpcklbw %%mm7, %%mm2 \n\t" + "punpcklbw %%mm7, %%mm4 \n\t" + "punpckhbw %%mm7, %%mm3 \n\t" + "punpckhbw %%mm7, %%mm5 \n\t" + "paddusw %%mm2, %%mm4 \n\t" + "paddusw %%mm3, %%mm5 \n\t" + "paddusw %%mm6, %%mm0 \n\t" + "paddusw %%mm6, %%mm1 \n\t" + "paddusw %%mm4, %%mm0 \n\t" + "paddusw %%mm5, %%mm1 \n\t" + "psrlw $2, %%mm0 \n\t" + "psrlw $2, %%mm1 \n\t" + "packuswb %%mm1, %%mm0 \n\t" + "movq %%mm0, (%2, %%"REG_a") \n\t" + "add %3, %%"REG_a" \n\t" + + "subl $2, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels) + :"D"(block), "r"((x86_reg)line_size) + :REG_a, "memory"); +} + +// avg_pixels +static void av_unused DEF(avg, pixels4)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + MOVQ_BFE(mm6); + JUMPALIGN(); + do { + __asm__ volatile( + "movd %0, %%mm0 \n\t" + "movd %1, %%mm1 \n\t" + OP_AVG(%%mm0, %%mm1, %%mm2, %%mm6) + "movd %%mm2, %0 \n\t" + :"+m"(*block) + :"m"(*pixels) + :"memory"); + pixels += line_size; + block += line_size; + } + while (--h); +} + +// in case more speed is needed - unroling would certainly help +static void DEF(avg, pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + MOVQ_BFE(mm6); + JUMPALIGN(); + do { + __asm__ volatile( + "movq %0, %%mm0 \n\t" + "movq %1, %%mm1 \n\t" + OP_AVG(%%mm0, %%mm1, %%mm2, %%mm6) + "movq %%mm2, %0 \n\t" + :"+m"(*block) + :"m"(*pixels) + :"memory"); + pixels += line_size; + block += line_size; + } + while (--h); +} + +static void DEF(avg, pixels16)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + MOVQ_BFE(mm6); + JUMPALIGN(); + do { + __asm__ volatile( + "movq %0, %%mm0 \n\t" + "movq %1, %%mm1 \n\t" + OP_AVG(%%mm0, %%mm1, %%mm2, %%mm6) + "movq %%mm2, %0 \n\t" + "movq 8%0, %%mm0 \n\t" + "movq 8%1, %%mm1 \n\t" + OP_AVG(%%mm0, %%mm1, %%mm2, %%mm6) + "movq %%mm2, 8%0 \n\t" + :"+m"(*block) + :"m"(*pixels) + :"memory"); + pixels += line_size; + block += line_size; + } + while (--h); +} + +static void DEF(avg, pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + MOVQ_BFE(mm6); + JUMPALIGN(); + do { + __asm__ volatile( + "movq %1, %%mm0 \n\t" + "movq 1%1, %%mm1 \n\t" + "movq %0, %%mm3 \n\t" + PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) + OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) + "movq %%mm0, %0 \n\t" + :"+m"(*block) + :"m"(*pixels) + :"memory"); + pixels += line_size; + block += line_size; + } while (--h); +} + +static av_unused void DEF(avg, pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) +{ + MOVQ_BFE(mm6); + JUMPALIGN(); + do { + __asm__ volatile( + "movq %1, %%mm0 \n\t" + "movq %2, %%mm1 \n\t" + "movq %0, %%mm3 \n\t" + PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) + OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) + "movq %%mm0, %0 \n\t" + :"+m"(*dst) + :"m"(*src1), "m"(*src2) + :"memory"); + dst += dstStride; + src1 += src1Stride; + src2 += 8; + } while (--h); +} + +static void DEF(avg, pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + MOVQ_BFE(mm6); + JUMPALIGN(); + do { + __asm__ volatile( + "movq %1, %%mm0 \n\t" + "movq 1%1, %%mm1 \n\t" + "movq %0, %%mm3 \n\t" + PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) + OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) + "movq %%mm0, %0 \n\t" + "movq 8%1, %%mm0 \n\t" + "movq 9%1, %%mm1 \n\t" + "movq 8%0, %%mm3 \n\t" + PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) + OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) + "movq %%mm0, 8%0 \n\t" + :"+m"(*block) + 
:"m"(*pixels) + :"memory"); + pixels += line_size; + block += line_size; + } while (--h); +} + +static av_unused void DEF(avg, pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) +{ + MOVQ_BFE(mm6); + JUMPALIGN(); + do { + __asm__ volatile( + "movq %1, %%mm0 \n\t" + "movq %2, %%mm1 \n\t" + "movq %0, %%mm3 \n\t" + PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) + OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) + "movq %%mm0, %0 \n\t" + "movq 8%1, %%mm0 \n\t" + "movq 8%2, %%mm1 \n\t" + "movq 8%0, %%mm3 \n\t" + PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) + OP_AVG(%%mm3, %%mm2, %%mm0, %%mm6) + "movq %%mm0, 8%0 \n\t" + :"+m"(*dst) + :"m"(*src1), "m"(*src2) + :"memory"); + dst += dstStride; + src1 += src1Stride; + src2 += 16; + } while (--h); +} + +static void DEF(avg, pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + MOVQ_BFE(mm6); + __asm__ volatile( + "lea (%3, %3), %%"REG_a" \n\t" + "movq (%1), %%mm0 \n\t" + ".p2align 3 \n\t" + "1: \n\t" + "movq (%1, %3), %%mm1 \n\t" + "movq (%1, %%"REG_a"), %%mm2 \n\t" + PAVGBP(%%mm1, %%mm0, %%mm4, %%mm2, %%mm1, %%mm5) + "movq (%2), %%mm3 \n\t" + OP_AVG(%%mm3, %%mm4, %%mm0, %%mm6) + "movq (%2, %3), %%mm3 \n\t" + OP_AVG(%%mm3, %%mm5, %%mm1, %%mm6) + "movq %%mm0, (%2) \n\t" + "movq %%mm1, (%2, %3) \n\t" + "add %%"REG_a", %1 \n\t" + "add %%"REG_a", %2 \n\t" + + "movq (%1, %3), %%mm1 \n\t" + "movq (%1, %%"REG_a"), %%mm0 \n\t" + PAVGBP(%%mm1, %%mm2, %%mm4, %%mm0, %%mm1, %%mm5) + "movq (%2), %%mm3 \n\t" + OP_AVG(%%mm3, %%mm4, %%mm2, %%mm6) + "movq (%2, %3), %%mm3 \n\t" + OP_AVG(%%mm3, %%mm5, %%mm1, %%mm6) + "movq %%mm2, (%2) \n\t" + "movq %%mm1, (%2, %3) \n\t" + "add %%"REG_a", %1 \n\t" + "add %%"REG_a", %2 \n\t" + + "subl $4, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels), "+D"(block) + :"r"((x86_reg)line_size) + :REG_a, "memory"); +} + +// this routine is 'slightly' suboptimal but mostly unused +static void DEF(avg, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) +{ + MOVQ_ZERO(mm7); + SET_RND(mm6); // =2 for rnd and =1 for no_rnd version + __asm__ volatile( + "movq (%1), %%mm0 \n\t" + "movq 1(%1), %%mm4 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm4, %%mm5 \n\t" + "punpcklbw %%mm7, %%mm0 \n\t" + "punpcklbw %%mm7, %%mm4 \n\t" + "punpckhbw %%mm7, %%mm1 \n\t" + "punpckhbw %%mm7, %%mm5 \n\t" + "paddusw %%mm0, %%mm4 \n\t" + "paddusw %%mm1, %%mm5 \n\t" + "xor %%"REG_a", %%"REG_a" \n\t" + "add %3, %1 \n\t" + ".p2align 3 \n\t" + "1: \n\t" + "movq (%1, %%"REG_a"), %%mm0 \n\t" + "movq 1(%1, %%"REG_a"), %%mm2 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm2, %%mm3 \n\t" + "punpcklbw %%mm7, %%mm0 \n\t" + "punpcklbw %%mm7, %%mm2 \n\t" + "punpckhbw %%mm7, %%mm1 \n\t" + "punpckhbw %%mm7, %%mm3 \n\t" + "paddusw %%mm2, %%mm0 \n\t" + "paddusw %%mm3, %%mm1 \n\t" + "paddusw %%mm6, %%mm4 \n\t" + "paddusw %%mm6, %%mm5 \n\t" + "paddusw %%mm0, %%mm4 \n\t" + "paddusw %%mm1, %%mm5 \n\t" + "psrlw $2, %%mm4 \n\t" + "psrlw $2, %%mm5 \n\t" + "movq (%2, %%"REG_a"), %%mm3 \n\t" + "packuswb %%mm5, %%mm4 \n\t" + "pcmpeqd %%mm2, %%mm2 \n\t" + "paddb %%mm2, %%mm2 \n\t" + OP_AVG(%%mm3, %%mm4, %%mm5, %%mm2) + "movq %%mm5, (%2, %%"REG_a") \n\t" + "add %3, %%"REG_a" \n\t" + + "movq (%1, %%"REG_a"), %%mm2 \n\t" // 0 <-> 2 1 <-> 3 + "movq 1(%1, %%"REG_a"), %%mm4 \n\t" + "movq %%mm2, %%mm3 \n\t" + "movq %%mm4, %%mm5 \n\t" + "punpcklbw %%mm7, %%mm2 \n\t" + "punpcklbw %%mm7, %%mm4 \n\t" + "punpckhbw %%mm7, %%mm3 \n\t" + "punpckhbw %%mm7, %%mm5 \n\t" + "paddusw %%mm2, %%mm4 \n\t" + "paddusw %%mm3, %%mm5 \n\t" + "paddusw %%mm6, %%mm0 \n\t" + 
"paddusw %%mm6, %%mm1 \n\t" + "paddusw %%mm4, %%mm0 \n\t" + "paddusw %%mm5, %%mm1 \n\t" + "psrlw $2, %%mm0 \n\t" + "psrlw $2, %%mm1 \n\t" + "movq (%2, %%"REG_a"), %%mm3 \n\t" + "packuswb %%mm1, %%mm0 \n\t" + "pcmpeqd %%mm2, %%mm2 \n\t" + "paddb %%mm2, %%mm2 \n\t" + OP_AVG(%%mm3, %%mm0, %%mm1, %%mm2) + "movq %%mm1, (%2, %%"REG_a") \n\t" + "add %3, %%"REG_a" \n\t" + + "subl $2, %0 \n\t" + "jnz 1b \n\t" + :"+g"(h), "+S"(pixels) + :"D"(block), "r"((x86_reg)line_size) + :REG_a, "memory"); +} + +//FIXME optimize +static void DEF(put, pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ + DEF(put, pixels8_y2)(block , pixels , line_size, h); + DEF(put, pixels8_y2)(block+8, pixels+8, line_size, h); +} + +static void DEF(put, pixels16_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ + DEF(put, pixels8_xy2)(block , pixels , line_size, h); + DEF(put, pixels8_xy2)(block+8, pixels+8, line_size, h); +} + +static void DEF(avg, pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ + DEF(avg, pixels8_y2)(block , pixels , line_size, h); + DEF(avg, pixels8_y2)(block+8, pixels+8, line_size, h); +} + +static void DEF(avg, pixels16_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ + DEF(avg, pixels8_xy2)(block , pixels , line_size, h); + DEF(avg, pixels8_xy2)(block+8, pixels+8, line_size, h); +} diff --git a/libavcodec/x86/dsputilenc_mmx.c b/libavcodec/x86/dsputilenc_mmx.c index 10331327bf..c9797ef31f 100644 --- a/libavcodec/x86/dsputilenc_mmx.c +++ b/libavcodec/x86/dsputilenc_mmx.c @@ -1041,7 +1041,7 @@ static int ssd_int8_vs_int16_mmx(const int8_t *pix1, const int16_t *pix2, int si #define SET_RND MOVQ_WONE #define SCALE_OFFSET 1 -#include "dsputil_mmx_qns_template.c" +#include "dsputil_qns_template.c" #undef DEF #undef SET_RND @@ -1055,7 +1055,7 @@ static int ssd_int8_vs_int16_mmx(const int8_t *pix1, const int16_t *pix2, int si "pmulhrw " #s ", "#x " \n\t"\ "pmulhrw " #s ", "#y " \n\t" -#include "dsputil_mmx_qns_template.c" +#include "dsputil_qns_template.c" #undef DEF #undef SET_RND @@ -1074,7 +1074,7 @@ static int ssd_int8_vs_int16_mmx(const int8_t *pix1, const int16_t *pix2, int si "pmulhrsw " #s ", "#x " \n\t"\ "pmulhrsw " #s ", "#y " \n\t" -#include "dsputil_mmx_qns_template.c" +#include "dsputil_qns_template.c" #undef DEF #undef SET_RND -- cgit v1.2.3 From 1169f0d0afc0454633cfcfad73643f0458521c67 Mon Sep 17 00:00:00 2001 From: Diego Biurrun Date: Tue, 4 Sep 2012 08:30:16 +0200 Subject: x86: more specific checks for availability of required assembly capabilities --- libavcodec/x86/dnxhdenc.c | 8 ++++---- libavcodec/x86/lpc.c | 8 ++++---- libavcodec/x86/mpegaudiodec.c | 9 +++++---- libswscale/swscale.c | 2 +- 4 files changed, 14 insertions(+), 13 deletions(-) diff --git a/libavcodec/x86/dnxhdenc.c b/libavcodec/x86/dnxhdenc.c index c344afec55..43ee246221 100644 --- a/libavcodec/x86/dnxhdenc.c +++ b/libavcodec/x86/dnxhdenc.c @@ -24,7 +24,7 @@ #include "libavutil/x86/asm.h" #include "libavcodec/dnxhdenc.h" -#if HAVE_INLINE_ASM +#if HAVE_SSE2_INLINE static void get_pixels_8x4_sym_sse2(DCTELEM *block, const uint8_t *pixels, int line_size) { @@ -52,14 +52,14 @@ static void get_pixels_8x4_sym_sse2(DCTELEM *block, const uint8_t *pixels, int l ); } -#endif /* HAVE_INLINE_ASM */ +#endif /* HAVE_SSE2_INLINE */ void ff_dnxhdenc_init_x86(DNXHDEncContext *ctx) { -#if HAVE_INLINE_ASM +#if HAVE_SSE2_INLINE if (av_get_cpu_flags() & AV_CPU_FLAG_SSE2) { if (ctx->cid_table->bit_depth == 8) ctx->get_pixels_8x4_sym = get_pixels_8x4_sym_sse2; } 
-#endif /* HAVE_INLINE_ASM */ +#endif /* HAVE_SSE2_INLINE */ } diff --git a/libavcodec/x86/lpc.c b/libavcodec/x86/lpc.c index 82f77612f2..b8c77e28f4 100644 --- a/libavcodec/x86/lpc.c +++ b/libavcodec/x86/lpc.c @@ -24,7 +24,7 @@ #include "libavutil/internal.h" #include "libavcodec/lpc.h" -#if HAVE_INLINE_ASM +#if HAVE_SSE2_INLINE static void lpc_apply_welch_window_sse2(const int32_t *data, int len, double *w_data) @@ -139,16 +139,16 @@ static void lpc_compute_autocorr_sse2(const double *data, int len, int lag, } } -#endif /* HAVE_INLINE_ASM */ +#endif /* HAVE_SSE2_INLINE */ av_cold void ff_lpc_init_x86(LPCContext *c) { +#if HAVE_SSE2_INLINE int mm_flags = av_get_cpu_flags(); -#if HAVE_INLINE_ASM if (mm_flags & (AV_CPU_FLAG_SSE2|AV_CPU_FLAG_SSE2SLOW)) { c->lpc_apply_welch_window = lpc_apply_welch_window_sse2; c->lpc_compute_autocorr = lpc_compute_autocorr_sse2; } -#endif /* HAVE_INLINE_ASM */ +#endif /* HAVE_SSE2_INLINE */ } diff --git a/libavcodec/x86/mpegaudiodec.c b/libavcodec/x86/mpegaudiodec.c index d2573dd274..e7c7fbbf48 100644 --- a/libavcodec/x86/mpegaudiodec.c +++ b/libavcodec/x86/mpegaudiodec.c @@ -36,7 +36,7 @@ void ff_four_imdct36_float_avx(float *out, float *buf, float *in, float *win, DECLARE_ALIGNED(16, static float, mdct_win_sse)[2][4][4*40]; -#if HAVE_INLINE_ASM +#if HAVE_SSE2_INLINE #define MACS(rt, ra, rb) rt+=(ra)*(rb) #define MLSS(rt, ra, rb) rt-=(ra)*(rb) @@ -180,7 +180,7 @@ static void apply_window_mp3(float *in, float *win, int *unused, float *out, *out = sum; } -#endif /* HAVE_INLINE_ASM */ +#endif /* HAVE_SSE2_INLINE */ #if HAVE_YASM #define DECL_IMDCT_BLOCKS(CPU1, CPU2) \ @@ -240,11 +240,12 @@ void ff_mpadsp_init_mmx(MPADSPContext *s) } } -#if HAVE_INLINE_ASM +#if HAVE_SSE2_INLINE if (mm_flags & AV_CPU_FLAG_SSE2) { s->apply_window_float = apply_window_mp3; } -#endif /* HAVE_INLINE_ASM */ +#endif /* HAVE_SSE2_INLINE */ + #if HAVE_YASM if (mm_flags & AV_CPU_FLAG_AVX && HAVE_AVX) { s->imdct36_blocks_float = imdct36_blocks_avx; diff --git a/libswscale/swscale.c b/libswscale/swscale.c index 94f51cf5cb..4f0a049b3d 100644 --- a/libswscale/swscale.c +++ b/libswscale/swscale.c @@ -661,7 +661,7 @@ static int swScale(SwsContext *c, const uint8_t *src[], if (isPlanar(dstFormat) && isALPHA(dstFormat) && !alpPixBuf) fillPlane(dst[3], dstStride[3], dstW, dstY - lastDstY, lastDstY, 255); -#if HAVE_MMXEXT && HAVE_INLINE_ASM +#if HAVE_MMXEXT_INLINE if (av_get_cpu_flags() & AV_CPU_FLAG_MMXEXT) __asm__ volatile ("sfence" ::: "memory"); #endif -- cgit v1.2.3 From 73275259978e77f3dc52bfd01712249d96a585fe Mon Sep 17 00:00:00 2001 From: Justin Ruggles Date: Fri, 3 Aug 2012 17:38:49 -0400 Subject: x86: float_dsp: fix ff_vector_fmac_scalar_avx() on Win64 The SWAP macro does not work for explicit xmm/ymm usage, so instead just move the scalar value from xmm2 to xmm0. 
--- libavutil/x86/float_dsp.asm | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libavutil/x86/float_dsp.asm b/libavutil/x86/float_dsp.asm index 934dac08d9..5b9b444676 100644 --- a/libavutil/x86/float_dsp.asm +++ b/libavutil/x86/float_dsp.asm @@ -60,12 +60,12 @@ cglobal vector_fmac_scalar, 3,3,3, dst, src, len %else cglobal vector_fmac_scalar, 4,4,3, dst, src, mul, len %endif -%if WIN64 - SWAP 0, 2 -%endif %if ARCH_X86_32 VBROADCASTSS m0, mulm %else +%if WIN64 + mova xmm0, xmm2 +%endif shufps xmm0, xmm0, 0 %if cpuflag(avx) vinsertf128 m0, m0, xmm0, 1 -- cgit v1.2.3 From 407eeb3474b9b2af5a105a036ea03077466e3a50 Mon Sep 17 00:00:00 2001 From: Carl Eugen Hoyos Date: Sun, 28 Aug 2011 19:43:04 +0200 Subject: mp2 muxer: mark as AVFMT_NOTIMESTAMPS. Signed-off-by: Alex Converse --- libavformat/mp3enc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/libavformat/mp3enc.c b/libavformat/mp3enc.c index 9a54e01d9e..fac39d9efd 100644 --- a/libavformat/mp3enc.c +++ b/libavformat/mp3enc.c @@ -197,6 +197,7 @@ AVOutputFormat ff_mp2_muxer = { .video_codec = AV_CODEC_ID_NONE, .write_packet = ff_raw_write_packet, .write_trailer = mp3_write_trailer, + .flags = AVFMT_NOTIMESTAMPS, }; #endif -- cgit v1.2.3 From 5f72bc02f8ee7ccbdf779f560a11e9f228b7ac82 Mon Sep 17 00:00:00 2001 From: Martin Storsjö Date: Fri, 7 Sep 2012 10:41:37 +0300 Subject: mov_chan: Reindent an incorrectly indented line MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Martin Storsjö --- libavformat/mov_chan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libavformat/mov_chan.c b/libavformat/mov_chan.c index 800bb102a0..b1fa13f753 100644 --- a/libavformat/mov_chan.c +++ b/libavformat/mov_chan.c @@ -580,7 +580,7 @@ int ff_mov_read_chan(AVFormatContext *s, AVIOContext *pb, AVStream *st, } } if (layout_tag == 0) - st->codec->channel_layout = label_mask; + st->codec->channel_layout = label_mask; else st->codec->channel_layout = ff_mov_get_channel_layout(layout_tag, bitmap); -- cgit v1.2.3 From 7b699d813681e4fad419ec63c96d595e98b2dbfa Mon Sep 17 00:00:00 2001 From: Martin Storsjö Date: Fri, 7 Sep 2012 10:45:16 +0300 Subject: mov_chan: Only set the channel_layout if setting it to a nonzero value MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If regularly parsing new chan atoms (as in rtpdec_qt), but the chan atoms don't actually contain any channel layout, don't reset the value that the caller has filled in (by guessing). Signed-off-by: Martin Storsjö --- libavformat/mov_chan.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/libavformat/mov_chan.c b/libavformat/mov_chan.c index b1fa13f753..00a2a4bb92 100644 --- a/libavformat/mov_chan.c +++ b/libavformat/mov_chan.c @@ -579,9 +579,10 @@ int ff_mov_read_chan(AVFormatContext *s, AVIOContext *pb, AVStream *st, label_mask |= mask_incr; } } - if (layout_tag == 0) - st->codec->channel_layout = label_mask; - else + if (layout_tag == 0) { + if (label_mask) + st->codec->channel_layout = label_mask; + } else st->codec->channel_layout = ff_mov_get_channel_layout(layout_tag, bitmap); return 0; -- cgit v1.2.3
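
A note on the final mov_chan change above: a chan atom with layout_tag == 0 and an empty label mask carries no layout information at all, so writing it through would replace the caller's guessed channel_layout with 0. Below is a minimal standalone C sketch of that guard; the struct and function here are illustrative stand-ins for this note only, not the actual Libav API (the real patch operates on st->codec->channel_layout and ff_mov_get_channel_layout(), as shown in the diff).

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the codec context field touched by the patch. */
struct dec_ctx {
    uint64_t channel_layout;   /* pre-filled by the caller's guess */
};

/* Sketch of the guard: only overwrite the guess when the chan atom
 * actually supplied a nonzero label mask or an explicit layout tag. */
static void apply_chan_atom(struct dec_ctx *c, uint32_t layout_tag,
                            uint64_t label_mask, uint64_t tag_layout)
{
    if (layout_tag == 0) {
        if (label_mask)                 /* empty mask: keep the guess */
            c->channel_layout = label_mask;
    } else {
        c->channel_layout = tag_layout; /* explicit tag always wins */
    }
}

int main(void)
{
    struct dec_ctx c = { .channel_layout = 3 };  /* guessed stereo */
    apply_chan_atom(&c, 0, 0, 0);  /* chan atom with no layout info */
    printf("layout after empty chan atom: %llu\n",
           (unsigned long long)c.channel_layout); /* still 3 */
    return 0;
}

This matters for callers such as rtpdec_qt that parse chan atoms repeatedly: before the patch, each empty atom reset the layout to 0; after it, the guessed value survives until real layout data arrives.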