From 368f50359eb328b0b9d67451f56fda20b3255f9a Mon Sep 17 00:00:00 2001 From: Diego Biurrun Date: Wed, 8 Jan 2014 14:00:10 +0100 Subject: dsputil: Split off quarterpel bits into their own context --- libavcodec/Makefile | 1 + libavcodec/cavs.c | 1 + libavcodec/cavsdsp.h | 4 +- libavcodec/dsputil.c | 727 ---------------------------------------- libavcodec/dsputil.h | 37 -- libavcodec/dsputil_template.c | 223 ------------ libavcodec/h263dec.c | 16 +- libavcodec/h264.h | 1 + libavcodec/h264_mb.c | 1 + libavcodec/h264qpel.h | 2 +- libavcodec/motion_est.c | 16 +- libavcodec/mpegvideo.c | 1 + libavcodec/mpegvideo.h | 2 + libavcodec/mpegvideo_enc.c | 9 +- libavcodec/mpegvideo_motion.c | 1 + libavcodec/mss2.c | 7 +- libavcodec/qpel_template.c | 219 ++++++++++++ libavcodec/qpeldsp.c | 764 ++++++++++++++++++++++++++++++++++++++++++ libavcodec/qpeldsp.h | 78 +++++ libavcodec/rv34.c | 1 + libavcodec/rv34dsp.h | 2 +- libavcodec/vc1dec.c | 6 +- libavcodec/vc1dsp.c | 2 +- libavcodec/wmv2dsp.h | 2 +- libavcodec/x86/Makefile | 11 +- libavcodec/x86/dsputil_init.c | 464 ------------------------- libavcodec/x86/mpeg4qpel.asm | 558 ------------------------------ libavcodec/x86/qpeldsp.asm | 559 ++++++++++++++++++++++++++++++ libavcodec/x86/qpeldsp_init.c | 501 +++++++++++++++++++++++++++ 29 files changed, 2176 insertions(+), 2040 deletions(-) delete mode 100644 libavcodec/dsputil_template.c create mode 100644 libavcodec/qpel_template.c create mode 100644 libavcodec/qpeldsp.c create mode 100644 libavcodec/qpeldsp.h delete mode 100644 libavcodec/x86/mpeg4qpel.asm create mode 100644 libavcodec/x86/qpeldsp.asm create mode 100644 libavcodec/x86/qpeldsp_init.c (limited to 'libavcodec') diff --git a/libavcodec/Makefile b/libavcodec/Makefile index 7060979b45..a61e673d8b 100644 --- a/libavcodec/Makefile +++ b/libavcodec/Makefile @@ -63,6 +63,7 @@ OBJS-$(CONFIG_MPEGVIDEO) += mpegvideo.o mpegvideo_motion.o \ mpegutils.o OBJS-$(CONFIG_MPEGVIDEOENC) += mpegvideo_enc.o mpeg12data.o \ motion_est.o ratecontrol.o +OBJS-$(CONFIG_QPELDSP) += qpeldsp.o OBJS-$(CONFIG_RANGECODER) += rangecoder.o RDFT-OBJS-$(CONFIG_HARDCODED_TABLES) += sin_tables.o OBJS-$(CONFIG_RDFT) += rdft.o $(RDFT-OBJS-yes) diff --git a/libavcodec/cavs.c b/libavcodec/cavs.c index 4166b9e2f6..3f21dccedf 100644 --- a/libavcodec/cavs.c +++ b/libavcodec/cavs.c @@ -30,6 +30,7 @@ #include "golomb.h" #include "h264chroma.h" #include "mathops.h" +#include "qpeldsp.h" #include "cavs.h" static const uint8_t alpha_tab[64] = { diff --git a/libavcodec/cavsdsp.h b/libavcodec/cavsdsp.h index 333bd10cf2..248afd508c 100644 --- a/libavcodec/cavsdsp.h +++ b/libavcodec/cavsdsp.h @@ -23,7 +23,9 @@ #define AVCODEC_CAVSDSP_H #include -#include "dsputil.h" + +#include "avcodec.h" +#include "qpeldsp.h" typedef struct CAVSDSPContext { qpel_mc_func put_cavs_qpel_pixels_tab[2][16]; diff --git a/libavcodec/dsputil.c b/libavcodec/dsputil.c index 89c0da4ec3..aba2a4371f 100644 --- a/libavcodec/dsputil.c +++ b/libavcodec/dsputil.c @@ -48,9 +48,6 @@ uint32_t ff_square_tab[512] = { 0, }; #undef BIT_DEPTH #define BIT_DEPTH 8 -#include "hpel_template.c" -#include "tpel_template.c" -#include "dsputil_template.c" #include "dsputilenc_template.c" /* Input permutation for the simple_idct_mmx */ @@ -485,701 +482,6 @@ void ff_gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy, } } -#define QPEL_MC(r, OPNAME, RND, OP) \ -static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, \ - int dstStride, int srcStride, \ - int h) \ -{ \ - const uint8_t *cm = ff_crop_tab + 
MAX_NEG_CROP; \ - int i; \ - \ - for (i = 0; i < h; i++) { \ - OP(dst[0], (src[0] + src[1]) * 20 - (src[0] + src[2]) * 6 + (src[1] + src[3]) * 3 - (src[2] + src[4])); \ - OP(dst[1], (src[1] + src[2]) * 20 - (src[0] + src[3]) * 6 + (src[0] + src[4]) * 3 - (src[1] + src[5])); \ - OP(dst[2], (src[2] + src[3]) * 20 - (src[1] + src[4]) * 6 + (src[0] + src[5]) * 3 - (src[0] + src[6])); \ - OP(dst[3], (src[3] + src[4]) * 20 - (src[2] + src[5]) * 6 + (src[1] + src[6]) * 3 - (src[0] + src[7])); \ - OP(dst[4], (src[4] + src[5]) * 20 - (src[3] + src[6]) * 6 + (src[2] + src[7]) * 3 - (src[1] + src[8])); \ - OP(dst[5], (src[5] + src[6]) * 20 - (src[4] + src[7]) * 6 + (src[3] + src[8]) * 3 - (src[2] + src[8])); \ - OP(dst[6], (src[6] + src[7]) * 20 - (src[5] + src[8]) * 6 + (src[4] + src[8]) * 3 - (src[3] + src[7])); \ - OP(dst[7], (src[7] + src[8]) * 20 - (src[6] + src[8]) * 6 + (src[5] + src[7]) * 3 - (src[4] + src[6])); \ - dst += dstStride; \ - src += srcStride; \ - } \ -} \ - \ -static void OPNAME ## mpeg4_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, \ - int dstStride, int srcStride) \ -{ \ - const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \ - const int w = 8; \ - int i; \ - \ - for (i = 0; i < w; i++) { \ - const int src0 = src[0 * srcStride]; \ - const int src1 = src[1 * srcStride]; \ - const int src2 = src[2 * srcStride]; \ - const int src3 = src[3 * srcStride]; \ - const int src4 = src[4 * srcStride]; \ - const int src5 = src[5 * srcStride]; \ - const int src6 = src[6 * srcStride]; \ - const int src7 = src[7 * srcStride]; \ - const int src8 = src[8 * srcStride]; \ - OP(dst[0 * dstStride], (src0 + src1) * 20 - (src0 + src2) * 6 + (src1 + src3) * 3 - (src2 + src4)); \ - OP(dst[1 * dstStride], (src1 + src2) * 20 - (src0 + src3) * 6 + (src0 + src4) * 3 - (src1 + src5)); \ - OP(dst[2 * dstStride], (src2 + src3) * 20 - (src1 + src4) * 6 + (src0 + src5) * 3 - (src0 + src6)); \ - OP(dst[3 * dstStride], (src3 + src4) * 20 - (src2 + src5) * 6 + (src1 + src6) * 3 - (src0 + src7)); \ - OP(dst[4 * dstStride], (src4 + src5) * 20 - (src3 + src6) * 6 + (src2 + src7) * 3 - (src1 + src8)); \ - OP(dst[5 * dstStride], (src5 + src6) * 20 - (src4 + src7) * 6 + (src3 + src8) * 3 - (src2 + src8)); \ - OP(dst[6 * dstStride], (src6 + src7) * 20 - (src5 + src8) * 6 + (src4 + src8) * 3 - (src3 + src7)); \ - OP(dst[7 * dstStride], (src7 + src8) * 20 - (src6 + src8) * 6 + (src5 + src7) * 3 - (src4 + src6)); \ - dst++; \ - src++; \ - } \ -} \ - \ -static void OPNAME ## mpeg4_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, \ - int dstStride, int srcStride, \ - int h) \ -{ \ - const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \ - int i; \ - \ - for (i = 0; i < h; i++) { \ - OP(dst[0], (src[0] + src[1]) * 20 - (src[0] + src[2]) * 6 + (src[1] + src[3]) * 3 - (src[2] + src[4])); \ - OP(dst[1], (src[1] + src[2]) * 20 - (src[0] + src[3]) * 6 + (src[0] + src[4]) * 3 - (src[1] + src[5])); \ - OP(dst[2], (src[2] + src[3]) * 20 - (src[1] + src[4]) * 6 + (src[0] + src[5]) * 3 - (src[0] + src[6])); \ - OP(dst[3], (src[3] + src[4]) * 20 - (src[2] + src[5]) * 6 + (src[1] + src[6]) * 3 - (src[0] + src[7])); \ - OP(dst[4], (src[4] + src[5]) * 20 - (src[3] + src[6]) * 6 + (src[2] + src[7]) * 3 - (src[1] + src[8])); \ - OP(dst[5], (src[5] + src[6]) * 20 - (src[4] + src[7]) * 6 + (src[3] + src[8]) * 3 - (src[2] + src[9])); \ - OP(dst[6], (src[6] + src[7]) * 20 - (src[5] + src[8]) * 6 + (src[4] + src[9]) * 3 - (src[3] + src[10])); \ - OP(dst[7], (src[7] + src[8]) * 20 - (src[6] + src[9]) * 6 + (src[5] + src[10]) * 3 - (src[4] + src[11])); \ - 
OP(dst[8], (src[8] + src[9]) * 20 - (src[7] + src[10]) * 6 + (src[6] + src[11]) * 3 - (src[5] + src[12])); \ - OP(dst[9], (src[9] + src[10]) * 20 - (src[8] + src[11]) * 6 + (src[7] + src[12]) * 3 - (src[6] + src[13])); \ - OP(dst[10], (src[10] + src[11]) * 20 - (src[9] + src[12]) * 6 + (src[8] + src[13]) * 3 - (src[7] + src[14])); \ - OP(dst[11], (src[11] + src[12]) * 20 - (src[10] + src[13]) * 6 + (src[9] + src[14]) * 3 - (src[8] + src[15])); \ - OP(dst[12], (src[12] + src[13]) * 20 - (src[11] + src[14]) * 6 + (src[10] + src[15]) * 3 - (src[9] + src[16])); \ - OP(dst[13], (src[13] + src[14]) * 20 - (src[12] + src[15]) * 6 + (src[11] + src[16]) * 3 - (src[10] + src[16])); \ - OP(dst[14], (src[14] + src[15]) * 20 - (src[13] + src[16]) * 6 + (src[12] + src[16]) * 3 - (src[11] + src[15])); \ - OP(dst[15], (src[15] + src[16]) * 20 - (src[14] + src[16]) * 6 + (src[13] + src[15]) * 3 - (src[12] + src[14])); \ - dst += dstStride; \ - src += srcStride; \ - } \ -} \ - \ -static void OPNAME ## mpeg4_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, \ - int dstStride, int srcStride) \ -{ \ - const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \ - const int w = 16; \ - int i; \ - \ - for (i = 0; i < w; i++) { \ - const int src0 = src[0 * srcStride]; \ - const int src1 = src[1 * srcStride]; \ - const int src2 = src[2 * srcStride]; \ - const int src3 = src[3 * srcStride]; \ - const int src4 = src[4 * srcStride]; \ - const int src5 = src[5 * srcStride]; \ - const int src6 = src[6 * srcStride]; \ - const int src7 = src[7 * srcStride]; \ - const int src8 = src[8 * srcStride]; \ - const int src9 = src[9 * srcStride]; \ - const int src10 = src[10 * srcStride]; \ - const int src11 = src[11 * srcStride]; \ - const int src12 = src[12 * srcStride]; \ - const int src13 = src[13 * srcStride]; \ - const int src14 = src[14 * srcStride]; \ - const int src15 = src[15 * srcStride]; \ - const int src16 = src[16 * srcStride]; \ - OP(dst[0 * dstStride], (src0 + src1) * 20 - (src0 + src2) * 6 + (src1 + src3) * 3 - (src2 + src4)); \ - OP(dst[1 * dstStride], (src1 + src2) * 20 - (src0 + src3) * 6 + (src0 + src4) * 3 - (src1 + src5)); \ - OP(dst[2 * dstStride], (src2 + src3) * 20 - (src1 + src4) * 6 + (src0 + src5) * 3 - (src0 + src6)); \ - OP(dst[3 * dstStride], (src3 + src4) * 20 - (src2 + src5) * 6 + (src1 + src6) * 3 - (src0 + src7)); \ - OP(dst[4 * dstStride], (src4 + src5) * 20 - (src3 + src6) * 6 + (src2 + src7) * 3 - (src1 + src8)); \ - OP(dst[5 * dstStride], (src5 + src6) * 20 - (src4 + src7) * 6 + (src3 + src8) * 3 - (src2 + src9)); \ - OP(dst[6 * dstStride], (src6 + src7) * 20 - (src5 + src8) * 6 + (src4 + src9) * 3 - (src3 + src10)); \ - OP(dst[7 * dstStride], (src7 + src8) * 20 - (src6 + src9) * 6 + (src5 + src10) * 3 - (src4 + src11)); \ - OP(dst[8 * dstStride], (src8 + src9) * 20 - (src7 + src10) * 6 + (src6 + src11) * 3 - (src5 + src12)); \ - OP(dst[9 * dstStride], (src9 + src10) * 20 - (src8 + src11) * 6 + (src7 + src12) * 3 - (src6 + src13)); \ - OP(dst[10 * dstStride], (src10 + src11) * 20 - (src9 + src12) * 6 + (src8 + src13) * 3 - (src7 + src14)); \ - OP(dst[11 * dstStride], (src11 + src12) * 20 - (src10 + src13) * 6 + (src9 + src14) * 3 - (src8 + src15)); \ - OP(dst[12 * dstStride], (src12 + src13) * 20 - (src11 + src14) * 6 + (src10 + src15) * 3 - (src9 + src16)); \ - OP(dst[13 * dstStride], (src13 + src14) * 20 - (src12 + src15) * 6 + (src11 + src16) * 3 - (src10 + src16)); \ - OP(dst[14 * dstStride], (src14 + src15) * 20 - (src13 + src16) * 6 + (src12 + src16) * 3 - (src11 + src15)); \ - OP(dst[15 * 
dstStride], (src15 + src16) * 20 - (src14 + src16) * 6 + (src13 + src15) * 3 - (src12 + src14)); \ - dst++; \ - src++; \ - } \ -} \ - \ -static void OPNAME ## qpel8_mc10_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t half[64]; \ - \ - put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8); \ - OPNAME ## pixels8_l2_8(dst, src, half, stride, stride, 8, 8); \ -} \ - \ -static void OPNAME ## qpel8_mc20_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - OPNAME ## mpeg4_qpel8_h_lowpass(dst, src, stride, stride, 8); \ -} \ - \ -static void OPNAME ## qpel8_mc30_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t half[64]; \ - \ - put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8); \ - OPNAME ## pixels8_l2_8(dst, src + 1, half, stride, stride, 8, 8); \ -} \ - \ -static void OPNAME ## qpel8_mc01_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[16 * 9]; \ - uint8_t half[64]; \ - \ - copy_block9(full, src, 16, stride, 9); \ - put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16); \ - OPNAME ## pixels8_l2_8(dst, full, half, stride, 16, 8, 8); \ -} \ - \ -static void OPNAME ## qpel8_mc02_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[16 * 9]; \ - \ - copy_block9(full, src, 16, stride, 9); \ - OPNAME ## mpeg4_qpel8_v_lowpass(dst, full, stride, 16); \ -} \ - \ -static void OPNAME ## qpel8_mc03_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[16 * 9]; \ - uint8_t half[64]; \ - \ - copy_block9(full, src, 16, stride, 9); \ - put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16); \ - OPNAME ## pixels8_l2_8(dst, full + 16, half, stride, 16, 8, 8); \ -} \ - \ -void ff_ ## OPNAME ## qpel8_mc11_old_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[16 * 9]; \ - uint8_t halfH[72]; \ - uint8_t halfV[64]; \ - uint8_t halfHV[64]; \ - \ - copy_block9(full, src, 16, stride, 9); \ - put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \ - put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16); \ - put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \ - OPNAME ## pixels8_l4_8(dst, full, halfH, halfV, halfHV, \ - stride, 16, 8, 8, 8, 8); \ -} \ - \ -static void OPNAME ## qpel8_mc11_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[16 * 9]; \ - uint8_t halfH[72]; \ - uint8_t halfHV[64]; \ - \ - copy_block9(full, src, 16, stride, 9); \ - put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \ - put ## RND ## pixels8_l2_8(halfH, halfH, full, 8, 8, 16, 9); \ - put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \ - OPNAME ## pixels8_l2_8(dst, halfH, halfHV, stride, 8, 8, 8); \ -} \ - \ -void ff_ ## OPNAME ## qpel8_mc31_old_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[16 * 9]; \ - uint8_t halfH[72]; \ - uint8_t halfV[64]; \ - uint8_t halfHV[64]; \ - \ - copy_block9(full, src, 16, stride, 9); \ - put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \ - put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full + 1, 8, 16); \ - put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \ - OPNAME ## pixels8_l4_8(dst, full + 1, halfH, halfV, halfHV, \ - stride, 16, 8, 8, 8, 8); \ -} \ - \ -static void OPNAME ## qpel8_mc31_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[16 * 9]; \ - uint8_t halfH[72]; \ - uint8_t halfHV[64]; \ - \ - copy_block9(full, src, 16, stride, 9); \ - put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \ - put ## RND ## 
pixels8_l2_8(halfH, halfH, full + 1, 8, 8, 16, 9); \ - put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \ - OPNAME ## pixels8_l2_8(dst, halfH, halfHV, stride, 8, 8, 8); \ -} \ - \ -void ff_ ## OPNAME ## qpel8_mc13_old_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[16 * 9]; \ - uint8_t halfH[72]; \ - uint8_t halfV[64]; \ - uint8_t halfHV[64]; \ - \ - copy_block9(full, src, 16, stride, 9); \ - put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \ - put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16); \ - put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \ - OPNAME ## pixels8_l4_8(dst, full + 16, halfH + 8, halfV, halfHV, \ - stride, 16, 8, 8, 8, 8); \ -} \ - \ -static void OPNAME ## qpel8_mc13_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[16 * 9]; \ - uint8_t halfH[72]; \ - uint8_t halfHV[64]; \ - \ - copy_block9(full, src, 16, stride, 9); \ - put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \ - put ## RND ## pixels8_l2_8(halfH, halfH, full, 8, 8, 16, 9); \ - put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \ - OPNAME ## pixels8_l2_8(dst, halfH + 8, halfHV, stride, 8, 8, 8); \ -} \ - \ -void ff_ ## OPNAME ## qpel8_mc33_old_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[16 * 9]; \ - uint8_t halfH[72]; \ - uint8_t halfV[64]; \ - uint8_t halfHV[64]; \ - \ - copy_block9(full, src, 16, stride, 9); \ - put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \ - put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full + 1, 8, 16); \ - put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \ - OPNAME ## pixels8_l4_8(dst, full + 17, halfH + 8, halfV, halfHV, \ - stride, 16, 8, 8, 8, 8); \ -} \ - \ -static void OPNAME ## qpel8_mc33_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[16 * 9]; \ - uint8_t halfH[72]; \ - uint8_t halfHV[64]; \ - \ - copy_block9(full, src, 16, stride, 9); \ - put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \ - put ## RND ## pixels8_l2_8(halfH, halfH, full + 1, 8, 8, 16, 9); \ - put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \ - OPNAME ## pixels8_l2_8(dst, halfH + 8, halfHV, stride, 8, 8, 8); \ -} \ - \ -static void OPNAME ## qpel8_mc21_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t halfH[72]; \ - uint8_t halfHV[64]; \ - \ - put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9); \ - put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \ - OPNAME ## pixels8_l2_8(dst, halfH, halfHV, stride, 8, 8, 8); \ -} \ - \ -static void OPNAME ## qpel8_mc23_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t halfH[72]; \ - uint8_t halfHV[64]; \ - \ - put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9); \ - put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \ - OPNAME ## pixels8_l2_8(dst, halfH + 8, halfHV, stride, 8, 8, 8); \ -} \ - \ -void ff_ ## OPNAME ## qpel8_mc12_old_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[16 * 9]; \ - uint8_t halfH[72]; \ - uint8_t halfV[64]; \ - uint8_t halfHV[64]; \ - \ - copy_block9(full, src, 16, stride, 9); \ - put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \ - put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16); \ - put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \ - OPNAME ## pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8); \ -} \ - \ -static void OPNAME ## qpel8_mc12_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[16 * 9]; \ - 
uint8_t halfH[72]; \ - \ - copy_block9(full, src, 16, stride, 9); \ - put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \ - put ## RND ## pixels8_l2_8(halfH, halfH, full, 8, 8, 16, 9); \ - OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8); \ -} \ - \ -void ff_ ## OPNAME ## qpel8_mc32_old_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[16 * 9]; \ - uint8_t halfH[72]; \ - uint8_t halfV[64]; \ - uint8_t halfHV[64]; \ - \ - copy_block9(full, src, 16, stride, 9); \ - put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \ - put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full + 1, 8, 16); \ - put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \ - OPNAME ## pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8); \ -} \ - \ -static void OPNAME ## qpel8_mc32_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[16 * 9]; \ - uint8_t halfH[72]; \ - \ - copy_block9(full, src, 16, stride, 9); \ - put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \ - put ## RND ## pixels8_l2_8(halfH, halfH, full + 1, 8, 8, 16, 9); \ - OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8); \ -} \ - \ -static void OPNAME ## qpel8_mc22_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t halfH[72]; \ - \ - put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9); \ - OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8); \ -} \ - \ -static void OPNAME ## qpel16_mc10_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t half[256]; \ - \ - put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16); \ - OPNAME ## pixels16_l2_8(dst, src, half, stride, stride, 16, 16); \ -} \ - \ -static void OPNAME ## qpel16_mc20_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - OPNAME ## mpeg4_qpel16_h_lowpass(dst, src, stride, stride, 16); \ -} \ - \ -static void OPNAME ## qpel16_mc30_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t half[256]; \ - \ - put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16); \ - OPNAME ## pixels16_l2_8(dst, src + 1, half, stride, stride, 16, 16); \ -} \ - \ -static void OPNAME ## qpel16_mc01_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[24 * 17]; \ - uint8_t half[256]; \ - \ - copy_block17(full, src, 24, stride, 17); \ - put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24); \ - OPNAME ## pixels16_l2_8(dst, full, half, stride, 24, 16, 16); \ -} \ - \ -static void OPNAME ## qpel16_mc02_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[24 * 17]; \ - \ - copy_block17(full, src, 24, stride, 17); \ - OPNAME ## mpeg4_qpel16_v_lowpass(dst, full, stride, 24); \ -} \ - \ -static void OPNAME ## qpel16_mc03_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[24 * 17]; \ - uint8_t half[256]; \ - \ - copy_block17(full, src, 24, stride, 17); \ - put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24); \ - OPNAME ## pixels16_l2_8(dst, full + 24, half, stride, 24, 16, 16); \ -} \ - \ -void ff_ ## OPNAME ## qpel16_mc11_old_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[24 * 17]; \ - uint8_t halfH[272]; \ - uint8_t halfV[256]; \ - uint8_t halfHV[256]; \ - \ - copy_block17(full, src, 24, stride, 17); \ - put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \ - put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24); \ - put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \ - OPNAME ## pixels16_l4_8(dst, full, halfH, halfV, halfHV, \ - stride, 24, 16, 
16, 16, 16); \ -} \ - \ -static void OPNAME ## qpel16_mc11_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[24 * 17]; \ - uint8_t halfH[272]; \ - uint8_t halfHV[256]; \ - \ - copy_block17(full, src, 24, stride, 17); \ - put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \ - put ## RND ## pixels16_l2_8(halfH, halfH, full, 16, 16, 24, 17); \ - put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \ - OPNAME ## pixels16_l2_8(dst, halfH, halfHV, stride, 16, 16, 16); \ -} \ - \ -void ff_ ## OPNAME ## qpel16_mc31_old_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[24 * 17]; \ - uint8_t halfH[272]; \ - uint8_t halfV[256]; \ - uint8_t halfHV[256]; \ - \ - copy_block17(full, src, 24, stride, 17); \ - put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \ - put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full + 1, 16, 24); \ - put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \ - OPNAME ## pixels16_l4_8(dst, full + 1, halfH, halfV, halfHV, \ - stride, 24, 16, 16, 16, 16); \ -} \ - \ -static void OPNAME ## qpel16_mc31_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[24 * 17]; \ - uint8_t halfH[272]; \ - uint8_t halfHV[256]; \ - \ - copy_block17(full, src, 24, stride, 17); \ - put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \ - put ## RND ## pixels16_l2_8(halfH, halfH, full + 1, 16, 16, 24, 17); \ - put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \ - OPNAME ## pixels16_l2_8(dst, halfH, halfHV, stride, 16, 16, 16); \ -} \ - \ -void ff_ ## OPNAME ## qpel16_mc13_old_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[24 * 17]; \ - uint8_t halfH[272]; \ - uint8_t halfV[256]; \ - uint8_t halfHV[256]; \ - \ - copy_block17(full, src, 24, stride, 17); \ - put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \ - put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24); \ - put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \ - OPNAME ## pixels16_l4_8(dst, full + 24, halfH + 16, halfV, halfHV, \ - stride, 24, 16, 16, 16, 16); \ -} \ - \ -static void OPNAME ## qpel16_mc13_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[24 * 17]; \ - uint8_t halfH[272]; \ - uint8_t halfHV[256]; \ - \ - copy_block17(full, src, 24, stride, 17); \ - put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \ - put ## RND ## pixels16_l2_8(halfH, halfH, full, 16, 16, 24, 17); \ - put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \ - OPNAME ## pixels16_l2_8(dst, halfH + 16, halfHV, stride, 16, 16, 16); \ -} \ - \ -void ff_ ## OPNAME ## qpel16_mc33_old_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[24 * 17]; \ - uint8_t halfH[272]; \ - uint8_t halfV[256]; \ - uint8_t halfHV[256]; \ - \ - copy_block17(full, src, 24, stride, 17); \ - put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \ - put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full + 1, 16, 24); \ - put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \ - OPNAME ## pixels16_l4_8(dst, full + 25, halfH + 16, halfV, halfHV, \ - stride, 24, 16, 16, 16, 16); \ -} \ - \ -static void OPNAME ## qpel16_mc33_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[24 * 17]; \ - uint8_t halfH[272]; \ - uint8_t halfHV[256]; \ - \ - copy_block17(full, src, 24, stride, 17); \ - put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \ - put ## RND ## pixels16_l2_8(halfH, halfH, full + 1, 16, 16, 24, 17); \ - 
put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \ - OPNAME ## pixels16_l2_8(dst, halfH + 16, halfHV, stride, 16, 16, 16); \ -} \ - \ -static void OPNAME ## qpel16_mc21_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t halfH[272]; \ - uint8_t halfHV[256]; \ - \ - put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17); \ - put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \ - OPNAME ## pixels16_l2_8(dst, halfH, halfHV, stride, 16, 16, 16); \ -} \ - \ -static void OPNAME ## qpel16_mc23_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t halfH[272]; \ - uint8_t halfHV[256]; \ - \ - put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17); \ - put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \ - OPNAME ## pixels16_l2_8(dst, halfH + 16, halfHV, stride, 16, 16, 16); \ -} \ - \ -void ff_ ## OPNAME ## qpel16_mc12_old_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[24 * 17]; \ - uint8_t halfH[272]; \ - uint8_t halfV[256]; \ - uint8_t halfHV[256]; \ - \ - copy_block17(full, src, 24, stride, 17); \ - put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \ - put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24); \ - put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \ - OPNAME ## pixels16_l2_8(dst, halfV, halfHV, stride, 16, 16, 16); \ -} \ - \ -static void OPNAME ## qpel16_mc12_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[24 * 17]; \ - uint8_t halfH[272]; \ - \ - copy_block17(full, src, 24, stride, 17); \ - put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \ - put ## RND ## pixels16_l2_8(halfH, halfH, full, 16, 16, 24, 17); \ - OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16); \ -} \ - \ -void ff_ ## OPNAME ## qpel16_mc32_old_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[24 * 17]; \ - uint8_t halfH[272]; \ - uint8_t halfV[256]; \ - uint8_t halfHV[256]; \ - \ - copy_block17(full, src, 24, stride, 17); \ - put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \ - put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full + 1, 16, 24); \ - put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \ - OPNAME ## pixels16_l2_8(dst, halfV, halfHV, stride, 16, 16, 16); \ -} \ - \ -static void OPNAME ## qpel16_mc32_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t full[24 * 17]; \ - uint8_t halfH[272]; \ - \ - copy_block17(full, src, 24, stride, 17); \ - put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \ - put ## RND ## pixels16_l2_8(halfH, halfH, full + 1, 16, 16, 24, 17); \ - OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16); \ -} \ - \ -static void OPNAME ## qpel16_mc22_c(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint8_t halfH[272]; \ - \ - put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17); \ - OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16); \ -} - -#define op_avg(a, b) a = (((a) + cm[((b) + 16) >> 5] + 1) >> 1) -#define op_avg_no_rnd(a, b) a = (((a) + cm[((b) + 15) >> 5]) >> 1) -#define op_put(a, b) a = cm[((b) + 16) >> 5] -#define op_put_no_rnd(a, b) a = cm[((b) + 15) >> 5] - -QPEL_MC(0, put_, _, op_put) -QPEL_MC(1, put_no_rnd_, _no_rnd_, op_put_no_rnd) -QPEL_MC(0, avg_, _, op_avg) - -#undef op_avg -#undef op_put -#undef op_put_no_rnd - -void ff_put_pixels8x8_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride) -{ - put_pixels8_8_c(dst, src, stride, 8); -} - -void ff_avg_pixels8x8_c(uint8_t *dst, uint8_t *src, 
ptrdiff_t stride) -{ - avg_pixels8_8_c(dst, src, stride, 8); -} - -void ff_put_pixels16x16_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride) -{ - put_pixels16_8_c(dst, src, stride, 16); -} - -void ff_avg_pixels16x16_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride) -{ - avg_pixels16_8_c(dst, src, stride, 16); -} - -#define put_qpel8_mc00_c ff_put_pixels8x8_c -#define avg_qpel8_mc00_c ff_avg_pixels8x8_c -#define put_qpel16_mc00_c ff_put_pixels16x16_c -#define avg_qpel16_mc00_c ff_avg_pixels16x16_c -#define put_no_rnd_qpel8_mc00_c ff_put_pixels8x8_c -#define put_no_rnd_qpel16_mc00_c ff_put_pixels16x16_c - -void ff_put_pixels8_l2_8(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, - int dst_stride, int src_stride1, int src_stride2, - int h) -{ - put_pixels8_l2_8(dst, src1, src2, dst_stride, src_stride1, src_stride2, h); - -} - static inline int pix_abs16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) { @@ -2198,35 +1500,6 @@ av_cold void ff_dsputil_init(DSPContext *c, AVCodecContext *avctx) c->pix_abs[1][2] = pix_abs8_y2_c; c->pix_abs[1][3] = pix_abs8_xy2_c; -#define dspfunc(PFX, IDX, NUM) \ - c->PFX ## _pixels_tab[IDX][0] = PFX ## NUM ## _mc00_c; \ - c->PFX ## _pixels_tab[IDX][1] = PFX ## NUM ## _mc10_c; \ - c->PFX ## _pixels_tab[IDX][2] = PFX ## NUM ## _mc20_c; \ - c->PFX ## _pixels_tab[IDX][3] = PFX ## NUM ## _mc30_c; \ - c->PFX ## _pixels_tab[IDX][4] = PFX ## NUM ## _mc01_c; \ - c->PFX ## _pixels_tab[IDX][5] = PFX ## NUM ## _mc11_c; \ - c->PFX ## _pixels_tab[IDX][6] = PFX ## NUM ## _mc21_c; \ - c->PFX ## _pixels_tab[IDX][7] = PFX ## NUM ## _mc31_c; \ - c->PFX ## _pixels_tab[IDX][8] = PFX ## NUM ## _mc02_c; \ - c->PFX ## _pixels_tab[IDX][9] = PFX ## NUM ## _mc12_c; \ - c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_c; \ - c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_c; \ - c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_c; \ - c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_c; \ - c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_c; \ - c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_c - - dspfunc(put_qpel, 0, 16); - dspfunc(put_qpel, 1, 8); - - dspfunc(put_no_rnd_qpel, 0, 16); - dspfunc(put_no_rnd_qpel, 1, 8); - - dspfunc(avg_qpel, 0, 16); - dspfunc(avg_qpel, 1, 8); - -#undef dspfunc - #define SET_CMP_FUNC(name) \ c->name[0] = name ## 16_c; \ c->name[1] = name ## 8x8_c; diff --git a/libavcodec/dsputil.h b/libavcodec/dsputil.h index ec4eb29288..d7c2c2431f 100644 --- a/libavcodec/dsputil.h +++ b/libavcodec/dsputil.h @@ -34,15 +34,6 @@ extern uint32_t ff_square_tab[512]; -void ff_put_pixels8x8_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride); -void ff_avg_pixels8x8_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride); -void ff_put_pixels16x16_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride); -void ff_avg_pixels16x16_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride); - -void ff_put_pixels8_l2_8(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, - int dst_stride, int src_stride1, int src_stride2, - int h); - void ff_gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy, int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height); @@ -64,33 +55,9 @@ void ff_gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy, * Block sizes for op_pixels_func are 8x4,8x8 16x8 16x16. * h for op_pixels_func is limited to { width / 2, width }, * but never larger than 16 and never smaller than 4. 
*/ -typedef void (*qpel_mc_func)(uint8_t *dst /* align width (8 or 16) */, - uint8_t *src /* align 1 */, ptrdiff_t stride); - typedef void (*op_fill_func)(uint8_t *block /* align width (8 or 16) */, uint8_t value, int line_size, int h); -#define DEF_OLD_QPEL(name) \ - void ff_put_ ## name(uint8_t *dst /* align width (8 or 16) */, \ - uint8_t *src /* align 1 */, ptrdiff_t stride); \ - void ff_put_no_rnd_ ## name(uint8_t *dst /* align width (8 or 16) */, \ - uint8_t *src /* align 1 */, ptrdiff_t stride); \ - void ff_avg_ ## name(uint8_t *dst /* align width (8 or 16) */, \ - uint8_t *src /* align 1 */, ptrdiff_t stride); - -DEF_OLD_QPEL(qpel16_mc11_old_c) -DEF_OLD_QPEL(qpel16_mc31_old_c) -DEF_OLD_QPEL(qpel16_mc12_old_c) -DEF_OLD_QPEL(qpel16_mc32_old_c) -DEF_OLD_QPEL(qpel16_mc13_old_c) -DEF_OLD_QPEL(qpel16_mc33_old_c) -DEF_OLD_QPEL(qpel8_mc11_old_c) -DEF_OLD_QPEL(qpel8_mc31_old_c) -DEF_OLD_QPEL(qpel8_mc12_old_c) -DEF_OLD_QPEL(qpel8_mc32_old_c) -DEF_OLD_QPEL(qpel8_mc13_old_c) -DEF_OLD_QPEL(qpel8_mc33_old_c) - struct MpegEncContext; /* Motion estimation: * h is limited to { width / 2, width, 2 * width }, @@ -174,10 +141,6 @@ typedef struct DSPContext { me_cmp_func ildct_cmp[6]; // only width 16 used me_cmp_func frame_skip_cmp[6]; // only width 8 used - qpel_mc_func put_qpel_pixels_tab[2][16]; - qpel_mc_func avg_qpel_pixels_tab[2][16]; - qpel_mc_func put_no_rnd_qpel_pixels_tab[2][16]; - me_cmp_func pix_abs[2][4]; void (*bswap_buf)(uint32_t *dst, const uint32_t *src, int w); diff --git a/libavcodec/dsputil_template.c b/libavcodec/dsputil_template.c deleted file mode 100644 index 7a8eb223f4..0000000000 --- a/libavcodec/dsputil_template.c +++ /dev/null @@ -1,223 +0,0 @@ -/* - * DSP utils - * Copyright (c) 2000, 2001 Fabrice Bellard - * Copyright (c) 2002-2004 Michael Niedermayer - * - * gmc & q-pel & 32/64 bit based MC by Michael Niedermayer - * - * This file is part of Libav. - * - * Libav is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * Libav is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * DSP utils - */ - -#define PIXOP2(OPNAME, OP) \ -static inline void OPNAME ## _no_rnd_pixels8_l2_8(uint8_t *dst, \ - const uint8_t *src1, \ - const uint8_t *src2, \ - int dst_stride, \ - int src_stride1, \ - int src_stride2, \ - int h) \ -{ \ - int i; \ - \ - for (i = 0; i < h; i++) { \ - uint32_t a, b; \ - a = AV_RN32(&src1[i * src_stride1]); \ - b = AV_RN32(&src2[i * src_stride2]); \ - OP(*((uint32_t *) &dst[i * dst_stride]), \ - no_rnd_avg32(a, b)); \ - a = AV_RN32(&src1[i * src_stride1 + 4]); \ - b = AV_RN32(&src2[i * src_stride2 + 4]); \ - OP(*((uint32_t *) &dst[i * dst_stride + 4]), \ - no_rnd_avg32(a, b)); \ - } \ -} \ - \ -static inline void OPNAME ## _no_rnd_pixels16_l2_8(uint8_t *dst, \ - const uint8_t *src1, \ - const uint8_t *src2, \ - int dst_stride, \ - int src_stride1, \ - int src_stride2, \ - int h) \ -{ \ - OPNAME ## _no_rnd_pixels8_l2_8(dst, src1, src2, dst_stride, \ - src_stride1, src_stride2, h); \ - OPNAME ## _no_rnd_pixels8_l2_8(dst + 8, \ - src1 + 8, \ - src2 + 8, \ - dst_stride, src_stride1, \ - src_stride2, h); \ -} \ - \ -static inline void OPNAME ## _pixels8_l4_8(uint8_t *dst, \ - const uint8_t *src1, \ - const uint8_t *src2, \ - const uint8_t *src3, \ - const uint8_t *src4, \ - int dst_stride, \ - int src_stride1, \ - int src_stride2, \ - int src_stride3, \ - int src_stride4, \ - int h) \ -{ \ - /* FIXME HIGH BIT DEPTH */ \ - int i; \ - \ - for (i = 0; i < h; i++) { \ - uint32_t a, b, c, d, l0, l1, h0, h1; \ - a = AV_RN32(&src1[i * src_stride1]); \ - b = AV_RN32(&src2[i * src_stride2]); \ - c = AV_RN32(&src3[i * src_stride3]); \ - d = AV_RN32(&src4[i * src_stride4]); \ - l0 = (a & 0x03030303UL) + \ - (b & 0x03030303UL) + \ - 0x02020202UL; \ - h0 = ((a & 0xFCFCFCFCUL) >> 2) + \ - ((b & 0xFCFCFCFCUL) >> 2); \ - l1 = (c & 0x03030303UL) + \ - (d & 0x03030303UL); \ - h1 = ((c & 0xFCFCFCFCUL) >> 2) + \ - ((d & 0xFCFCFCFCUL) >> 2); \ - OP(*((uint32_t *) &dst[i * dst_stride]), \ - h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL)); \ - a = AV_RN32(&src1[i * src_stride1 + 4]); \ - b = AV_RN32(&src2[i * src_stride2 + 4]); \ - c = AV_RN32(&src3[i * src_stride3 + 4]); \ - d = AV_RN32(&src4[i * src_stride4 + 4]); \ - l0 = (a & 0x03030303UL) + \ - (b & 0x03030303UL) + \ - 0x02020202UL; \ - h0 = ((a & 0xFCFCFCFCUL) >> 2) + \ - ((b & 0xFCFCFCFCUL) >> 2); \ - l1 = (c & 0x03030303UL) + \ - (d & 0x03030303UL); \ - h1 = ((c & 0xFCFCFCFCUL) >> 2) + \ - ((d & 0xFCFCFCFCUL) >> 2); \ - OP(*((uint32_t *) &dst[i * dst_stride + 4]), \ - h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL)); \ - } \ -} \ - \ -static inline void OPNAME ## _no_rnd_pixels8_l4_8(uint8_t *dst, \ - const uint8_t *src1, \ - const uint8_t *src2, \ - const uint8_t *src3, \ - const uint8_t *src4, \ - int dst_stride, \ - int src_stride1, \ - int src_stride2, \ - int src_stride3, \ - int src_stride4, \ - int h) \ -{ \ - /* FIXME HIGH BIT DEPTH */ \ - int i; \ - \ - for (i = 0; i < h; i++) { \ - uint32_t a, b, c, d, l0, l1, h0, h1; \ - a = AV_RN32(&src1[i * src_stride1]); \ - b = AV_RN32(&src2[i * src_stride2]); \ - c = AV_RN32(&src3[i * src_stride3]); \ - d = AV_RN32(&src4[i * src_stride4]); \ - l0 = (a & 0x03030303UL) + \ - (b & 0x03030303UL) + \ - 0x01010101UL; \ - h0 = ((a & 0xFCFCFCFCUL) >> 2) + \ - ((b & 0xFCFCFCFCUL) >> 2); \ - l1 = (c & 0x03030303UL) + \ - (d & 0x03030303UL); \ - h1 = 
((c & 0xFCFCFCFCUL) >> 2) + \ - ((d & 0xFCFCFCFCUL) >> 2); \ - OP(*((uint32_t *) &dst[i * dst_stride]), \ - h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL)); \ - a = AV_RN32(&src1[i * src_stride1 + 4]); \ - b = AV_RN32(&src2[i * src_stride2 + 4]); \ - c = AV_RN32(&src3[i * src_stride3 + 4]); \ - d = AV_RN32(&src4[i * src_stride4 + 4]); \ - l0 = (a & 0x03030303UL) + \ - (b & 0x03030303UL) + \ - 0x01010101UL; \ - h0 = ((a & 0xFCFCFCFCUL) >> 2) + \ - ((b & 0xFCFCFCFCUL) >> 2); \ - l1 = (c & 0x03030303UL) + \ - (d & 0x03030303UL); \ - h1 = ((c & 0xFCFCFCFCUL) >> 2) + \ - ((d & 0xFCFCFCFCUL) >> 2); \ - OP(*((uint32_t *) &dst[i * dst_stride + 4]), \ - h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL)); \ - } \ -} \ - \ -static inline void OPNAME ## _pixels16_l4_8(uint8_t *dst, \ - const uint8_t *src1, \ - const uint8_t *src2, \ - const uint8_t *src3, \ - const uint8_t *src4, \ - int dst_stride, \ - int src_stride1, \ - int src_stride2, \ - int src_stride3, \ - int src_stride4, \ - int h) \ -{ \ - OPNAME ## _pixels8_l4_8(dst, src1, src2, src3, src4, dst_stride, \ - src_stride1, src_stride2, src_stride3, \ - src_stride4, h); \ - OPNAME ## _pixels8_l4_8(dst + 8, \ - src1 + 8, src2 + 8, \ - src3 + 8, src4 + 8, \ - dst_stride, src_stride1, src_stride2, \ - src_stride3, src_stride4, h); \ -} \ - \ -static inline void OPNAME ## _no_rnd_pixels16_l4_8(uint8_t *dst, \ - const uint8_t *src1, \ - const uint8_t *src2, \ - const uint8_t *src3, \ - const uint8_t *src4, \ - int dst_stride, \ - int src_stride1, \ - int src_stride2, \ - int src_stride3, \ - int src_stride4, \ - int h) \ -{ \ - OPNAME ## _no_rnd_pixels8_l4_8(dst, src1, src2, src3, src4, \ - dst_stride, src_stride1, \ - src_stride2, src_stride3, \ - src_stride4, h); \ - OPNAME ## _no_rnd_pixels8_l4_8(dst + 8, \ - src1 + 8, src2 + 8, \ - src3 + 8, src4 + 8, \ - dst_stride, src_stride1, \ - src_stride2, src_stride3, \ - src_stride4, h); \ -} \ - -#define op_avg(a, b) a = rnd_avg32(a, b) -#define op_put(a, b) a = b -#define put_no_rnd_pixels8_8_c put_pixels8_8_c -PIXOP2(avg, op_avg) -PIXOP2(put, op_put) -#undef op_avg -#undef op_put diff --git a/libavcodec/h263dec.c b/libavcodec/h263dec.c index c380983826..f70feb99a5 100644 --- a/libavcodec/h263dec.c +++ b/libavcodec/h263dec.c @@ -36,6 +36,7 @@ #include "mpeg4video_parser.h" #include "mpegvideo.h" #include "msmpeg4.h" +#include "qpeldsp.h" #include "thread.h" av_cold int ff_h263_decode_init(AVCodecContext *avctx) @@ -116,6 +117,7 @@ av_cold int ff_h263_decode_init(AVCodecContext *avctx) return ret; ff_h263dsp_init(&s->h263dsp); + ff_qpeldsp_init(&s->qdsp); ff_h263_decode_init_vlc(); return 0; @@ -461,9 +463,9 @@ int ff_h263_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, avctx->has_b_frames = !s->low_delay; #define SET_QPEL_FUNC(postfix1, postfix2) \ - s->dsp.put_ ## postfix1 = ff_put_ ## postfix2; \ - s->dsp.put_no_rnd_ ## postfix1 = ff_put_no_rnd_ ## postfix2; \ - s->dsp.avg_ ## postfix1 = ff_avg_ ## postfix2; + s->qdsp.put_ ## postfix1 = ff_put_ ## postfix2; \ + s->qdsp.put_no_rnd_ ## postfix1 = ff_put_no_rnd_ ## postfix2; \ + s->qdsp.avg_ ## postfix1 = ff_avg_ ## postfix2; if (s->workaround_bugs & FF_BUG_STD_QPEL) { SET_QPEL_FUNC(qpel_pixels_tab[0][5], qpel16_mc11_old_c) @@ -527,11 +529,11 @@ int ff_h263_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, } if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) { - s->me.qpel_put = s->dsp.put_qpel_pixels_tab; - s->me.qpel_avg = s->dsp.avg_qpel_pixels_tab; + s->me.qpel_put = s->qdsp.put_qpel_pixels_tab; + s->me.qpel_avg = 
s->qdsp.avg_qpel_pixels_tab; } else { - s->me.qpel_put = s->dsp.put_no_rnd_qpel_pixels_tab; - s->me.qpel_avg = s->dsp.avg_qpel_pixels_tab; + s->me.qpel_put = s->qdsp.put_no_rnd_qpel_pixels_tab; + s->me.qpel_avg = s->qdsp.avg_qpel_pixels_tab; } if ((ret = ff_MPV_frame_start(s, avctx)) < 0) diff --git a/libavcodec/h264.h b/libavcodec/h264.h index e259d0d7aa..3e99832571 100644 --- a/libavcodec/h264.h +++ b/libavcodec/h264.h @@ -39,6 +39,7 @@ #include "h264qpel.h" #include "mpegutils.h" #include "parser.h" +#include "qpeldsp.h" #include "rectangle.h" #include "videodsp.h" diff --git a/libavcodec/h264_mb.c b/libavcodec/h264_mb.c index 67d9202d07..61d68ab4ec 100644 --- a/libavcodec/h264_mb.c +++ b/libavcodec/h264_mb.c @@ -32,6 +32,7 @@ #include "libavutil/intreadwrite.h" #include "avcodec.h" #include "h264.h" +#include "qpeldsp.h" #include "svq3.h" #include "thread.h" diff --git a/libavcodec/h264qpel.h b/libavcodec/h264qpel.h index 202e97d53c..97ce195d6a 100644 --- a/libavcodec/h264qpel.h +++ b/libavcodec/h264qpel.h @@ -22,7 +22,7 @@ #ifndef AVCODEC_H264QPEL_H #define AVCODEC_H264QPEL_H -#include "dsputil.h" +#include "qpeldsp.h" typedef struct H264QpelContext { qpel_mc_func put_h264_qpel_pixels_tab[4][16]; diff --git a/libavcodec/motion_est.c b/libavcodec/motion_est.c index ebc89173a1..9677e04b3e 100644 --- a/libavcodec/motion_est.c +++ b/libavcodec/motion_est.c @@ -329,9 +329,11 @@ int ff_init_me(MpegEncContext *s){ /*FIXME s->no_rounding b_type*/ if(s->flags&CODEC_FLAG_QPEL){ c->sub_motion_search= qpel_motion_search; - c->qpel_avg= s->dsp.avg_qpel_pixels_tab; - if(s->no_rounding) c->qpel_put= s->dsp.put_no_rnd_qpel_pixels_tab; - else c->qpel_put= s->dsp.put_qpel_pixels_tab; + c->qpel_avg = s->qdsp.avg_qpel_pixels_tab; + if (s->no_rounding) + c->qpel_put = s->qdsp.put_no_rnd_qpel_pixels_tab; + else + c->qpel_put = s->qdsp.put_qpel_pixels_tab; }else{ if(c->avctx->me_sub_cmp&FF_CMP_CHROMA) c->sub_motion_search= hpel_motion_search; @@ -622,9 +624,9 @@ static inline int h263_mv4_search(MpegEncContext *s, int mx, int my, int shift) dxy = ((my4 & 3) << 2) | (mx4 & 3); if(s->no_rounding) - s->dsp.put_no_rnd_qpel_pixels_tab[1][dxy](dest_y , ref , stride); + s->qdsp.put_no_rnd_qpel_pixels_tab[1][dxy](dest_y, ref, stride); else - s->dsp.put_qpel_pixels_tab [1][dxy](dest_y , ref , stride); + s->qdsp.put_qpel_pixels_tab[1][dxy](dest_y, ref, stride); }else{ uint8_t *ref= c->ref[block][0] + (mx4>>1) + (my4>>1)*stride; dxy = ((my4 & 1) << 1) | (mx4 & 1); @@ -1208,14 +1210,14 @@ static inline int check_bidir_mv(MpegEncContext * s, src_y = motion_fy >> 2; ptr = ref_data[0] + (src_y * stride) + src_x; - s->dsp.put_qpel_pixels_tab[0][dxy](dest_y , ptr , stride); + s->qdsp.put_qpel_pixels_tab[0][dxy](dest_y, ptr, stride); dxy = ((motion_by & 3) << 2) | (motion_bx & 3); src_x = motion_bx >> 2; src_y = motion_by >> 2; ptr = ref2_data[0] + (src_y * stride) + src_x; - s->dsp.avg_qpel_pixels_tab[size][dxy](dest_y , ptr , stride); + s->qdsp.avg_qpel_pixels_tab[size][dxy](dest_y, ptr, stride); }else{ dxy = ((motion_fy & 1) << 1) | (motion_fx & 1); src_x = motion_fx >> 1; diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c index 5f591cc450..49bc70c28b 100644 --- a/libavcodec/mpegvideo.c +++ b/libavcodec/mpegvideo.c @@ -40,6 +40,7 @@ #include "mpegvideo.h" #include "mjpegenc.h" #include "msmpeg4.h" +#include "qpeldsp.h" #include "xvmc_internal.h" #include "thread.h" #include diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h index dfd8b28b14..a0114fdec1 100644 --- a/libavcodec/mpegvideo.h +++ 
b/libavcodec/mpegvideo.h @@ -38,6 +38,7 @@ #include "ratecontrol.h" #include "parser.h" #include "mpeg12data.h" +#include "qpeldsp.h" #include "rl.h" #include "thread.h" #include "videodsp.h" @@ -348,6 +349,7 @@ typedef struct MpegEncContext { DSPContext dsp; ///< pointers for accelerated dsp functions HpelDSPContext hdsp; + QpelDSPContext qdsp; VideoDSPContext vdsp; H263DSPContext h263dsp; int f_code; ///< forward MV resolution diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c index c6b7cc8316..3550fdedc2 100644 --- a/libavcodec/mpegvideo_enc.c +++ b/libavcodec/mpegvideo_enc.c @@ -46,6 +46,7 @@ #include "mpegutils.h" #include "mjpegenc.h" #include "msmpeg4.h" +#include "qpeldsp.h" #include "faandct.h" #include "thread.h" #include "aandcttab.h" @@ -687,6 +688,8 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx) if (ARCH_X86) ff_MPV_encode_init_x86(s); + ff_qpeldsp_init(&s->qdsp); + s->avctx->coded_frame = s->current_picture.f; if (s->msmpeg4_version) { @@ -1944,10 +1947,10 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) { op_pix = s->hdsp.put_pixels_tab; - op_qpix = s->dsp.put_qpel_pixels_tab; + op_qpix = s->qdsp.put_qpel_pixels_tab; } else { op_pix = s->hdsp.put_no_rnd_pixels_tab; - op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab; + op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab; } if (s->mv_dir & MV_DIR_FORWARD) { @@ -1955,7 +1958,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, s->last_picture.f->data, op_pix, op_qpix); op_pix = s->hdsp.avg_pixels_tab; - op_qpix = s->dsp.avg_qpel_pixels_tab; + op_qpix = s->qdsp.avg_qpel_pixels_tab; } if (s->mv_dir & MV_DIR_BACKWARD) { ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, diff --git a/libavcodec/mpegvideo_motion.c b/libavcodec/mpegvideo_motion.c index 1289da1f19..c57a26d5f8 100644 --- a/libavcodec/mpegvideo_motion.c +++ b/libavcodec/mpegvideo_motion.c @@ -31,6 +31,7 @@ #include "mpegvideo.h" #include "mjpegenc.h" #include "msmpeg4.h" +#include "qpeldsp.h" #include static void gmc1_motion(MpegEncContext *s, diff --git a/libavcodec/mss2.c b/libavcodec/mss2.c index 6f9391f870..1f5dcc3dfd 100644 --- a/libavcodec/mss2.c +++ b/libavcodec/mss2.c @@ -27,6 +27,7 @@ #include "error_resilience.h" #include "internal.h" #include "msmpeg4data.h" +#include "qpeldsp.h" #include "vc1.h" #include "mss12.h" #include "mss2dsp.h" @@ -37,6 +38,7 @@ typedef struct MSS2Context { AVFrame *last_pic; MSS12Context c; MSS2DSPContext dsp; + QpelDSPContext qdsp; SliceContext sc[2]; } MSS2Context; @@ -787,8 +789,8 @@ static av_cold int wmv9_init(AVCodecContext *avctx) return ret; /* error concealment */ - v->s.me.qpel_put = v->s.dsp.put_qpel_pixels_tab; - v->s.me.qpel_avg = v->s.dsp.avg_qpel_pixels_tab; + v->s.me.qpel_put = v->s.qdsp.put_qpel_pixels_tab; + v->s.me.qpel_avg = v->s.qdsp.avg_qpel_pixels_tab; return 0; } @@ -827,6 +829,7 @@ static av_cold int mss2_decode_init(AVCodecContext *avctx) return ret; } ff_mss2dsp_init(&ctx->dsp); + ff_qpeldsp_init(&ctx->qdsp); avctx->pix_fmt = c->free_colours == 127 ? AV_PIX_FMT_RGB555 : AV_PIX_FMT_RGB24; diff --git a/libavcodec/qpel_template.c b/libavcodec/qpel_template.c new file mode 100644 index 0000000000..2106160741 --- /dev/null +++ b/libavcodec/qpel_template.c @@ -0,0 +1,219 @@ +/* + * quarterpel DSP function templates + * + * This file is part of Libav. 
+ * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * quarterpel DSP function templates + */ + +#define PIXOP2(OPNAME, OP) \ +static inline void OPNAME ## _no_rnd_pixels8_l2_8(uint8_t *dst, \ + const uint8_t *src1, \ + const uint8_t *src2, \ + int dst_stride, \ + int src_stride1, \ + int src_stride2, \ + int h) \ +{ \ + int i; \ + \ + for (i = 0; i < h; i++) { \ + uint32_t a, b; \ + a = AV_RN32(&src1[i * src_stride1]); \ + b = AV_RN32(&src2[i * src_stride2]); \ + OP(*((uint32_t *) &dst[i * dst_stride]), \ + no_rnd_avg32(a, b)); \ + a = AV_RN32(&src1[i * src_stride1 + 4]); \ + b = AV_RN32(&src2[i * src_stride2 + 4]); \ + OP(*((uint32_t *) &dst[i * dst_stride + 4]), \ + no_rnd_avg32(a, b)); \ + } \ +} \ + \ +static inline void OPNAME ## _no_rnd_pixels16_l2_8(uint8_t *dst, \ + const uint8_t *src1, \ + const uint8_t *src2, \ + int dst_stride, \ + int src_stride1, \ + int src_stride2, \ + int h) \ +{ \ + OPNAME ## _no_rnd_pixels8_l2_8(dst, src1, src2, dst_stride, \ + src_stride1, src_stride2, h); \ + OPNAME ## _no_rnd_pixels8_l2_8(dst + 8, \ + src1 + 8, \ + src2 + 8, \ + dst_stride, src_stride1, \ + src_stride2, h); \ +} \ + \ +static inline void OPNAME ## _pixels8_l4_8(uint8_t *dst, \ + const uint8_t *src1, \ + const uint8_t *src2, \ + const uint8_t *src3, \ + const uint8_t *src4, \ + int dst_stride, \ + int src_stride1, \ + int src_stride2, \ + int src_stride3, \ + int src_stride4, \ + int h) \ +{ \ + /* FIXME HIGH BIT DEPTH */ \ + int i; \ + \ + for (i = 0; i < h; i++) { \ + uint32_t a, b, c, d, l0, l1, h0, h1; \ + a = AV_RN32(&src1[i * src_stride1]); \ + b = AV_RN32(&src2[i * src_stride2]); \ + c = AV_RN32(&src3[i * src_stride3]); \ + d = AV_RN32(&src4[i * src_stride4]); \ + l0 = (a & 0x03030303UL) + \ + (b & 0x03030303UL) + \ + 0x02020202UL; \ + h0 = ((a & 0xFCFCFCFCUL) >> 2) + \ + ((b & 0xFCFCFCFCUL) >> 2); \ + l1 = (c & 0x03030303UL) + \ + (d & 0x03030303UL); \ + h1 = ((c & 0xFCFCFCFCUL) >> 2) + \ + ((d & 0xFCFCFCFCUL) >> 2); \ + OP(*((uint32_t *) &dst[i * dst_stride]), \ + h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL)); \ + a = AV_RN32(&src1[i * src_stride1 + 4]); \ + b = AV_RN32(&src2[i * src_stride2 + 4]); \ + c = AV_RN32(&src3[i * src_stride3 + 4]); \ + d = AV_RN32(&src4[i * src_stride4 + 4]); \ + l0 = (a & 0x03030303UL) + \ + (b & 0x03030303UL) + \ + 0x02020202UL; \ + h0 = ((a & 0xFCFCFCFCUL) >> 2) + \ + ((b & 0xFCFCFCFCUL) >> 2); \ + l1 = (c & 0x03030303UL) + \ + (d & 0x03030303UL); \ + h1 = ((c & 0xFCFCFCFCUL) >> 2) + \ + ((d & 0xFCFCFCFCUL) >> 2); \ + OP(*((uint32_t *) &dst[i * dst_stride + 4]), \ + h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL)); \ + } \ +} \ + \ +static inline void OPNAME ## _no_rnd_pixels8_l4_8(uint8_t *dst, \ + const uint8_t *src1, \ + const uint8_t *src2, \ + const uint8_t *src3, \ + const uint8_t *src4, \ + int dst_stride, \ + int src_stride1, \ + int src_stride2, \ + 
int src_stride3, \ + int src_stride4, \ + int h) \ +{ \ + /* FIXME HIGH BIT DEPTH */ \ + int i; \ + \ + for (i = 0; i < h; i++) { \ + uint32_t a, b, c, d, l0, l1, h0, h1; \ + a = AV_RN32(&src1[i * src_stride1]); \ + b = AV_RN32(&src2[i * src_stride2]); \ + c = AV_RN32(&src3[i * src_stride3]); \ + d = AV_RN32(&src4[i * src_stride4]); \ + l0 = (a & 0x03030303UL) + \ + (b & 0x03030303UL) + \ + 0x01010101UL; \ + h0 = ((a & 0xFCFCFCFCUL) >> 2) + \ + ((b & 0xFCFCFCFCUL) >> 2); \ + l1 = (c & 0x03030303UL) + \ + (d & 0x03030303UL); \ + h1 = ((c & 0xFCFCFCFCUL) >> 2) + \ + ((d & 0xFCFCFCFCUL) >> 2); \ + OP(*((uint32_t *) &dst[i * dst_stride]), \ + h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL)); \ + a = AV_RN32(&src1[i * src_stride1 + 4]); \ + b = AV_RN32(&src2[i * src_stride2 + 4]); \ + c = AV_RN32(&src3[i * src_stride3 + 4]); \ + d = AV_RN32(&src4[i * src_stride4 + 4]); \ + l0 = (a & 0x03030303UL) + \ + (b & 0x03030303UL) + \ + 0x01010101UL; \ + h0 = ((a & 0xFCFCFCFCUL) >> 2) + \ + ((b & 0xFCFCFCFCUL) >> 2); \ + l1 = (c & 0x03030303UL) + \ + (d & 0x03030303UL); \ + h1 = ((c & 0xFCFCFCFCUL) >> 2) + \ + ((d & 0xFCFCFCFCUL) >> 2); \ + OP(*((uint32_t *) &dst[i * dst_stride + 4]), \ + h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL)); \ + } \ +} \ + \ +static inline void OPNAME ## _pixels16_l4_8(uint8_t *dst, \ + const uint8_t *src1, \ + const uint8_t *src2, \ + const uint8_t *src3, \ + const uint8_t *src4, \ + int dst_stride, \ + int src_stride1, \ + int src_stride2, \ + int src_stride3, \ + int src_stride4, \ + int h) \ +{ \ + OPNAME ## _pixels8_l4_8(dst, src1, src2, src3, src4, dst_stride, \ + src_stride1, src_stride2, src_stride3, \ + src_stride4, h); \ + OPNAME ## _pixels8_l4_8(dst + 8, \ + src1 + 8, src2 + 8, \ + src3 + 8, src4 + 8, \ + dst_stride, src_stride1, src_stride2, \ + src_stride3, src_stride4, h); \ +} \ + \ +static inline void OPNAME ## _no_rnd_pixels16_l4_8(uint8_t *dst, \ + const uint8_t *src1, \ + const uint8_t *src2, \ + const uint8_t *src3, \ + const uint8_t *src4, \ + int dst_stride, \ + int src_stride1, \ + int src_stride2, \ + int src_stride3, \ + int src_stride4, \ + int h) \ +{ \ + OPNAME ## _no_rnd_pixels8_l4_8(dst, src1, src2, src3, src4, \ + dst_stride, src_stride1, \ + src_stride2, src_stride3, \ + src_stride4, h); \ + OPNAME ## _no_rnd_pixels8_l4_8(dst + 8, \ + src1 + 8, src2 + 8, \ + src3 + 8, src4 + 8, \ + dst_stride, src_stride1, \ + src_stride2, src_stride3, \ + src_stride4, h); \ +} \ + +#define op_avg(a, b) a = rnd_avg32(a, b) +#define op_put(a, b) a = b +#define put_no_rnd_pixels8_8_c put_pixels8_8_c +PIXOP2(avg, op_avg) +PIXOP2(put, op_put) +#undef op_avg +#undef op_put diff --git a/libavcodec/qpeldsp.c b/libavcodec/qpeldsp.c new file mode 100644 index 0000000000..7b51ce2d1d --- /dev/null +++ b/libavcodec/qpeldsp.c @@ -0,0 +1,764 @@ +/* + * quarterpel DSP functions + * + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * quarterpel DSP functions + */ + +#include +#include + +#include "config.h" +#include "libavutil/attributes.h" +#include "copy_block.h" +#include "qpeldsp.h" + +#define BIT_DEPTH 8 +#include "hpel_template.c" +#include "tpel_template.c" +#include "qpel_template.c" + +#define QPEL_MC(r, OPNAME, RND, OP) \ +static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, \ + int dstStride, int srcStride, \ + int h) \ +{ \ + const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \ + int i; \ + \ + for (i = 0; i < h; i++) { \ + OP(dst[0], (src[0] + src[1]) * 20 - (src[0] + src[2]) * 6 + (src[1] + src[3]) * 3 - (src[2] + src[4])); \ + OP(dst[1], (src[1] + src[2]) * 20 - (src[0] + src[3]) * 6 + (src[0] + src[4]) * 3 - (src[1] + src[5])); \ + OP(dst[2], (src[2] + src[3]) * 20 - (src[1] + src[4]) * 6 + (src[0] + src[5]) * 3 - (src[0] + src[6])); \ + OP(dst[3], (src[3] + src[4]) * 20 - (src[2] + src[5]) * 6 + (src[1] + src[6]) * 3 - (src[0] + src[7])); \ + OP(dst[4], (src[4] + src[5]) * 20 - (src[3] + src[6]) * 6 + (src[2] + src[7]) * 3 - (src[1] + src[8])); \ + OP(dst[5], (src[5] + src[6]) * 20 - (src[4] + src[7]) * 6 + (src[3] + src[8]) * 3 - (src[2] + src[8])); \ + OP(dst[6], (src[6] + src[7]) * 20 - (src[5] + src[8]) * 6 + (src[4] + src[8]) * 3 - (src[3] + src[7])); \ + OP(dst[7], (src[7] + src[8]) * 20 - (src[6] + src[8]) * 6 + (src[5] + src[7]) * 3 - (src[4] + src[6])); \ + dst += dstStride; \ + src += srcStride; \ + } \ +} \ + \ +static void OPNAME ## mpeg4_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, \ + int dstStride, int srcStride) \ +{ \ + const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \ + const int w = 8; \ + int i; \ + \ + for (i = 0; i < w; i++) { \ + const int src0 = src[0 * srcStride]; \ + const int src1 = src[1 * srcStride]; \ + const int src2 = src[2 * srcStride]; \ + const int src3 = src[3 * srcStride]; \ + const int src4 = src[4 * srcStride]; \ + const int src5 = src[5 * srcStride]; \ + const int src6 = src[6 * srcStride]; \ + const int src7 = src[7 * srcStride]; \ + const int src8 = src[8 * srcStride]; \ + OP(dst[0 * dstStride], (src0 + src1) * 20 - (src0 + src2) * 6 + (src1 + src3) * 3 - (src2 + src4)); \ + OP(dst[1 * dstStride], (src1 + src2) * 20 - (src0 + src3) * 6 + (src0 + src4) * 3 - (src1 + src5)); \ + OP(dst[2 * dstStride], (src2 + src3) * 20 - (src1 + src4) * 6 + (src0 + src5) * 3 - (src0 + src6)); \ + OP(dst[3 * dstStride], (src3 + src4) * 20 - (src2 + src5) * 6 + (src1 + src6) * 3 - (src0 + src7)); \ + OP(dst[4 * dstStride], (src4 + src5) * 20 - (src3 + src6) * 6 + (src2 + src7) * 3 - (src1 + src8)); \ + OP(dst[5 * dstStride], (src5 + src6) * 20 - (src4 + src7) * 6 + (src3 + src8) * 3 - (src2 + src8)); \ + OP(dst[6 * dstStride], (src6 + src7) * 20 - (src5 + src8) * 6 + (src4 + src8) * 3 - (src3 + src7)); \ + OP(dst[7 * dstStride], (src7 + src8) * 20 - (src6 + src8) * 6 + (src5 + src7) * 3 - (src4 + src6)); \ + dst++; \ + src++; \ + } \ +} \ + \ +static void OPNAME ## mpeg4_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, \ + int dstStride, int srcStride, \ + int h) \ +{ \ + const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \ + int i; \ + \ + for (i = 0; i < h; i++) { \ + OP(dst[0], (src[0] + src[1]) * 20 - (src[0] + src[2]) * 6 + (src[1] + src[3]) * 3 - (src[2] + src[4])); \ + OP(dst[1], (src[1] + src[2]) * 20 - (src[0] + src[3]) * 
6 + (src[0] + src[4]) * 3 - (src[1] + src[5])); \ + OP(dst[2], (src[2] + src[3]) * 20 - (src[1] + src[4]) * 6 + (src[0] + src[5]) * 3 - (src[0] + src[6])); \ + OP(dst[3], (src[3] + src[4]) * 20 - (src[2] + src[5]) * 6 + (src[1] + src[6]) * 3 - (src[0] + src[7])); \ + OP(dst[4], (src[4] + src[5]) * 20 - (src[3] + src[6]) * 6 + (src[2] + src[7]) * 3 - (src[1] + src[8])); \ + OP(dst[5], (src[5] + src[6]) * 20 - (src[4] + src[7]) * 6 + (src[3] + src[8]) * 3 - (src[2] + src[9])); \ + OP(dst[6], (src[6] + src[7]) * 20 - (src[5] + src[8]) * 6 + (src[4] + src[9]) * 3 - (src[3] + src[10])); \ + OP(dst[7], (src[7] + src[8]) * 20 - (src[6] + src[9]) * 6 + (src[5] + src[10]) * 3 - (src[4] + src[11])); \ + OP(dst[8], (src[8] + src[9]) * 20 - (src[7] + src[10]) * 6 + (src[6] + src[11]) * 3 - (src[5] + src[12])); \ + OP(dst[9], (src[9] + src[10]) * 20 - (src[8] + src[11]) * 6 + (src[7] + src[12]) * 3 - (src[6] + src[13])); \ + OP(dst[10], (src[10] + src[11]) * 20 - (src[9] + src[12]) * 6 + (src[8] + src[13]) * 3 - (src[7] + src[14])); \ + OP(dst[11], (src[11] + src[12]) * 20 - (src[10] + src[13]) * 6 + (src[9] + src[14]) * 3 - (src[8] + src[15])); \ + OP(dst[12], (src[12] + src[13]) * 20 - (src[11] + src[14]) * 6 + (src[10] + src[15]) * 3 - (src[9] + src[16])); \ + OP(dst[13], (src[13] + src[14]) * 20 - (src[12] + src[15]) * 6 + (src[11] + src[16]) * 3 - (src[10] + src[16])); \ + OP(dst[14], (src[14] + src[15]) * 20 - (src[13] + src[16]) * 6 + (src[12] + src[16]) * 3 - (src[11] + src[15])); \ + OP(dst[15], (src[15] + src[16]) * 20 - (src[14] + src[16]) * 6 + (src[13] + src[15]) * 3 - (src[12] + src[14])); \ + dst += dstStride; \ + src += srcStride; \ + } \ +} \ + \ +static void OPNAME ## mpeg4_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, \ + int dstStride, int srcStride) \ +{ \ + const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \ + const int w = 16; \ + int i; \ + \ + for (i = 0; i < w; i++) { \ + const int src0 = src[0 * srcStride]; \ + const int src1 = src[1 * srcStride]; \ + const int src2 = src[2 * srcStride]; \ + const int src3 = src[3 * srcStride]; \ + const int src4 = src[4 * srcStride]; \ + const int src5 = src[5 * srcStride]; \ + const int src6 = src[6 * srcStride]; \ + const int src7 = src[7 * srcStride]; \ + const int src8 = src[8 * srcStride]; \ + const int src9 = src[9 * srcStride]; \ + const int src10 = src[10 * srcStride]; \ + const int src11 = src[11 * srcStride]; \ + const int src12 = src[12 * srcStride]; \ + const int src13 = src[13 * srcStride]; \ + const int src14 = src[14 * srcStride]; \ + const int src15 = src[15 * srcStride]; \ + const int src16 = src[16 * srcStride]; \ + OP(dst[0 * dstStride], (src0 + src1) * 20 - (src0 + src2) * 6 + (src1 + src3) * 3 - (src2 + src4)); \ + OP(dst[1 * dstStride], (src1 + src2) * 20 - (src0 + src3) * 6 + (src0 + src4) * 3 - (src1 + src5)); \ + OP(dst[2 * dstStride], (src2 + src3) * 20 - (src1 + src4) * 6 + (src0 + src5) * 3 - (src0 + src6)); \ + OP(dst[3 * dstStride], (src3 + src4) * 20 - (src2 + src5) * 6 + (src1 + src6) * 3 - (src0 + src7)); \ + OP(dst[4 * dstStride], (src4 + src5) * 20 - (src3 + src6) * 6 + (src2 + src7) * 3 - (src1 + src8)); \ + OP(dst[5 * dstStride], (src5 + src6) * 20 - (src4 + src7) * 6 + (src3 + src8) * 3 - (src2 + src9)); \ + OP(dst[6 * dstStride], (src6 + src7) * 20 - (src5 + src8) * 6 + (src4 + src9) * 3 - (src3 + src10)); \ + OP(dst[7 * dstStride], (src7 + src8) * 20 - (src6 + src9) * 6 + (src5 + src10) * 3 - (src4 + src11)); \ + OP(dst[8 * dstStride], (src8 + src9) * 20 - (src7 + src10) * 6 + (src6 + src11) * 3 - (src5 + 
src12)); \ + OP(dst[9 * dstStride], (src9 + src10) * 20 - (src8 + src11) * 6 + (src7 + src12) * 3 - (src6 + src13)); \ + OP(dst[10 * dstStride], (src10 + src11) * 20 - (src9 + src12) * 6 + (src8 + src13) * 3 - (src7 + src14)); \ + OP(dst[11 * dstStride], (src11 + src12) * 20 - (src10 + src13) * 6 + (src9 + src14) * 3 - (src8 + src15)); \ + OP(dst[12 * dstStride], (src12 + src13) * 20 - (src11 + src14) * 6 + (src10 + src15) * 3 - (src9 + src16)); \ + OP(dst[13 * dstStride], (src13 + src14) * 20 - (src12 + src15) * 6 + (src11 + src16) * 3 - (src10 + src16)); \ + OP(dst[14 * dstStride], (src14 + src15) * 20 - (src13 + src16) * 6 + (src12 + src16) * 3 - (src11 + src15)); \ + OP(dst[15 * dstStride], (src15 + src16) * 20 - (src14 + src16) * 6 + (src13 + src15) * 3 - (src12 + src14)); \ + dst++; \ + src++; \ + } \ +} \ + \ +static void OPNAME ## qpel8_mc10_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t half[64]; \ + \ + put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8); \ + OPNAME ## pixels8_l2_8(dst, src, half, stride, stride, 8, 8); \ +} \ + \ +static void OPNAME ## qpel8_mc20_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + OPNAME ## mpeg4_qpel8_h_lowpass(dst, src, stride, stride, 8); \ +} \ + \ +static void OPNAME ## qpel8_mc30_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t half[64]; \ + \ + put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8); \ + OPNAME ## pixels8_l2_8(dst, src + 1, half, stride, stride, 8, 8); \ +} \ + \ +static void OPNAME ## qpel8_mc01_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[16 * 9]; \ + uint8_t half[64]; \ + \ + copy_block9(full, src, 16, stride, 9); \ + put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16); \ + OPNAME ## pixels8_l2_8(dst, full, half, stride, 16, 8, 8); \ +} \ + \ +static void OPNAME ## qpel8_mc02_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[16 * 9]; \ + \ + copy_block9(full, src, 16, stride, 9); \ + OPNAME ## mpeg4_qpel8_v_lowpass(dst, full, stride, 16); \ +} \ + \ +static void OPNAME ## qpel8_mc03_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[16 * 9]; \ + uint8_t half[64]; \ + \ + copy_block9(full, src, 16, stride, 9); \ + put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16); \ + OPNAME ## pixels8_l2_8(dst, full + 16, half, stride, 16, 8, 8); \ +} \ + \ +void ff_ ## OPNAME ## qpel8_mc11_old_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[16 * 9]; \ + uint8_t halfH[72]; \ + uint8_t halfV[64]; \ + uint8_t halfHV[64]; \ + \ + copy_block9(full, src, 16, stride, 9); \ + put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \ + put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16); \ + put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \ + OPNAME ## pixels8_l4_8(dst, full, halfH, halfV, halfHV, \ + stride, 16, 8, 8, 8, 8); \ +} \ + \ +static void OPNAME ## qpel8_mc11_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[16 * 9]; \ + uint8_t halfH[72]; \ + uint8_t halfHV[64]; \ + \ + copy_block9(full, src, 16, stride, 9); \ + put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \ + put ## RND ## pixels8_l2_8(halfH, halfH, full, 8, 8, 16, 9); \ + put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \ + OPNAME ## pixels8_l2_8(dst, halfH, halfHV, stride, 8, 8, 8); \ +} \ + \ +void ff_ ## OPNAME ## qpel8_mc31_old_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[16 * 9]; \ + uint8_t halfH[72]; \ + 
uint8_t halfV[64]; \ + uint8_t halfHV[64]; \ + \ + copy_block9(full, src, 16, stride, 9); \ + put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \ + put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full + 1, 8, 16); \ + put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \ + OPNAME ## pixels8_l4_8(dst, full + 1, halfH, halfV, halfHV, \ + stride, 16, 8, 8, 8, 8); \ +} \ + \ +static void OPNAME ## qpel8_mc31_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[16 * 9]; \ + uint8_t halfH[72]; \ + uint8_t halfHV[64]; \ + \ + copy_block9(full, src, 16, stride, 9); \ + put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \ + put ## RND ## pixels8_l2_8(halfH, halfH, full + 1, 8, 8, 16, 9); \ + put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \ + OPNAME ## pixels8_l2_8(dst, halfH, halfHV, stride, 8, 8, 8); \ +} \ + \ +void ff_ ## OPNAME ## qpel8_mc13_old_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[16 * 9]; \ + uint8_t halfH[72]; \ + uint8_t halfV[64]; \ + uint8_t halfHV[64]; \ + \ + copy_block9(full, src, 16, stride, 9); \ + put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \ + put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16); \ + put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \ + OPNAME ## pixels8_l4_8(dst, full + 16, halfH + 8, halfV, halfHV, \ + stride, 16, 8, 8, 8, 8); \ +} \ + \ +static void OPNAME ## qpel8_mc13_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[16 * 9]; \ + uint8_t halfH[72]; \ + uint8_t halfHV[64]; \ + \ + copy_block9(full, src, 16, stride, 9); \ + put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \ + put ## RND ## pixels8_l2_8(halfH, halfH, full, 8, 8, 16, 9); \ + put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \ + OPNAME ## pixels8_l2_8(dst, halfH + 8, halfHV, stride, 8, 8, 8); \ +} \ + \ +void ff_ ## OPNAME ## qpel8_mc33_old_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[16 * 9]; \ + uint8_t halfH[72]; \ + uint8_t halfV[64]; \ + uint8_t halfHV[64]; \ + \ + copy_block9(full, src, 16, stride, 9); \ + put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \ + put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full + 1, 8, 16); \ + put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \ + OPNAME ## pixels8_l4_8(dst, full + 17, halfH + 8, halfV, halfHV, \ + stride, 16, 8, 8, 8, 8); \ +} \ + \ +static void OPNAME ## qpel8_mc33_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[16 * 9]; \ + uint8_t halfH[72]; \ + uint8_t halfHV[64]; \ + \ + copy_block9(full, src, 16, stride, 9); \ + put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \ + put ## RND ## pixels8_l2_8(halfH, halfH, full + 1, 8, 8, 16, 9); \ + put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \ + OPNAME ## pixels8_l2_8(dst, halfH + 8, halfHV, stride, 8, 8, 8); \ +} \ + \ +static void OPNAME ## qpel8_mc21_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t halfH[72]; \ + uint8_t halfHV[64]; \ + \ + put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9); \ + put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \ + OPNAME ## pixels8_l2_8(dst, halfH, halfHV, stride, 8, 8, 8); \ +} \ + \ +static void OPNAME ## qpel8_mc23_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t halfH[72]; \ + uint8_t halfHV[64]; \ + \ + put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9); \ + put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \ + OPNAME ## 
pixels8_l2_8(dst, halfH + 8, halfHV, stride, 8, 8, 8); \ +} \ + \ +void ff_ ## OPNAME ## qpel8_mc12_old_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[16 * 9]; \ + uint8_t halfH[72]; \ + uint8_t halfV[64]; \ + uint8_t halfHV[64]; \ + \ + copy_block9(full, src, 16, stride, 9); \ + put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \ + put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16); \ + put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \ + OPNAME ## pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8); \ +} \ + \ +static void OPNAME ## qpel8_mc12_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[16 * 9]; \ + uint8_t halfH[72]; \ + \ + copy_block9(full, src, 16, stride, 9); \ + put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \ + put ## RND ## pixels8_l2_8(halfH, halfH, full, 8, 8, 16, 9); \ + OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8); \ +} \ + \ +void ff_ ## OPNAME ## qpel8_mc32_old_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[16 * 9]; \ + uint8_t halfH[72]; \ + uint8_t halfV[64]; \ + uint8_t halfHV[64]; \ + \ + copy_block9(full, src, 16, stride, 9); \ + put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \ + put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full + 1, 8, 16); \ + put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8); \ + OPNAME ## pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8); \ +} \ + \ +static void OPNAME ## qpel8_mc32_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[16 * 9]; \ + uint8_t halfH[72]; \ + \ + copy_block9(full, src, 16, stride, 9); \ + put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9); \ + put ## RND ## pixels8_l2_8(halfH, halfH, full + 1, 8, 8, 16, 9); \ + OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8); \ +} \ + \ +static void OPNAME ## qpel8_mc22_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t halfH[72]; \ + \ + put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9); \ + OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8); \ +} \ + \ +static void OPNAME ## qpel16_mc10_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t half[256]; \ + \ + put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16); \ + OPNAME ## pixels16_l2_8(dst, src, half, stride, stride, 16, 16); \ +} \ + \ +static void OPNAME ## qpel16_mc20_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + OPNAME ## mpeg4_qpel16_h_lowpass(dst, src, stride, stride, 16); \ +} \ + \ +static void OPNAME ## qpel16_mc30_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t half[256]; \ + \ + put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16); \ + OPNAME ## pixels16_l2_8(dst, src + 1, half, stride, stride, 16, 16); \ +} \ + \ +static void OPNAME ## qpel16_mc01_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[24 * 17]; \ + uint8_t half[256]; \ + \ + copy_block17(full, src, 24, stride, 17); \ + put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24); \ + OPNAME ## pixels16_l2_8(dst, full, half, stride, 24, 16, 16); \ +} \ + \ +static void OPNAME ## qpel16_mc02_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[24 * 17]; \ + \ + copy_block17(full, src, 24, stride, 17); \ + OPNAME ## mpeg4_qpel16_v_lowpass(dst, full, stride, 24); \ +} \ + \ +static void OPNAME ## qpel16_mc03_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[24 * 17]; \ + uint8_t half[256]; \ + \ + 
copy_block17(full, src, 24, stride, 17); \ + put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24); \ + OPNAME ## pixels16_l2_8(dst, full + 24, half, stride, 24, 16, 16); \ +} \ + \ +void ff_ ## OPNAME ## qpel16_mc11_old_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[24 * 17]; \ + uint8_t halfH[272]; \ + uint8_t halfV[256]; \ + uint8_t halfHV[256]; \ + \ + copy_block17(full, src, 24, stride, 17); \ + put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \ + put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24); \ + put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \ + OPNAME ## pixels16_l4_8(dst, full, halfH, halfV, halfHV, \ + stride, 24, 16, 16, 16, 16); \ +} \ + \ +static void OPNAME ## qpel16_mc11_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[24 * 17]; \ + uint8_t halfH[272]; \ + uint8_t halfHV[256]; \ + \ + copy_block17(full, src, 24, stride, 17); \ + put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \ + put ## RND ## pixels16_l2_8(halfH, halfH, full, 16, 16, 24, 17); \ + put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \ + OPNAME ## pixels16_l2_8(dst, halfH, halfHV, stride, 16, 16, 16); \ +} \ + \ +void ff_ ## OPNAME ## qpel16_mc31_old_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[24 * 17]; \ + uint8_t halfH[272]; \ + uint8_t halfV[256]; \ + uint8_t halfHV[256]; \ + \ + copy_block17(full, src, 24, stride, 17); \ + put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \ + put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full + 1, 16, 24); \ + put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \ + OPNAME ## pixels16_l4_8(dst, full + 1, halfH, halfV, halfHV, \ + stride, 24, 16, 16, 16, 16); \ +} \ + \ +static void OPNAME ## qpel16_mc31_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[24 * 17]; \ + uint8_t halfH[272]; \ + uint8_t halfHV[256]; \ + \ + copy_block17(full, src, 24, stride, 17); \ + put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \ + put ## RND ## pixels16_l2_8(halfH, halfH, full + 1, 16, 16, 24, 17); \ + put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \ + OPNAME ## pixels16_l2_8(dst, halfH, halfHV, stride, 16, 16, 16); \ +} \ + \ +void ff_ ## OPNAME ## qpel16_mc13_old_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[24 * 17]; \ + uint8_t halfH[272]; \ + uint8_t halfV[256]; \ + uint8_t halfHV[256]; \ + \ + copy_block17(full, src, 24, stride, 17); \ + put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \ + put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24); \ + put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \ + OPNAME ## pixels16_l4_8(dst, full + 24, halfH + 16, halfV, halfHV, \ + stride, 24, 16, 16, 16, 16); \ +} \ + \ +static void OPNAME ## qpel16_mc13_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[24 * 17]; \ + uint8_t halfH[272]; \ + uint8_t halfHV[256]; \ + \ + copy_block17(full, src, 24, stride, 17); \ + put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \ + put ## RND ## pixels16_l2_8(halfH, halfH, full, 16, 16, 24, 17); \ + put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \ + OPNAME ## pixels16_l2_8(dst, halfH + 16, halfHV, stride, 16, 16, 16); \ +} \ + \ +void ff_ ## OPNAME ## qpel16_mc33_old_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[24 * 17]; \ + uint8_t halfH[272]; \ + uint8_t halfV[256]; \ + uint8_t halfHV[256]; \ + \ + 
copy_block17(full, src, 24, stride, 17); \ + put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \ + put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full + 1, 16, 24); \ + put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \ + OPNAME ## pixels16_l4_8(dst, full + 25, halfH + 16, halfV, halfHV, \ + stride, 24, 16, 16, 16, 16); \ +} \ + \ +static void OPNAME ## qpel16_mc33_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[24 * 17]; \ + uint8_t halfH[272]; \ + uint8_t halfHV[256]; \ + \ + copy_block17(full, src, 24, stride, 17); \ + put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \ + put ## RND ## pixels16_l2_8(halfH, halfH, full + 1, 16, 16, 24, 17); \ + put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \ + OPNAME ## pixels16_l2_8(dst, halfH + 16, halfHV, stride, 16, 16, 16); \ +} \ + \ +static void OPNAME ## qpel16_mc21_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t halfH[272]; \ + uint8_t halfHV[256]; \ + \ + put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17); \ + put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \ + OPNAME ## pixels16_l2_8(dst, halfH, halfHV, stride, 16, 16, 16); \ +} \ + \ +static void OPNAME ## qpel16_mc23_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t halfH[272]; \ + uint8_t halfHV[256]; \ + \ + put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17); \ + put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \ + OPNAME ## pixels16_l2_8(dst, halfH + 16, halfHV, stride, 16, 16, 16); \ +} \ + \ +void ff_ ## OPNAME ## qpel16_mc12_old_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[24 * 17]; \ + uint8_t halfH[272]; \ + uint8_t halfV[256]; \ + uint8_t halfHV[256]; \ + \ + copy_block17(full, src, 24, stride, 17); \ + put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \ + put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24); \ + put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \ + OPNAME ## pixels16_l2_8(dst, halfV, halfHV, stride, 16, 16, 16); \ +} \ + \ +static void OPNAME ## qpel16_mc12_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[24 * 17]; \ + uint8_t halfH[272]; \ + \ + copy_block17(full, src, 24, stride, 17); \ + put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \ + put ## RND ## pixels16_l2_8(halfH, halfH, full, 16, 16, 24, 17); \ + OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16); \ +} \ + \ +void ff_ ## OPNAME ## qpel16_mc32_old_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[24 * 17]; \ + uint8_t halfH[272]; \ + uint8_t halfV[256]; \ + uint8_t halfHV[256]; \ + \ + copy_block17(full, src, 24, stride, 17); \ + put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \ + put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full + 1, 16, 24); \ + put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16); \ + OPNAME ## pixels16_l2_8(dst, halfV, halfHV, stride, 16, 16, 16); \ +} \ + \ +static void OPNAME ## qpel16_mc32_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t full[24 * 17]; \ + uint8_t halfH[272]; \ + \ + copy_block17(full, src, 24, stride, 17); \ + put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17); \ + put ## RND ## pixels16_l2_8(halfH, halfH, full + 1, 16, 16, 24, 17); \ + OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16); \ +} \ + \ +static void OPNAME ## qpel16_mc22_c(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint8_t halfH[272]; \ 
+ \ + put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17); \ + OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16); \ +} + +#define op_avg(a, b) a = (((a) + cm[((b) + 16) >> 5] + 1) >> 1) +#define op_put(a, b) a = cm[((b) + 16) >> 5] +#define op_put_no_rnd(a, b) a = cm[((b) + 15) >> 5] + +QPEL_MC(0, put_, _, op_put) +QPEL_MC(1, put_no_rnd_, _no_rnd_, op_put_no_rnd) +QPEL_MC(0, avg_, _, op_avg) + +#undef op_avg +#undef op_put +#undef op_put_no_rnd + +void ff_put_pixels8x8_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride) +{ + put_pixels8_8_c(dst, src, stride, 8); +} + +void ff_avg_pixels8x8_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride) +{ + avg_pixels8_8_c(dst, src, stride, 8); +} + +void ff_put_pixels16x16_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride) +{ + put_pixels16_8_c(dst, src, stride, 16); +} + +void ff_avg_pixels16x16_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride) +{ + avg_pixels16_8_c(dst, src, stride, 16); +} + +#define put_qpel8_mc00_c ff_put_pixels8x8_c +#define avg_qpel8_mc00_c ff_avg_pixels8x8_c +#define put_qpel16_mc00_c ff_put_pixels16x16_c +#define avg_qpel16_mc00_c ff_avg_pixels16x16_c +#define put_no_rnd_qpel8_mc00_c ff_put_pixels8x8_c +#define put_no_rnd_qpel16_mc00_c ff_put_pixels16x16_c + +void ff_put_pixels8_l2_8(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, + int dst_stride, int src_stride1, int src_stride2, + int h) +{ + put_pixels8_l2_8(dst, src1, src2, dst_stride, src_stride1, src_stride2, h); + +} + +av_cold void ff_qpeldsp_init(QpelDSPContext *c) +{ +#define dspfunc(PFX, IDX, NUM) \ + c->PFX ## _pixels_tab[IDX][0] = PFX ## NUM ## _mc00_c; \ + c->PFX ## _pixels_tab[IDX][1] = PFX ## NUM ## _mc10_c; \ + c->PFX ## _pixels_tab[IDX][2] = PFX ## NUM ## _mc20_c; \ + c->PFX ## _pixels_tab[IDX][3] = PFX ## NUM ## _mc30_c; \ + c->PFX ## _pixels_tab[IDX][4] = PFX ## NUM ## _mc01_c; \ + c->PFX ## _pixels_tab[IDX][5] = PFX ## NUM ## _mc11_c; \ + c->PFX ## _pixels_tab[IDX][6] = PFX ## NUM ## _mc21_c; \ + c->PFX ## _pixels_tab[IDX][7] = PFX ## NUM ## _mc31_c; \ + c->PFX ## _pixels_tab[IDX][8] = PFX ## NUM ## _mc02_c; \ + c->PFX ## _pixels_tab[IDX][9] = PFX ## NUM ## _mc12_c; \ + c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_c; \ + c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_c; \ + c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_c; \ + c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_c; \ + c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_c; \ + c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_c + + dspfunc(put_qpel, 0, 16); + dspfunc(put_qpel, 1, 8); + + dspfunc(put_no_rnd_qpel, 0, 16); + dspfunc(put_no_rnd_qpel, 1, 8); + + dspfunc(avg_qpel, 0, 16); + dspfunc(avg_qpel, 1, 8); + + if (ARCH_X86) + ff_qpeldsp_init_x86(c); +} diff --git a/libavcodec/qpeldsp.h b/libavcodec/qpeldsp.h new file mode 100644 index 0000000000..a750ff836f --- /dev/null +++ b/libavcodec/qpeldsp.h @@ -0,0 +1,78 @@ +/* + * quarterpel DSP functions + * + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
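
The dspfunc() block in ff_qpeldsp_init() above lays the functions out so that the first index of each [2][16] table selects the block size (0 for the 16x16 variants, 1 for the 8x8 ones), and entry (y << 2) | x within a table is the _mcXY_ function for a quarter-sample offset of x/4 horizontally and y/4 vertically. Callers such as the mpegvideo motion compensation (and the vc1dec.c hunk further down, which repoints s->me.qpel_put and qpel_avg at s->qdsp) typically index the tables along these lines. A minimal sketch, where mc_block and its parameters are illustrative names only and edge emulation is ignored:

    #include <stddef.h>
    #include <stdint.h>

    #include "qpeldsp.h"

    /* Illustrative only: select a quarter-pel MC function from the tables
     * filled in by ff_qpeldsp_init() (or ff_qpeldsp_init_x86()). */
    static void mc_block(const QpelDSPContext *q, uint8_t *dst, uint8_t *src,
                         ptrdiff_t stride, int mx, int my, int is_8x8, int avg)
    {
        /* entry (y << 2) | x is the _mcXY_ function for offset (x/4, y/4) */
        int dxy = ((my & 3) << 2) | (mx & 3);
        const qpel_mc_func *tab = avg ? q->avg_qpel_pixels_tab[is_8x8]
                                      : q->put_qpel_pixels_tab[is_8x8];

        src += (my >> 2) * stride + (mx >> 2);  /* integer part of the vector */
        tab[dxy](dst, src, stride);
    }
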
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * quarterpel DSP functions + */ + +#ifndef AVCODEC_QPELDSP_H +#define AVCODEC_QPELDSP_H + +#include +#include + +void ff_put_pixels8x8_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride); +void ff_avg_pixels8x8_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride); +void ff_put_pixels16x16_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride); +void ff_avg_pixels16x16_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride); + +void ff_put_pixels8_l2_8(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, + int dst_stride, int src_stride1, int src_stride2, + int h); + +#define DEF_OLD_QPEL(name) \ + void ff_put_ ## name(uint8_t *dst /* align width (8 or 16) */, \ + uint8_t *src /* align 1 */, ptrdiff_t stride); \ + void ff_put_no_rnd_ ## name(uint8_t *dst /* align width (8 or 16) */, \ + uint8_t *src /* align 1 */, ptrdiff_t stride); \ + void ff_avg_ ## name(uint8_t *dst /* align width (8 or 16) */, \ + uint8_t *src /* align 1 */, ptrdiff_t stride); + +DEF_OLD_QPEL(qpel16_mc11_old_c) +DEF_OLD_QPEL(qpel16_mc31_old_c) +DEF_OLD_QPEL(qpel16_mc12_old_c) +DEF_OLD_QPEL(qpel16_mc32_old_c) +DEF_OLD_QPEL(qpel16_mc13_old_c) +DEF_OLD_QPEL(qpel16_mc33_old_c) +DEF_OLD_QPEL(qpel8_mc11_old_c) +DEF_OLD_QPEL(qpel8_mc31_old_c) +DEF_OLD_QPEL(qpel8_mc12_old_c) +DEF_OLD_QPEL(qpel8_mc32_old_c) +DEF_OLD_QPEL(qpel8_mc13_old_c) +DEF_OLD_QPEL(qpel8_mc33_old_c) + +typedef void (*qpel_mc_func)(uint8_t *dst /* align width (8 or 16) */, + uint8_t *src /* align 1 */, ptrdiff_t stride); + +/** + * quarterpel DSP context + */ +typedef struct QpelDSPContext { + qpel_mc_func put_qpel_pixels_tab[2][16]; + qpel_mc_func avg_qpel_pixels_tab[2][16]; + qpel_mc_func put_no_rnd_qpel_pixels_tab[2][16]; +} QpelDSPContext; + +void ff_qpeldsp_init(QpelDSPContext *c); + +void ff_qpeldsp_init_x86(QpelDSPContext *c); + +#endif /* AVCODEC_QPELDSP_H */ diff --git a/libavcodec/rv34.c b/libavcodec/rv34.c index 0720ffb6aa..ba4cc67cc4 100644 --- a/libavcodec/rv34.c +++ b/libavcodec/rv34.c @@ -33,6 +33,7 @@ #include "golomb.h" #include "internal.h" #include "mathops.h" +#include "qpeldsp.h" #include "rectangle.h" #include "thread.h" diff --git a/libavcodec/rv34dsp.h b/libavcodec/rv34dsp.h index 3d616ee246..1aa80cf7ef 100644 --- a/libavcodec/rv34dsp.h +++ b/libavcodec/rv34dsp.h @@ -27,8 +27,8 @@ #ifndef AVCODEC_RV34DSP_H #define AVCODEC_RV34DSP_H -#include "dsputil.h" #include "h264chroma.h" +#include "qpeldsp.h" typedef void (*rv40_weight_func)(uint8_t *dst/*align width (8 or 16)*/, uint8_t *src1/*align width (8 or 16)*/, diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c index 1b01d7e20b..c8a195ce92 100644 --- a/libavcodec/vc1dec.c +++ b/libavcodec/vc1dec.c @@ -33,6 +33,7 @@ #include "mpegvideo.h" #include "h263.h" #include "h264chroma.h" +#include "qpeldsp.h" #include "vc1.h" #include "vc1data.h" #include "vc1acdata.h" @@ -5603,6 +5604,7 @@ static av_cold int vc1_decode_init(AVCodecContext *avctx) if (ff_vc1_init_common(v) < 0) return -1; ff_h264chroma_init(&v->h264chroma, 8); + ff_qpeldsp_init(&s->qdsp); ff_vc1dsp_init(&v->vc1dsp); if (avctx->codec_id == AV_CODEC_ID_WMV3 || avctx->codec_id == AV_CODEC_ID_WMV3IMAGE) { @@ -5971,8 +5973,8 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data, s->current_picture_ptr->f->repeat_pict = v->rptfrm * 2; } - s->me.qpel_put = s->dsp.put_qpel_pixels_tab; - s->me.qpel_avg = 
s->dsp.avg_qpel_pixels_tab; + s->me.qpel_put = s->qdsp.put_qpel_pixels_tab; + s->me.qpel_avg = s->qdsp.avg_qpel_pixels_tab; if (avctx->hwaccel) { if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0) diff --git a/libavcodec/vc1dsp.c b/libavcodec/vc1dsp.c index e112c7ccb6..5adcb62635 100644 --- a/libavcodec/vc1dsp.c +++ b/libavcodec/vc1dsp.c @@ -26,8 +26,8 @@ */ #include "libavutil/common.h" -#include "dsputil.h" #include "h264chroma.h" +#include "qpeldsp.h" #include "vc1dsp.h" /* Apply overlap transform to horizontal edge */ diff --git a/libavcodec/wmv2dsp.h b/libavcodec/wmv2dsp.h index 935620da6d..f2f258ed34 100644 --- a/libavcodec/wmv2dsp.h +++ b/libavcodec/wmv2dsp.h @@ -21,7 +21,7 @@ #include -#include "dsputil.h" +#include "qpeldsp.h" typedef struct WMV2DSPContext { void (*idct_add)(uint8_t *dest, int line_size, int16_t *block); diff --git a/libavcodec/x86/Makefile b/libavcodec/x86/Makefile index 10242269c2..5fddf3fb83 100644 --- a/libavcodec/x86/Makefile +++ b/libavcodec/x86/Makefile @@ -20,6 +20,7 @@ OBJS-$(CONFIG_LPC) += x86/lpc.o OBJS-$(CONFIG_MPEGAUDIODSP) += x86/mpegaudiodsp.o OBJS-$(CONFIG_MPEGVIDEO) += x86/mpegvideo.o OBJS-$(CONFIG_MPEGVIDEOENC) += x86/mpegvideoenc.o +OBJS-$(CONFIG_QPELDSP) += x86/qpeldsp_init.o OBJS-$(CONFIG_VIDEODSP) += x86/videodsp_init.o OBJS-$(CONFIG_VP3DSP) += x86/vp3dsp_init.o OBJS-$(CONFIG_XMM_CLOBBER_TEST) += x86/w64xmmtest.o @@ -44,13 +45,13 @@ OBJS-$(CONFIG_VP8_DECODER) += x86/vp8dsp_init.o OBJS-$(CONFIG_VP9_DECODER) += x86/vp9dsp_init.o MMX-OBJS-$(CONFIG_DSPUTIL) += x86/dsputil_mmx.o \ - x86/fpel_mmx.o \ x86/idct_mmx_xvid.o \ x86/idct_sse2_xvid.o \ x86/simple_idct.o MMX-OBJS-$(CONFIG_HPELDSP) += x86/fpel_mmx.o \ x86/hpeldsp_mmx.o MMX-OBJS-$(CONFIG_HUFFYUVDSP) += x86/huffyuvdsp_mmx.o +MMX-OBJS-$(CONFIG_QPELDSP) += x86/fpel_mmx.o MMX-OBJS-$(CONFIG_SVQ1_ENCODER) += x86/svq1enc_mmx.o MMX-OBJS-$(CONFIG_VC1_DECODER) += x86/vc1dsp_mmx.o @@ -61,10 +62,7 @@ YASM-OBJS += x86/deinterlace.o \ YASM-OBJS-$(CONFIG_AC3DSP) += x86/ac3dsp.o YASM-OBJS-$(CONFIG_DCT) += x86/dct32.o YASM-OBJS-$(CONFIG_DNXHD_ENCODER) += x86/dnxhdenc.o -YASM-OBJS-$(CONFIG_DSPUTIL) += x86/dsputil.o \ - x86/fpel.o \ - x86/mpeg4qpel.o \ - x86/qpel.o +YASM-OBJS-$(CONFIG_DSPUTIL) += x86/dsputil.o YASM-OBJS-$(CONFIG_ENCODERS) += x86/dsputilenc.o YASM-OBJS-$(CONFIG_FFT) += x86/fft.o YASM-OBJS-$(CONFIG_H263DSP) += x86/h263_loopfilter.o @@ -86,6 +84,9 @@ YASM-OBJS-$(CONFIG_HPELDSP) += x86/fpel.o \ x86/hpeldsp.o YASM-OBJS-$(CONFIG_HUFFYUVDSP) += x86/huffyuvdsp.o YASM-OBJS-$(CONFIG_MPEGAUDIODSP) += x86/imdct36.o +YASM-OBJS-$(CONFIG_QPELDSP) += x86/qpeldsp.o \ + x86/fpel.o \ + x86/qpel.o YASM-OBJS-$(CONFIG_VIDEODSP) += x86/videodsp.o YASM-OBJS-$(CONFIG_VP3DSP) += x86/vp3dsp.o diff --git a/libavcodec/x86/dsputil_init.c b/libavcodec/x86/dsputil_init.c index 9b0788ff73..389e7634dd 100644 --- a/libavcodec/x86/dsputil_init.c +++ b/libavcodec/x86/dsputil_init.c @@ -23,55 +23,11 @@ #include "libavutil/x86/cpu.h" #include "libavcodec/avcodec.h" #include "libavcodec/dsputil.h" -#include "libavcodec/pixels.h" #include "libavcodec/simple_idct.h" #include "libavcodec/version.h" #include "dsputil_x86.h" -#include "fpel.h" #include "idct_xvid.h" -void ff_put_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, - int dstStride, int src1Stride, int h); -void ff_put_no_rnd_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, - uint8_t *src2, int dstStride, - int src1Stride, int h); -void ff_avg_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, - int dstStride, int src1Stride, int h); -void 
ff_put_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, - int dstStride, int src1Stride, int h); -void ff_avg_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, - int dstStride, int src1Stride, int h); -void ff_put_no_rnd_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, - int dstStride, int src1Stride, int h); -void ff_put_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src, - int dstStride, int srcStride, int h); -void ff_avg_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src, - int dstStride, int srcStride, int h); -void ff_put_no_rnd_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src, - int dstStride, int srcStride, - int h); -void ff_put_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src, - int dstStride, int srcStride, int h); -void ff_avg_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src, - int dstStride, int srcStride, int h); -void ff_put_no_rnd_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src, - int dstStride, int srcStride, - int h); -void ff_put_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src, - int dstStride, int srcStride); -void ff_avg_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src, - int dstStride, int srcStride); -void ff_put_no_rnd_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src, - int dstStride, int srcStride); -void ff_put_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src, - int dstStride, int srcStride); -void ff_avg_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src, - int dstStride, int srcStride); -void ff_put_no_rnd_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src, - int dstStride, int srcStride); -#define ff_put_no_rnd_pixels16_mmxext ff_put_pixels16_mmxext -#define ff_put_no_rnd_pixels8_mmxext ff_put_pixels8_mmxext - int32_t ff_scalarproduct_int16_mmxext(const int16_t *v1, const int16_t *v2, int order); int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2, @@ -89,418 +45,6 @@ void ff_vector_clip_int32_int_sse2(int32_t *dst, const int32_t *src, void ff_vector_clip_int32_sse4(int32_t *dst, const int32_t *src, int32_t min, int32_t max, unsigned int len); -#if HAVE_YASM - -CALL_2X_PIXELS(ff_avg_pixels16_mmxext, ff_avg_pixels8_mmxext, 8) -CALL_2X_PIXELS(ff_put_pixels16_mmxext, ff_put_pixels8_mmxext, 8) - -#define QPEL_OP(OPNAME, RND, MMX) \ -static void OPNAME ## qpel8_mc00_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - ff_ ## OPNAME ## pixels8_ ## MMX(dst, src, stride, 8); \ -} \ - \ -static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint64_t temp[8]; \ - uint8_t *const half = (uint8_t *) temp; \ - ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, \ - stride, 8); \ - ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src, half, \ - stride, stride, 8); \ -} \ - \ -static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - ff_ ## OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, \ - stride, 8); \ -} \ - \ -static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint64_t temp[8]; \ - uint8_t *const half = (uint8_t *) temp; \ - ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, \ - stride, 8); \ - ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src + 1, half, stride, \ - stride, 8); \ -} \ - \ -static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint64_t temp[8]; \ - uint8_t *const half = (uint8_t *) temp; \ - ff_put ## 
RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, \ - 8, stride); \ - ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src, half, \ - stride, stride, 8); \ -} \ - \ -static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, \ - stride, stride); \ -} \ - \ -static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint64_t temp[8]; \ - uint8_t *const half = (uint8_t *) temp; \ - ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, \ - 8, stride); \ - ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src + stride, half, stride,\ - stride, 8); \ -} \ - \ -static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint64_t half[8 + 9]; \ - uint8_t *const halfH = (uint8_t *) half + 64; \ - uint8_t *const halfHV = (uint8_t *) half; \ - ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \ - stride, 9); \ - ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, \ - stride, 9); \ - ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\ - ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, \ - stride, 8, 8); \ -} \ - \ -static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint64_t half[8 + 9]; \ - uint8_t *const halfH = (uint8_t *) half + 64; \ - uint8_t *const halfHV = (uint8_t *) half; \ - ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \ - stride, 9); \ - ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8, \ - stride, 9); \ - ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\ - ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, \ - stride, 8, 8); \ -} \ - \ -static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint64_t half[8 + 9]; \ - uint8_t *const halfH = (uint8_t *) half + 64; \ - uint8_t *const halfHV = (uint8_t *) half; \ - ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \ - stride, 9); \ - ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, \ - stride, 9); \ - ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\ - ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV, \ - stride, 8, 8); \ -} \ - \ -static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint64_t half[8 + 9]; \ - uint8_t *const halfH = (uint8_t *) half + 64; \ - uint8_t *const halfHV = (uint8_t *) half; \ - ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \ - stride, 9); \ - ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8, \ - stride, 9); \ - ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\ - ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV, \ - stride, 8, 8); \ -} \ - \ -static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint64_t half[8 + 9]; \ - uint8_t *const halfH = (uint8_t *) half + 64; \ - uint8_t *const halfHV = (uint8_t *) half; \ - ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \ - stride, 9); \ - ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\ - ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, \ - stride, 8, 8); \ -} \ - \ -static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint64_t half[8 + 9]; \ - uint8_t *const halfH = (uint8_t *) half + 64; \ - uint8_t *const halfHV = (uint8_t *) half; \ - 
ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \ - stride, 9); \ - ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\ - ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV, \ - stride, 8, 8); \ -} \ - \ -static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint64_t half[8 + 9]; \ - uint8_t *const halfH = (uint8_t *) half; \ - ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \ - stride, 9); \ - ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, \ - 8, stride, 9); \ - ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, \ - stride, 8); \ -} \ - \ -static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint64_t half[8 + 9]; \ - uint8_t *const halfH = (uint8_t *) half; \ - ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \ - stride, 9); \ - ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8, \ - stride, 9); \ - ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, \ - stride, 8); \ -} \ - \ -static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint64_t half[9]; \ - uint8_t *const halfH = (uint8_t *) half; \ - ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \ - stride, 9); \ - ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, \ - stride, 8); \ -} \ - \ -static void OPNAME ## qpel16_mc00_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - ff_ ## OPNAME ## pixels16_ ## MMX(dst, src, stride, 16); \ -} \ - \ -static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint64_t temp[32]; \ - uint8_t *const half = (uint8_t *) temp; \ - ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, \ - stride, 16); \ - ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, \ - stride, 16); \ -} \ - \ -static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - ff_ ## OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, \ - stride, stride, 16);\ -} \ - \ -static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint64_t temp[32]; \ - uint8_t *const half = (uint8_t*) temp; \ - ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, \ - stride, 16); \ - ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src + 1, half, \ - stride, stride, 16); \ -} \ - \ -static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint64_t temp[32]; \ - uint8_t *const half = (uint8_t *) temp; \ - ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, \ - stride); \ - ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, \ - stride, 16); \ -} \ - \ -static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, \ - stride, stride); \ -} \ - \ -static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint64_t temp[32]; \ - uint8_t *const half = (uint8_t *) temp; \ - ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, \ - stride); \ - ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, \ - stride, stride, 16); \ -} \ - \ -static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint64_t half[16 * 2 + 17 * 2]; \ - uint8_t *const halfH = (uint8_t *) half + 256; \ - uint8_t *const 
halfHV = (uint8_t *) half; \ - ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \ - stride, 17); \ - ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, \ - stride, 17); \ - ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \ - 16, 16); \ - ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, \ - stride, 16, 16); \ -} \ - \ -static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint64_t half[16 * 2 + 17 * 2]; \ - uint8_t *const halfH = (uint8_t *) half + 256; \ - uint8_t *const halfHV = (uint8_t *) half; \ - ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \ - stride, 17); \ - ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16, \ - stride, 17); \ - ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \ - 16, 16); \ - ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, \ - stride, 16, 16); \ -} \ - \ -static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint64_t half[16 * 2 + 17 * 2]; \ - uint8_t *const halfH = (uint8_t *) half + 256; \ - uint8_t *const halfHV = (uint8_t *) half; \ - ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \ - stride, 17); \ - ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, \ - stride, 17); \ - ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \ - 16, 16); \ - ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV, \ - stride, 16, 16); \ -} \ - \ -static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint64_t half[16 * 2 + 17 * 2]; \ - uint8_t *const halfH = (uint8_t *) half + 256; \ - uint8_t *const halfHV = (uint8_t *) half; \ - ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \ - stride, 17); \ - ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16, \ - stride, 17); \ - ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \ - 16, 16); \ - ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV, \ - stride, 16, 16); \ -} \ - \ -static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint64_t half[16 * 2 + 17 * 2]; \ - uint8_t *const halfH = (uint8_t *) half + 256; \ - uint8_t *const halfHV = (uint8_t *) half; \ - ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \ - stride, 17); \ - ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \ - 16, 16); \ - ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, \ - stride, 16, 16); \ -} \ - \ -static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint64_t half[16 * 2 + 17 * 2]; \ - uint8_t *const halfH = (uint8_t *) half + 256; \ - uint8_t *const halfHV = (uint8_t *) half; \ - ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \ - stride, 17); \ - ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \ - 16, 16); \ - ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV, \ - stride, 16, 16); \ -} \ - \ -static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint64_t half[17 * 2]; \ - uint8_t *const halfH = (uint8_t *) half; \ - ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \ - stride, 17); \ - ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, \ - stride, 17); \ - ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, \ - stride, 16); \ -} \ - \ -static void OPNAME ## qpel16_mc32_ ## 
MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint64_t half[17 * 2]; \ - uint8_t *const halfH = (uint8_t *) half; \ - ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \ - stride, 17); \ - ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16, \ - stride, 17); \ - ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, \ - stride, 16); \ -} \ - \ -static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, \ - ptrdiff_t stride) \ -{ \ - uint64_t half[17 * 2]; \ - uint8_t *const halfH = (uint8_t *) half; \ - ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \ - stride, 17); \ - ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, \ - stride, 16); \ -} - -QPEL_OP(put_, _, mmxext) -QPEL_OP(avg_, _, mmxext) -QPEL_OP(put_no_rnd_, _no_rnd_, mmxext) - -#endif /* HAVE_YASM */ - -#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX) \ -do { \ - c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \ - c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU; \ -} while (0) - static av_cold void dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx, int cpu_flags, unsigned high_bit_depth) { @@ -550,14 +94,6 @@ static av_cold void dsputil_init_mmxext(DSPContext *c, AVCodecContext *avctx, #endif /* HAVE_MMXEXT_INLINE */ #if HAVE_MMXEXT_EXTERNAL - SET_QPEL_FUNCS(avg_qpel, 0, 16, mmxext, ); - SET_QPEL_FUNCS(avg_qpel, 1, 8, mmxext, ); - - SET_QPEL_FUNCS(put_qpel, 0, 16, mmxext, ); - SET_QPEL_FUNCS(put_qpel, 1, 8, mmxext, ); - SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmxext, ); - SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmxext, ); - c->scalarproduct_int16 = ff_scalarproduct_int16_mmxext; #endif /* HAVE_MMXEXT_EXTERNAL */ } diff --git a/libavcodec/x86/mpeg4qpel.asm b/libavcodec/x86/mpeg4qpel.asm deleted file mode 100644 index 2cdd84b76d..0000000000 --- a/libavcodec/x86/mpeg4qpel.asm +++ /dev/null @@ -1,558 +0,0 @@ -;****************************************************************************** -;* mpeg4 qpel -;* Copyright (c) 2008 Loren Merritt -;* -;* This file is part of Libav. -;* -;* Libav is free software; you can redistribute it and/or -;* modify it under the terms of the GNU Lesser General Public -;* License as published by the Free Software Foundation; either -;* version 2.1 of the License, or (at your option) any later version. 
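
Both the QPEL_MC C macros added earlier in this patch and the MMXEXT assembly deleted here (re-added, essentially unchanged, as x86/qpeldsp.asm below) implement the same MPEG-4 half-sample lowpass: taps of 20, -6, 3 and -1 applied symmetrically around the half-sample position, a rounding bias of +16 (put/avg, pw_16) or +15 (put_no_rnd, pw_15), and a shift by 5 before clipping to 8 bits. For reference, a scalar sketch of one interior output sample; the real code mirrors the outer taps at block edges instead of reading outside the block, and clips through ff_crop_tab rather than with comparisons:

    #include <stdint.h>

    /* Illustrative only: one interior sample of the (20, -6, 3, -1)/32
     * half-sample filter, with s pointing at the left of the two center
     * samples.  rnd selects the +16 bias (put/avg) or +15 (put_no_rnd). */
    static uint8_t qpel_lowpass_sample(const uint8_t *s, int rnd)
    {
        int v = 20 * (s[0]  + s[1])
              -  6 * (s[-1] + s[2])
              +  3 * (s[-2] + s[3])
              -      (s[-3] + s[4]);
        v = (v + (rnd ? 16 : 15)) >> 5;
        return v < 0 ? 0 : v > 255 ? 255 : v;
    }
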
-;* -;* Libav is distributed in the hope that it will be useful, -;* but WITHOUT ANY WARRANTY; without even the implied warranty of -;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -;* Lesser General Public License for more details. -;* -;* You should have received a copy of the GNU Lesser General Public -;* License along with Libav; if not, write to the Free Software -;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA -;****************************************************************************** - -%include "libavutil/x86/x86util.asm" - -SECTION_RODATA -cextern pb_1 -cextern pw_3 -cextern pw_15 -cextern pw_16 -cextern pw_20 - - -SECTION_TEXT - -; void ff_put_no_rnd_pixels8_l2(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -%macro PUT_NO_RND_PIXELS8_L2 0 -cglobal put_no_rnd_pixels8_l2, 6,6 - movsxdifnidn r4, r4d - movsxdifnidn r3, r3d - pcmpeqb m6, m6 - test r5d, 1 - je .loop - mova m0, [r1] - mova m1, [r2] - add r1, r4 - add r2, 8 - pxor m0, m6 - pxor m1, m6 - PAVGB m0, m1 - pxor m0, m6 - mova [r0], m0 - add r0, r3 - dec r5d -.loop: - mova m0, [r1] - add r1, r4 - mova m1, [r1] - add r1, r4 - mova m2, [r2] - mova m3, [r2+8] - pxor m0, m6 - pxor m1, m6 - pxor m2, m6 - pxor m3, m6 - PAVGB m0, m2 - PAVGB m1, m3 - pxor m0, m6 - pxor m1, m6 - mova [r0], m0 - add r0, r3 - mova [r0], m1 - add r0, r3 - mova m0, [r1] - add r1, r4 - mova m1, [r1] - add r1, r4 - mova m2, [r2+16] - mova m3, [r2+24] - pxor m0, m6 - pxor m1, m6 - pxor m2, m6 - pxor m3, m6 - PAVGB m0, m2 - PAVGB m1, m3 - pxor m0, m6 - pxor m1, m6 - mova [r0], m0 - add r0, r3 - mova [r0], m1 - add r0, r3 - add r2, 32 - sub r5d, 4 - jne .loop - REP_RET -%endmacro - -INIT_MMX mmxext -PUT_NO_RND_PIXELS8_L2 - - -; void ff_put_no_rnd_pixels16_l2(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -%macro PUT_NO_RND_PIXELS16_l2 0 -cglobal put_no_rnd_pixels16_l2, 6,6 - movsxdifnidn r3, r3d - movsxdifnidn r4, r4d - pcmpeqb m6, m6 - test r5d, 1 - je .loop - mova m0, [r1] - mova m1, [r1+8] - mova m2, [r2] - mova m3, [r2+8] - pxor m0, m6 - pxor m1, m6 - pxor m2, m6 - pxor m3, m6 - PAVGB m0, m2 - PAVGB m1, m3 - pxor m0, m6 - pxor m1, m6 - add r1, r4 - add r2, 16 - mova [r0], m0 - mova [r0+8], m1 - add r0, r3 - dec r5d -.loop: - mova m0, [r1] - mova m1, [r1+8] - add r1, r4 - mova m2, [r2] - mova m3, [r2+8] - pxor m0, m6 - pxor m1, m6 - pxor m2, m6 - pxor m3, m6 - PAVGB m0, m2 - PAVGB m1, m3 - pxor m0, m6 - pxor m1, m6 - mova [r0], m0 - mova [r0+8], m1 - add r0, r3 - mova m0, [r1] - mova m1, [r1+8] - add r1, r4 - mova m2, [r2+16] - mova m3, [r2+24] - pxor m0, m6 - pxor m1, m6 - pxor m2, m6 - pxor m3, m6 - PAVGB m0, m2 - PAVGB m1, m3 - pxor m0, m6 - pxor m1, m6 - mova [r0], m0 - mova [r0+8], m1 - add r0, r3 - add r2, 32 - sub r5d, 2 - jne .loop - REP_RET -%endmacro - -INIT_MMX mmxext -PUT_NO_RND_PIXELS16_l2 -INIT_MMX 3dnow -PUT_NO_RND_PIXELS16_l2 - -%macro MPEG4_QPEL16_H_LOWPASS 1 -cglobal %1_mpeg4_qpel16_h_lowpass, 5, 5, 0, 16 - movsxdifnidn r2, r2d - movsxdifnidn r3, r3d - pxor m7, m7 -.loop: - mova m0, [r1] - mova m1, m0 - mova m2, m0 - punpcklbw m0, m7 - punpckhbw m1, m7 - pshufw m5, m0, 0x90 - pshufw m6, m0, 0x41 - mova m3, m2 - mova m4, m2 - psllq m2, 8 - psllq m3, 16 - psllq m4, 24 - punpckhbw m2, m7 - punpckhbw m3, m7 - punpckhbw m4, m7 - paddw m5, m3 - paddw m6, m2 - paddw m5, m5 - psubw m6, m5 - pshufw m5, m0, 6 - pmullw m6, [pw_3] - paddw m0, m4 - paddw m5, m1 - pmullw m0, [pw_20] - psubw m0, m5 - paddw m6, [PW_ROUND] - paddw 
m0, m6 - psraw m0, 5 - mova [rsp+8], m0 - mova m0, [r1+5] - mova m5, m0 - mova m6, m0 - psrlq m0, 8 - psrlq m5, 16 - punpcklbw m0, m7 - punpcklbw m5, m7 - paddw m2, m0 - paddw m3, m5 - paddw m2, m2 - psubw m3, m2 - mova m2, m6 - psrlq m6, 24 - punpcklbw m2, m7 - punpcklbw m6, m7 - pmullw m3, [pw_3] - paddw m1, m2 - paddw m4, m6 - pmullw m1, [pw_20] - psubw m3, m4 - paddw m1, [PW_ROUND] - paddw m3, m1 - psraw m3, 5 - mova m1, [rsp+8] - packuswb m1, m3 - OP_MOV [r0], m1, m4 - mova m1, [r1+9] - mova m4, m1 - mova m3, m1 - psrlq m1, 8 - psrlq m4, 16 - punpcklbw m1, m7 - punpcklbw m4, m7 - paddw m5, m1 - paddw m0, m4 - paddw m5, m5 - psubw m0, m5 - mova m5, m3 - psrlq m3, 24 - pmullw m0, [pw_3] - punpcklbw m3, m7 - paddw m2, m3 - psubw m0, m2 - mova m2, m5 - punpcklbw m2, m7 - punpckhbw m5, m7 - paddw m6, m2 - pmullw m6, [pw_20] - paddw m0, [PW_ROUND] - paddw m0, m6 - psraw m0, 5 - paddw m3, m5 - pshufw m6, m5, 0xf9 - paddw m6, m4 - pshufw m4, m5, 0xbe - pshufw m5, m5, 0x6f - paddw m4, m1 - paddw m5, m2 - paddw m6, m6 - psubw m4, m6 - pmullw m3, [pw_20] - pmullw m4, [pw_3] - psubw m3, m5 - paddw m4, [PW_ROUND] - paddw m4, m3 - psraw m4, 5 - packuswb m0, m4 - OP_MOV [r0+8], m0, m4 - add r1, r3 - add r0, r2 - dec r4d - jne .loop - REP_RET -%endmacro - -%macro PUT_OP 2-3 - mova %1, %2 -%endmacro - -%macro AVG_OP 2-3 - mova %3, %1 - pavgb %2, %3 - mova %1, %2 -%endmacro - -INIT_MMX mmxext -%define PW_ROUND pw_16 -%define OP_MOV PUT_OP -MPEG4_QPEL16_H_LOWPASS put -%define PW_ROUND pw_16 -%define OP_MOV AVG_OP -MPEG4_QPEL16_H_LOWPASS avg -%define PW_ROUND pw_15 -%define OP_MOV PUT_OP -MPEG4_QPEL16_H_LOWPASS put_no_rnd - - - -%macro MPEG4_QPEL8_H_LOWPASS 1 -cglobal %1_mpeg4_qpel8_h_lowpass, 5, 5, 0, 8 - movsxdifnidn r2, r2d - movsxdifnidn r3, r3d - pxor m7, m7 -.loop: - mova m0, [r1] - mova m1, m0 - mova m2, m0 - punpcklbw m0, m7 - punpckhbw m1, m7 - pshufw m5, m0, 0x90 - pshufw m6, m0, 0x41 - mova m3, m2 - mova m4, m2 - psllq m2, 8 - psllq m3, 16 - psllq m4, 24 - punpckhbw m2, m7 - punpckhbw m3, m7 - punpckhbw m4, m7 - paddw m5, m3 - paddw m6, m2 - paddw m5, m5 - psubw m6, m5 - pshufw m5, m0, 0x6 - pmullw m6, [pw_3] - paddw m0, m4 - paddw m5, m1 - pmullw m0, [pw_20] - psubw m0, m5 - paddw m6, [PW_ROUND] - paddw m0, m6 - psraw m0, 5 - movh m5, [r1+5] - punpcklbw m5, m7 - pshufw m6, m5, 0xf9 - paddw m1, m5 - paddw m2, m6 - pshufw m6, m5, 0xbe - pshufw m5, m5, 0x6f - paddw m3, m6 - paddw m4, m5 - paddw m2, m2 - psubw m3, m2 - pmullw m1, [pw_20] - pmullw m3, [pw_3] - psubw m3, m4 - paddw m1, [PW_ROUND] - paddw m3, m1 - psraw m3, 5 - packuswb m0, m3 - OP_MOV [r0], m0, m4 - add r1, r3 - add r0, r2 - dec r4d - jne .loop - REP_RET -%endmacro - -INIT_MMX mmxext -%define PW_ROUND pw_16 -%define OP_MOV PUT_OP -MPEG4_QPEL8_H_LOWPASS put -%define PW_ROUND pw_16 -%define OP_MOV AVG_OP -MPEG4_QPEL8_H_LOWPASS avg -%define PW_ROUND pw_15 -%define OP_MOV PUT_OP -MPEG4_QPEL8_H_LOWPASS put_no_rnd - - - -%macro QPEL_V_LOW 5 - paddw m0, m1 - mova m4, [pw_20] - pmullw m4, m0 - mova m0, %4 - mova m5, %1 - paddw m5, m0 - psubw m4, m5 - mova m5, %2 - mova m6, %3 - paddw m5, m3 - paddw m6, m2 - paddw m6, m6 - psubw m5, m6 - pmullw m5, [pw_3] - paddw m4, [PW_ROUND] - paddw m5, m4 - psraw m5, 5 - packuswb m5, m5 - OP_MOV %5, m5, m7 - SWAP 0,1,2,3 -%endmacro - -%macro MPEG4_QPEL16_V_LOWPASS 1 -cglobal %1_mpeg4_qpel16_v_lowpass, 4, 6, 0, 544 - movsxdifnidn r2, r2d - movsxdifnidn r3, r3d - - mov r4d, 17 - mov r5, rsp - pxor m7, m7 -.looph: - mova m0, [r1] - mova m1, [r1] - mova m2, [r1+8] - mova m3, [r1+8] - punpcklbw m0, m7 - 
punpckhbw m1, m7 - punpcklbw m2, m7 - punpckhbw m3, m7 - mova [r5], m0 - mova [r5+0x88], m1 - mova [r5+0x110], m2 - mova [r5+0x198], m3 - add r5, 8 - add r1, r3 - dec r4d - jne .looph - - - ; NOTE: r1 CHANGES VALUES: r1 -> 4 - 14*dstStride - mov r4d, 4 - mov r1, 4 - neg r2 - lea r1, [r1+r2*8] - lea r1, [r1+r2*4] - lea r1, [r1+r2*2] - neg r2 - mov r5, rsp -.loopv: - pxor m7, m7 - mova m0, [r5+ 0x0] - mova m1, [r5+ 0x8] - mova m2, [r5+0x10] - mova m3, [r5+0x18] - QPEL_V_LOW [r5+0x10], [r5+ 0x8], [r5+ 0x0], [r5+0x20], [r0] - QPEL_V_LOW [r5+ 0x8], [r5+ 0x0], [r5+ 0x0], [r5+0x28], [r0+r2] - lea r0, [r0+r2*2] - QPEL_V_LOW [r5+ 0x0], [r5+ 0x0], [r5+ 0x8], [r5+0x30], [r0] - QPEL_V_LOW [r5+ 0x0], [r5+ 0x8], [r5+0x10], [r5+0x38], [r0+r2] - lea r0, [r0+r2*2] - QPEL_V_LOW [r5+ 0x8], [r5+0x10], [r5+0x18], [r5+0x40], [r0] - QPEL_V_LOW [r5+0x10], [r5+0x18], [r5+0x20], [r5+0x48], [r0+r2] - lea r0, [r0+r2*2] - QPEL_V_LOW [r5+0x18], [r5+0x20], [r5+0x28], [r5+0x50], [r0] - QPEL_V_LOW [r5+0x20], [r5+0x28], [r5+0x30], [r5+0x58], [r0+r2] - lea r0, [r0+r2*2] - QPEL_V_LOW [r5+0x28], [r5+0x30], [r5+0x38], [r5+0x60], [r0] - QPEL_V_LOW [r5+0x30], [r5+0x38], [r5+0x40], [r5+0x68], [r0+r2] - lea r0, [r0+r2*2] - QPEL_V_LOW [r5+0x38], [r5+0x40], [r5+0x48], [r5+0x70], [r0] - QPEL_V_LOW [r5+0x40], [r5+0x48], [r5+0x50], [r5+0x78], [r0+r2] - lea r0, [r0+r2*2] - QPEL_V_LOW [r5+0x48], [r5+0x50], [r5+0x58], [r5+0x80], [r0] - QPEL_V_LOW [r5+0x50], [r5+0x58], [r5+0x60], [r5+0x80], [r0+r2] - lea r0, [r0+r2*2] - QPEL_V_LOW [r5+0x58], [r5+0x60], [r5+0x68], [r5+0x78], [r0] - QPEL_V_LOW [r5+0x60], [r5+0x68], [r5+0x70], [r5+0x70], [r0+r2] - - add r5, 0x88 - add r0, r1 - dec r4d - jne .loopv - REP_RET -%endmacro - -%macro PUT_OPH 2-3 - movh %1, %2 -%endmacro - -%macro AVG_OPH 2-3 - movh %3, %1 - pavgb %2, %3 - movh %1, %2 -%endmacro - -INIT_MMX mmxext -%define PW_ROUND pw_16 -%define OP_MOV PUT_OPH -MPEG4_QPEL16_V_LOWPASS put -%define PW_ROUND pw_16 -%define OP_MOV AVG_OPH -MPEG4_QPEL16_V_LOWPASS avg -%define PW_ROUND pw_15 -%define OP_MOV PUT_OPH -MPEG4_QPEL16_V_LOWPASS put_no_rnd - - - -%macro MPEG4_QPEL8_V_LOWPASS 1 -cglobal %1_mpeg4_qpel8_v_lowpass, 4, 6, 0, 288 - movsxdifnidn r2, r2d - movsxdifnidn r3, r3d - - mov r4d, 9 - mov r5, rsp - pxor m7, m7 -.looph: - mova m0, [r1] - mova m1, [r1] - punpcklbw m0, m7 - punpckhbw m1, m7 - mova [r5], m0 - mova [r5+0x48], m1 - add r5, 8 - add r1, r3 - dec r4d - jne .looph - - - ; NOTE: r1 CHANGES VALUES: r1 -> 4 - 6*dstStride - mov r4d, 2 - mov r1, 4 - neg r2 - lea r1, [r1+r2*4] - lea r1, [r1+r2*2] - neg r2 - mov r5, rsp -.loopv: - pxor m7, m7 - mova m0, [r5+ 0x0] - mova m1, [r5+ 0x8] - mova m2, [r5+0x10] - mova m3, [r5+0x18] - QPEL_V_LOW [r5+0x10], [r5+ 0x8], [r5+ 0x0], [r5+0x20], [r0] - QPEL_V_LOW [r5+ 0x8], [r5+ 0x0], [r5+ 0x0], [r5+0x28], [r0+r2] - lea r0, [r0+r2*2] - QPEL_V_LOW [r5+ 0x0], [r5+ 0x0], [r5+ 0x8], [r5+0x30], [r0] - QPEL_V_LOW [r5+ 0x0], [r5+ 0x8], [r5+0x10], [r5+0x38], [r0+r2] - lea r0, [r0+r2*2] - QPEL_V_LOW [r5+ 0x8], [r5+0x10], [r5+0x18], [r5+0x40], [r0] - QPEL_V_LOW [r5+0x10], [r5+0x18], [r5+0x20], [r5+0x40], [r0+r2] - lea r0, [r0+r2*2] - QPEL_V_LOW [r5+0x18], [r5+0x20], [r5+0x28], [r5+0x38], [r0] - QPEL_V_LOW [r5+0x20], [r5+0x28], [r5+0x30], [r5+0x30], [r0+r2] - - add r5, 0x48 - add r0, r1 - dec r4d - jne .loopv - REP_RET -%endmacro - -INIT_MMX mmxext -%define PW_ROUND pw_16 -%define OP_MOV PUT_OPH -MPEG4_QPEL8_V_LOWPASS put -%define PW_ROUND pw_16 -%define OP_MOV AVG_OPH -MPEG4_QPEL8_V_LOWPASS avg -%define PW_ROUND pw_15 -%define OP_MOV PUT_OPH -MPEG4_QPEL8_V_LOWPASS 
put_no_rnd diff --git a/libavcodec/x86/qpeldsp.asm b/libavcodec/x86/qpeldsp.asm new file mode 100644 index 0000000000..8f65550e60 --- /dev/null +++ b/libavcodec/x86/qpeldsp.asm @@ -0,0 +1,559 @@ +;****************************************************************************** +;* quarterpel DSP functions +;* +;* Copyright (c) 2008 Loren Merritt +;* +;* This file is part of Libav. +;* +;* Libav is free software; you can redistribute it and/or +;* modify it under the terms of the GNU Lesser General Public +;* License as published by the Free Software Foundation; either +;* version 2.1 of the License, or (at your option) any later version. +;* +;* Libav is distributed in the hope that it will be useful, +;* but WITHOUT ANY WARRANTY; without even the implied warranty of +;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +;* Lesser General Public License for more details. +;* +;* You should have received a copy of the GNU Lesser General Public +;* License along with Libav; if not, write to the Free Software +;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +;****************************************************************************** + +%include "libavutil/x86/x86util.asm" + +SECTION_RODATA +cextern pb_1 +cextern pw_3 +cextern pw_15 +cextern pw_16 +cextern pw_20 + + +SECTION_TEXT + +; void ff_put_no_rnd_pixels8_l2(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) +%macro PUT_NO_RND_PIXELS8_L2 0 +cglobal put_no_rnd_pixels8_l2, 6,6 + movsxdifnidn r4, r4d + movsxdifnidn r3, r3d + pcmpeqb m6, m6 + test r5d, 1 + je .loop + mova m0, [r1] + mova m1, [r2] + add r1, r4 + add r2, 8 + pxor m0, m6 + pxor m1, m6 + PAVGB m0, m1 + pxor m0, m6 + mova [r0], m0 + add r0, r3 + dec r5d +.loop: + mova m0, [r1] + add r1, r4 + mova m1, [r1] + add r1, r4 + mova m2, [r2] + mova m3, [r2+8] + pxor m0, m6 + pxor m1, m6 + pxor m2, m6 + pxor m3, m6 + PAVGB m0, m2 + PAVGB m1, m3 + pxor m0, m6 + pxor m1, m6 + mova [r0], m0 + add r0, r3 + mova [r0], m1 + add r0, r3 + mova m0, [r1] + add r1, r4 + mova m1, [r1] + add r1, r4 + mova m2, [r2+16] + mova m3, [r2+24] + pxor m0, m6 + pxor m1, m6 + pxor m2, m6 + pxor m3, m6 + PAVGB m0, m2 + PAVGB m1, m3 + pxor m0, m6 + pxor m1, m6 + mova [r0], m0 + add r0, r3 + mova [r0], m1 + add r0, r3 + add r2, 32 + sub r5d, 4 + jne .loop + REP_RET +%endmacro + +INIT_MMX mmxext +PUT_NO_RND_PIXELS8_L2 + + +; void ff_put_no_rnd_pixels16_l2(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) +%macro PUT_NO_RND_PIXELS16_l2 0 +cglobal put_no_rnd_pixels16_l2, 6,6 + movsxdifnidn r3, r3d + movsxdifnidn r4, r4d + pcmpeqb m6, m6 + test r5d, 1 + je .loop + mova m0, [r1] + mova m1, [r1+8] + mova m2, [r2] + mova m3, [r2+8] + pxor m0, m6 + pxor m1, m6 + pxor m2, m6 + pxor m3, m6 + PAVGB m0, m2 + PAVGB m1, m3 + pxor m0, m6 + pxor m1, m6 + add r1, r4 + add r2, 16 + mova [r0], m0 + mova [r0+8], m1 + add r0, r3 + dec r5d +.loop: + mova m0, [r1] + mova m1, [r1+8] + add r1, r4 + mova m2, [r2] + mova m3, [r2+8] + pxor m0, m6 + pxor m1, m6 + pxor m2, m6 + pxor m3, m6 + PAVGB m0, m2 + PAVGB m1, m3 + pxor m0, m6 + pxor m1, m6 + mova [r0], m0 + mova [r0+8], m1 + add r0, r3 + mova m0, [r1] + mova m1, [r1+8] + add r1, r4 + mova m2, [r2+16] + mova m3, [r2+24] + pxor m0, m6 + pxor m1, m6 + pxor m2, m6 + pxor m3, m6 + PAVGB m0, m2 + PAVGB m1, m3 + pxor m0, m6 + pxor m1, m6 + mova [r0], m0 + mova [r0+8], m1 + add r0, r3 + add r2, 32 + sub r5d, 2 + jne .loop + REP_RET +%endmacro + +INIT_MMX mmxext +PUT_NO_RND_PIXELS16_l2 
+INIT_MMX 3dnow +PUT_NO_RND_PIXELS16_l2 + +%macro MPEG4_QPEL16_H_LOWPASS 1 +cglobal %1_mpeg4_qpel16_h_lowpass, 5, 5, 0, 16 + movsxdifnidn r2, r2d + movsxdifnidn r3, r3d + pxor m7, m7 +.loop: + mova m0, [r1] + mova m1, m0 + mova m2, m0 + punpcklbw m0, m7 + punpckhbw m1, m7 + pshufw m5, m0, 0x90 + pshufw m6, m0, 0x41 + mova m3, m2 + mova m4, m2 + psllq m2, 8 + psllq m3, 16 + psllq m4, 24 + punpckhbw m2, m7 + punpckhbw m3, m7 + punpckhbw m4, m7 + paddw m5, m3 + paddw m6, m2 + paddw m5, m5 + psubw m6, m5 + pshufw m5, m0, 6 + pmullw m6, [pw_3] + paddw m0, m4 + paddw m5, m1 + pmullw m0, [pw_20] + psubw m0, m5 + paddw m6, [PW_ROUND] + paddw m0, m6 + psraw m0, 5 + mova [rsp+8], m0 + mova m0, [r1+5] + mova m5, m0 + mova m6, m0 + psrlq m0, 8 + psrlq m5, 16 + punpcklbw m0, m7 + punpcklbw m5, m7 + paddw m2, m0 + paddw m3, m5 + paddw m2, m2 + psubw m3, m2 + mova m2, m6 + psrlq m6, 24 + punpcklbw m2, m7 + punpcklbw m6, m7 + pmullw m3, [pw_3] + paddw m1, m2 + paddw m4, m6 + pmullw m1, [pw_20] + psubw m3, m4 + paddw m1, [PW_ROUND] + paddw m3, m1 + psraw m3, 5 + mova m1, [rsp+8] + packuswb m1, m3 + OP_MOV [r0], m1, m4 + mova m1, [r1+9] + mova m4, m1 + mova m3, m1 + psrlq m1, 8 + psrlq m4, 16 + punpcklbw m1, m7 + punpcklbw m4, m7 + paddw m5, m1 + paddw m0, m4 + paddw m5, m5 + psubw m0, m5 + mova m5, m3 + psrlq m3, 24 + pmullw m0, [pw_3] + punpcklbw m3, m7 + paddw m2, m3 + psubw m0, m2 + mova m2, m5 + punpcklbw m2, m7 + punpckhbw m5, m7 + paddw m6, m2 + pmullw m6, [pw_20] + paddw m0, [PW_ROUND] + paddw m0, m6 + psraw m0, 5 + paddw m3, m5 + pshufw m6, m5, 0xf9 + paddw m6, m4 + pshufw m4, m5, 0xbe + pshufw m5, m5, 0x6f + paddw m4, m1 + paddw m5, m2 + paddw m6, m6 + psubw m4, m6 + pmullw m3, [pw_20] + pmullw m4, [pw_3] + psubw m3, m5 + paddw m4, [PW_ROUND] + paddw m4, m3 + psraw m4, 5 + packuswb m0, m4 + OP_MOV [r0+8], m0, m4 + add r1, r3 + add r0, r2 + dec r4d + jne .loop + REP_RET +%endmacro + +%macro PUT_OP 2-3 + mova %1, %2 +%endmacro + +%macro AVG_OP 2-3 + mova %3, %1 + pavgb %2, %3 + mova %1, %2 +%endmacro + +INIT_MMX mmxext +%define PW_ROUND pw_16 +%define OP_MOV PUT_OP +MPEG4_QPEL16_H_LOWPASS put +%define PW_ROUND pw_16 +%define OP_MOV AVG_OP +MPEG4_QPEL16_H_LOWPASS avg +%define PW_ROUND pw_15 +%define OP_MOV PUT_OP +MPEG4_QPEL16_H_LOWPASS put_no_rnd + + + +%macro MPEG4_QPEL8_H_LOWPASS 1 +cglobal %1_mpeg4_qpel8_h_lowpass, 5, 5, 0, 8 + movsxdifnidn r2, r2d + movsxdifnidn r3, r3d + pxor m7, m7 +.loop: + mova m0, [r1] + mova m1, m0 + mova m2, m0 + punpcklbw m0, m7 + punpckhbw m1, m7 + pshufw m5, m0, 0x90 + pshufw m6, m0, 0x41 + mova m3, m2 + mova m4, m2 + psllq m2, 8 + psllq m3, 16 + psllq m4, 24 + punpckhbw m2, m7 + punpckhbw m3, m7 + punpckhbw m4, m7 + paddw m5, m3 + paddw m6, m2 + paddw m5, m5 + psubw m6, m5 + pshufw m5, m0, 0x6 + pmullw m6, [pw_3] + paddw m0, m4 + paddw m5, m1 + pmullw m0, [pw_20] + psubw m0, m5 + paddw m6, [PW_ROUND] + paddw m0, m6 + psraw m0, 5 + movh m5, [r1+5] + punpcklbw m5, m7 + pshufw m6, m5, 0xf9 + paddw m1, m5 + paddw m2, m6 + pshufw m6, m5, 0xbe + pshufw m5, m5, 0x6f + paddw m3, m6 + paddw m4, m5 + paddw m2, m2 + psubw m3, m2 + pmullw m1, [pw_20] + pmullw m3, [pw_3] + psubw m3, m4 + paddw m1, [PW_ROUND] + paddw m3, m1 + psraw m3, 5 + packuswb m0, m3 + OP_MOV [r0], m0, m4 + add r1, r3 + add r0, r2 + dec r4d + jne .loop + REP_RET +%endmacro + +INIT_MMX mmxext +%define PW_ROUND pw_16 +%define OP_MOV PUT_OP +MPEG4_QPEL8_H_LOWPASS put +%define PW_ROUND pw_16 +%define OP_MOV AVG_OP +MPEG4_QPEL8_H_LOWPASS avg +%define PW_ROUND pw_15 +%define OP_MOV PUT_OP +MPEG4_QPEL8_H_LOWPASS 
put_no_rnd + + + +%macro QPEL_V_LOW 5 + paddw m0, m1 + mova m4, [pw_20] + pmullw m4, m0 + mova m0, %4 + mova m5, %1 + paddw m5, m0 + psubw m4, m5 + mova m5, %2 + mova m6, %3 + paddw m5, m3 + paddw m6, m2 + paddw m6, m6 + psubw m5, m6 + pmullw m5, [pw_3] + paddw m4, [PW_ROUND] + paddw m5, m4 + psraw m5, 5 + packuswb m5, m5 + OP_MOV %5, m5, m7 + SWAP 0,1,2,3 +%endmacro + +%macro MPEG4_QPEL16_V_LOWPASS 1 +cglobal %1_mpeg4_qpel16_v_lowpass, 4, 6, 0, 544 + movsxdifnidn r2, r2d + movsxdifnidn r3, r3d + + mov r4d, 17 + mov r5, rsp + pxor m7, m7 +.looph: + mova m0, [r1] + mova m1, [r1] + mova m2, [r1+8] + mova m3, [r1+8] + punpcklbw m0, m7 + punpckhbw m1, m7 + punpcklbw m2, m7 + punpckhbw m3, m7 + mova [r5], m0 + mova [r5+0x88], m1 + mova [r5+0x110], m2 + mova [r5+0x198], m3 + add r5, 8 + add r1, r3 + dec r4d + jne .looph + + + ; NOTE: r1 CHANGES VALUES: r1 -> 4 - 14*dstStride + mov r4d, 4 + mov r1, 4 + neg r2 + lea r1, [r1+r2*8] + lea r1, [r1+r2*4] + lea r1, [r1+r2*2] + neg r2 + mov r5, rsp +.loopv: + pxor m7, m7 + mova m0, [r5+ 0x0] + mova m1, [r5+ 0x8] + mova m2, [r5+0x10] + mova m3, [r5+0x18] + QPEL_V_LOW [r5+0x10], [r5+ 0x8], [r5+ 0x0], [r5+0x20], [r0] + QPEL_V_LOW [r5+ 0x8], [r5+ 0x0], [r5+ 0x0], [r5+0x28], [r0+r2] + lea r0, [r0+r2*2] + QPEL_V_LOW [r5+ 0x0], [r5+ 0x0], [r5+ 0x8], [r5+0x30], [r0] + QPEL_V_LOW [r5+ 0x0], [r5+ 0x8], [r5+0x10], [r5+0x38], [r0+r2] + lea r0, [r0+r2*2] + QPEL_V_LOW [r5+ 0x8], [r5+0x10], [r5+0x18], [r5+0x40], [r0] + QPEL_V_LOW [r5+0x10], [r5+0x18], [r5+0x20], [r5+0x48], [r0+r2] + lea r0, [r0+r2*2] + QPEL_V_LOW [r5+0x18], [r5+0x20], [r5+0x28], [r5+0x50], [r0] + QPEL_V_LOW [r5+0x20], [r5+0x28], [r5+0x30], [r5+0x58], [r0+r2] + lea r0, [r0+r2*2] + QPEL_V_LOW [r5+0x28], [r5+0x30], [r5+0x38], [r5+0x60], [r0] + QPEL_V_LOW [r5+0x30], [r5+0x38], [r5+0x40], [r5+0x68], [r0+r2] + lea r0, [r0+r2*2] + QPEL_V_LOW [r5+0x38], [r5+0x40], [r5+0x48], [r5+0x70], [r0] + QPEL_V_LOW [r5+0x40], [r5+0x48], [r5+0x50], [r5+0x78], [r0+r2] + lea r0, [r0+r2*2] + QPEL_V_LOW [r5+0x48], [r5+0x50], [r5+0x58], [r5+0x80], [r0] + QPEL_V_LOW [r5+0x50], [r5+0x58], [r5+0x60], [r5+0x80], [r0+r2] + lea r0, [r0+r2*2] + QPEL_V_LOW [r5+0x58], [r5+0x60], [r5+0x68], [r5+0x78], [r0] + QPEL_V_LOW [r5+0x60], [r5+0x68], [r5+0x70], [r5+0x70], [r0+r2] + + add r5, 0x88 + add r0, r1 + dec r4d + jne .loopv + REP_RET +%endmacro + +%macro PUT_OPH 2-3 + movh %1, %2 +%endmacro + +%macro AVG_OPH 2-3 + movh %3, %1 + pavgb %2, %3 + movh %1, %2 +%endmacro + +INIT_MMX mmxext +%define PW_ROUND pw_16 +%define OP_MOV PUT_OPH +MPEG4_QPEL16_V_LOWPASS put +%define PW_ROUND pw_16 +%define OP_MOV AVG_OPH +MPEG4_QPEL16_V_LOWPASS avg +%define PW_ROUND pw_15 +%define OP_MOV PUT_OPH +MPEG4_QPEL16_V_LOWPASS put_no_rnd + + + +%macro MPEG4_QPEL8_V_LOWPASS 1 +cglobal %1_mpeg4_qpel8_v_lowpass, 4, 6, 0, 288 + movsxdifnidn r2, r2d + movsxdifnidn r3, r3d + + mov r4d, 9 + mov r5, rsp + pxor m7, m7 +.looph: + mova m0, [r1] + mova m1, [r1] + punpcklbw m0, m7 + punpckhbw m1, m7 + mova [r5], m0 + mova [r5+0x48], m1 + add r5, 8 + add r1, r3 + dec r4d + jne .looph + + + ; NOTE: r1 CHANGES VALUES: r1 -> 4 - 6*dstStride + mov r4d, 2 + mov r1, 4 + neg r2 + lea r1, [r1+r2*4] + lea r1, [r1+r2*2] + neg r2 + mov r5, rsp +.loopv: + pxor m7, m7 + mova m0, [r5+ 0x0] + mova m1, [r5+ 0x8] + mova m2, [r5+0x10] + mova m3, [r5+0x18] + QPEL_V_LOW [r5+0x10], [r5+ 0x8], [r5+ 0x0], [r5+0x20], [r0] + QPEL_V_LOW [r5+ 0x8], [r5+ 0x0], [r5+ 0x0], [r5+0x28], [r0+r2] + lea r0, [r0+r2*2] + QPEL_V_LOW [r5+ 0x0], [r5+ 0x0], [r5+ 0x8], [r5+0x30], [r0] + QPEL_V_LOW [r5+ 0x0], [r5+ 0x8], 
[r5+0x10], [r5+0x38], [r0+r2] + lea r0, [r0+r2*2] + QPEL_V_LOW [r5+ 0x8], [r5+0x10], [r5+0x18], [r5+0x40], [r0] + QPEL_V_LOW [r5+0x10], [r5+0x18], [r5+0x20], [r5+0x40], [r0+r2] + lea r0, [r0+r2*2] + QPEL_V_LOW [r5+0x18], [r5+0x20], [r5+0x28], [r5+0x38], [r0] + QPEL_V_LOW [r5+0x20], [r5+0x28], [r5+0x30], [r5+0x30], [r0+r2] + + add r5, 0x48 + add r0, r1 + dec r4d + jne .loopv + REP_RET +%endmacro + +INIT_MMX mmxext +%define PW_ROUND pw_16 +%define OP_MOV PUT_OPH +MPEG4_QPEL8_V_LOWPASS put +%define PW_ROUND pw_16 +%define OP_MOV AVG_OPH +MPEG4_QPEL8_V_LOWPASS avg +%define PW_ROUND pw_15 +%define OP_MOV PUT_OPH +MPEG4_QPEL8_V_LOWPASS put_no_rnd diff --git a/libavcodec/x86/qpeldsp_init.c b/libavcodec/x86/qpeldsp_init.c new file mode 100644 index 0000000000..435b7651fd --- /dev/null +++ b/libavcodec/x86/qpeldsp_init.c @@ -0,0 +1,501 @@ +/* + * quarterpel DSP functions + * + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include + +#include "config.h" +#include "libavutil/attributes.h" +#include "libavutil/cpu.h" +#include "libavutil/x86/cpu.h" +#include "libavcodec/pixels.h" +#include "libavcodec/qpeldsp.h" +#include "fpel.h" + +void ff_put_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, + int dstStride, int src1Stride, int h); +void ff_put_no_rnd_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, + uint8_t *src2, int dstStride, + int src1Stride, int h); +void ff_avg_pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, + int dstStride, int src1Stride, int h); +void ff_put_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, + int dstStride, int src1Stride, int h); +void ff_avg_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, + int dstStride, int src1Stride, int h); +void ff_put_no_rnd_pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2, + int dstStride, int src1Stride, int h); +void ff_put_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src, + int dstStride, int srcStride, int h); +void ff_avg_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src, + int dstStride, int srcStride, int h); +void ff_put_no_rnd_mpeg4_qpel16_h_lowpass_mmxext(uint8_t *dst, uint8_t *src, + int dstStride, int srcStride, + int h); +void ff_put_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src, + int dstStride, int srcStride, int h); +void ff_avg_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src, + int dstStride, int srcStride, int h); +void ff_put_no_rnd_mpeg4_qpel8_h_lowpass_mmxext(uint8_t *dst, uint8_t *src, + int dstStride, int srcStride, + int h); +void ff_put_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src, + int dstStride, int srcStride); +void ff_avg_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t *src, + int dstStride, int srcStride); +void ff_put_no_rnd_mpeg4_qpel16_v_lowpass_mmxext(uint8_t *dst, uint8_t 
*src, + int dstStride, int srcStride); +void ff_put_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src, + int dstStride, int srcStride); +void ff_avg_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src, + int dstStride, int srcStride); +void ff_put_no_rnd_mpeg4_qpel8_v_lowpass_mmxext(uint8_t *dst, uint8_t *src, + int dstStride, int srcStride); +#define ff_put_no_rnd_pixels16_mmxext ff_put_pixels16_mmxext +#define ff_put_no_rnd_pixels8_mmxext ff_put_pixels8_mmxext + +#if HAVE_YASM + +CALL_2X_PIXELS(ff_avg_pixels16_mmxext, ff_avg_pixels8_mmxext, 8) +CALL_2X_PIXELS(ff_put_pixels16_mmxext, ff_put_pixels8_mmxext, 8) + +#define QPEL_OP(OPNAME, RND, MMX) \ +static void OPNAME ## qpel8_mc00_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + ff_ ## OPNAME ## pixels8_ ## MMX(dst, src, stride, 8); \ +} \ + \ +static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint64_t temp[8]; \ + uint8_t *const half = (uint8_t *) temp; \ + ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, \ + stride, 8); \ + ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src, half, \ + stride, stride, 8); \ +} \ + \ +static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + ff_ ## OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, \ + stride, 8); \ +} \ + \ +static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint64_t temp[8]; \ + uint8_t *const half = (uint8_t *) temp; \ + ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, \ + stride, 8); \ + ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src + 1, half, stride, \ + stride, 8); \ +} \ + \ +static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint64_t temp[8]; \ + uint8_t *const half = (uint8_t *) temp; \ + ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, \ + 8, stride); \ + ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src, half, \ + stride, stride, 8); \ +} \ + \ +static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, \ + stride, stride); \ +} \ + \ +static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint64_t temp[8]; \ + uint8_t *const half = (uint8_t *) temp; \ + ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, \ + 8, stride); \ + ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, src + stride, half, stride,\ + stride, 8); \ +} \ + \ +static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint64_t half[8 + 9]; \ + uint8_t *const halfH = (uint8_t *) half + 64; \ + uint8_t *const halfHV = (uint8_t *) half; \ + ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \ + stride, 9); \ + ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, \ + stride, 9); \ + ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\ + ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, \ + stride, 8, 8); \ +} \ + \ +static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint64_t half[8 + 9]; \ + uint8_t *const halfH = (uint8_t *) half + 64; \ + uint8_t *const halfHV = (uint8_t *) half; \ + ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \ + stride, 9); \ + ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8, \ + stride, 9); \ + ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 
8, 8);\ + ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, \ + stride, 8, 8); \ +} \ + \ +static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint64_t half[8 + 9]; \ + uint8_t *const halfH = (uint8_t *) half + 64; \ + uint8_t *const halfHV = (uint8_t *) half; \ + ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \ + stride, 9); \ + ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, \ + stride, 9); \ + ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\ + ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV, \ + stride, 8, 8); \ +} \ + \ +static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint64_t half[8 + 9]; \ + uint8_t *const halfH = (uint8_t *) half + 64; \ + uint8_t *const halfHV = (uint8_t *) half; \ + ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \ + stride, 9); \ + ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8, \ + stride, 9); \ + ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\ + ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV, \ + stride, 8, 8); \ +} \ + \ +static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint64_t half[8 + 9]; \ + uint8_t *const halfH = (uint8_t *) half + 64; \ + uint8_t *const halfHV = (uint8_t *) half; \ + ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \ + stride, 9); \ + ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\ + ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, \ + stride, 8, 8); \ +} \ + \ +static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint64_t half[8 + 9]; \ + uint8_t *const halfH = (uint8_t *) half + 64; \ + uint8_t *const halfHV = (uint8_t *) half; \ + ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \ + stride, 9); \ + ff_put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\ + ff_ ## OPNAME ## pixels8_l2_ ## MMX(dst, halfH + 8, halfHV, \ + stride, 8, 8); \ +} \ + \ +static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint64_t half[8 + 9]; \ + uint8_t *const halfH = (uint8_t *) half; \ + ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \ + stride, 9); \ + ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, \ + 8, stride, 9); \ + ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, \ + stride, 8); \ +} \ + \ +static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint64_t half[8 + 9]; \ + uint8_t *const halfH = (uint8_t *) half; \ + ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \ + stride, 9); \ + ff_put ## RND ## pixels8_l2_ ## MMX(halfH, src + 1, halfH, 8, \ + stride, 9); \ + ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, \ + stride, 8); \ +} \ + \ +static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint64_t half[9]; \ + uint8_t *const halfH = (uint8_t *) half; \ + ff_put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, \ + stride, 9); \ + ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, \ + stride, 8); \ +} \ + \ +static void OPNAME ## qpel16_mc00_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + ff_ ## OPNAME ## pixels16_ ## MMX(dst, src, stride, 16); \ +} \ + \ +static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t 
stride) \ +{ \ + uint64_t temp[32]; \ + uint8_t *const half = (uint8_t *) temp; \ + ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, \ + stride, 16); \ + ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, \ + stride, 16); \ +} \ + \ +static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + ff_ ## OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, \ + stride, stride, 16);\ +} \ + \ +static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint64_t temp[32]; \ + uint8_t *const half = (uint8_t*) temp; \ + ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, \ + stride, 16); \ + ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src + 1, half, \ + stride, stride, 16); \ +} \ + \ +static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint64_t temp[32]; \ + uint8_t *const half = (uint8_t *) temp; \ + ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, \ + stride); \ + ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, \ + stride, 16); \ +} \ + \ +static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, \ + stride, stride); \ +} \ + \ +static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint64_t temp[32]; \ + uint8_t *const half = (uint8_t *) temp; \ + ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, \ + stride); \ + ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, \ + stride, stride, 16); \ +} \ + \ +static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint64_t half[16 * 2 + 17 * 2]; \ + uint8_t *const halfH = (uint8_t *) half + 256; \ + uint8_t *const halfHV = (uint8_t *) half; \ + ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \ + stride, 17); \ + ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, \ + stride, 17); \ + ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \ + 16, 16); \ + ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, \ + stride, 16, 16); \ +} \ + \ +static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint64_t half[16 * 2 + 17 * 2]; \ + uint8_t *const halfH = (uint8_t *) half + 256; \ + uint8_t *const halfHV = (uint8_t *) half; \ + ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \ + stride, 17); \ + ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16, \ + stride, 17); \ + ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \ + 16, 16); \ + ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, \ + stride, 16, 16); \ +} \ + \ +static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint64_t half[16 * 2 + 17 * 2]; \ + uint8_t *const halfH = (uint8_t *) half + 256; \ + uint8_t *const halfHV = (uint8_t *) half; \ + ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \ + stride, 17); \ + ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, \ + stride, 17); \ + ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \ + 16, 16); \ + ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV, \ + stride, 16, 16); \ +} \ + \ +static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint64_t half[16 * 2 + 17 * 2]; \ + uint8_t *const halfH = 
(uint8_t *) half + 256; \ + uint8_t *const halfHV = (uint8_t *) half; \ + ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \ + stride, 17); \ + ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16, \ + stride, 17); \ + ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \ + 16, 16); \ + ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV, \ + stride, 16, 16); \ +} \ + \ +static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint64_t half[16 * 2 + 17 * 2]; \ + uint8_t *const halfH = (uint8_t *) half + 256; \ + uint8_t *const halfHV = (uint8_t *) half; \ + ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \ + stride, 17); \ + ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \ + 16, 16); \ + ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, \ + stride, 16, 16); \ +} \ + \ +static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint64_t half[16 * 2 + 17 * 2]; \ + uint8_t *const halfH = (uint8_t *) half + 256; \ + uint8_t *const halfHV = (uint8_t *) half; \ + ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \ + stride, 17); \ + ff_put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, \ + 16, 16); \ + ff_ ## OPNAME ## pixels16_l2_ ## MMX(dst, halfH + 16, halfHV, \ + stride, 16, 16); \ +} \ + \ +static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint64_t half[17 * 2]; \ + uint8_t *const halfH = (uint8_t *) half; \ + ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \ + stride, 17); \ + ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, \ + stride, 17); \ + ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, \ + stride, 16); \ +} \ + \ +static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint64_t half[17 * 2]; \ + uint8_t *const halfH = (uint8_t *) half; \ + ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \ + stride, 17); \ + ff_put ## RND ## pixels16_l2_ ## MMX(halfH, src + 1, halfH, 16, \ + stride, 17); \ + ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, \ + stride, 16); \ +} \ + \ +static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, \ + ptrdiff_t stride) \ +{ \ + uint64_t half[17 * 2]; \ + uint8_t *const halfH = (uint8_t *) half; \ + ff_put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, \ + stride, 17); \ + ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, \ + stride, 16); \ +} + +QPEL_OP(put_, _, mmxext) +QPEL_OP(avg_, _, mmxext) +QPEL_OP(put_no_rnd_, _no_rnd_, mmxext) + +#endif /* HAVE_YASM */ + +#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX) \ +do { \ + c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; 
\ + c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \ + c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU; \ +} while (0) + +av_cold void ff_qpeldsp_init_x86(QpelDSPContext *c) +{ + int cpu_flags = av_get_cpu_flags(); + + if (X86_MMXEXT(cpu_flags)) { +#if HAVE_MMXEXT_EXTERNAL + SET_QPEL_FUNCS(avg_qpel, 0, 16, mmxext, ); + SET_QPEL_FUNCS(avg_qpel, 1, 8, mmxext, ); + + SET_QPEL_FUNCS(put_qpel, 0, 16, mmxext, ); + SET_QPEL_FUNCS(put_qpel, 1, 8, mmxext, ); + SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmxext, ); + SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmxext, ); +#endif /* HAVE_MMXEXT_EXTERNAL */ + } +} -- cgit v1.2.3
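Editor's note on usage (not part of the patch): the SET_QPEL_FUNCS() macro and ff_qpeldsp_init_x86() above fill [2][16] tables of motion-compensation functions, where index 0/1 selects 16x16 or 8x8 blocks and the 16 entries follow the mc<dx><dy> ordering, i.e. table index (dy << 2) | dx. The standalone sketch below only models that dispatch pattern; the struct and function names (MyQpelTable, my_qpel_mc_func, copy16_mc00) are illustrative stand-ins, not the real QpelDSPContext/qpel_mc_func declared in libavcodec/qpeldsp.h, and the indexing rule is inferred from SET_QPEL_FUNCS(), so treat it as an assumption.

/*
 * Standalone model of a qpel function table and its dispatch.
 * All names are hypothetical; only the table shape and indexing
 * mirror what SET_QPEL_FUNCS() in qpeldsp_init.c sets up.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

typedef void (*my_qpel_mc_func)(uint8_t *dst, uint8_t *src, ptrdiff_t stride);

/* [0] = 16x16 blocks, [1] = 8x8 blocks; 16 subpel positions each,
 * indexed as (dy << 2) | dx to match the mc00..mc33 ordering above. */
typedef struct MyQpelTable {
    my_qpel_mc_func put_qpel_pixels_tab[2][16];
} MyQpelTable;

/* Trivial integer-pel (mc00) case for 16x16 blocks: a plain row copy. */
static void copy16_mc00(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
{
    for (int y = 0; y < 16; y++)
        memcpy(dst + y * stride, src + y * stride, 16);
}

int main(void)
{
    static uint8_t src[16 * 16], dst[16 * 16];
    MyQpelTable c = { 0 };
    int mx = 0, my = 0;   /* quarter-pel fractional MV components, 0..3 */

    c.put_qpel_pixels_tab[0][0] = copy16_mc00;  /* only mc00 wired up here */

    for (int i = 0; i < 16 * 16; i++)
        src[i] = (uint8_t) i;

    /* Dispatch the way a codec would: block size index 0 (16x16) and the
     * subpel position derived from the fractional motion vector. */
    c.put_qpel_pixels_tab[0][(my << 2) | mx](dst, src, 16);

    printf("dst[17] = %d\n", dst[17]);  /* prints 17, the copied source byte */
    return 0;
}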