author     Michael Niedermayer <michaelni@gmx.at>  2002-09-11 12:39:53 +0000
committer  Michael Niedermayer <michaelni@gmx.at>  2002-09-11 12:39:53 +0000
commit     b3184779924e40e82b1f92b4b315b2c4074a9669 (patch)
tree       6f76a3ff7ce70d6d424f60206de7496f3845873f /libavcodec
parent     6b460aa387530feefc91302c150a3405997e61cf (diff)
put/avg_pixels16
fixing 2 small qpel bugs
Originally committed as revision 915 to svn://svn.ffmpeg.org/ffmpeg/trunk
Diffstat (limited to 'libavcodec')
-rw-r--r--  libavcodec/dsputil.c               856
-rw-r--r--  libavcodec/dsputil.h                24
-rw-r--r--  libavcodec/i386/dsputil_mmx.c      167
-rw-r--r--  libavcodec/i386/dsputil_mmx_avg.h   87
-rw-r--r--  libavcodec/i386/dsputil_mmx_rnd.h  132
-rw-r--r--  libavcodec/motion_est.c             12
-rw-r--r--  libavcodec/mpegvideo.c              74
-rw-r--r--  libavcodec/svq1.c                    5
8 files changed, 958 insertions, 399 deletions
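
The structural change in the diff below is that the op_pixels function tables gain a block-size dimension ([2][4], 16x16 and 8x8 entries), and the 16-pixel-wide C functions are generated from the existing 8-pixel ones via the CALL_2X_PIXELS macro added to dsputil.h. A minimal sketch of that pattern, using the names from the diff (illustration only, not the full patch):

    /* CALL_2X_PIXELS wraps an 8-pixel-wide op so it also covers the right
     * half of a 16-pixel-wide block (offset n = 8). */
    #define CALL_2X_PIXELS(a, b, n)\
    static void a(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
        b(block  , pixels  , line_size, h);\
        b(block+n, pixels+n, line_size, h);\
    }

    /* e.g. put_pixels16() becomes two calls to put_pixels8() */
    CALL_2X_PIXELS(put_pixels16, put_pixels8, 8)
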
diff --git a/libavcodec/dsputil.c b/libavcodec/dsputil.c
index b30d368de9..9ee585c4d7 100644
--- a/libavcodec/dsputil.c
+++ b/libavcodec/dsputil.c
@@ -291,7 +291,7 @@ void add_pixels_clamped_c(const DCTELEM *block, UINT8 *restrict pixels,
#if 0
#define PIXOP2(OPNAME, OP) \
-void OPNAME ## _pixels(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
+static void OPNAME ## _pixels(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
int i;\
for(i=0; i<h; i++){\
@@ -301,7 +301,7 @@ void OPNAME ## _pixels(uint8_t *block, const uint8_t *pixels, int line_size, int
}\
}\
\
-void OPNAME ## _no_rnd_pixels_x2(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
+static void OPNAME ## _no_rnd_pixels_x2(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
int i;\
for(i=0; i<h; i++){\
@@ -313,7 +313,7 @@ void OPNAME ## _no_rnd_pixels_x2(uint8_t *block, const uint8_t *pixels, int line
}\
}\
\
-void OPNAME ## _pixels_x2(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
+static void OPNAME ## _pixels_x2(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
int i;\
for(i=0; i<h; i++){\
@@ -325,7 +325,7 @@ void OPNAME ## _pixels_x2(uint8_t *block, const uint8_t *pixels, int line_size,
}\
}\
\
-void OPNAME ## _no_rnd_pixels_y2(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
+static void OPNAME ## _no_rnd_pixels_y2(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
int i;\
for(i=0; i<h; i++){\
@@ -337,7 +337,7 @@ void OPNAME ## _no_rnd_pixels_y2(uint8_t *block, const uint8_t *pixels, int line
}\
}\
\
-void OPNAME ## _pixels_y2(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
+static void OPNAME ## _pixels_y2(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
int i;\
for(i=0; i<h; i++){\
@@ -349,7 +349,7 @@ void OPNAME ## _pixels_y2(uint8_t *block, const uint8_t *pixels, int line_size,
}\
}\
\
-void OPNAME ## _pixels_xy2(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
+static void OPNAME ## _pixels_xy2(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
int i;\
const uint64_t a= LD64(pixels );\
@@ -385,7 +385,7 @@ void OPNAME ## _pixels_xy2(uint8_t *block, const uint8_t *pixels, int line_size,
}\
}\
\
-void OPNAME ## _no_rnd_pixels_xy2(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
+static void OPNAME ## _no_rnd_pixels_xy2(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
int i;\
const uint64_t a= LD64(pixels );\
@@ -421,26 +421,45 @@ void OPNAME ## _no_rnd_pixels_xy2(uint8_t *block, const uint8_t *pixels, int lin
}\
}\
\
-void (*OPNAME ## _pixels_tab[4])(uint8_t *block, const uint8_t *pixels, int line_size, int h) = {\
- OPNAME ## _pixels,\
- OPNAME ## _pixels_x2,\
- OPNAME ## _pixels_y2,\
- OPNAME ## _pixels_xy2,\
+CALL_2X_PIXELS(OPNAME ## _pixels16 , OPNAME ## _pixels , 8)\
+CALL_2X_PIXELS(OPNAME ## _pixels16_x2 , OPNAME ## _pixels_x2 , 8)\
+CALL_2X_PIXELS(OPNAME ## _pixels16_y2 , OPNAME ## _pixels_y2 , 8)\
+CALL_2X_PIXELS(OPNAME ## _pixels16_xy2, OPNAME ## _pixels_xy2, 8)\
+CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_x2 , OPNAME ## _no_rnd_pixels_x2 , 8)\
+CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_y2 , OPNAME ## _no_rnd_pixels_y2 , 8)\
+CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_xy2, OPNAME ## _no_rnd_pixels_xy2, 8)\
+\
+void (*OPNAME ## _pixels_tab[2][4])(uint8_t *block, const uint8_t *pixels, int line_size, int h) = {\
+ {\
+ OPNAME ## _pixels,\
+ OPNAME ## _pixels_x2,\
+ OPNAME ## _pixels_y2,\
+ OPNAME ## _pixels_xy2},\
+ {\
+ OPNAME ## _pixels16,\
+ OPNAME ## _pixels16_x2,\
+ OPNAME ## _pixels16_y2,\
+ OPNAME ## _pixels16_xy2}\
};\
\
-void (*OPNAME ## _no_rnd_pixels_tab[4])(uint8_t *block, const uint8_t *pixels, int line_size, int h) = {\
- OPNAME ## _pixels,\
- OPNAME ## _no_rnd_pixels_x2,\
- OPNAME ## _no_rnd_pixels_y2,\
- OPNAME ## _no_rnd_pixels_xy2,\
+void (*OPNAME ## _no_rnd_pixels_tab[2][4])(uint8_t *block, const uint8_t *pixels, int line_size, int h) = {\
+ {\
+ OPNAME ## _pixels,\
+ OPNAME ## _no_rnd_pixels_x2,\
+ OPNAME ## _no_rnd_pixels_y2,\
+ OPNAME ## _no_rnd_pixels_xy2},\
+ {\
+ OPNAME ## _pixels16,\
+ OPNAME ## _no_rnd_pixels16_x2,\
+ OPNAME ## _no_rnd_pixels16_y2,\
+ OPNAME ## _no_rnd_pixels16_xy2}\
};
#define op_avg(a, b) a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEFEFEFEFEULL)>>1) )
#else // 64 bit variant
#define PIXOP2(OPNAME, OP) \
-void OPNAME ## _pixels(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
-{\
+static void OPNAME ## _pixels8(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
int i;\
for(i=0; i<h; i++){\
OP(*((uint32_t*)(block )), LD32(pixels ));\
@@ -449,76 +468,148 @@ void OPNAME ## _pixels(uint8_t *block, const uint8_t *pixels, int line_size, int
block +=line_size;\
}\
}\
+static inline void OPNAME ## _no_rnd_pixels8(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
+ OPNAME ## _pixels8(block, pixels, line_size, h);\
+}\
\
-void OPNAME ## _no_rnd_pixels_x2(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
-{\
+static inline void OPNAME ## _no_rnd_pixels8_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
+ int src_stride1, int src_stride2, int h){\
int i;\
for(i=0; i<h; i++){\
- int j;\
- for(j=0; j<2; j++){\
- const uint32_t a= LD32(pixels );\
- const uint32_t b= LD32(pixels+1);\
- OP(*((uint32_t*)block), (a&b) + (((a^b)&0xFEFEFEFEUL)>>1));\
- pixels+=4;\
- block +=4;\
- }\
- pixels+=line_size-8;\
- block +=line_size-8;\
+ uint32_t a,b;\
+ a= LD32(&src1[i*src_stride1 ]);\
+ b= LD32(&src2[i*src_stride2 ]);\
+ OP(*((uint32_t*)&dst[i*dst_stride ]), (a&b) + (((a^b)&0xFEFEFEFEUL)>>1));\
+ a= LD32(&src1[i*src_stride1+4]);\
+ b= LD32(&src2[i*src_stride2+4]);\
+ OP(*((uint32_t*)&dst[i*dst_stride+4]), (a&b) + (((a^b)&0xFEFEFEFEUL)>>1));\
}\
}\
\
-void OPNAME ## _pixels_x2(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
-{\
+static inline void OPNAME ## _pixels8_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
+ int src_stride1, int src_stride2, int h){\
int i;\
for(i=0; i<h; i++){\
- int j;\
- for(j=0; j<2; j++){\
- const uint32_t a= LD32(pixels );\
- const uint32_t b= LD32(pixels+1);\
- OP(*((uint32_t*)block), (a|b) - (((a^b)&0xFEFEFEFEUL)>>1));\
- pixels+=4;\
- block +=4;\
- }\
- pixels+=line_size-8;\
- block +=line_size-8;\
+ uint32_t a,b;\
+ a= LD32(&src1[i*src_stride1 ]);\
+ b= LD32(&src2[i*src_stride2 ]);\
+ OP(*((uint32_t*)&dst[i*dst_stride ]), (a|b) - (((a^b)&0xFEFEFEFEUL)>>1));\
+ a= LD32(&src1[i*src_stride1+4]);\
+ b= LD32(&src2[i*src_stride2+4]);\
+ OP(*((uint32_t*)&dst[i*dst_stride+4]), (a|b) - (((a^b)&0xFEFEFEFEUL)>>1));\
}\
}\
\
-void OPNAME ## _no_rnd_pixels_y2(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
-{\
+static inline void OPNAME ## _pixels16_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
+ int src_stride1, int src_stride2, int h){\
+ OPNAME ## _pixels8_l2(dst , src1 , src2 , dst_stride, src_stride1, src_stride2, h);\
+ OPNAME ## _pixels8_l2(dst+8, src1+8, src2+8, dst_stride, src_stride1, src_stride2, h);\
+}\
+\
+static inline void OPNAME ## _no_rnd_pixels16_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
+ int src_stride1, int src_stride2, int h){\
+ OPNAME ## _no_rnd_pixels8_l2(dst , src1 , src2 , dst_stride, src_stride1, src_stride2, h);\
+ OPNAME ## _no_rnd_pixels8_l2(dst+8, src1+8, src2+8, dst_stride, src_stride1, src_stride2, h);\
+}\
+\
+static inline void OPNAME ## _no_rnd_pixels8_x2(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
+ OPNAME ## _no_rnd_pixels8_l2(block, pixels, pixels+1, line_size, line_size, line_size, h);\
+}\
+\
+static inline void OPNAME ## _pixels8_x2(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
+ OPNAME ## _pixels8_l2(block, pixels, pixels+1, line_size, line_size, line_size, h);\
+}\
+\
+static inline void OPNAME ## _no_rnd_pixels8_y2(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
+ OPNAME ## _no_rnd_pixels8_l2(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
+}\
+\
+static inline void OPNAME ## _pixels8_y2(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
+ OPNAME ## _pixels8_l2(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
+}\
+\
+static inline void OPNAME ## _pixels8_l4(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,\
+ int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
int i;\
for(i=0; i<h; i++){\
- int j;\
- for(j=0; j<2; j++){\
- const uint32_t a= LD32(pixels );\
- const uint32_t b= LD32(pixels+line_size);\
- OP(*((uint32_t*)block), (a&b) + (((a^b)&0xFEFEFEFEUL)>>1));\
- pixels+=4;\
- block +=4;\
- }\
- pixels+=line_size-8;\
- block +=line_size-8;\
+ uint32_t a, b, c, d, l0, l1, h0, h1;\
+ a= LD32(&src1[i*src_stride1]);\
+ b= LD32(&src2[i*src_stride2]);\
+ c= LD32(&src3[i*src_stride3]);\
+ d= LD32(&src4[i*src_stride4]);\
+ l0= (a&0x03030303UL)\
+ + (b&0x03030303UL)\
+ + 0x02020202UL;\
+ h0= ((a&0xFCFCFCFCUL)>>2)\
+ + ((b&0xFCFCFCFCUL)>>2);\
+ l1= (c&0x03030303UL)\
+ + (d&0x03030303UL);\
+ h1= ((c&0xFCFCFCFCUL)>>2)\
+ + ((d&0xFCFCFCFCUL)>>2);\
+ OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
+ a= LD32(&src1[i*src_stride1+4]);\
+ b= LD32(&src2[i*src_stride2+4]);\
+ c= LD32(&src3[i*src_stride3+4]);\
+ d= LD32(&src4[i*src_stride4+4]);\
+ l0= (a&0x03030303UL)\
+ + (b&0x03030303UL)\
+ + 0x02020202UL;\
+ h0= ((a&0xFCFCFCFCUL)>>2)\
+ + ((b&0xFCFCFCFCUL)>>2);\
+ l1= (c&0x03030303UL)\
+ + (d&0x03030303UL);\
+ h1= ((c&0xFCFCFCFCUL)>>2)\
+ + ((d&0xFCFCFCFCUL)>>2);\
+ OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
}\
}\
-\
-void OPNAME ## _pixels_y2(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
-{\
+static inline void OPNAME ## _no_rnd_pixels8_l4(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,\
+ int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
int i;\
for(i=0; i<h; i++){\
- int j;\
- for(j=0; j<2; j++){\
- const uint32_t a= LD32(pixels );\
- const uint32_t b= LD32(pixels+line_size);\
- OP(*((uint32_t*)block), (a|b) - (((a^b)&0xFEFEFEFEUL)>>1));\
- pixels+=4;\
- block +=4;\
- }\
- pixels+=line_size-8;\
- block +=line_size-8;\
+ uint32_t a, b, c, d, l0, l1, h0, h1;\
+ a= LD32(&src1[i*src_stride1]);\
+ b= LD32(&src2[i*src_stride2]);\
+ c= LD32(&src3[i*src_stride3]);\
+ d= LD32(&src4[i*src_stride4]);\
+ l0= (a&0x03030303UL)\
+ + (b&0x03030303UL)\
+ + 0x01010101UL;\
+ h0= ((a&0xFCFCFCFCUL)>>2)\
+ + ((b&0xFCFCFCFCUL)>>2);\
+ l1= (c&0x03030303UL)\
+ + (d&0x03030303UL);\
+ h1= ((c&0xFCFCFCFCUL)>>2)\
+ + ((d&0xFCFCFCFCUL)>>2);\
+ OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
+ a= LD32(&src1[i*src_stride1+4]);\
+ b= LD32(&src2[i*src_stride2+4]);\
+ c= LD32(&src3[i*src_stride3+4]);\
+ d= LD32(&src4[i*src_stride4+4]);\
+ l0= (a&0x03030303UL)\
+ + (b&0x03030303UL)\
+ + 0x01010101UL;\
+ h0= ((a&0xFCFCFCFCUL)>>2)\
+ + ((b&0xFCFCFCFCUL)>>2);\
+ l1= (c&0x03030303UL)\
+ + (d&0x03030303UL);\
+ h1= ((c&0xFCFCFCFCUL)>>2)\
+ + ((d&0xFCFCFCFCUL)>>2);\
+ OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
}\
}\
+static inline void OPNAME ## _pixels16_l4(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,\
+ int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
+ OPNAME ## _pixels8_l4(dst , src1 , src2 , src3 , src4 , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
+ OPNAME ## _pixels8_l4(dst+8, src1+8, src2+8, src3+8, src4+8, dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
+}\
+static inline void OPNAME ## _no_rnd_pixels16_l4(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,\
+ int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
+ OPNAME ## _no_rnd_pixels8_l4(dst , src1 , src2 , src3 , src4 , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
+ OPNAME ## _no_rnd_pixels8_l4(dst+8, src1+8, src2+8, src3+8, src4+8, dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
+}\
\
-void OPNAME ## _pixels_xy2(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
+static inline void OPNAME ## _pixels8_xy2(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
int j;\
for(j=0; j<2; j++){\
@@ -559,7 +650,7 @@ void OPNAME ## _pixels_xy2(uint8_t *block, const uint8_t *pixels, int line_size,
}\
}\
\
-void OPNAME ## _no_rnd_pixels_xy2(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
+static inline void OPNAME ## _no_rnd_pixels8_xy2(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
int j;\
for(j=0; j<2; j++){\
@@ -600,19 +691,41 @@ void OPNAME ## _no_rnd_pixels_xy2(uint8_t *block, const uint8_t *pixels, int lin
}\
}\
\
-void (*OPNAME ## _pixels_tab[4])(uint8_t *block, const uint8_t *pixels, int line_size, int h) = {\
- OPNAME ## _pixels,\
- OPNAME ## _pixels_x2,\
- OPNAME ## _pixels_y2,\
- OPNAME ## _pixels_xy2,\
+CALL_2X_PIXELS(OPNAME ## _pixels16 , OPNAME ## _pixels8 , 8)\
+CALL_2X_PIXELS(OPNAME ## _pixels16_x2 , OPNAME ## _pixels8_x2 , 8)\
+CALL_2X_PIXELS(OPNAME ## _pixels16_y2 , OPNAME ## _pixels8_y2 , 8)\
+CALL_2X_PIXELS(OPNAME ## _pixels16_xy2, OPNAME ## _pixels8_xy2, 8)\
+CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16 , OPNAME ## _pixels8 , 8)\
+CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_x2 , OPNAME ## _no_rnd_pixels8_x2 , 8)\
+CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_y2 , OPNAME ## _no_rnd_pixels8_y2 , 8)\
+CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_xy2, OPNAME ## _no_rnd_pixels8_xy2, 8)\
+\
+void (*OPNAME ## _pixels_tab[2][4])(uint8_t *block, const uint8_t *pixels, int line_size, int h) = {\
+ {\
+ OPNAME ## _pixels16,\
+ OPNAME ## _pixels16_x2,\
+ OPNAME ## _pixels16_y2,\
+ OPNAME ## _pixels16_xy2},\
+ {\
+ OPNAME ## _pixels8,\
+ OPNAME ## _pixels8_x2,\
+ OPNAME ## _pixels8_y2,\
+ OPNAME ## _pixels8_xy2},\
};\
\
-void (*OPNAME ## _no_rnd_pixels_tab[4])(uint8_t *block, const uint8_t *pixels, int line_size, int h) = {\
- OPNAME ## _pixels,\
- OPNAME ## _no_rnd_pixels_x2,\
- OPNAME ## _no_rnd_pixels_y2,\
- OPNAME ## _no_rnd_pixels_xy2,\
+void (*OPNAME ## _no_rnd_pixels_tab[2][4])(uint8_t *block, const uint8_t *pixels, int line_size, int h) = {\
+ {\
+ OPNAME ## _pixels16,\
+ OPNAME ## _no_rnd_pixels16_x2,\
+ OPNAME ## _no_rnd_pixels16_y2,\
+ OPNAME ## _no_rnd_pixels16_xy2},\
+ {\
+ OPNAME ## _pixels8,\
+ OPNAME ## _no_rnd_pixels8_x2,\
+ OPNAME ## _no_rnd_pixels8_y2,\
+ OPNAME ## _no_rnd_pixels8_xy2},\
};
+
#define op_avg(a, b) a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEUL)>>1) )
#endif
#define op_put(a, b) a = b
@@ -752,7 +865,7 @@ PIXOP(uint8_t, put_no_rnd, op_put, line_size)
#define avg2(a,b) ((a+b+1)>>1)
#define avg4(a,b,c,d) ((a+b+c+d+2)>>2)
-static void gmc1_c(UINT8 *dst, UINT8 *src, int srcStride, int h, int x16, int y16, int rounder)
+static void gmc1_c(UINT8 *dst, UINT8 *src, int stride, int h, int x16, int y16, int rounder)
{
const int A=(16-x16)*(16-y16);
const int B=( x16)*(16-y16);
@@ -763,270 +876,465 @@ static void gmc1_c(UINT8 *dst, UINT8 *src, int srcStride, int h, int x16, int y1
for(i=0; i<h; i++)
{
- dst[0]= (A*src[0] + B*src[1] + C*src[srcStride+0] + D*src[srcStride+1] + rounder)>>8;
- dst[1]= (A*src[1] + B*src[2] + C*src[srcStride+1] + D*src[srcStride+2] + rounder)>>8;
- dst[2]= (A*src[2] + B*src[3] + C*src[srcStride+2] + D*src[srcStride+3] + rounder)>>8;
- dst[3]= (A*src[3] + B*src[4] + C*src[srcStride+3] + D*src[srcStride+4] + rounder)>>8;
- dst[4]= (A*src[4] + B*src[5] + C*src[srcStride+4] + D*src[srcStride+5] + rounder)>>8;
- dst[5]= (A*src[5] + B*src[6] + C*src[srcStride+5] + D*src[srcStride+6] + rounder)>>8;
- dst[6]= (A*src[6] + B*src[7] + C*src[srcStride+6] + D*src[srcStride+7] + rounder)>>8;
- dst[7]= (A*src[7] + B*src[8] + C*src[srcStride+7] + D*src[srcStride+8] + rounder)>>8;
- dst+= srcStride;
- src+= srcStride;
+ dst[0]= (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + rounder)>>8;
+ dst[1]= (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + rounder)>>8;
+ dst[2]= (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + rounder)>>8;
+ dst[3]= (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + rounder)>>8;
+ dst[4]= (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + rounder)>>8;
+ dst[5]= (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + rounder)>>8;
+ dst[6]= (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + rounder)>>8;
+ dst[7]= (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + rounder)>>8;
+ dst+= stride;
+ src+= stride;
}
}
-static void qpel_h_lowpass(UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int h, int r)
+static inline void copy_block17(UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int h)
{
- UINT8 *cm = cropTbl + MAX_NEG_CROP;
int i;
for(i=0; i<h; i++)
{
- dst[0]= cm[(((src[0]+src[1])*20 - (src[0]+src[2])*6 + (src[1]+src[3])*3 - (src[2]+src[4]) + r)>>5)];
- dst[1]= cm[(((src[1]+src[2])*20 - (src[0]+src[3])*6 + (src[0]+src[4])*3 - (src[1]+src[5]) + r)>>5)];
- dst[2]= cm[(((src[2]+src[3])*20 - (src[1]+src[4])*6 + (src[0]+src[5])*3 - (src[0]+src[6]) + r)>>5)];
- dst[3]= cm[(((src[3]+src[4])*20 - (src[2]+src[5])*6 + (src[1]+src[6])*3 - (src[0]+src[7]) + r)>>5)];
- dst[4]= cm[(((src[4]+src[5])*20 - (src[3]+src[6])*6 + (src[2]+src[7])*3 - (src[1]+src[8]) + r)>>5)];
- dst[5]= cm[(((src[5]+src[6])*20 - (src[4]+src[7])*6 + (src[3]+src[8])*3 - (src[2]+src[8]) + r)>>5)];
- dst[6]= cm[(((src[6]+src[7])*20 - (src[5]+src[8])*6 + (src[4]+src[8])*3 - (src[3]+src[7]) + r)>>5)];
- dst[7]= cm[(((src[7]+src[8])*20 - (src[6]+src[8])*6 + (src[5]+src[7])*3 - (src[4]+src[6]) + r)>>5)];
+ ST32(dst , LD32(src ));
+ ST32(dst+4 , LD32(src+4 ));
+ ST32(dst+8 , LD32(src+8 ));
+ ST32(dst+12, LD32(src+12));
+ dst[16]= src[16];
dst+=dstStride;
src+=srcStride;
}
}
-static void qpel_v_lowpass(UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int w, int r)
-{
- UINT8 *cm = cropTbl + MAX_NEG_CROP;
- int i;
- for(i=0; i<w; i++)
- {
- const int src0= src[0*srcStride];
- const int src1= src[1*srcStride];
- const int src2= src[2*srcStride];
- const int src3= src[3*srcStride];
- const int src4= src[4*srcStride];
- const int src5= src[5*srcStride];
- const int src6= src[6*srcStride];
- const int src7= src[7*srcStride];
- const int src8= src[8*srcStride];
- dst[0*dstStride]= cm[(((src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4) + r)>>5)];
- dst[1*dstStride]= cm[(((src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5) + r)>>5)];
- dst[2*dstStride]= cm[(((src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6) + r)>>5)];
- dst[3*dstStride]= cm[(((src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7) + r)>>5)];
- dst[4*dstStride]= cm[(((src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8) + r)>>5)];
- dst[5*dstStride]= cm[(((src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8) + r)>>5)];
- dst[6*dstStride]= cm[(((src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7) + r)>>5)];
- dst[7*dstStride]= cm[(((src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6) + r)>>5)];
- dst++;
- src++;
- }
-}
-
-static inline void put_block(UINT8 *dst, UINT8 *src, int dstStride, int srcStride)
+static inline void copy_block9(UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int h)
{
int i;
- for(i=0; i<8; i++)
+ for(i=0; i<h; i++)
{
- dst[0]= src[0];
- dst[1]= src[1];
- dst[2]= src[2];
- dst[3]= src[3];
- dst[4]= src[4];
- dst[5]= src[5];
- dst[6]= src[6];
- dst[7]= src[7];
+ ST32(dst , LD32(src ));
+ ST32(dst+4 , LD32(src+4 ));
+ dst[8]= src[8];
dst+=dstStride;
src+=srcStride;
}
}
-static inline void avg2_block(UINT8 *dst, UINT8 *src1, UINT8 *src2, int dstStride, int srcStride, int r)
-{
- int i;
- for(i=0; i<8; i++)
- {
- dst[0]= (src1[0] + src2[0] + r)>>1;
- dst[1]= (src1[1] + src2[1] + r)>>1;
- dst[2]= (src1[2] + src2[2] + r)>>1;
- dst[3]= (src1[3] + src2[3] + r)>>1;
- dst[4]= (src1[4] + src2[4] + r)>>1;
- dst[5]= (src1[5] + src2[5] + r)>>1;
- dst[6]= (src1[6] + src2[6] + r)>>1;
- dst[7]= (src1[7] + src2[7] + r)>>1;
- dst+=dstStride;
- src1+=srcStride;
- src2+=8;
- }
-}
-
-static inline void avg4_block(UINT8 *dst, UINT8 *src1, UINT8 *src2, UINT8 *src3, UINT8 *src4, int dstStride, int srcStride, int r)
-{
- int i;
- for(i=0; i<8; i++)
- {
- dst[0]= (src1[0] + src2[0] + src3[0] + src4[0] + r)>>2;
- dst[1]= (src1[1] + src2[1] + src3[1] + src4[1] + r)>>2;
- dst[2]= (src1[2] + src2[2] + src3[2] + src4[2] + r)>>2;
- dst[3]= (src1[3] + src2[3] + src3[3] + src4[3] + r)>>2;
- dst[4]= (src1[4] + src2[4] + src3[4] + src4[4] + r)>>2;
- dst[5]= (src1[5] + src2[5] + src3[5] + src4[5] + r)>>2;
- dst[6]= (src1[6] + src2[6] + src3[6] + src4[6] + r)>>2;
- dst[7]= (src1[7] + src2[7] + src3[7] + src4[7] + r)>>2;
- dst+=dstStride;
- src1+=srcStride;
- src2+=8;
- src3+=8;
- src4+=8;
- }
-}
-
-#define QPEL_MC(r, name) \
-static void qpel_mc00_c ## name (UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int mx, int my)\
-{\
- put_block(dst, src, dstStride, srcStride);\
+#define QPEL_MC(r, OPNAME, RND, OP) \
+static void OPNAME ## mpeg4_qpel8_h_lowpass(UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int h){\
+ UINT8 *cm = cropTbl + MAX_NEG_CROP;\
+ int i;\
+ for(i=0; i<h; i++)\
+ {\
+ OP(dst[0], (src[0]+src[1])*20 - (src[0]+src[2])*6 + (src[1]+src[3])*3 - (src[2]+src[4]));\
+ OP(dst[1], (src[1]+src[2])*20 - (src[0]+src[3])*6 + (src[0]+src[4])*3 - (src[1]+src[5]));\
+ OP(dst[2], (src[2]+src[3])*20 - (src[1]+src[4])*6 + (src[0]+src[5])*3 - (src[0]+src[6]));\
+ OP(dst[3], (src[3]+src[4])*20 - (src[2]+src[5])*6 + (src[1]+src[6])*3 - (src[0]+src[7]));\
+ OP(dst[4], (src[4]+src[5])*20 - (src[3]+src[6])*6 + (src[2]+src[7])*3 - (src[1]+src[8]));\
+ OP(dst[5], (src[5]+src[6])*20 - (src[4]+src[7])*6 + (src[3]+src[8])*3 - (src[2]+src[8]));\
+ OP(dst[6], (src[6]+src[7])*20 - (src[5]+src[8])*6 + (src[4]+src[8])*3 - (src[3]+src[7]));\
+ OP(dst[7], (src[7]+src[8])*20 - (src[6]+src[8])*6 + (src[5]+src[7])*3 - (src[4]+src[6]));\
+ dst+=dstStride;\
+ src+=srcStride;\
+ }\
}\
\
-static void qpel_mc10_c ## name (UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int mx, int my)\
-{\
+static void OPNAME ## mpeg4_qpel8_v_lowpass(UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int w){\
+ UINT8 *cm = cropTbl + MAX_NEG_CROP;\
+ int i;\
+ for(i=0; i<w; i++)\
+ {\
+ const int src0= src[0*srcStride];\
+ const int src1= src[1*srcStride];\
+ const int src2= src[2*srcStride];\
+ const int src3= src[3*srcStride];\
+ const int src4= src[4*srcStride];\
+ const int src5= src[5*srcStride];\
+ const int src6= src[6*srcStride];\
+ const int src7= src[7*srcStride];\
+ const int src8= src[8*srcStride];\
+ OP(dst[0*dstStride], (src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4));\
+ OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5));\
+ OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6));\
+ OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7));\
+ OP(dst[4*dstStride], (src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8));\
+ OP(dst[5*dstStride], (src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8));\
+ OP(dst[6*dstStride], (src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7));\
+ OP(dst[7*dstStride], (src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6));\
+ dst++;\
+ src++;\
+ }\
+}\
+\
+static void OPNAME ## mpeg4_qpel16_h_lowpass(UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int h){\
+ UINT8 *cm = cropTbl + MAX_NEG_CROP;\
+ int i;\
+ for(i=0; i<h; i++)\
+ {\
+ OP(dst[ 0], (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]));\
+ OP(dst[ 1], (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]));\
+ OP(dst[ 2], (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]));\
+ OP(dst[ 3], (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]));\
+ OP(dst[ 4], (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]));\
+ OP(dst[ 5], (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]));\
+ OP(dst[ 6], (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]));\
+ OP(dst[ 7], (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]));\
+ OP(dst[ 8], (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]));\
+ OP(dst[ 9], (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]));\
+ OP(dst[10], (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]));\
+ OP(dst[11], (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]));\
+ OP(dst[12], (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]));\
+ OP(dst[13], (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]));\
+ OP(dst[14], (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]));\
+ OP(dst[15], (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]));\
+ dst+=dstStride;\
+ src+=srcStride;\
+ }\
+}\
+\
+static void OPNAME ## mpeg4_qpel16_v_lowpass(UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int w){\
+ UINT8 *cm = cropTbl + MAX_NEG_CROP;\
+ int i;\
+ for(i=0; i<w; i++)\
+ {\
+ const int src0= src[0*srcStride];\
+ const int src1= src[1*srcStride];\
+ const int src2= src[2*srcStride];\
+ const int src3= src[3*srcStride];\
+ const int src4= src[4*srcStride];\
+ const int src5= src[5*srcStride];\
+ const int src6= src[6*srcStride];\
+ const int src7= src[7*srcStride];\
+ const int src8= src[8*srcStride];\
+ const int src9= src[9*srcStride];\
+ const int src10= src[10*srcStride];\
+ const int src11= src[11*srcStride];\
+ const int src12= src[12*srcStride];\
+ const int src13= src[13*srcStride];\
+ const int src14= src[14*srcStride];\
+ const int src15= src[15*srcStride];\
+ const int src16= src[16*srcStride];\
+ OP(dst[ 0*dstStride], (src0 +src1 )*20 - (src0 +src2 )*6 + (src1 +src3 )*3 - (src2 +src4 ));\
+ OP(dst[ 1*dstStride], (src1 +src2 )*20 - (src0 +src3 )*6 + (src0 +src4 )*3 - (src1 +src5 ));\
+ OP(dst[ 2*dstStride], (src2 +src3 )*20 - (src1 +src4 )*6 + (src0 +src5 )*3 - (src0 +src6 ));\
+ OP(dst[ 3*dstStride], (src3 +src4 )*20 - (src2 +src5 )*6 + (src1 +src6 )*3 - (src0 +src7 ));\
+ OP(dst[ 4*dstStride], (src4 +src5 )*20 - (src3 +src6 )*6 + (src2 +src7 )*3 - (src1 +src8 ));\
+ OP(dst[ 5*dstStride], (src5 +src6 )*20 - (src4 +src7 )*6 + (src3 +src8 )*3 - (src2 +src9 ));\
+ OP(dst[ 6*dstStride], (src6 +src7 )*20 - (src5 +src8 )*6 + (src4 +src9 )*3 - (src3 +src10));\
+ OP(dst[ 7*dstStride], (src7 +src8 )*20 - (src6 +src9 )*6 + (src5 +src10)*3 - (src4 +src11));\
+ OP(dst[ 8*dstStride], (src8 +src9 )*20 - (src7 +src10)*6 + (src6 +src11)*3 - (src5 +src12));\
+ OP(dst[ 9*dstStride], (src9 +src10)*20 - (src8 +src11)*6 + (src7 +src12)*3 - (src6 +src13));\
+ OP(dst[10*dstStride], (src10+src11)*20 - (src9 +src12)*6 + (src8 +src13)*3 - (src7 +src14));\
+ OP(dst[11*dstStride], (src11+src12)*20 - (src10+src13)*6 + (src9 +src14)*3 - (src8 +src15));\
+ OP(dst[12*dstStride], (src12+src13)*20 - (src11+src14)*6 + (src10+src15)*3 - (src9 +src16));\
+ OP(dst[13*dstStride], (src13+src14)*20 - (src12+src15)*6 + (src11+src16)*3 - (src10+src16));\
+ OP(dst[14*dstStride], (src14+src15)*20 - (src13+src16)*6 + (src12+src16)*3 - (src11+src15));\
+ OP(dst[15*dstStride], (src15+src16)*20 - (src14+src16)*6 + (src13+src15)*3 - (src12+src14));\
+ dst++;\
+ src++;\
+ }\
+}\
+\
+static void OPNAME ## qpel8_mc00_c (UINT8 *dst, UINT8 *src, int stride){\
+ OPNAME ## pixels8(dst, src, stride, 8);\
+}\
+\
+static void OPNAME ## qpel8_mc10_c(UINT8 *dst, UINT8 *src, int stride){\
UINT8 half[64];\
- qpel_h_lowpass(half, src, 8, srcStride, 8, 16-r);\
- avg2_block(dst, src, half, dstStride, srcStride, 1-r);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
+ OPNAME ## pixels8_l2(dst, src, half, stride, stride, 8, 8);\
}\
\
-static void qpel_mc20_c ## name (UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int mx, int my)\
-{\
- qpel_h_lowpass(dst, src, dstStride, srcStride, 8, 16-r);\
+static void OPNAME ## qpel8_mc20_c(UINT8 *dst, UINT8 *src, int stride){\
+ OPNAME ## mpeg4_qpel8_h_lowpass(dst, src, stride, stride, 8);\
}\
\
-static void qpel_mc30_c ## name (UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int mx, int my)\
-{\
+static void OPNAME ## qpel8_mc30_c(UINT8 *dst, UINT8 *src, int stride){\
UINT8 half[64];\
- qpel_h_lowpass(half, src, 8, srcStride, 8, 16-r);\
- avg2_block(dst, src+1, half, dstStride, srcStride, 1-r);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
+ OPNAME ## pixels8_l2(dst, src+1, half, stride, stride, 8, 8);\
}\
\
-static void qpel_mc01_c ## name (UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int mx, int my)\
-{\
+static void OPNAME ## qpel8_mc01_c(UINT8 *dst, UINT8 *src, int stride){\
+ UINT8 full[16*9];\
UINT8 half[64];\
- qpel_v_lowpass(half, src, 8, srcStride, 8, 16-r);\
- avg2_block(dst, src, half, dstStride, srcStride, 1-r);\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16, 8);\
+ OPNAME ## pixels8_l2(dst, full, half, stride, 16, 8, 8);\
}\
\
-static void qpel_mc02_c ## name (UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int mx, int my)\
-{\
- qpel_v_lowpass(dst, src, dstStride, srcStride, 8, 16-r);\
+static void OPNAME ## qpel8_mc02_c(UINT8 *dst, UINT8 *src, int stride){\
+ UINT8 full[16*9];\
+ copy_block9(full, src, 16, stride, 9);\
+ OPNAME ## mpeg4_qpel8_v_lowpass(dst, full, stride, 16, 8);\
}\
\
-static void qpel_mc03_c ## name (UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int mx, int my)\
-{\
+static void OPNAME ## qpel8_mc03_c(UINT8 *dst, UINT8 *src, int stride){\
+ UINT8 full[16*9];\
UINT8 half[64];\
- qpel_v_lowpass(half, src, 8, srcStride, 8, 16-r);\
- avg2_block(dst, src+srcStride, half, dstStride, srcStride, 1-r);\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16, 8);\
+ OPNAME ## pixels8_l2(dst, full+16, half, stride, 16, 8, 8);\
}\
-static void qpel_mc11_c ## name (UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int mx, int my)\
-{\
+static void OPNAME ## qpel8_mc11_c(UINT8 *dst, UINT8 *src, int stride){\
+ UINT8 full[16*9];\
UINT8 halfH[72];\
UINT8 halfV[64];\
UINT8 halfHV[64];\
- qpel_h_lowpass(halfH, src, 8, srcStride, 9, 16-r);\
- qpel_v_lowpass(halfV, src, 8, srcStride, 8, 16-r);\
- qpel_v_lowpass(halfHV, halfH, 8, 8, 8, 16-r);\
- avg4_block(dst, src, halfH, halfV, halfHV, dstStride, srcStride, 2-r);\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16, 8);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8, 8);\
+ OPNAME ## pixels8_l4(dst, full, halfH, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
}\
-static void qpel_mc31_c ## name (UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int mx, int my)\
-{\
+static void OPNAME ## qpel8_mc31_c(UINT8 *dst, UINT8 *src, int stride){\
+ UINT8 full[16*9];\
UINT8 halfH[72];\
UINT8 halfV[64];\
UINT8 halfHV[64];\
- qpel_h_lowpass(halfH, src, 8, srcStride, 9, 16-r);\
- qpel_v_lowpass(halfV, src+1, 8, srcStride, 8, 16-r);\
- qpel_v_lowpass(halfHV, halfH, 8, 8, 8, 16-r);\
- avg4_block(dst, src+1, halfH, halfV, halfHV, dstStride, srcStride, 2-r);\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16, 8);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8, 8);\
+ OPNAME ## pixels8_l4(dst, full+1, halfH, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
}\
-static void qpel_mc13_c ## name (UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int mx, int my)\
-{\
+static void OPNAME ## qpel8_mc13_c(UINT8 *dst, UINT8 *src, int stride){\
+ UINT8 full[16*9];\
UINT8 halfH[72];\
UINT8 halfV[64];\
UINT8 halfHV[64];\
- qpel_h_lowpass(halfH, src, 8, srcStride, 9, 16-r);\
- qpel_v_lowpass(halfV, src, 8, srcStride, 8, 16-r);\
- qpel_v_lowpass(halfHV, halfH, 8, 8, 8, 16-r);\
- avg4_block(dst, src+srcStride, halfH+8, halfV, halfHV, dstStride, srcStride, 2-r);\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16, 8);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8, 8);\
+ OPNAME ## pixels8_l4(dst, full+16, halfH+8, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
}\
-static void qpel_mc33_c ## name (UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int mx, int my)\
-{\
+static void OPNAME ## qpel8_mc33_c(UINT8 *dst, UINT8 *src, int stride){\
+ UINT8 full[16*9];\
UINT8 halfH[72];\
UINT8 halfV[64];\
UINT8 halfHV[64];\
- qpel_h_lowpass(halfH, src, 8, srcStride, 9, 16-r);\
- qpel_v_lowpass(halfV, src+1, 8, srcStride, 8, 16-r);\
- qpel_v_lowpass(halfHV, halfH, 8, 8, 8, 16-r);\
- avg4_block(dst, src+srcStride+1, halfH+8, halfV, halfHV, dstStride, srcStride, 2-r);\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full , 8, 16, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16, 8);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8, 8);\
+ OPNAME ## pixels8_l4(dst, full+17, halfH+8, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
}\
-static void qpel_mc21_c ## name (UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int mx, int my)\
-{\
+static void OPNAME ## qpel8_mc21_c(UINT8 *dst, UINT8 *src, int stride){\
UINT8 halfH[72];\
UINT8 halfHV[64];\
- qpel_h_lowpass(halfH, src, 8, srcStride, 9, 16-r);\
- qpel_v_lowpass(halfHV, halfH, 8, 8, 8, 16-r);\
- avg2_block(dst, halfH, halfHV, dstStride, 8, 1-r);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8, 8);\
+ OPNAME ## pixels8_l2(dst, halfH, halfHV, stride, 8, 8, 8);\
}\
-static void qpel_mc23_c ## name (UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int mx, int my)\
-{\
+static void OPNAME ## qpel8_mc23_c(UINT8 *dst, UINT8 *src, int stride){\
UINT8 halfH[72];\
UINT8 halfHV[64];\
- qpel_h_lowpass(halfH, src, 8, srcStride, 9, 16-r);\
- qpel_v_lowpass(halfHV, halfH, 8, 8, 8, 16-r);\
- avg2_block(dst, halfH+8, halfHV, dstStride, 8, 1-r);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8, 8);\
+ OPNAME ## pixels8_l2(dst, halfH+8, halfHV, stride, 8, 8, 8);\
}\
-static void qpel_mc12_c ## name (UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int mx, int my)\
-{\
+static void OPNAME ## qpel8_mc12_c(UINT8 *dst, UINT8 *src, int stride){\
+ UINT8 full[16*9];\
UINT8 halfH[72];\
UINT8 halfV[64];\
UINT8 halfHV[64];\
- qpel_h_lowpass(halfH, src, 8, srcStride, 9, 16-r);\
- qpel_v_lowpass(halfV, src, 8, srcStride, 8, 16-r);\
- qpel_v_lowpass(halfHV, halfH, 8, 8, 8, 16-r);\
- avg2_block(dst, halfV, halfHV, dstStride, 8, 1-r);\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16, 8);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8, 8);\
+ OPNAME ## pixels8_l2(dst, halfV, halfHV, stride, 8, 8, 8);\
}\
-static void qpel_mc32_c ## name (UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int mx, int my)\
-{\
+static void OPNAME ## qpel8_mc32_c(UINT8 *dst, UINT8 *src, int stride){\
+ UINT8 full[16*9];\
UINT8 halfH[72];\
UINT8 halfV[64];\
UINT8 halfHV[64];\
- qpel_h_lowpass(halfH, src, 8, srcStride, 9, 16-r);\
- qpel_v_lowpass(halfV, src+1, 8, srcStride, 8, 16-r);\
- qpel_v_lowpass(halfHV, halfH, 8, 8, 8, 16-r);\
- avg2_block(dst, halfV, halfHV, dstStride, 8, 1-r);\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16, 8);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8, 8);\
+ OPNAME ## pixels8_l2(dst, halfV, halfHV, stride, 8, 8, 8);\
}\
-static void qpel_mc22_c ## name (UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int mx, int my)\
-{\
+static void OPNAME ## qpel8_mc22_c(UINT8 *dst, UINT8 *src, int stride){\
UINT8 halfH[72];\
- qpel_h_lowpass(halfH, src, 8, srcStride, 9, 16-r);\
- qpel_v_lowpass(dst, halfH, dstStride, 8, 8, 16-r);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
+ OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8, 8);\
+}\
+static void OPNAME ## qpel16_mc00_c (UINT8 *dst, UINT8 *src, int stride){\
+ OPNAME ## pixels16(dst, src, stride, 16);\
+}\
+\
+static void OPNAME ## qpel16_mc10_c(UINT8 *dst, UINT8 *src, int stride){\
+ UINT8 half[256];\
+ put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
+ OPNAME ## pixels16_l2(dst, src, half, stride, stride, 16, 16);\
+}\
+\
+static void OPNAME ## qpel16_mc20_c(UINT8 *dst, UINT8 *src, int stride){\
+ OPNAME ## mpeg4_qpel16_h_lowpass(dst, src, stride, stride, 16);\
}\
-qpel_mc_func qpel_mc ## name ## _tab[16]={ \
- qpel_mc00_c ## name, \
- qpel_mc10_c ## name, \
- qpel_mc20_c ## name, \
- qpel_mc30_c ## name, \
- qpel_mc01_c ## name, \
- qpel_mc11_c ## name, \
- qpel_mc21_c ## name, \
- qpel_mc31_c ## name, \
- qpel_mc02_c ## name, \
- qpel_mc12_c ## name, \
- qpel_mc22_c ## name, \
- qpel_mc32_c ## name, \
- qpel_mc03_c ## name, \
- qpel_mc13_c ## name, \
- qpel_mc23_c ## name, \
- qpel_mc33_c ## name, \
+\
+static void OPNAME ## qpel16_mc30_c(UINT8 *dst, UINT8 *src, int stride){\
+ UINT8 half[256];\
+ put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
+ OPNAME ## pixels16_l2(dst, src+1, half, stride, stride, 16, 16);\
+}\
+\
+static void OPNAME ## qpel16_mc01_c(UINT8 *dst, UINT8 *src, int stride){\
+ UINT8 full[24*17];\
+ UINT8 half[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24, 16);\
+ OPNAME ## pixels16_l2(dst, full, half, stride, 24, 16, 16);\
+}\
+\
+static void OPNAME ## qpel16_mc02_c(UINT8 *dst, UINT8 *src, int stride){\
+ UINT8 full[24*17];\
+ copy_block17(full, src, 24, stride, 17);\
+ OPNAME ## mpeg4_qpel16_v_lowpass(dst, full, stride, 24, 16);\
+}\
+\
+static void OPNAME ## qpel16_mc03_c(UINT8 *dst, UINT8 *src, int stride){\
+ UINT8 full[24*17];\
+ UINT8 half[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24, 16);\
+ OPNAME ## pixels16_l2(dst, full+24, half, stride, 24, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc11_c(UINT8 *dst, UINT8 *src, int stride){\
+ UINT8 full[24*17];\
+ UINT8 halfH[272];\
+ UINT8 halfV[256];\
+ UINT8 halfHV[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24, 16);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16, 16);\
+ OPNAME ## pixels16_l4(dst, full, halfH, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc31_c(UINT8 *dst, UINT8 *src, int stride){\
+ UINT8 full[24*17];\
+ UINT8 halfH[272];\
+ UINT8 halfV[256];\
+ UINT8 halfHV[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24, 16);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16, 16);\
+ OPNAME ## pixels16_l4(dst, full+1, halfH, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc13_c(UINT8 *dst, UINT8 *src, int stride){\
+ UINT8 full[24*17];\
+ UINT8 halfH[272];\
+ UINT8 halfV[256];\
+ UINT8 halfHV[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24, 16);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16, 16);\
+ OPNAME ## pixels16_l4(dst, full+24, halfH+16, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc33_c(UINT8 *dst, UINT8 *src, int stride){\
+ UINT8 full[24*17];\
+ UINT8 halfH[272];\
+ UINT8 halfV[256];\
+ UINT8 halfHV[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full , 16, 24, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24, 16);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16, 16);\
+ OPNAME ## pixels16_l4(dst, full+25, halfH+16, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc21_c(UINT8 *dst, UINT8 *src, int stride){\
+ UINT8 halfH[272];\
+ UINT8 halfHV[256];\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16, 16);\
+ OPNAME ## pixels16_l2(dst, halfH, halfHV, stride, 16, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc23_c(UINT8 *dst, UINT8 *src, int stride){\
+ UINT8 halfH[272];\
+ UINT8 halfHV[256];\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16, 16);\
+ OPNAME ## pixels16_l2(dst, halfH+16, halfHV, stride, 16, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc12_c(UINT8 *dst, UINT8 *src, int stride){\
+ UINT8 full[24*17];\
+ UINT8 halfH[272];\
+ UINT8 halfV[256];\
+ UINT8 halfHV[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24, 16);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16, 16);\
+ OPNAME ## pixels16_l2(dst, halfV, halfHV, stride, 16, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc32_c(UINT8 *dst, UINT8 *src, int stride){\
+ UINT8 full[24*17];\
+ UINT8 halfH[272];\
+ UINT8 halfV[256];\
+ UINT8 halfHV[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24, 16);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16, 16);\
+ OPNAME ## pixels16_l2(dst, halfV, halfHV, stride, 16, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc22_c(UINT8 *dst, UINT8 *src, int stride){\
+ UINT8 halfH[272];\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
+ OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16, 16);\
+}\
+qpel_mc_func OPNAME ## qpel_pixels_tab[2][16]={ \
+ {\
+ OPNAME ## qpel16_mc00_c, \
+ OPNAME ## qpel16_mc10_c, \
+ OPNAME ## qpel16_mc20_c, \
+ OPNAME ## qpel16_mc30_c, \
+ OPNAME ## qpel16_mc01_c, \
+ OPNAME ## qpel16_mc11_c, \
+ OPNAME ## qpel16_mc21_c, \
+ OPNAME ## qpel16_mc31_c, \
+ OPNAME ## qpel16_mc02_c, \
+ OPNAME ## qpel16_mc12_c, \
+ OPNAME ## qpel16_mc22_c, \
+ OPNAME ## qpel16_mc32_c, \
+ OPNAME ## qpel16_mc03_c, \
+ OPNAME ## qpel16_mc13_c, \
+ OPNAME ## qpel16_mc23_c, \
+ OPNAME ## qpel16_mc33_c, \
+ },{\
+ OPNAME ## qpel8_mc00_c, \
+ OPNAME ## qpel8_mc10_c, \
+ OPNAME ## qpel8_mc20_c, \
+ OPNAME ## qpel8_mc30_c, \
+ OPNAME ## qpel8_mc01_c, \
+ OPNAME ## qpel8_mc11_c, \
+ OPNAME ## qpel8_mc21_c, \
+ OPNAME ## qpel8_mc31_c, \
+ OPNAME ## qpel8_mc02_c, \
+ OPNAME ## qpel8_mc12_c, \
+ OPNAME ## qpel8_mc22_c, \
+ OPNAME ## qpel8_mc32_c, \
+ OPNAME ## qpel8_mc03_c, \
+ OPNAME ## qpel8_mc13_c, \
+ OPNAME ## qpel8_mc23_c, \
+ OPNAME ## qpel8_mc33_c, \
+ }\
};
-QPEL_MC(0, _rnd)
-QPEL_MC(1, _no_rnd)
+#define op_avg(a, b) a = (((a)+cm[((b) + 16)>>5]+1)>>1)
+#define op_avg_no_rnd(a, b) a = (((a)+cm[((b) + 15)>>5])>>1)
+#define op_put(a, b) a = cm[((b) + 16)>>5]
+#define op_put_no_rnd(a, b) a = cm[((b) + 15)>>5]
+
+QPEL_MC(0, put_ , _ , op_put)
+QPEL_MC(1, put_no_rnd_, _no_rnd_, op_put_no_rnd)
+QPEL_MC(0, avg_ , _ , op_avg)
+//QPEL_MC(1, avg_no_rnd , _ , op_avg)
+#undef op_avg
+#undef op_avg_no_rnd
+#undef op_put
+#undef op_put_no_rnd
int pix_abs16x16_c(UINT8 *pix1, UINT8 *pix2, int line_size)
{
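
The rewritten QPEL_MC macro above folds the old qpel_h/v_lowpass plus avg2/avg4_block pairs into one template whose per-pixel store is the OP() parameter: op_put clips the 20/-6/3/-1 filter sum through the crop table with a +16 bias before the >>5, op_put_no_rnd uses +15, and op_avg additionally averages the clipped value with the destination pixel. A small numeric check of the two biases, restating the op_* definitions from the hunk above (illustration only):

    /* The 8-tap coefficients sum to 32, so a flat area of value p gives
     * b = 32*p and both variants return p.  They differ only when the exact
     * result lands on .5, i.e. b = 32*p + 16:
     *   op_put:        (32*p + 16 + 16) >> 5 == p + 1   (rounds .5 up)
     *   op_put_no_rnd: (32*p + 16 + 15) >> 5 == p       (rounds .5 down) */
    #define op_put(a, b)        a = cm[((b) + 16) >> 5]
    #define op_put_no_rnd(a, b) a = cm[((b) + 15) >> 5]
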
diff --git a/libavcodec/dsputil.h b/libavcodec/dsputil.h
index 8703203269..0ec9d5cccb 100644
--- a/libavcodec/dsputil.h
+++ b/libavcodec/dsputil.h
@@ -74,14 +74,22 @@ void clear_blocks_c(DCTELEM *blocks);
/* add and put pixel (decoding) */
typedef void (*op_pixels_func)(UINT8 *block, const UINT8 *pixels, int line_size, int h);
-typedef void (*qpel_mc_func)(UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int mx, int my);
-
-extern op_pixels_func put_pixels_tab[4];
-extern op_pixels_func avg_pixels_tab[4];
-extern op_pixels_func put_no_rnd_pixels_tab[4];
-extern op_pixels_func avg_no_rnd_pixels_tab[4];
-extern qpel_mc_func qpel_mc_rnd_tab[16];
-extern qpel_mc_func qpel_mc_no_rnd_tab[16];
+typedef void (*qpel_mc_func)(UINT8 *dst, UINT8 *src, int stride);
+
+extern op_pixels_func put_pixels_tab[2][4];
+extern op_pixels_func avg_pixels_tab[2][4];
+extern op_pixels_func put_no_rnd_pixels_tab[2][4];
+extern op_pixels_func avg_no_rnd_pixels_tab[2][4];
+extern qpel_mc_func put_qpel_pixels_tab[2][16];
+extern qpel_mc_func avg_qpel_pixels_tab[2][16];
+extern qpel_mc_func put_no_rnd_qpel_pixels_tab[2][16];
+extern qpel_mc_func avg_no_rnd_qpel_pixels_tab[2][16];
+
+#define CALL_2X_PIXELS(a, b, n)\
+static void a(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
+ b(block , pixels , line_size, h);\
+ b(block+n, pixels+n, line_size, h);\
+}
/* motion estimation */
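
For callers (mpegvideo.c and motion_est.c are updated in this commit), the first index of the new [2][4] tables selects the block size and the second the half-pel case, judging from the table initializers in dsputil.c above. A hypothetical use (index meanings assumed from the initializer order, not taken from the callers themselves):

    /* [0] = 16x16 functions, [1] = 8x8; second index: 0 = full-pel,
     * 1 = half-pel x, 2 = half-pel y, 3 = half-pel x+y. */
    put_pixels_tab[0][1](dst, src, line_size, 16);   /* 16x16, half-pel x  */
    avg_pixels_tab[1][0](dst, src, line_size,  8);   /*  8x8, full-pel avg */
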
diff --git a/libavcodec/i386/dsputil_mmx.c b/libavcodec/i386/dsputil_mmx.c
index fed1818743..4336e4bde0 100644
--- a/libavcodec/i386/dsputil_mmx.c
+++ b/libavcodec/i386/dsputil_mmx.c
@@ -343,7 +343,7 @@ static void add_pixels_clamped_mmx(const DCTELEM *block, UINT8 *pixels, int line
} while (--i);
}
-static void put_pixels_mmx(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void put_pixels8_mmx(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
__asm __volatile(
"lea (%3, %3), %%eax \n\t"
@@ -369,6 +369,40 @@ static void put_pixels_mmx(UINT8 *block, const UINT8 *pixels, int line_size, int
);
}
+static void put_pixels16_mmx(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ __asm __volatile(
+ "lea (%3, %3), %%eax \n\t"
+ ".balign 8 \n\t"
+ "1: \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq 8(%1), %%mm4 \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq 8(%1, %3), %%mm5 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm4, 8(%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "movq %%mm5, 8(%2, %3) \n\t"
+ "addl %%eax, %1 \n\t"
+ "addl %%eax, %2 \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq 8(%1), %%mm4 \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq 8(%1, %3), %%mm5 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm4, 8(%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "movq %%mm5, 8(%2, %3) \n\t"
+ "addl %%eax, %1 \n\t"
+ "addl %%eax, %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ : "+g"(h), "+r" (pixels), "+r" (block)
+ : "r"(line_size)
+ : "%eax", "memory"
+ );
+}
+
static void clear_blocks_mmx(DCTELEM *blocks)
{
__asm __volatile(
@@ -424,25 +458,45 @@ void dsputil_init_mmx(void)
pix_abs8x8_y2 = pix_abs8x8_y2_mmx;
pix_abs8x8_xy2= pix_abs8x8_xy2_mmx;
- put_pixels_tab[0] = put_pixels_mmx;
- put_pixels_tab[1] = put_pixels_x2_mmx;
- put_pixels_tab[2] = put_pixels_y2_mmx;
- put_pixels_tab[3] = put_pixels_xy2_mmx;
-
- put_no_rnd_pixels_tab[0] = put_pixels_mmx;
- put_no_rnd_pixels_tab[1] = put_no_rnd_pixels_x2_mmx;
- put_no_rnd_pixels_tab[2] = put_no_rnd_pixels_y2_mmx;
- put_no_rnd_pixels_tab[3] = put_no_rnd_pixels_xy2_mmx;
-
- avg_pixels_tab[0] = avg_pixels_mmx;
- avg_pixels_tab[1] = avg_pixels_x2_mmx;
- avg_pixels_tab[2] = avg_pixels_y2_mmx;
- avg_pixels_tab[3] = avg_pixels_xy2_mmx;
-
- avg_no_rnd_pixels_tab[0] = avg_no_rnd_pixels_mmx;
- avg_no_rnd_pixels_tab[1] = avg_no_rnd_pixels_x2_mmx;
- avg_no_rnd_pixels_tab[2] = avg_no_rnd_pixels_y2_mmx;
- avg_no_rnd_pixels_tab[3] = avg_no_rnd_pixels_xy2_mmx;
+ put_pixels_tab[0][0] = put_pixels16_mmx;
+ put_pixels_tab[0][1] = put_pixels16_x2_mmx;
+ put_pixels_tab[0][2] = put_pixels16_y2_mmx;
+ put_pixels_tab[0][3] = put_pixels16_xy2_mmx;
+
+ put_no_rnd_pixels_tab[0][0] = put_pixels16_mmx;
+ put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx;
+ put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx;
+ put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_mmx;
+
+ avg_pixels_tab[0][0] = avg_pixels16_mmx;
+ avg_pixels_tab[0][1] = avg_pixels16_x2_mmx;
+ avg_pixels_tab[0][2] = avg_pixels16_y2_mmx;
+ avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx;
+
+ avg_no_rnd_pixels_tab[0][0] = avg_no_rnd_pixels16_mmx;
+ avg_no_rnd_pixels_tab[0][1] = avg_no_rnd_pixels16_x2_mmx;
+ avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y2_mmx;
+ avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy2_mmx;
+
+ put_pixels_tab[1][0] = put_pixels8_mmx;
+ put_pixels_tab[1][1] = put_pixels8_x2_mmx;
+ put_pixels_tab[1][2] = put_pixels8_y2_mmx;
+ put_pixels_tab[1][3] = put_pixels8_xy2_mmx;
+
+ put_no_rnd_pixels_tab[1][0] = put_pixels8_mmx;
+ put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx;
+ put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx;
+ put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_mmx;
+
+ avg_pixels_tab[1][0] = avg_pixels8_mmx;
+ avg_pixels_tab[1][1] = avg_pixels8_x2_mmx;
+ avg_pixels_tab[1][2] = avg_pixels8_y2_mmx;
+ avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx;
+
+ avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels8_mmx;
+ avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels8_x2_mmx;
+ avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels8_y2_mmx;
+ avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels8_xy2_mmx;
if (mm_flags & MM_MMXEXT) {
pix_abs16x16 = pix_abs16x16_mmx2;
@@ -455,25 +509,45 @@ void dsputil_init_mmx(void)
pix_abs8x8_y2 = pix_abs8x8_y2_mmx2;
pix_abs8x8_xy2= pix_abs8x8_xy2_mmx2;
- put_pixels_tab[1] = put_pixels_x2_mmx2;
- put_pixels_tab[2] = put_pixels_y2_mmx2;
- put_no_rnd_pixels_tab[1] = put_no_rnd_pixels_x2_mmx2;
- put_no_rnd_pixels_tab[2] = put_no_rnd_pixels_y2_mmx2;
-
- avg_pixels_tab[0] = avg_pixels_mmx2;
- avg_pixels_tab[1] = avg_pixels_x2_mmx2;
- avg_pixels_tab[2] = avg_pixels_y2_mmx2;
- avg_pixels_tab[3] = avg_pixels_xy2_mmx2;
+ put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
+ put_pixels_tab[0][2] = put_pixels16_y2_mmx2;
+ put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
+ put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
+
+ avg_pixels_tab[0][0] = avg_pixels16_mmx2;
+ avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
+ avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;
+ avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
+
+ put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
+ put_pixels_tab[1][2] = put_pixels8_y2_mmx2;
+ put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
+ put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
+
+ avg_pixels_tab[1][0] = avg_pixels8_mmx2;
+ avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
+ avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;
+ avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
} else if (mm_flags & MM_3DNOW) {
- put_pixels_tab[1] = put_pixels_x2_3dnow;
- put_pixels_tab[2] = put_pixels_y2_3dnow;
- put_no_rnd_pixels_tab[1] = put_no_rnd_pixels_x2_3dnow;
- put_no_rnd_pixels_tab[2] = put_no_rnd_pixels_y2_3dnow;
-
- avg_pixels_tab[0] = avg_pixels_3dnow;
- avg_pixels_tab[1] = avg_pixels_x2_3dnow;
- avg_pixels_tab[2] = avg_pixels_y2_3dnow;
- avg_pixels_tab[3] = avg_pixels_xy2_3dnow;
+ put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
+ put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
+ put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
+ put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
+
+ avg_pixels_tab[0][0] = avg_pixels16_3dnow;
+ avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
+ avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
+ avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
+
+ put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
+ put_pixels_tab[1][2] = put_pixels8_y2_3dnow;
+ put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
+ put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
+
+ avg_pixels_tab[1][0] = avg_pixels8_3dnow;
+ avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
+ avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;
+ avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
}
/* idct */
@@ -552,21 +626,22 @@ void bit_exact_idct_put(UINT8 *dest, int line_size, INT16 *block){
void dsputil_set_bit_exact_mmx(void)
{
if (mm_flags & MM_MMX) {
- if (mm_flags & MM_MMXEXT) {
- put_no_rnd_pixels_tab[1] = put_no_rnd_pixels_x2_mmx;
- put_no_rnd_pixels_tab[2] = put_no_rnd_pixels_y2_mmx;
- avg_pixels_tab[3] = avg_pixels_xy2_mmx;
+
+ /* MMX2 & 3DNOW */
+ put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx;
+ put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx;
+ avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx;
+ put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx;
+ put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx;
+ avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx;
+ if (mm_flags & MM_MMXEXT) {
pix_abs16x16_x2 = pix_abs16x16_x2_mmx;
pix_abs16x16_y2 = pix_abs16x16_y2_mmx;
pix_abs16x16_xy2 = pix_abs16x16_xy2_mmx;
pix_abs8x8_x2 = pix_abs8x8_x2_mmx;
pix_abs8x8_y2 = pix_abs8x8_y2_mmx;
pix_abs8x8_xy2= pix_abs8x8_xy2_mmx;
- } else if (mm_flags & MM_3DNOW) {
- put_no_rnd_pixels_tab[1] = put_no_rnd_pixels_x2_mmx;
- put_no_rnd_pixels_tab[2] = put_no_rnd_pixels_y2_mmx;
- avg_pixels_tab[3] = avg_pixels_xy2_mmx;
}
#ifdef SIMPLE_IDCT
if(ff_idct_put==gen_idct_put && ff_idct == simple_idct_mmx)
diff --git a/libavcodec/i386/dsputil_mmx_avg.h b/libavcodec/i386/dsputil_mmx_avg.h
index a16ccc88b0..6873432ce8 100644
--- a/libavcodec/i386/dsputil_mmx_avg.h
+++ b/libavcodec/i386/dsputil_mmx_avg.h
@@ -25,7 +25,7 @@
/* XXX: we use explicit registers to avoid a gcc 2.95.2 register asm
clobber bug - now it will work with 2.95.2 and also with -fPIC
*/
-static void DEF(put_pixels_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(put_pixels8_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
__asm __volatile(
"lea (%3, %3), %%eax \n\t"
@@ -52,9 +52,49 @@ static void DEF(put_pixels_x2)(UINT8 *block, const UINT8 *pixels, int line_size,
:"r" (line_size)
:"%eax", "memory");
}
+
+static void DEF(put_pixels16_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ __asm __volatile(
+ "lea (%3, %3), %%eax \n\t"
+ "1: \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq 8(%1), %%mm2 \n\t"
+ "movq 8(%1, %3), %%mm3 \n\t"
+ PAVGB" 1(%1), %%mm0 \n\t"
+ PAVGB" 1(%1, %3), %%mm1 \n\t"
+ PAVGB" 9(%1), %%mm2 \n\t"
+ PAVGB" 9(%1, %3), %%mm3 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "movq %%mm2, 8(%2) \n\t"
+ "movq %%mm3, 8(%2, %3) \n\t"
+ "addl %%eax, %1 \n\t"
+ "addl %%eax, %2 \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq 8(%1), %%mm2 \n\t"
+ "movq 8(%1, %3), %%mm3 \n\t"
+ PAVGB" 1(%1), %%mm0 \n\t"
+ PAVGB" 1(%1, %3), %%mm1 \n\t"
+ PAVGB" 9(%1), %%mm2 \n\t"
+ PAVGB" 9(%1, %3), %%mm3 \n\t"
+ "addl %%eax, %1 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "movq %%mm2, 8(%2) \n\t"
+ "movq %%mm3, 8(%2, %3) \n\t"
+ "addl %%eax, %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels), "+D"(block)
+ :"r" (line_size)
+ :"%eax", "memory");
+}
/* GL: this function does incorrect rounding if overflow */
-static void DEF(put_no_rnd_pixels_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(put_no_rnd_pixels8_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
MOVQ_BONE(mm6);
__asm __volatile(
@@ -91,7 +131,7 @@ static void DEF(put_no_rnd_pixels_x2)(UINT8 *block, const UINT8 *pixels, int lin
:"%eax", "memory");
}
-static void DEF(put_pixels_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(put_pixels8_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
__asm __volatile(
"lea (%3, %3), %%eax \n\t"
@@ -122,7 +162,7 @@ static void DEF(put_pixels_y2)(UINT8 *block, const UINT8 *pixels, int line_size,
}
/* GL: this function does incorrect rounding if overflow */
-static void DEF(put_no_rnd_pixels_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(put_no_rnd_pixels8_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
MOVQ_BONE(mm6);
__asm __volatile(
@@ -155,7 +195,7 @@ static void DEF(put_no_rnd_pixels_y2)(UINT8 *block, const UINT8 *pixels, int lin
:"%eax", "memory");
}
-static void DEF(avg_pixels)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(avg_pixels8)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
__asm __volatile(
"lea (%3, %3), %%eax \n\t"
@@ -183,7 +223,7 @@ static void DEF(avg_pixels)(UINT8 *block, const UINT8 *pixels, int line_size, in
:"%eax", "memory");
}
-static void DEF(avg_pixels_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(avg_pixels8_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
__asm __volatile(
"lea (%3, %3), %%eax \n\t"
@@ -215,7 +255,7 @@ static void DEF(avg_pixels_x2)(UINT8 *block, const UINT8 *pixels, int line_size,
:"%eax", "memory");
}
-static void DEF(avg_pixels_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(avg_pixels8_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
__asm __volatile(
"lea (%3, %3), %%eax \n\t"
@@ -254,7 +294,7 @@ static void DEF(avg_pixels_y2)(UINT8 *block, const UINT8 *pixels, int line_size,
}
// Note this is not correctly rounded, but this function is only used for B-frames so it doesn't matter
-static void DEF(avg_pixels_xy2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(avg_pixels8_xy2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
MOVQ_BONE(mm6);
__asm __volatile(
@@ -294,3 +334,34 @@ static void DEF(avg_pixels_xy2)(UINT8 *block, const UINT8 *pixels, int line_size
:"r" (line_size)
:"%eax", "memory");
}
+
+//FIXME the following could be optimized too ...
+static void DEF(put_no_rnd_pixels16_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h){
+ DEF(put_no_rnd_pixels8_x2)(block , pixels , line_size, h);
+ DEF(put_no_rnd_pixels8_x2)(block+8, pixels+8, line_size, h);
+}
+static void DEF(put_pixels16_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h){
+ DEF(put_pixels8_y2)(block , pixels , line_size, h);
+ DEF(put_pixels8_y2)(block+8, pixels+8, line_size, h);
+}
+static void DEF(put_no_rnd_pixels16_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h){
+ DEF(put_no_rnd_pixels8_y2)(block , pixels , line_size, h);
+ DEF(put_no_rnd_pixels8_y2)(block+8, pixels+8, line_size, h);
+}
+static void DEF(avg_pixels16)(UINT8 *block, const UINT8 *pixels, int line_size, int h){
+ DEF(avg_pixels8)(block , pixels , line_size, h);
+ DEF(avg_pixels8)(block+8, pixels+8, line_size, h);
+}
+static void DEF(avg_pixels16_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h){
+ DEF(avg_pixels8_x2)(block , pixels , line_size, h);
+ DEF(avg_pixels8_x2)(block+8, pixels+8, line_size, h);
+}
+static void DEF(avg_pixels16_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h){
+ DEF(avg_pixels8_y2)(block , pixels , line_size, h);
+ DEF(avg_pixels8_y2)(block+8, pixels+8, line_size, h);
+}
+static void DEF(avg_pixels16_xy2)(UINT8 *block, const UINT8 *pixels, int line_size, int h){
+ DEF(avg_pixels8_xy2)(block , pixels , line_size, h);
+ DEF(avg_pixels8_xy2)(block+8, pixels+8, line_size, h);
+}
+
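The new MMX2/3DNOW put_pixels16_x2 above widens the x2 kernel to a full 16-pixel row (two movq loads per half, PAVGB against the source shifted by one byte) and unrolls four rows per loop iteration; the remaining 16-wide variants simply apply the 8-wide kernels at block and block+8. What the x2 kernel computes, written as a scalar reference for illustration only:

    #include <stdint.h>

    /* each output byte is the rounded average of a source pixel and its
       right-hand neighbour, i.e. the horizontal half-pel position */
    static void put_pixels16_x2_ref(uint8_t *block, const uint8_t *pixels,
                                    int line_size, int h)
    {
        for (int i = 0; i < h; i++) {
            for (int j = 0; j < 16; j++)
                block[j] = (uint8_t)((pixels[j] + pixels[j + 1] + 1) >> 1);
            block  += line_size;
            pixels += line_size;
        }
    }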
diff --git a/libavcodec/i386/dsputil_mmx_rnd.h b/libavcodec/i386/dsputil_mmx_rnd.h
index 873f4b3e1e..3605e03f9c 100644
--- a/libavcodec/i386/dsputil_mmx_rnd.h
+++ b/libavcodec/i386/dsputil_mmx_rnd.h
@@ -22,7 +22,7 @@
*/
// put_pixels
-static void DEF(put, pixels_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(put, pixels8_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
MOVQ_BFE(mm6);
__asm __volatile(
@@ -54,7 +54,53 @@ static void DEF(put, pixels_x2)(UINT8 *block, const UINT8 *pixels, int line_size
:"eax", "memory");
}
-static void DEF(put, pixels_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(put, pixels16_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ MOVQ_BFE(mm6);
+ __asm __volatile(
+ "lea (%3, %3), %%eax \n\t"
+ ".balign 8 \n\t"
+ "1: \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq 1(%1), %%mm1 \n\t"
+ "movq (%1, %3), %%mm2 \n\t"
+ "movq 1(%1, %3), %%mm3 \n\t"
+ PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
+ "movq %%mm4, (%2) \n\t"
+ "movq %%mm5, (%2, %3) \n\t"
+ "movq 8(%1), %%mm0 \n\t"
+ "movq 9(%1), %%mm1 \n\t"
+ "movq 8(%1, %3), %%mm2 \n\t"
+ "movq 9(%1, %3), %%mm3 \n\t"
+ PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
+ "movq %%mm4, 8(%2) \n\t"
+ "movq %%mm5, 8(%2, %3) \n\t"
+ "addl %%eax, %1 \n\t"
+ "addl %%eax, %2 \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq 1(%1), %%mm1 \n\t"
+ "movq (%1, %3), %%mm2 \n\t"
+ "movq 1(%1, %3), %%mm3 \n\t"
+ PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
+ "movq %%mm4, (%2) \n\t"
+ "movq %%mm5, (%2, %3) \n\t"
+ "movq 8(%1), %%mm0 \n\t"
+ "movq 9(%1), %%mm1 \n\t"
+ "movq 8(%1, %3), %%mm2 \n\t"
+ "movq 9(%1, %3), %%mm3 \n\t"
+ PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
+ "movq %%mm4, 8(%2) \n\t"
+ "movq %%mm5, 8(%2, %3) \n\t"
+ "addl %%eax, %1 \n\t"
+ "addl %%eax, %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels), "+D"(block)
+ :"r"(line_size)
+ :"eax", "memory");
+}
+
+static void DEF(put, pixels8_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
MOVQ_BFE(mm6);
__asm __volatile(
@@ -83,7 +129,7 @@ static void DEF(put, pixels_y2)(UINT8 *block, const UINT8 *pixels, int line_size
:"eax", "memory");
}
-static void DEF(put, pixels_xy2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(put, pixels8_xy2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
MOVQ_ZERO(mm7);
SET_RND(mm6); // =2 for rnd and =1 for no_rnd version
@@ -151,7 +197,7 @@ static void DEF(put, pixels_xy2)(UINT8 *block, const UINT8 *pixels, int line_siz
// avg_pixels
// in case more speed is needed - unrolling would certainly help
-static void DEF(avg, pixels)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(avg, pixels8)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
MOVQ_BFE(mm6);
JUMPALIGN();
@@ -170,7 +216,30 @@ static void DEF(avg, pixels)(UINT8 *block, const UINT8 *pixels, int line_size, i
while (--h);
}
-static void DEF(avg, pixels_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(avg, pixels16)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ MOVQ_BFE(mm6);
+ JUMPALIGN();
+ do {
+ __asm __volatile(
+ "movq %0, %%mm0 \n\t"
+ "movq %1, %%mm1 \n\t"
+ PAVGB(%%mm0, %%mm1, %%mm2, %%mm6)
+ "movq %%mm2, %0 \n\t"
+ "movq 8%0, %%mm0 \n\t"
+ "movq 8%1, %%mm1 \n\t"
+ PAVGB(%%mm0, %%mm1, %%mm2, %%mm6)
+ "movq %%mm2, 8%0 \n\t"
+ :"+m"(*block)
+ :"m"(*pixels)
+ :"memory");
+ pixels += line_size;
+ block += line_size;
+ }
+ while (--h);
+}
+
+static void DEF(avg, pixels8_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
MOVQ_BFE(mm6);
JUMPALIGN();
@@ -190,7 +259,33 @@ static void DEF(avg, pixels_x2)(UINT8 *block, const UINT8 *pixels, int line_size
} while (--h);
}
-static void DEF(avg, pixels_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(avg, pixels16_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ MOVQ_BFE(mm6);
+ JUMPALIGN();
+ do {
+ __asm __volatile(
+ "movq %1, %%mm0 \n\t"
+ "movq 1%1, %%mm1 \n\t"
+ "movq %0, %%mm3 \n\t"
+ PAVGB(%%mm0, %%mm1, %%mm2, %%mm6)
+ PAVGB(%%mm3, %%mm2, %%mm0, %%mm6)
+ "movq %%mm0, %0 \n\t"
+ "movq 8%1, %%mm0 \n\t"
+ "movq 9%1, %%mm1 \n\t"
+ "movq 8%0, %%mm3 \n\t"
+ PAVGB(%%mm0, %%mm1, %%mm2, %%mm6)
+ PAVGB(%%mm3, %%mm2, %%mm0, %%mm6)
+ "movq %%mm0, 8%0 \n\t"
+ :"+m"(*block)
+ :"m"(*pixels)
+ :"memory");
+ pixels += line_size;
+ block += line_size;
+ } while (--h);
+}
+
+static void DEF(avg, pixels8_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
MOVQ_BFE(mm6);
__asm __volatile(
@@ -230,7 +325,7 @@ static void DEF(avg, pixels_y2)(UINT8 *block, const UINT8 *pixels, int line_size
}
// this routine is 'slightly' suboptimal but mostly unused
-static void DEF(avg, pixels_xy2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(avg, pixels8_xy2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
MOVQ_ZERO(mm7);
SET_RND(mm6); // =2 for rnd and =1 for no_rnd version
@@ -303,3 +398,26 @@ static void DEF(avg, pixels_xy2)(UINT8 *block, const UINT8 *pixels, int line_siz
:"D"(block), "r"(line_size)
:"eax", "memory");
}
+
+//FIXME optimize
+static void DEF(put, pixels16_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h){
+ DEF(put, pixels8_y2)(block , pixels , line_size, h);
+ DEF(put, pixels8_y2)(block+8, pixels+8, line_size, h);
+}
+
+static void DEF(put, pixels16_xy2)(UINT8 *block, const UINT8 *pixels, int line_size, int h){
+ DEF(put, pixels8_xy2)(block , pixels , line_size, h);
+ DEF(put, pixels8_xy2)(block+8, pixels+8, line_size, h);
+}
+
+static void DEF(avg, pixels16_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h){
+ DEF(avg, pixels8_y2)(block , pixels , line_size, h);
+ DEF(avg, pixels8_y2)(block+8, pixels+8, line_size, h);
+}
+
+static void DEF(avg, pixels16_xy2)(UINT8 *block, const UINT8 *pixels, int line_size, int h){
+ DEF(avg, pixels8_xy2)(block , pixels , line_size, h);
+ DEF(avg, pixels8_xy2)(block+8, pixels+8, line_size, h);
+}
+
+
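This header holds the pure-MMX kernels; as the SET_RND comment above indicates, it is built in a rounding and a no-rounding flavour, with the PAVGB/PAVGBP macros expanding to the corresponding byte average without the MMX2 pavgb instruction. The new avg_pixels16 and avg_pixels16_x2 additionally blend the prediction with what is already in the destination, which is what the bidirectional path needs. The two averaging flavours and the avg op, written out in scalar C as an illustration:

    #include <stdint.h>

    static inline uint8_t avg_rnd(uint8_t a, uint8_t b)    /* rnd:    (a+b+1)>>1 */
    {
        return (uint8_t)((a + b + 1) >> 1);
    }

    static inline uint8_t avg_no_rnd(uint8_t a, uint8_t b) /* no_rnd: (a+b)>>1 */
    {
        return (uint8_t)((a + b) >> 1);
    }

    /* avg_pixels16: re-read the destination and average it with the new
       prediction, 16 bytes per row */
    static void avg_pixels16_ref(uint8_t *block, const uint8_t *pixels,
                                 int line_size, int h)
    {
        for (int i = 0; i < h; i++) {
            for (int j = 0; j < 16; j++)
                block[j] = avg_rnd(block[j], pixels[j]);
            block  += line_size;
            pixels += line_size;
        }
    }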
diff --git a/libavcodec/motion_est.c b/libavcodec/motion_est.c
index 295163898d..325d0f5525 100644
--- a/libavcodec/motion_est.c
+++ b/libavcodec/motion_est.c
@@ -1337,8 +1337,7 @@ static inline int check_bidir_mv(MpegEncContext * s,
src_y = mb_y * 16 + (motion_fy >> 1);
ptr = s->last_picture[0] + (src_y * s->linesize) + src_x;
- put_pixels_tab[dxy](dest_y , ptr , s->linesize, 16);
- put_pixels_tab[dxy](dest_y + 8, ptr + 8, s->linesize, 16);
+ put_pixels_tab[0][dxy](dest_y , ptr , s->linesize, 16);
fbmin += (mv_penalty[motion_bx-pred_bx] + mv_penalty[motion_by-pred_by])*s->qscale;
@@ -1347,8 +1346,7 @@ static inline int check_bidir_mv(MpegEncContext * s,
src_y = mb_y * 16 + (motion_by >> 1);
ptr = s->next_picture[0] + (src_y * s->linesize) + src_x;
- avg_pixels_tab[dxy](dest_y , ptr , s->linesize, 16);
- avg_pixels_tab[dxy](dest_y + 8, ptr + 8, s->linesize, 16);
+ avg_pixels_tab[0][dxy](dest_y , ptr , s->linesize, 16);
fbmin += pix_abs16x16(s->new_picture[0] + mb_x*16 + mb_y*16*s->linesize, dest_y, s->linesize);
return fbmin;
@@ -1433,8 +1431,7 @@ static inline int direct_search(MpegEncContext * s,
if (src_y == height) dxy &= ~2;
ptr = s->last_picture[0] + (src_y * s->linesize) + src_x;
- put_pixels_tab[dxy](dest_y , ptr , s->linesize, 16);
- put_pixels_tab[dxy](dest_y + 8, ptr + 8, s->linesize, 16);
+ put_pixels_tab[0][dxy](dest_y , ptr , s->linesize, 16);
dxy = ((motion_by & 1) << 1) | (motion_bx & 1);
src_x = (mb_x + bx) * 16 + (motion_bx >> 1);
@@ -1444,8 +1441,7 @@ static inline int direct_search(MpegEncContext * s,
src_y = clip(src_y, -16, height);
if (src_y == height) dxy &= ~2;
- avg_pixels_tab[dxy](dest_y , ptr , s->linesize, 16);
- avg_pixels_tab[dxy](dest_y + 8, ptr + 8, s->linesize, 16);
+ avg_pixels_tab[0][dxy](dest_y , ptr , s->linesize, 16);
}
}
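In the bidirectional check and the direct-mode search above, the pair of 8-wide calls per luma block becomes a single 16-wide call: dxy packs the two half-pel flags and the leading [0] selects the 16-pixel-wide row of the table. A self-contained sketch of that call shape (the helper name is local to the sketch):

    #include <stdint.h>

    typedef void (*op_pixels_func)(uint8_t *block, const uint8_t *pixels,
                                   int line_size, int h);

    static void predict_luma16(op_pixels_func (*put_tab)[4],
                               uint8_t *dest_y, const uint8_t *ptr,
                               int linesize, int motion_x, int motion_y)
    {
        int dxy = ((motion_y & 1) << 1) | (motion_x & 1);  /* half-pel case */

        /* before this patch: put_tab[dxy](dest_y, ...) plus a second call
           at dest_y + 8 / ptr + 8 */
        put_tab[0][dxy](dest_y, ptr, linesize, 16);
    }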
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index 70eb782a00..e26024cb48 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -1063,18 +1063,19 @@ static inline void mpeg_motion(MpegEncContext *s,
UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
int dest_offset,
UINT8 **ref_picture, int src_offset,
- int field_based, op_pixels_func *pix_op,
+ int field_based, op_pixels_func (*pix_op)[4],
int motion_x, int motion_y, int h)
{
UINT8 *ptr;
int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;
int emu=0;
-
+#if 0
if(s->quarter_sample)
{
motion_x>>=1;
motion_y>>=1;
}
+#endif
dxy = ((motion_y & 1) << 1) | (motion_x & 1);
src_x = s->mb_x * 16 + (motion_x >> 1);
src_y = s->mb_y * (16 >> field_based) + (motion_y >> 1);
@@ -1101,8 +1102,7 @@ if(s->quarter_sample)
emu=1;
}
}
- pix_op[dxy](dest_y, ptr, linesize, h);
- pix_op[dxy](dest_y + 8, ptr + 8, linesize, h);
+ pix_op[0][dxy](dest_y, ptr, linesize, h);
if(s->flags&CODEC_FLAG_GRAY) return;
@@ -1136,22 +1136,22 @@ if(s->quarter_sample)
emulated_edge_mc(s, ptr, uvlinesize, 9, (h>>1)+1, src_x, src_y, s->h_edge_pos>>1, v_edge_pos>>1);
ptr= s->edge_emu_buffer;
}
- pix_op[dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
+ pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
ptr = ref_picture[2] + offset;
if(emu){
emulated_edge_mc(s, ptr, uvlinesize, 9, (h>>1)+1, src_x, src_y, s->h_edge_pos>>1, v_edge_pos>>1);
ptr= s->edge_emu_buffer;
}
- pix_op[dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
+ pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
}
static inline void qpel_motion(MpegEncContext *s,
UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
int dest_offset,
UINT8 **ref_picture, int src_offset,
- int field_based, op_pixels_func *pix_op,
- qpel_mc_func *qpix_op,
+ int field_based, op_pixels_func (*pix_op)[4],
+ qpel_mc_func (*qpix_op)[16],
int motion_x, int motion_y, int h)
{
UINT8 *ptr;
@@ -1183,11 +1183,8 @@ static inline void qpel_motion(MpegEncContext *s,
emu=1;
}
}
- qpix_op[dxy](dest_y , ptr , linesize, linesize, motion_x&3, motion_y&3);
- qpix_op[dxy](dest_y + 8, ptr + 8, linesize, linesize, motion_x&3, motion_y&3);
- qpix_op[dxy](dest_y + linesize*8 , ptr + linesize*8 , linesize, linesize, motion_x&3, motion_y&3);
- qpix_op[dxy](dest_y + linesize*8 + 8, ptr + linesize*8 + 8, linesize, linesize, motion_x&3, motion_y&3);
-
+ qpix_op[0][dxy](dest_y, ptr, linesize);
+
if(s->flags&CODEC_FLAG_GRAY) return;
mx= (motion_x>>1) | (motion_x&1);
@@ -1216,21 +1213,21 @@ static inline void qpel_motion(MpegEncContext *s,
emulated_edge_mc(s, ptr, s->uvlinesize, 9, (h>>1)+1, src_x, src_y, s->h_edge_pos>>1, v_edge_pos>>1);
ptr= s->edge_emu_buffer;
}
- pix_op[dxy](dest_cb + (dest_offset >> 1), ptr, s->uvlinesize, h >> 1);
+ pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, s->uvlinesize, h >> 1);
ptr = ref_picture[2] + offset;
if(emu){
emulated_edge_mc(s, ptr, s->uvlinesize, 9, (h>>1)+1, src_x, src_y, s->h_edge_pos>>1, v_edge_pos>>1);
ptr= s->edge_emu_buffer;
}
- pix_op[dxy](dest_cr + (dest_offset >> 1), ptr, s->uvlinesize, h >> 1);
+ pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, s->uvlinesize, h >> 1);
}
static inline void MPV_motion(MpegEncContext *s,
UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
int dir, UINT8 **ref_picture,
- op_pixels_func *pix_op, qpel_mc_func *qpix_op)
+ op_pixels_func (*pix_op)[4], qpel_mc_func (*qpix_op)[16])
{
int dxy, offset, mx, my, src_x, src_y, motion_x, motion_y;
int mb_x, mb_y, i;
@@ -1243,19 +1240,10 @@ static inline void MPV_motion(MpegEncContext *s,
switch(s->mv_type) {
case MV_TYPE_16X16:
if(s->mcsel){
-#if 0
- mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
- ref_picture, 0,
- 0, pix_op,
- s->sprite_offset[0][0]>>3,
- s->sprite_offset[0][1]>>3,
- 16);
-#else
gmc1_motion(s, dest_y, dest_cb, dest_cr, 0,
ref_picture, 0,
16);
-#endif
- }else if(s->quarter_sample && dir==0){ //FIXME
+ }else if(s->quarter_sample){
qpel_motion(s, dest_y, dest_cb, dest_cr, 0,
ref_picture, 0,
0, pix_op, qpix_op,
@@ -1293,7 +1281,7 @@ static inline void MPV_motion(MpegEncContext *s,
}
}
dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
- pix_op[dxy](dest, ptr, s->linesize, 8);
+ pix_op[1][dxy](dest, ptr, s->linesize, 8);
}
if(s->flags&CODEC_FLAG_GRAY) break;
@@ -1340,14 +1328,14 @@ static inline void MPV_motion(MpegEncContext *s,
emu=1;
}
}
- pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);
+ pix_op[1][dxy](dest_cb, ptr, s->uvlinesize, 8);
ptr = ref_picture[2] + offset;
if(emu){
emulated_edge_mc(s, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
ptr= s->edge_emu_buffer;
}
- pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
+ pix_op[1][dxy](dest_cr, ptr, s->uvlinesize, 8);
break;
case MV_TYPE_FIELD:
if (s->picture_structure == PICT_FRAME) {
@@ -1510,8 +1498,8 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
if (!(s->encoding && (s->intra_only || s->pict_type==B_TYPE))) {
UINT8 *dest_y, *dest_cb, *dest_cr;
int dct_linesize, dct_offset;
- op_pixels_func *op_pix;
- qpel_mc_func *op_qpix;
+ op_pixels_func (*op_pix)[4];
+ qpel_mc_func (*op_qpix)[16];
/* avoid copy if macroblock skipped in last frame too
don't touch it for B-frames as they need the skip info from the next P-frame */
@@ -1550,18 +1538,16 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
if((!s->encoding) || (s->mb_type[mb_xy]&(s->mb_type[mb_xy]-1))){
if ((!s->no_rounding) || s->pict_type==B_TYPE){
op_pix = put_pixels_tab;
- op_qpix= qpel_mc_rnd_tab;
+ op_qpix= put_qpel_pixels_tab;
}else{
op_pix = put_no_rnd_pixels_tab;
- op_qpix= qpel_mc_no_rnd_tab;
+ op_qpix= put_no_rnd_qpel_pixels_tab;
}
if (s->mv_dir & MV_DIR_FORWARD) {
MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture, op_pix, op_qpix);
- if ((!s->no_rounding) || s->pict_type==B_TYPE)
- op_pix = avg_pixels_tab;
- else
- op_pix = avg_no_rnd_pixels_tab;
+ op_pix = avg_pixels_tab;
+ op_qpix= avg_qpel_pixels_tab;
}
if (s->mv_dir & MV_DIR_BACKWARD) {
MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture, op_pix, op_qpix);
@@ -1729,8 +1715,8 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
get_pixels(s->block[5], ptr, wrap);
}
}else{
- op_pixels_func *op_pix;
- qpel_mc_func *op_qpix;
+ op_pixels_func (*op_pix)[4];
+ qpel_mc_func (*op_qpix)[16];
UINT8 *dest_y, *dest_cb, *dest_cr;
UINT8 *ptr_y, *ptr_cb, *ptr_cr;
int wrap_y, wrap_c;
@@ -1747,18 +1733,16 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
if ((!s->no_rounding) || s->pict_type==B_TYPE){
op_pix = put_pixels_tab;
- op_qpix= qpel_mc_rnd_tab;
+ op_qpix= put_qpel_pixels_tab;
}else{
op_pix = put_no_rnd_pixels_tab;
- op_qpix= qpel_mc_no_rnd_tab;
+ op_qpix= put_no_rnd_qpel_pixels_tab;
}
if (s->mv_dir & MV_DIR_FORWARD) {
MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture, op_pix, op_qpix);
- if ((!s->no_rounding) || s->pict_type==B_TYPE)
- op_pix = avg_pixels_tab;
- else
- op_pix = avg_no_rnd_pixels_tab;
+ op_pix = avg_pixels_tab;
+ op_qpix= avg_qpel_pixels_tab;
}
if (s->mv_dir & MV_DIR_BACKWARD) {
MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture, op_pix, op_qpix);
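mpeg_motion, qpel_motion and MPV_motion above now take pointers to whole table rows, op_pixels_func (*pix_op)[4] and qpel_mc_func (*qpix_op)[16]: index [0] addresses the 16-wide luma ops, [1] the 8-wide chroma/partition ops, and the new qpel entry points cover the full 16x16 block with a single (dst, src, stride) call; MPV_decode_mb and encode_mb pick between the put, no-rnd and avg tables. A rough sketch of those parameter types and of the selection logic; the outer table dimensions and the empty contents here are assumptions for illustration:

    #include <stdint.h>

    typedef void (*op_pixels_func)(uint8_t *block, const uint8_t *pixels,
                                   int line_size, int h);
    typedef void (*qpel_mc_func)(uint8_t *dst, uint8_t *src, int stride);

    /* shapes only; the real tables are filled in by the dsputil init code */
    static op_pixels_func put_pixels_tab[2][4];
    static op_pixels_func put_no_rnd_pixels_tab[2][4];
    static op_pixels_func avg_pixels_tab[2][4];
    static qpel_mc_func   put_qpel_pixels_tab[2][16];
    static qpel_mc_func   put_no_rnd_qpel_pixels_tab[2][16];
    static qpel_mc_func   avg_qpel_pixels_tab[2][16];

    static void select_ops(int no_rounding, int is_b_frame,
                           op_pixels_func (**op_pix)[4],
                           qpel_mc_func   (**op_qpix)[16])
    {
        if (!no_rounding || is_b_frame) {
            *op_pix  = put_pixels_tab;          /* rounded put */
            *op_qpix = put_qpel_pixels_tab;
        } else {
            *op_pix  = put_no_rnd_pixels_tab;   /* truncating put */
            *op_qpix = put_no_rnd_qpel_pixels_tab;
        }
        /* after the forward prediction the caller switches to
           avg_pixels_tab / avg_qpel_pixels_tab so that the backward
           prediction is averaged into the block */
    }
    /* dispatch then looks like:
         pix_op[0][dxy](dest_y,  ptr,    linesize,   h);        luma
         pix_op[1][dxy](dest_cb, ptr_cb, uvlinesize, h >> 1);   chroma */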
diff --git a/libavcodec/svq1.c b/libavcodec/svq1.c
index f6de669b46..741bef2172 100644
--- a/libavcodec/svq1.c
+++ b/libavcodec/svq1.c
@@ -839,8 +839,7 @@ static int svq1_motion_inter_block (bit_buffer_t *bitbuf,
src = &previous[(x + (mv.x >> 1)) + (y + (mv.y >> 1))*pitch];
dst = current;
- put_pixels_tab[((mv.y & 1) << 1) | (mv.x & 1)](dst,src,pitch,16);
- put_pixels_tab[((mv.y & 1) << 1) | (mv.x & 1)](dst+8,src+8,pitch,16);
+ put_pixels_tab[0][((mv.y & 1) << 1) | (mv.x & 1)](dst,src,pitch,16);
return 0;
}
@@ -907,7 +906,7 @@ static int svq1_motion_inter_4v_block (bit_buffer_t *bitbuf,
src = &previous[(x + (pmv[i]->x >> 1)) + (y + (pmv[i]->y >> 1))*pitch];
dst = current;
- put_pixels_tab[((pmv[i]->y & 1) << 1) | (pmv[i]->x & 1)](dst,src,pitch,8);
+ put_pixels_tab[1][((pmv[i]->y & 1) << 1) | (pmv[i]->x & 1)](dst,src,pitch,8);
/* select next block */
if (i & 1) {