 libavcodec/arm/me_cmp_init_arm.c |  10
 libavcodec/me_cmp.c              | 116
 libavcodec/me_cmp.h              |   3
 libavcodec/ppc/me_cmp.c          |  56
 libavcodec/x86/me_cmp_init.c     | 132
 5 files changed, 162 insertions(+), 155 deletions(-)
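
The patch below converts the stride parameter of the motion-estimation compare
functions from int to ptrdiff_t across all implementations. A minimal sketch of
the reasoning, not taken from the patch itself: ptrdiff_t is the natural C type
for pointer offsets, so a stride of that type can be added to a pixel pointer on
every row, may legally be negative for bottom-up layouts, and never needs a
widening cast on 64-bit targets.

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

/* Standalone row walk with a ptrdiff_t stride, mirroring the shape of
 * the patched C kernels such as pix_abs8_c. */
static int sad8_sketch(uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h)
{
    int s = 0;
    for (int i = 0; i < h; i++) {
        for (int j = 0; j < 8; j++)
            s += abs(pix1[j] - pix2[j]);
        pix1 += stride;   /* pointer + ptrdiff_t: no truncation, no cast */
        pix2 += stride;
    }
    return s;
}
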
diff --git a/libavcodec/arm/me_cmp_init_arm.c b/libavcodec/arm/me_cmp_init_arm.c
index 819d901f90..4d73f3e0fd 100644
--- a/libavcodec/arm/me_cmp_init_arm.c
+++ b/libavcodec/arm/me_cmp_init_arm.c
@@ -26,17 +26,17 @@
#include "libavcodec/mpegvideo.h"
int ff_pix_abs16_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
- int line_size, int h);
+ ptrdiff_t stride, int h);
int ff_pix_abs16_x2_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
- int line_size, int h);
+ ptrdiff_t stride, int h);
int ff_pix_abs16_y2_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
- int line_size, int h);
+ ptrdiff_t stride, int h);
int ff_pix_abs8_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
- int line_size, int h);
+ ptrdiff_t stride, int h);
int ff_sse16_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
- int line_size, int h);
+ ptrdiff_t stride, int h);
av_cold void ff_me_cmp_init_arm(MECmpContext *c, AVCodecContext *avctx)
{
diff --git a/libavcodec/me_cmp.c b/libavcodec/me_cmp.c
index 9fcc93739a..eb98a72f1b 100644
--- a/libavcodec/me_cmp.c
+++ b/libavcodec/me_cmp.c
@@ -27,7 +27,7 @@
uint32_t ff_square_tab[512] = { 0, };
static int sse4_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
uint32_t *sq = ff_square_tab + 256;
@@ -37,14 +37,14 @@ static int sse4_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
s += sq[pix1[1] - pix2[1]];
s += sq[pix1[2] - pix2[2]];
s += sq[pix1[3] - pix2[3]];
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
return s;
}
static int sse8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
uint32_t *sq = ff_square_tab + 256;
@@ -58,14 +58,14 @@ static int sse8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
s += sq[pix1[5] - pix2[5]];
s += sq[pix1[6] - pix2[6]];
s += sq[pix1[7] - pix2[7]];
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
return s;
}
static int sse16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
uint32_t *sq = ff_square_tab + 256;
@@ -88,8 +88,8 @@ static int sse16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
s += sq[pix1[14] - pix2[14]];
s += sq[pix1[15] - pix2[15]];
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
return s;
}
@@ -107,7 +107,7 @@ static int sum_abs_dctelem_c(int16_t *block)
#define avg4(a, b, c, d) ((a + b + c + d + 2) >> 2)
static inline int pix_abs16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
@@ -128,14 +128,14 @@ static inline int pix_abs16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
s += abs(pix1[13] - pix2[13]);
s += abs(pix1[14] - pix2[14]);
s += abs(pix1[15] - pix2[15]);
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
return s;
}
static int pix_abs16_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
@@ -156,17 +156,17 @@ static int pix_abs16_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
s += abs(pix1[13] - avg2(pix2[13], pix2[14]));
s += abs(pix1[14] - avg2(pix2[14], pix2[15]));
s += abs(pix1[15] - avg2(pix2[15], pix2[16]));
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
return s;
}
static int pix_abs16_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
- uint8_t *pix3 = pix2 + line_size;
+ uint8_t *pix3 = pix2 + stride;
for (i = 0; i < h; i++) {
s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
@@ -185,18 +185,18 @@ static int pix_abs16_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
s += abs(pix1[13] - avg2(pix2[13], pix3[13]));
s += abs(pix1[14] - avg2(pix2[14], pix3[14]));
s += abs(pix1[15] - avg2(pix2[15], pix3[15]));
- pix1 += line_size;
- pix2 += line_size;
- pix3 += line_size;
+ pix1 += stride;
+ pix2 += stride;
+ pix3 += stride;
}
return s;
}
static int pix_abs16_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
- uint8_t *pix3 = pix2 + line_size;
+ uint8_t *pix3 = pix2 + stride;
for (i = 0; i < h; i++) {
s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
@@ -215,15 +215,15 @@ static int pix_abs16_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
s += abs(pix1[13] - avg4(pix2[13], pix2[14], pix3[13], pix3[14]));
s += abs(pix1[14] - avg4(pix2[14], pix2[15], pix3[14], pix3[15]));
s += abs(pix1[15] - avg4(pix2[15], pix2[16], pix3[15], pix3[16]));
- pix1 += line_size;
- pix2 += line_size;
- pix3 += line_size;
+ pix1 += stride;
+ pix2 += stride;
+ pix3 += stride;
}
return s;
}
static inline int pix_abs8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
@@ -236,14 +236,14 @@ static inline int pix_abs8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
s += abs(pix1[5] - pix2[5]);
s += abs(pix1[6] - pix2[6]);
s += abs(pix1[7] - pix2[7]);
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
return s;
}
static int pix_abs8_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
@@ -256,17 +256,17 @@ static int pix_abs8_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
return s;
}
static int pix_abs8_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
- uint8_t *pix3 = pix2 + line_size;
+ uint8_t *pix3 = pix2 + stride;
for (i = 0; i < h; i++) {
s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
@@ -277,18 +277,18 @@ static int pix_abs8_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
- pix1 += line_size;
- pix2 += line_size;
- pix3 += line_size;
+ pix1 += stride;
+ pix2 += stride;
+ pix3 += stride;
}
return s;
}
static int pix_abs8_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int s = 0, i;
- uint8_t *pix3 = pix2 + line_size;
+ uint8_t *pix3 = pix2 + stride;
for (i = 0; i < h; i++) {
s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
@@ -299,14 +299,15 @@ static int pix_abs8_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
- pix1 += line_size;
- pix2 += line_size;
- pix3 += line_size;
+ pix1 += stride;
+ pix2 += stride;
+ pix3 += stride;
}
return s;
}
-static int nsse16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2, int stride, int h)
+static int nsse16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
+ ptrdiff_t stride, int h)
{
int score1 = 0, score2 = 0, x, y;
@@ -330,7 +331,8 @@ static int nsse16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2, int stride, int
return score1 + FFABS(score2) * 8;
}
-static int nsse8_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2, int stride, int h)
+static int nsse8_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
+ ptrdiff_t stride, int h)
{
int score1 = 0, score2 = 0, x, y;
@@ -355,7 +357,7 @@ static int nsse8_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2, int stride, int
}
static int zero_cmp(MpegEncContext *s, uint8_t *a, uint8_t *b,
- int stride, int h)
+ ptrdiff_t stride, int h)
{
return 0;
}
@@ -430,7 +432,7 @@ void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
#define BUTTERFLYA(x, y) (FFABS((x) + (y)) + FFABS((x) - (y)))
static int hadamard8_diff8x8_c(MpegEncContext *s, uint8_t *dst,
- uint8_t *src, int stride, int h)
+ uint8_t *src, ptrdiff_t stride, int h)
{
int i, temp[64], sum = 0;
@@ -482,7 +484,7 @@ static int hadamard8_diff8x8_c(MpegEncContext *s, uint8_t *dst,
}
static int hadamard8_intra8x8_c(MpegEncContext *s, uint8_t *src,
- uint8_t *dummy, int stride, int h)
+ uint8_t *dummy, ptrdiff_t stride, int h)
{
int i, temp[64], sum = 0;
@@ -534,7 +536,7 @@ static int hadamard8_intra8x8_c(MpegEncContext *s, uint8_t *src,
}
static int dct_sad8x8_c(MpegEncContext *s, uint8_t *src1,
- uint8_t *src2, int stride, int h)
+ uint8_t *src2, ptrdiff_t stride, int h)
{
LOCAL_ALIGNED_16(int16_t, temp, [64]);
@@ -575,7 +577,7 @@ static int dct_sad8x8_c(MpegEncContext *s, uint8_t *src1,
}
static int dct264_sad8x8_c(MpegEncContext *s, uint8_t *src1,
- uint8_t *src2, int stride, int h)
+ uint8_t *src2, ptrdiff_t stride, int h)
{
int16_t dct[8][8];
int i, sum = 0;
@@ -600,7 +602,7 @@ static int dct264_sad8x8_c(MpegEncContext *s, uint8_t *src1,
#endif
static int dct_max8x8_c(MpegEncContext *s, uint8_t *src1,
- uint8_t *src2, int stride, int h)
+ uint8_t *src2, ptrdiff_t stride, int h)
{
LOCAL_ALIGNED_16(int16_t, temp, [64]);
int sum = 0, i;
@@ -617,7 +619,7 @@ static int dct_max8x8_c(MpegEncContext *s, uint8_t *src1,
}
static int quant_psnr8x8_c(MpegEncContext *s, uint8_t *src1,
- uint8_t *src2, int stride, int h)
+ uint8_t *src2, ptrdiff_t stride, int h)
{
LOCAL_ALIGNED_16(int16_t, temp, [64 * 2]);
int16_t *const bak = temp + 64;
@@ -642,7 +644,7 @@ static int quant_psnr8x8_c(MpegEncContext *s, uint8_t *src1,
}
static int rd8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2,
- int stride, int h)
+ ptrdiff_t stride, int h)
{
const uint8_t *scantable = s->intra_scantable.permutated;
LOCAL_ALIGNED_16(int16_t, temp, [64]);
@@ -719,7 +721,7 @@ static int rd8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2,
}
static int bit8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2,
- int stride, int h)
+ ptrdiff_t stride, int h)
{
const uint8_t *scantable = s->intra_scantable.permutated;
LOCAL_ALIGNED_16(int16_t, temp, [64]);
@@ -782,7 +784,7 @@ static int bit8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2,
#define VSAD_INTRA(size) \
static int vsad_intra ## size ## _c(MpegEncContext *c, \
uint8_t *s, uint8_t *dummy, \
- int stride, int h) \
+ ptrdiff_t stride, int h) \
{ \
int score = 0, x, y; \
\
@@ -802,7 +804,7 @@ VSAD_INTRA(8)
VSAD_INTRA(16)
static int vsad16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
- int stride, int h)
+ ptrdiff_t stride, int h)
{
int score = 0, x, y;
@@ -820,7 +822,7 @@ static int vsad16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
#define VSSE_INTRA(size) \
static int vsse_intra ## size ## _c(MpegEncContext *c, \
uint8_t *s, uint8_t *dummy, \
- int stride, int h) \
+ ptrdiff_t stride, int h) \
{ \
int score = 0, x, y; \
\
@@ -840,7 +842,7 @@ VSSE_INTRA(8)
VSSE_INTRA(16)
static int vsse16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
- int stride, int h)
+ ptrdiff_t stride, int h)
{
int score = 0, x, y;
@@ -856,7 +858,7 @@ static int vsse16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
#define WRAPPER8_16_SQ(name8, name16) \
static int name16(MpegEncContext *s, uint8_t *dst, uint8_t *src, \
- int stride, int h) \
+ ptrdiff_t stride, int h) \
{ \
int score = 0; \
\
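
The WRAPPER8_16_SQ macro whose opening lines appear above (its body is
truncated by the hunk) builds a 16x16 score out of four 8x8 calls. A hedged
reconstruction of that pattern with the new stride type, using placeholder
names rather than the real macro:

#include <stddef.h>
#include <stdint.h>

typedef int (*cmp8_fn)(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int h);

/* Hypothetical helper: score the left and right 8-wide halves, then,
 * for h == 16, the two halves of the lower 8 rows as well. */
static int cmp16_from_cmp8(cmp8_fn cmp8, uint8_t *dst, uint8_t *src,
                           ptrdiff_t stride, int h)
{
    int score = cmp8(dst, src, stride, 8)
              + cmp8(dst + 8, src + 8, stride, 8);
    if (h == 16) {
        dst += 8 * stride;              /* step down 8 rows */
        src += 8 * stride;
        score += cmp8(dst, src, stride, 8)
               + cmp8(dst + 8, src + 8, stride, 8);
    }
    return score;
}
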
diff --git a/libavcodec/me_cmp.h b/libavcodec/me_cmp.h
index 05ae30b0c1..725f9b25ee 100644
--- a/libavcodec/me_cmp.h
+++ b/libavcodec/me_cmp.h
@@ -33,7 +33,8 @@ struct MpegEncContext;
* width < 8 are neither used nor implemented. */
typedef int (*me_cmp_func)(struct MpegEncContext *c,
uint8_t *blk1 /* align width (8 or 16) */,
- uint8_t *blk2 /* align 1 */, int line_size, int h);
+ uint8_t *blk2 /* align 1 */, ptrdiff_t stride,
+ int h);
typedef struct MECmpContext {
int (*sum_abs_dctelem)(int16_t *block /* align 16 */);
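
One reason the typedef and every implementation (C, ARM, PPC, x86) change in a
single patch: C forbids calling a function through a pointer of an incompatible
type, so a leftover int-stride implementation stored in an MECmpContext table
would be undefined behavior. A small sketch, with the trivial callback mirrored
from zero_cmp in me_cmp.c:

#include <stddef.h>
#include <stdint.h>

struct MpegEncContext;

/* The updated callback type from me_cmp.h. */
typedef int (*me_cmp_func)(struct MpegEncContext *c,
                           uint8_t *blk1, uint8_t *blk2,
                           ptrdiff_t stride, int h);

/* Matches the typedef exactly, so storing and invoking it through the
 * function-pointer table is well defined. */
static int zero_cmp_sketch(struct MpegEncContext *c, uint8_t *a, uint8_t *b,
                           ptrdiff_t stride, int h)
{
    return 0;
}

static me_cmp_func cmp_tab[1] = { zero_cmp_sketch };
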
diff --git a/libavcodec/ppc/me_cmp.c b/libavcodec/ppc/me_cmp.c
index 88c7feaa7e..fd67cf34ee 100644
--- a/libavcodec/ppc/me_cmp.c
+++ b/libavcodec/ppc/me_cmp.c
@@ -36,7 +36,7 @@
#if HAVE_ALTIVEC
static int sad16_x2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int i, s = 0;
const vector unsigned char zero =
@@ -66,8 +66,8 @@ static int sad16_x2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
/* Add each 4 pixel group together and put 4 results into sad. */
sad = vec_sum4s(t5, sad);
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
/* Sum up the four partial sums, and put the result into s. */
sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
@@ -78,7 +78,7 @@ static int sad16_x2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
}
static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int i, s = 0;
const vector unsigned char zero =
@@ -87,9 +87,9 @@ static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
vector unsigned char pix1v, pix3v, avgv, t5;
vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
vector signed int sumdiffs;
- uint8_t *pix3 = pix2 + line_size;
+ uint8_t *pix3 = pix2 + stride;
- /* Due to the fact that pix3 = pix2 + line_size, the pix3 of one
+ /* Due to the fact that pix3 = pix2 + stride, the pix3 of one
* iteration becomes pix2 in the next iteration. We can use this
* fact to avoid a potentially expensive unaligned read, each
* time around the loop.
@@ -119,9 +119,9 @@ static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
/* Add each 4 pixel group together and put 4 results into sad. */
sad = vec_sum4s(t5, sad);
- pix1 += line_size;
+ pix1 += stride;
pix2v = pix3v;
- pix3 += line_size;
+ pix3 += stride;
}
/* Sum up the four partial sums, and put the result into s. */
@@ -132,10 +132,10 @@ static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
}
static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int i, s = 0;
- uint8_t *pix3 = pix2 + line_size;
+ uint8_t *pix3 = pix2 + stride;
const vector unsigned char zero =
(const vector unsigned char) vec_splat_u8(0);
const vector unsigned short two =
@@ -149,7 +149,7 @@ static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
vector signed int sumdiffs;
- /* Due to the fact that pix3 = pix2 + line_size, the pix3 of one
+ /* Due to the fact that pix3 = pix2 + stride, the pix3 of one
* iteration becomes pix2 in the next iteration. We can use this
* fact to avoid a potentially expensive unaligned read, as well
* as some splitting, and vector addition each time around the loop.
@@ -212,8 +212,8 @@ static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
/* Add each 4 pixel group together and put 4 results into sad. */
sad = vec_sum4s(t5, sad);
- pix1 += line_size;
- pix3 += line_size;
+ pix1 += stride;
+ pix3 += stride;
/* Transfer the calculated values for pix3 into pix2. */
t1 = t3;
t2 = t4;
@@ -227,7 +227,7 @@ static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
}
static int sad16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int i, s;
const vector unsigned int zero =
@@ -251,8 +251,8 @@ static int sad16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
/* Add each 4 pixel group together and put 4 results into sad. */
sad = vec_sum4s(t5, sad);
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
/* Sum up the four partial sums, and put the result into s. */
@@ -264,7 +264,7 @@ static int sad16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
}
static int sad8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int i, s;
const vector unsigned int zero =
@@ -298,8 +298,8 @@ static int sad8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
/* Add each 4 pixel group together and put 4 results into sad. */
sad = vec_sum4s(t5, sad);
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
/* Sum up the four partial sums, and put the result into s. */
@@ -313,7 +313,7 @@ static int sad8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
/* Sum of Squared Errors for an 8x8 block, AltiVec-enhanced.
* It's the sad8_altivec code above w/ squaring added. */
static int sse8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int i, s;
const vector unsigned int zero =
@@ -350,8 +350,8 @@ static int sse8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
/* Square the values and add them to our sum. */
sum = vec_msum(t5, t5, sum);
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
/* Sum up the four partial sums, and put the result into s. */
@@ -365,7 +365,7 @@ static int sse8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
/* Sum of Squared Errors for a 16x16 block, AltiVec-enhanced.
* It's the sad16_altivec code above w/ squaring added. */
static int sse16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int i, s;
const vector unsigned int zero =
@@ -392,8 +392,8 @@ static int sse16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
/* Square the values and add them to our sum. */
sum = vec_msum(t5, t5, sum);
- pix1 += line_size;
- pix2 += line_size;
+ pix1 += stride;
+ pix2 += stride;
}
/* Sum up the four partial sums, and put the result into s. */
@@ -405,7 +405,7 @@ static int sse16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
}
static int hadamard8_diff8x8_altivec(MpegEncContext *s, uint8_t *dst,
- uint8_t *src, int stride, int h)
+ uint8_t *src, ptrdiff_t stride, int h)
{
int sum;
register const vector unsigned char vzero =
@@ -534,7 +534,7 @@ static int hadamard8_diff8x8_altivec(MpegEncContext *s, uint8_t *dst,
* but xlc goes to around 660 on the regular C code...
*/
static int hadamard8_diff16x8_altivec(MpegEncContext *s, uint8_t *dst,
- uint8_t *src, int stride, int h)
+ uint8_t *src, ptrdiff_t stride, int h)
{
int sum;
register vector signed short
@@ -731,7 +731,7 @@ static int hadamard8_diff16x8_altivec(MpegEncContext *s, uint8_t *dst,
}
static int hadamard8_diff16_altivec(MpegEncContext *s, uint8_t *dst,
- uint8_t *src, int stride, int h)
+ uint8_t *src, ptrdiff_t stride, int h)
{
int score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
diff --git a/libavcodec/x86/me_cmp_init.c b/libavcodec/x86/me_cmp_init.c
index e93b67b053..f6c8e5b565 100644
--- a/libavcodec/x86/me_cmp_init.c
+++ b/libavcodec/x86/me_cmp_init.c
@@ -32,7 +32,7 @@
#if HAVE_INLINE_ASM
static int sse8_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int tmp;
@@ -74,8 +74,8 @@ static int sse8_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
"pmaddwd %%mm1, %%mm1 \n"
"pmaddwd %%mm3, %%mm3 \n"
- "lea (%0, %3, 2), %0 \n" /* pix1 += 2 * line_size */
- "lea (%1, %3, 2), %1 \n" /* pix2 += 2 * line_size */
+ "lea (%0, %3, 2), %0 \n" /* pix1 += 2 * stride */
+ "lea (%1, %3, 2), %1 \n" /* pix2 += 2 * stride */
"paddd %%mm2, %%mm1 \n"
"paddd %%mm4, %%mm3 \n"
@@ -90,14 +90,14 @@ static int sse8_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
"paddd %%mm7, %%mm1 \n"
"movd %%mm1, %2 \n"
: "+r" (pix1), "+r" (pix2), "=r" (tmp)
- : "r" ((x86_reg) line_size), "m" (h)
+ : "r" (stride), "m" (h)
: "%ecx");
return tmp;
}
static int sse16_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int tmp;
@@ -154,13 +154,13 @@ static int sse16_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
"paddd %%mm7, %%mm1\n"
"movd %%mm1, %2\n"
: "+r" (pix1), "+r" (pix2), "=r" (tmp)
- : "r" ((x86_reg) line_size), "m" (h)
+ : "r" (stride), "m" (h)
: "%ecx");
return tmp;
}
-static int hf_noise8_mmx(uint8_t *pix1, int line_size, int h)
+static int hf_noise8_mmx(uint8_t *pix1, ptrdiff_t stride, int h)
{
int tmp;
@@ -282,13 +282,13 @@ static int hf_noise8_mmx(uint8_t *pix1, int line_size, int h)
"paddd %%mm6, %%mm0\n"
"movd %%mm0, %1\n"
: "+r" (pix1), "=r" (tmp)
- : "r" ((x86_reg) line_size), "g" (h - 2)
+ : "r" (stride), "g" (h - 2)
: "%ecx");
return tmp;
}
-static int hf_noise16_mmx(uint8_t *pix1, int line_size, int h)
+static int hf_noise16_mmx(uint8_t *pix1, ptrdiff_t stride, int h)
{
int tmp;
uint8_t *pix = pix1;
@@ -399,23 +399,23 @@ static int hf_noise16_mmx(uint8_t *pix1, int line_size, int h)
"paddd %%mm6, %%mm0\n"
"movd %%mm0, %1\n"
: "+r" (pix1), "=r" (tmp)
- : "r" ((x86_reg) line_size), "g" (h - 2)
+ : "r" (stride), "g" (h - 2)
: "%ecx");
- return tmp + hf_noise8_mmx(pix + 8, line_size, h);
+ return tmp + hf_noise8_mmx(pix + 8, stride, h);
}
static int nsse16_mmx(MpegEncContext *c, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int score1, score2;
if (c)
- score1 = c->mecc.sse[0](c, pix1, pix2, line_size, h);
+ score1 = c->mecc.sse[0](c, pix1, pix2, stride, h);
else
- score1 = sse16_mmx(c, pix1, pix2, line_size, h);
- score2 = hf_noise16_mmx(pix1, line_size, h) -
- hf_noise16_mmx(pix2, line_size, h);
+ score1 = sse16_mmx(c, pix1, pix2, stride, h);
+ score2 = hf_noise16_mmx(pix1, stride, h) -
+ hf_noise16_mmx(pix2, stride, h);
if (c)
return score1 + FFABS(score2) * c->avctx->nsse_weight;
@@ -424,11 +424,11 @@ static int nsse16_mmx(MpegEncContext *c, uint8_t *pix1, uint8_t *pix2,
}
static int nsse8_mmx(MpegEncContext *c, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
- int score1 = sse8_mmx(c, pix1, pix2, line_size, h);
- int score2 = hf_noise8_mmx(pix1, line_size, h) -
- hf_noise8_mmx(pix2, line_size, h);
+ int score1 = sse8_mmx(c, pix1, pix2, stride, h);
+ int score2 = hf_noise8_mmx(pix1, stride, h) -
+ hf_noise8_mmx(pix2, stride, h);
if (c)
return score1 + FFABS(score2) * c->avctx->nsse_weight;
@@ -437,12 +437,12 @@ static int nsse8_mmx(MpegEncContext *c, uint8_t *pix1, uint8_t *pix2,
}
static int vsad_intra16_mmx(MpegEncContext *v, uint8_t *pix, uint8_t *dummy,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int tmp;
assert((((int) pix) & 7) == 0);
- assert((line_size & 7) == 0);
+ assert((stride & 7) == 0);
#define SUM(in0, in1, out0, out1) \
"movq (%0), %%mm2\n" \
@@ -493,7 +493,7 @@ static int vsad_intra16_mmx(MpegEncContext *v, uint8_t *pix, uint8_t *dummy,
"paddw %%mm6, %%mm0\n"
"movd %%mm0, %1\n"
: "+r" (pix), "=r" (tmp)
- : "r" ((x86_reg) line_size), "m" (h)
+ : "r" (stride), "m" (h)
: "%ecx");
return tmp & 0xFFFF;
@@ -501,12 +501,12 @@ static int vsad_intra16_mmx(MpegEncContext *v, uint8_t *pix, uint8_t *dummy,
#undef SUM
static int vsad_intra16_mmxext(MpegEncContext *v, uint8_t *pix, uint8_t *dummy,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int tmp;
assert((((int) pix) & 7) == 0);
- assert((line_size & 7) == 0);
+ assert((stride & 7) == 0);
#define SUM(in0, in1, out0, out1) \
"movq (%0), " #out0 "\n" \
@@ -536,7 +536,7 @@ static int vsad_intra16_mmxext(MpegEncContext *v, uint8_t *pix, uint8_t *dummy,
"movd %%mm6, %1\n"
: "+r" (pix), "=r" (tmp)
- : "r" ((x86_reg) line_size), "m" (h)
+ : "r" (stride), "m" (h)
: "%ecx");
return tmp;
@@ -544,13 +544,13 @@ static int vsad_intra16_mmxext(MpegEncContext *v, uint8_t *pix, uint8_t *dummy,
#undef SUM
static int vsad16_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int tmp;
assert((((int) pix1) & 7) == 0);
assert((((int) pix2) & 7) == 0);
- assert((line_size & 7) == 0);
+ assert((stride & 7) == 0);
#define SUM(in0, in1, out0, out1) \
"movq (%0), %%mm2\n" \
@@ -617,7 +617,7 @@ static int vsad16_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
"paddw %%mm6, %%mm0\n"
"movd %%mm0, %2\n"
: "+r" (pix1), "+r" (pix2), "=r" (tmp)
- : "r" ((x86_reg) line_size), "m" (h)
+ : "r" (stride), "m" (h)
: "%ecx");
return tmp & 0x7FFF;
@@ -625,13 +625,13 @@ static int vsad16_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
#undef SUM
static int vsad16_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h)
+ ptrdiff_t stride, int h)
{
int tmp;
assert((((int) pix1) & 7) == 0);
assert((((int) pix2) & 7) == 0);
- assert((line_size & 7) == 0);
+ assert((stride & 7) == 0);
#define SUM(in0, in1, out0, out1) \
"movq (%0), " #out0 "\n" \
@@ -677,7 +677,7 @@ static int vsad16_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
"movd %%mm6, %2\n"
: "+r" (pix1), "+r" (pix2), "=r" (tmp)
- : "r" ((x86_reg) line_size), "m" (h)
+ : "r" (stride), "m" (h)
: "%ecx");
return tmp;
@@ -805,7 +805,8 @@ DECLARE_ASM_CONST(8, uint64_t, round_tab)[3] = {
DECLARE_ASM_CONST(8, uint64_t, bone) = 0x0101010101010101LL;
-static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
+static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2,
+ ptrdiff_t stride, int h)
{
x86_reg len = -(stride * h);
__asm__ volatile (
@@ -837,11 +838,11 @@ static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
"add %3, %%"REG_a" \n\t"
" js 1b \n\t"
: "+a" (len)
- : "r" (blk1 - len), "r" (blk2 - len), "r" ((x86_reg) stride));
+ : "r" (blk1 - len), "r" (blk2 - len), "r" (stride));
}
static inline void sad8_1_mmxext(uint8_t *blk1, uint8_t *blk2,
- int stride, int h)
+ ptrdiff_t stride, int h)
{
__asm__ volatile (
".p2align 4 \n\t"
@@ -857,11 +858,11 @@ static inline void sad8_1_mmxext(uint8_t *blk1, uint8_t *blk2,
"sub $2, %0 \n\t"
" jg 1b \n\t"
: "+r" (h), "+r" (blk1), "+r" (blk2)
- : "r" ((x86_reg) stride));
+ : "r" (stride));
}
static int sad16_sse2(MpegEncContext *v, uint8_t *blk2, uint8_t *blk1,
- int stride, int h)
+ ptrdiff_t stride, int h)
{
int ret;
__asm__ volatile (
@@ -882,12 +883,12 @@ static int sad16_sse2(MpegEncContext *v, uint8_t *blk2, uint8_t *blk1,
"paddw %%xmm0, %%xmm2 \n\t"
"movd %%xmm2, %3 \n\t"
: "+r" (h), "+r" (blk1), "+r" (blk2), "=r" (ret)
- : "r" ((x86_reg) stride));
+ : "r" (stride));
return ret;
}
static inline void sad8_x2a_mmxext(uint8_t *blk1, uint8_t *blk2,
- int stride, int h)
+ ptrdiff_t stride, int h)
{
__asm__ volatile (
".p2align 4 \n\t"
@@ -905,11 +906,11 @@ static inline void sad8_x2a_mmxext(uint8_t *blk1, uint8_t *blk2,
"sub $2, %0 \n\t"
" jg 1b \n\t"
: "+r" (h), "+r" (blk1), "+r" (blk2)
- : "r" ((x86_reg) stride));
+ : "r" (stride));
}
static inline void sad8_y2a_mmxext(uint8_t *blk1, uint8_t *blk2,
- int stride, int h)
+ ptrdiff_t stride, int h)
{
__asm__ volatile (
"movq (%1), %%mm0 \n\t"
@@ -930,11 +931,11 @@ static inline void sad8_y2a_mmxext(uint8_t *blk1, uint8_t *blk2,
"sub $2, %0 \n\t"
" jg 1b \n\t"
: "+r" (h), "+r" (blk1), "+r" (blk2)
- : "r" ((x86_reg) stride));
+ : "r" (stride));
}
static inline void sad8_4_mmxext(uint8_t *blk1, uint8_t *blk2,
- int stride, int h)
+ ptrdiff_t stride, int h)
{
__asm__ volatile (
"movq "MANGLE(bone)", %%mm5 \n\t"
@@ -960,11 +961,11 @@ static inline void sad8_4_mmxext(uint8_t *blk1, uint8_t *blk2,
"sub $2, %0 \n\t"
" jg 1b \n\t"
: "+r" (h), "+r" (blk1), "+r" (blk2)
- : "r" ((x86_reg) stride));
+ : "r" (stride));
}
static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2,
- int stride, int h)
+ ptrdiff_t stride, int h)
{
x86_reg len = -(stride * h);
__asm__ volatile (
@@ -999,10 +1000,11 @@ static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2,
" js 1b \n\t"
: "+a" (len)
: "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len),
- "r" ((x86_reg) stride));
+ "r" (stride));
}
-static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
+static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2,
+ ptrdiff_t stride, int h)
{
x86_reg len = -(stride * h);
__asm__ volatile (
@@ -1052,7 +1054,7 @@ static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
" js 1b \n\t"
: "+a" (len)
: "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len),
- "r" ((x86_reg) stride));
+ "r" (stride));
}
static inline int sum_mmx(void)
@@ -1079,19 +1081,21 @@ static inline int sum_mmxext(void)
return ret;
}
-static inline void sad8_x2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
+static inline void sad8_x2a_mmx(uint8_t *blk1, uint8_t *blk2,
+ ptrdiff_t stride, int h)
{
sad8_2_mmx(blk1, blk1 + 1, blk2, stride, h);
}
-static inline void sad8_y2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
+static inline void sad8_y2a_mmx(uint8_t *blk1, uint8_t *blk2,
+ ptrdiff_t stride, int h)
{
sad8_2_mmx(blk1, blk1 + stride, blk2, stride, h);
}
#define PIX_SAD(suf) \
static int sad8_ ## suf(MpegEncContext *v, uint8_t *blk2, \
- uint8_t *blk1, int stride, int h) \
+ uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
assert(h == 8); \
__asm__ volatile ( \
@@ -1105,7 +1109,7 @@ static int sad8_ ## suf(MpegEncContext *v, uint8_t *blk2, \
} \
\
static int sad8_x2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
- uint8_t *blk1, int stride, int h) \
+ uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
assert(h == 8); \
__asm__ volatile ( \
@@ -1120,7 +1124,7 @@ static int sad8_x2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
} \
\
static int sad8_y2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
- uint8_t *blk1, int stride, int h) \
+ uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
assert(h == 8); \
__asm__ volatile ( \
@@ -1135,7 +1139,7 @@ static int sad8_y2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
} \
\
static int sad8_xy2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
- uint8_t *blk1, int stride, int h) \
+ uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
assert(h == 8); \
__asm__ volatile ( \
@@ -1149,7 +1153,7 @@ static int sad8_xy2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
} \
\
static int sad16_ ## suf(MpegEncContext *v, uint8_t *blk2, \
- uint8_t *blk1, int stride, int h) \
+ uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
__asm__ volatile ( \
"pxor %%mm7, %%mm7 \n\t" \
@@ -1163,7 +1167,7 @@ static int sad16_ ## suf(MpegEncContext *v, uint8_t *blk2, \
} \
\
static int sad16_x2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
- uint8_t *blk1, int stride, int h) \
+ uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
__asm__ volatile ( \
"pxor %%mm7, %%mm7 \n\t" \
@@ -1178,7 +1182,7 @@ static int sad16_x2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
} \
\
static int sad16_y2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
- uint8_t *blk1, int stride, int h) \
+ uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
__asm__ volatile ( \
"pxor %%mm7, %%mm7 \n\t" \
@@ -1193,7 +1197,7 @@ static int sad16_y2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
} \
\
static int sad16_xy2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
- uint8_t *blk1, int stride, int h) \
+ uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
__asm__ volatile ( \
"pxor %%mm7, %%mm7 \n\t" \
@@ -1212,13 +1216,13 @@ PIX_SAD(mmxext)
#endif /* HAVE_INLINE_ASM */
int ff_sse16_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
- int line_size, int h);
+ ptrdiff_t stride, int h);
-#define hadamard_func(cpu) \
- int ff_hadamard8_diff_ ## cpu(MpegEncContext *s, uint8_t *src1, \
- uint8_t *src2, int stride, int h); \
- int ff_hadamard8_diff16_ ## cpu(MpegEncContext *s, uint8_t *src1, \
- uint8_t *src2, int stride, int h);
+#define hadamard_func(cpu) \
+ int ff_hadamard8_diff_ ## cpu(MpegEncContext *s, uint8_t *src1, \
+ uint8_t *src2, ptrdiff_t stride, int h); \
+ int ff_hadamard8_diff16_ ## cpu(MpegEncContext *s, uint8_t *src1, \
+ uint8_t *src2, ptrdiff_t stride, int h);
hadamard_func(mmx)
hadamard_func(mmxext)
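
Note how the x86 hunks also drop the (x86_reg) casts from the inline-asm input
constraints: an int operand is only guaranteed to occupy the low 32 bits of a
64-bit register, so it had to be widened by hand before the asm could use it in
64-bit address arithmetic, whereas a ptrdiff_t operand is already
register-sized. A minimal sketch, assuming x86-64 and GNU inline asm (not code
from the patch):

#include <stddef.h>

/* lea computes with full 64-bit registers, so the stride operand must
 * be register-sized; ptrdiff_t guarantees that without a cast. */
static unsigned char *advance_two_rows(unsigned char *p, ptrdiff_t stride)
{
    __asm__ ("lea (%0, %1, 2), %0"      /* p += 2 * stride, as in sse8_mmx */
             : "+r" (p)
             : "r" (stride));           /* was: "r" ((x86_reg) line_size) */
    return p;
}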