From a8b6015823e628047a45916404c00044c5e80415 Mon Sep 17 00:00:00 2001
From: Luca Barbato
Date: Fri, 8 Mar 2013 22:40:03 +0100
Subject: dsputil: convert remaining functions to use ptrdiff_t strides

Signed-off-by: Luca Barbato
---
 libavcodec/x86/dsputil_mmx.c | 86 ++++++++++++++++++++++----------------------
 1 file changed, 43 insertions(+), 43 deletions(-)

(limited to 'libavcodec/x86/dsputil_mmx.c')

diff --git a/libavcodec/x86/dsputil_mmx.c b/libavcodec/x86/dsputil_mmx.c
index a13e9a11d9..aa7b3984aa 100644
--- a/libavcodec/x86/dsputil_mmx.c
+++ b/libavcodec/x86/dsputil_mmx.c
@@ -460,7 +460,7 @@ void ff_add_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
 }
 
 static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels,
-                            int line_size, int h)
+                            ptrdiff_t line_size, int h)
 {
     __asm__ volatile (
         "lea   (%3, %3), %%"REG_a"  \n\t"
@@ -487,7 +487,7 @@ static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels,
 }
 
 static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels,
-                             int line_size, int h)
+                             ptrdiff_t line_size, int h)
 {
     __asm__ volatile (
         "lea   (%3, %3), %%"REG_a"  \n\t"
@@ -750,13 +750,13 @@ static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height,
 #if HAVE_YASM
 #define QPEL_OP(OPNAME, ROUNDER, RND, MMX) \
 static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, \
-                                          int stride) \
+                                          ptrdiff_t stride) \
 { \
     ff_ ## OPNAME ## pixels8_ ## MMX(dst, src, stride, 8); \
 } \
 \
 static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                         int stride) \
+                                         ptrdiff_t stride) \
 { \
     uint64_t temp[8]; \
     uint8_t * const half = (uint8_t*)temp; \
@@ -767,14 +767,14 @@ static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, \
 } \
 \
 static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                         int stride) \
+                                         ptrdiff_t stride) \
 { \
     ff_ ## OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, \
                                                    stride, 8); \
 } \
 \
 static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                         int stride) \
+                                         ptrdiff_t stride) \
 { \
     uint64_t temp[8]; \
     uint8_t * const half = (uint8_t*)temp; \
@@ -785,7 +785,7 @@ static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, \
 } \
 \
 static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                         int stride) \
+                                         ptrdiff_t stride) \
 { \
     uint64_t temp[8]; \
     uint8_t * const half = (uint8_t*)temp; \
@@ -796,14 +796,14 @@ static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, \
 } \
 \
 static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                         int stride) \
+                                         ptrdiff_t stride) \
 { \
     ff_ ## OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, \
                                                    stride, stride); \
 } \
 \
 static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                         int stride) \
+                                         ptrdiff_t stride) \
 { \
     uint64_t temp[8]; \
     uint8_t * const half = (uint8_t*)temp; \
@@ -814,7 +814,7 @@ static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, \
 } \
 \
 static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                         int stride) \
+                                         ptrdiff_t stride) \
 { \
     uint64_t half[8 + 9]; \
     uint8_t * const halfH = ((uint8_t*)half) + 64; \
@@ -829,7 +829,7 @@ static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, \
 } \
 \
 static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                         int stride) \
+                                         ptrdiff_t stride) \
 { \
     uint64_t half[8 + 9]; \
     uint8_t * const halfH = ((uint8_t*)half) + 64; \
@@ -844,7 +844,7 @@ static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, \
 } \
 \
 static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                         int stride) \
+                                         ptrdiff_t stride) \
 { \
     uint64_t half[8 + 9]; \
     uint8_t * const halfH = ((uint8_t*)half) + 64; \
@@ -859,7 +859,7 @@ static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, \
 } \
 \
 static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                         int stride) \
+                                         ptrdiff_t stride) \
 { \
     uint64_t half[8 + 9]; \
     uint8_t * const halfH = ((uint8_t*)half) + 64; \
@@ -874,7 +874,7 @@ static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, \
 } \
 \
 static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                         int stride) \
+                                         ptrdiff_t stride) \
 { \
     uint64_t half[8 + 9]; \
     uint8_t * const halfH = ((uint8_t*)half) + 64; \
@@ -887,7 +887,7 @@ static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, \
 } \
 \
 static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                         int stride) \
+                                         ptrdiff_t stride) \
 { \
     uint64_t half[8 + 9]; \
     uint8_t * const halfH = ((uint8_t*)half) + 64; \
@@ -900,7 +900,7 @@ static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, \
 } \
 \
 static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                         int stride) \
+                                         ptrdiff_t stride) \
 { \
     uint64_t half[8 + 9]; \
     uint8_t * const halfH = ((uint8_t*)half); \
@@ -913,7 +913,7 @@ static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, \
 } \
 \
 static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                         int stride) \
+                                         ptrdiff_t stride) \
 { \
     uint64_t half[8 + 9]; \
     uint8_t * const halfH = ((uint8_t*)half); \
@@ -926,7 +926,7 @@ static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, \
 } \
 \
 static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                         int stride) \
+                                         ptrdiff_t stride) \
 { \
     uint64_t half[9]; \
     uint8_t * const halfH = ((uint8_t*)half); \
@@ -937,13 +937,13 @@ static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, \
 } \
 \
 static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, \
-                                           int stride) \
+                                           ptrdiff_t stride) \
 { \
     ff_ ## OPNAME ## pixels16_ ## MMX(dst, src, stride, 16); \
 } \
 \
 static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                          int stride) \
+                                          ptrdiff_t stride) \
 { \
     uint64_t temp[32]; \
     uint8_t * const half = (uint8_t*)temp; \
@@ -954,14 +954,14 @@ static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, \
 } \
 \
 static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                          int stride) \
+                                          ptrdiff_t stride) \
 { \
     ff_ ## OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, \
                                                     stride, stride, 16);\
 } \
 \
 static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                          int stride) \
+                                          ptrdiff_t stride) \
 { \
     uint64_t temp[32]; \
     uint8_t * const half = (uint8_t*)temp; \
@@ -972,7 +972,7 @@ static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, \
 } \
 \
 static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                          int stride) \
+                                          ptrdiff_t stride) \
 { \
     uint64_t temp[32]; \
     uint8_t * const half = (uint8_t*)temp; \
@@ -983,14 +983,14 @@ static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, \
 } \
 \
 static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                          int stride) \
+                                          ptrdiff_t stride) \
 { \
     ff_ ## OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, \
                                                     stride, stride); \
 } \
 \
 static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                          int stride) \
+                                          ptrdiff_t stride) \
 { \
     uint64_t temp[32]; \
     uint8_t * const half = (uint8_t*)temp; \
@@ -1001,7 +1001,7 @@ static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, \
 } \
 \
 static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                          int stride) \
+                                          ptrdiff_t stride) \
 { \
     uint64_t half[16 * 2 + 17 * 2]; \
     uint8_t * const halfH = ((uint8_t*)half) + 256; \
@@ -1017,7 +1017,7 @@ static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, \
 } \
 \
 static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                          int stride) \
+                                          ptrdiff_t stride) \
 { \
     uint64_t half[16 * 2 + 17 * 2]; \
     uint8_t * const halfH = ((uint8_t*)half) + 256; \
@@ -1033,7 +1033,7 @@ static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, \
 } \
 \
 static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                          int stride) \
+                                          ptrdiff_t stride) \
 { \
     uint64_t half[16 * 2 + 17 * 2]; \
     uint8_t * const halfH = ((uint8_t*)half) + 256; \
@@ -1049,7 +1049,7 @@ static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, \
 } \
 \
 static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                          int stride) \
+                                          ptrdiff_t stride) \
 { \
     uint64_t half[16 * 2 + 17 * 2]; \
     uint8_t * const halfH = ((uint8_t*)half) + 256; \
@@ -1065,7 +1065,7 @@ static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, \
 } \
 \
 static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                          int stride) \
+                                          ptrdiff_t stride) \
 { \
     uint64_t half[16 * 2 + 17 * 2]; \
     uint8_t * const halfH = ((uint8_t*)half) + 256; \
@@ -1079,7 +1079,7 @@ static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, \
 } \
 \
 static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                          int stride) \
+                                          ptrdiff_t stride) \
 { \
     uint64_t half[16 * 2 + 17 * 2]; \
     uint8_t * const halfH = ((uint8_t*)half) + 256; \
@@ -1093,7 +1093,7 @@ static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, \
 } \
 \
 static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                          int stride) \
+                                          ptrdiff_t stride) \
 { \
     uint64_t half[17 * 2]; \
     uint8_t * const halfH = ((uint8_t*)half); \
@@ -1106,7 +1106,7 @@ static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, \
 } \
 \
 static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                          int stride) \
+                                          ptrdiff_t stride) \
 { \
     uint64_t half[17 * 2]; \
     uint8_t * const halfH = ((uint8_t*)half); \
@@ -1119,7 +1119,7 @@ static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, \
 } \
 \
 static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, \
-                                          int stride) \
+                                          ptrdiff_t stride) \
 { \
     uint64_t half[17 * 2]; \
     uint8_t * const halfH = ((uint8_t*)half); \
@@ -1136,19 +1136,19 @@ QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, mmxext)
 
 #if HAVE_INLINE_ASM
 
-void ff_put_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
+void ff_put_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
 {
     put_pixels8_xy2_mmx(dst, src, stride, 8);
 }
-void ff_put_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
+void ff_put_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
 {
     put_pixels16_xy2_mmx(dst, src, stride, 16);
 }
-void ff_avg_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
+void ff_avg_rv40_qpel8_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
 {
     avg_pixels8_xy2_mmx(dst, src, stride, 8);
 }
-void ff_avg_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, int stride)
+void ff_avg_rv40_qpel16_mc33_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
 {
     avg_pixels16_xy2_mmx(dst, src, stride, 16);
 }
@@ -1278,29 +1278,29 @@ void ff_avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels,
 
 #if HAVE_INLINE_ASM
 /* CAVS-specific */
-void ff_put_cavs_qpel8_mc00_mmxext(uint8_t *dst, uint8_t *src, int stride)
+void ff_put_cavs_qpel8_mc00_mmxext(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
 {
     put_pixels8_mmx(dst, src, stride, 8);
 }
 
-void ff_avg_cavs_qpel8_mc00_mmxext(uint8_t *dst, uint8_t *src, int stride)
+void ff_avg_cavs_qpel8_mc00_mmxext(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
 {
     avg_pixels8_mmx(dst, src, stride, 8);
 }
 
-void ff_put_cavs_qpel16_mc00_mmxext(uint8_t *dst, uint8_t *src, int stride)
+void ff_put_cavs_qpel16_mc00_mmxext(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
 {
     put_pixels16_mmx(dst, src, stride, 16);
 }
 
-void ff_avg_cavs_qpel16_mc00_mmxext(uint8_t *dst, uint8_t *src, int stride)
+void ff_avg_cavs_qpel16_mc00_mmxext(uint8_t *dst, uint8_t *src, ptrdiff_t stride)
 {
     avg_pixels16_mmx(dst, src, stride, 16);
 }
 
 /* VC-1-specific */
 void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src,
-                               int stride, int rnd)
+                               ptrdiff_t stride, int rnd)
 {
     put_pixels8_mmx(dst, src, stride, 8);
 }
--
cgit v1.2.3
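As an illustration of the pattern this patch applies (not part of the commit itself, and using a hypothetical scalar helper name rather than anything from dsputil_mmx.c), a minimal sketch of why the stride parameter is widened to ptrdiff_t: strides are added to pointers every row, so on 64-bit targets the offset arithmetic should be pointer-sized rather than a 32-bit int.

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical scalar reference for a put_pixels8-style copy.
     * With ptrdiff_t, the per-row pointer advance is done in
     * pointer-width arithmetic, matching the signatures this
     * patch converts. */
    static void put_pixels8_c_sketch(uint8_t *block, const uint8_t *pixels,
                                     ptrdiff_t line_size, int h)
    {
        for (int y = 0; y < h; y++) {
            for (int x = 0; x < 8; x++)
                block[x] = pixels[x];
            block  += line_size;   /* ptrdiff_t stride added to pointer */
            pixels += line_size;
        }
    }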