author    Vittorio Giovara <vittorio.giovara@gmail.com>  2014-03-28 01:13:54 +0100
committer Diego Biurrun <diego@biurrun.de>  2014-03-29 16:11:09 +0100
commit    53c20f17c78d1d8a0fc2505868f201e69ff59cc5 (patch)
tree      7d543871ede0f0adf63d3c5fcaef907275e42758 /libavcodec/vp8dsp.c
parent    6adf3bc42e36242d487636786e995149bbb849fe (diff)
vp8: K&R formatting cosmetics
Signed-off-by: Diego Biurrun <diego@biurrun.de>
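
This commit is purely cosmetic: it reflows libavcodec/vp8dsp.c to the K&R-derived style used elsewhere in FFmpeg, adding spaces around binary operators, re-wrapping long function and macro signatures to the line limit, and aligning the backslash continuations of multi-line macros, with no behavioral change. A minimal sketch of the operator-spacing rule, shown on a hypothetical helper that is not part of the patch:

/* Illustrative only -- not in the patch. K&R spacing puts spaces
 * around binary operators, as in the added (+) lines below. */
static inline int wht_index(int row, int i)
{
    return row * 4 + i; /* was written as row*4+i before this patch */
}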
Diffstat (limited to 'libavcodec/vp8dsp.c')
-rw-r--r--  libavcodec/vp8dsp.c | 566
1 file changed, 302 insertions(+), 264 deletions(-)
diff --git a/libavcodec/vp8dsp.c b/libavcodec/vp8dsp.c
index ad7c60309c..431c96ea77 100644
--- a/libavcodec/vp8dsp.c
+++ b/libavcodec/vp8dsp.c
@@ -24,9 +24,10 @@
* VP8 compatible video decoder
*/
+#include "libavutil/common.h"
+
#include "mathops.h"
#include "vp8dsp.h"
-#include "libavutil/common.h"
// TODO: Maybe add dequant
static void vp8_luma_dc_wht_c(int16_t block[4][4][16], int16_t dc[16])
@@ -34,26 +35,26 @@ static void vp8_luma_dc_wht_c(int16_t block[4][4][16], int16_t dc[16])
int i, t0, t1, t2, t3;
for (i = 0; i < 4; i++) {
- t0 = dc[0*4+i] + dc[3*4+i];
- t1 = dc[1*4+i] + dc[2*4+i];
- t2 = dc[1*4+i] - dc[2*4+i];
- t3 = dc[0*4+i] - dc[3*4+i];
-
- dc[0*4+i] = t0 + t1;
- dc[1*4+i] = t3 + t2;
- dc[2*4+i] = t0 - t1;
- dc[3*4+i] = t3 - t2;
+ t0 = dc[0 * 4 + i] + dc[3 * 4 + i];
+ t1 = dc[1 * 4 + i] + dc[2 * 4 + i];
+ t2 = dc[1 * 4 + i] - dc[2 * 4 + i];
+ t3 = dc[0 * 4 + i] - dc[3 * 4 + i];
+
+ dc[0 * 4 + i] = t0 + t1;
+ dc[1 * 4 + i] = t3 + t2;
+ dc[2 * 4 + i] = t0 - t1;
+ dc[3 * 4 + i] = t3 - t2;
}
for (i = 0; i < 4; i++) {
- t0 = dc[i*4+0] + dc[i*4+3] + 3; // rounding
- t1 = dc[i*4+1] + dc[i*4+2];
- t2 = dc[i*4+1] - dc[i*4+2];
- t3 = dc[i*4+0] - dc[i*4+3] + 3; // rounding
- dc[i*4+0] = 0;
- dc[i*4+1] = 0;
- dc[i*4+2] = 0;
- dc[i*4+3] = 0;
+ t0 = dc[i * 4 + 0] + dc[i * 4 + 3] + 3; // rounding
+ t1 = dc[i * 4 + 1] + dc[i * 4 + 2];
+ t2 = dc[i * 4 + 1] - dc[i * 4 + 2];
+ t3 = dc[i * 4 + 0] - dc[i * 4 + 3] + 3; // rounding
+ dc[i * 4 + 0] = 0;
+ dc[i * 4 + 1] = 0;
+ dc[i * 4 + 2] = 0;
+ dc[i * 4 + 3] = 0;
block[i][0][0] = (t0 + t1) >> 3;
block[i][1][0] = (t3 + t2) >> 3;
@@ -75,8 +76,8 @@ static void vp8_luma_dc_wht_dc_c(int16_t block[4][4][16], int16_t dc[16])
}
}
-#define MUL_20091(a) ((((a)*20091) >> 16) + (a))
-#define MUL_35468(a) (((a)*35468) >> 16)
+#define MUL_20091(a) ((((a) * 20091) >> 16) + (a))
+#define MUL_35468(a) (((a) * 35468) >> 16)
static void vp8_idct_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
{
@@ -84,32 +85,32 @@ static void vp8_idct_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
int16_t tmp[16];
for (i = 0; i < 4; i++) {
- t0 = block[0*4+i] + block[2*4+i];
- t1 = block[0*4+i] - block[2*4+i];
- t2 = MUL_35468(block[1*4+i]) - MUL_20091(block[3*4+i]);
- t3 = MUL_20091(block[1*4+i]) + MUL_35468(block[3*4+i]);
- block[0*4+i] = 0;
- block[1*4+i] = 0;
- block[2*4+i] = 0;
- block[3*4+i] = 0;
-
- tmp[i*4+0] = t0 + t3;
- tmp[i*4+1] = t1 + t2;
- tmp[i*4+2] = t1 - t2;
- tmp[i*4+3] = t0 - t3;
+ t0 = block[0 * 4 + i] + block[2 * 4 + i];
+ t1 = block[0 * 4 + i] - block[2 * 4 + i];
+ t2 = MUL_35468(block[1 * 4 + i]) - MUL_20091(block[3 * 4 + i]);
+ t3 = MUL_20091(block[1 * 4 + i]) + MUL_35468(block[3 * 4 + i]);
+ block[0 * 4 + i] = 0;
+ block[1 * 4 + i] = 0;
+ block[2 * 4 + i] = 0;
+ block[3 * 4 + i] = 0;
+
+ tmp[i * 4 + 0] = t0 + t3;
+ tmp[i * 4 + 1] = t1 + t2;
+ tmp[i * 4 + 2] = t1 - t2;
+ tmp[i * 4 + 3] = t0 - t3;
}
for (i = 0; i < 4; i++) {
- t0 = tmp[0*4+i] + tmp[2*4+i];
- t1 = tmp[0*4+i] - tmp[2*4+i];
- t2 = MUL_35468(tmp[1*4+i]) - MUL_20091(tmp[3*4+i]);
- t3 = MUL_20091(tmp[1*4+i]) + MUL_35468(tmp[3*4+i]);
+ t0 = tmp[0 * 4 + i] + tmp[2 * 4 + i];
+ t1 = tmp[0 * 4 + i] - tmp[2 * 4 + i];
+ t2 = MUL_35468(tmp[1 * 4 + i]) - MUL_20091(tmp[3 * 4 + i]);
+ t3 = MUL_20091(tmp[1 * 4 + i]) + MUL_35468(tmp[3 * 4 + i]);
dst[0] = av_clip_uint8(dst[0] + ((t0 + t3 + 4) >> 3));
dst[1] = av_clip_uint8(dst[1] + ((t1 + t2 + 4) >> 3));
dst[2] = av_clip_uint8(dst[2] + ((t1 - t2 + 4) >> 3));
dst[3] = av_clip_uint8(dst[3] + ((t0 - t3 + 4) >> 3));
- dst += stride;
+ dst += stride;
}
}
@@ -123,46 +124,49 @@ static void vp8_idct_dc_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
dst[1] = av_clip_uint8(dst[1] + dc);
dst[2] = av_clip_uint8(dst[2] + dc);
dst[3] = av_clip_uint8(dst[3] + dc);
- dst += stride;
+ dst += stride;
}
}
-static void vp8_idct_dc_add4uv_c(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
+static void vp8_idct_dc_add4uv_c(uint8_t *dst, int16_t block[4][16],
+ ptrdiff_t stride)
{
- vp8_idct_dc_add_c(dst+stride*0+0, block[0], stride);
- vp8_idct_dc_add_c(dst+stride*0+4, block[1], stride);
- vp8_idct_dc_add_c(dst+stride*4+0, block[2], stride);
- vp8_idct_dc_add_c(dst+stride*4+4, block[3], stride);
+ vp8_idct_dc_add_c(dst + stride * 0 + 0, block[0], stride);
+ vp8_idct_dc_add_c(dst + stride * 0 + 4, block[1], stride);
+ vp8_idct_dc_add_c(dst + stride * 4 + 0, block[2], stride);
+ vp8_idct_dc_add_c(dst + stride * 4 + 4, block[3], stride);
}
-static void vp8_idct_dc_add4y_c(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
+static void vp8_idct_dc_add4y_c(uint8_t *dst, int16_t block[4][16],
+ ptrdiff_t stride)
{
- vp8_idct_dc_add_c(dst+ 0, block[0], stride);
- vp8_idct_dc_add_c(dst+ 4, block[1], stride);
- vp8_idct_dc_add_c(dst+ 8, block[2], stride);
- vp8_idct_dc_add_c(dst+12, block[3], stride);
+ vp8_idct_dc_add_c(dst + 0, block[0], stride);
+ vp8_idct_dc_add_c(dst + 4, block[1], stride);
+ vp8_idct_dc_add_c(dst + 8, block[2], stride);
+ vp8_idct_dc_add_c(dst + 12, block[3], stride);
}
// because I like only having two parameters to pass functions...
-#define LOAD_PIXELS\
- int av_unused p3 = p[-4*stride];\
- int av_unused p2 = p[-3*stride];\
- int av_unused p1 = p[-2*stride];\
- int av_unused p0 = p[-1*stride];\
- int av_unused q0 = p[ 0*stride];\
- int av_unused q1 = p[ 1*stride];\
- int av_unused q2 = p[ 2*stride];\
- int av_unused q3 = p[ 3*stride];
-
-#define clip_int8(n) (cm[n+0x80]-0x80)
-
-static av_always_inline void filter_common(uint8_t *p, ptrdiff_t stride, int is4tap)
+#define LOAD_PIXELS \
+ int av_unused p3 = p[-4 * stride]; \
+ int av_unused p2 = p[-3 * stride]; \
+ int av_unused p1 = p[-2 * stride]; \
+ int av_unused p0 = p[-1 * stride]; \
+ int av_unused q0 = p[ 0 * stride]; \
+ int av_unused q1 = p[ 1 * stride]; \
+ int av_unused q2 = p[ 2 * stride]; \
+ int av_unused q3 = p[ 3 * stride];
+
+#define clip_int8(n) (cm[n + 0x80] - 0x80)
+
+static av_always_inline void filter_common(uint8_t *p, ptrdiff_t stride,
+ int is4tap)
{
LOAD_PIXELS
int a, f1, f2;
const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
- a = 3*(q0 - p0);
+ a = 3 * (q0 - p0);
if (is4tap)
a += clip_int8(p1 - q1);
@@ -171,45 +175,50 @@ static av_always_inline void filter_common(uint8_t *p, ptrdiff_t stride, int is4
// We deviate from the spec here with c(a+3) >> 3
// since that's what libvpx does.
- f1 = FFMIN(a+4, 127) >> 3;
- f2 = FFMIN(a+3, 127) >> 3;
+ f1 = FFMIN(a + 4, 127) >> 3;
+ f2 = FFMIN(a + 3, 127) >> 3;
// Despite what the spec says, we do need to clamp here to
// be bitexact with libvpx.
- p[-1*stride] = cm[p0 + f2];
- p[ 0*stride] = cm[q0 - f1];
+ p[-1 * stride] = cm[p0 + f2];
+ p[ 0 * stride] = cm[q0 - f1];
// only used for _inner on blocks without high edge variance
if (!is4tap) {
- a = (f1+1)>>1;
- p[-2*stride] = cm[p1 + a];
- p[ 1*stride] = cm[q1 - a];
+ a = (f1 + 1) >> 1;
+ p[-2 * stride] = cm[p1 + a];
+ p[ 1 * stride] = cm[q1 - a];
}
}
static av_always_inline int simple_limit(uint8_t *p, ptrdiff_t stride, int flim)
{
LOAD_PIXELS
- return 2*FFABS(p0-q0) + (FFABS(p1-q1) >> 1) <= flim;
+ return 2 * FFABS(p0 - q0) + (FFABS(p1 - q1) >> 1) <= flim;
}
/**
* E - limit at the macroblock edge
* I - limit for interior difference
*/
-static av_always_inline int normal_limit(uint8_t *p, ptrdiff_t stride, int E, int I)
+static av_always_inline int normal_limit(uint8_t *p, ptrdiff_t stride,
+ int E, int I)
{
LOAD_PIXELS
- return simple_limit(p, stride, E)
- && FFABS(p3-p2) <= I && FFABS(p2-p1) <= I && FFABS(p1-p0) <= I
- && FFABS(q3-q2) <= I && FFABS(q2-q1) <= I && FFABS(q1-q0) <= I;
+ return simple_limit(p, stride, E) &&
+ FFABS(p3 - p2) <= I &&
+ FFABS(p2 - p1) <= I &&
+ FFABS(p1 - p0) <= I &&
+ FFABS(q3 - q2) <= I &&
+ FFABS(q2 - q1) <= I &&
+ FFABS(q1 - q0) <= I;
}
// high edge variance
static av_always_inline int hev(uint8_t *p, ptrdiff_t stride, int thresh)
{
LOAD_PIXELS
- return FFABS(p1-p0) > thresh || FFABS(q1-q0) > thresh;
+ return FFABS(p1 - p0) > thresh || FFABS(q1 - q0) > thresh;
}
static av_always_inline void filter_mbedge(uint8_t *p, ptrdiff_t stride)
@@ -219,67 +228,75 @@ static av_always_inline void filter_mbedge(uint8_t *p, ptrdiff_t stride)
LOAD_PIXELS
- w = clip_int8(p1-q1);
- w = clip_int8(w + 3*(q0-p0));
+ w = clip_int8(p1 - q1);
+ w = clip_int8(w + 3 * (q0 - p0));
- a0 = (27*w + 63) >> 7;
- a1 = (18*w + 63) >> 7;
- a2 = ( 9*w + 63) >> 7;
+ a0 = (27 * w + 63) >> 7;
+ a1 = (18 * w + 63) >> 7;
+ a2 = (9 * w + 63) >> 7;
- p[-3*stride] = cm[p2 + a2];
- p[-2*stride] = cm[p1 + a1];
- p[-1*stride] = cm[p0 + a0];
- p[ 0*stride] = cm[q0 - a0];
- p[ 1*stride] = cm[q1 - a1];
- p[ 2*stride] = cm[q2 - a2];
+ p[-3 * stride] = cm[p2 + a2];
+ p[-2 * stride] = cm[p1 + a1];
+ p[-1 * stride] = cm[p0 + a0];
+ p[ 0 * stride] = cm[q0 - a0];
+ p[ 1 * stride] = cm[q1 - a1];
+ p[ 2 * stride] = cm[q2 - a2];
}
-#define LOOP_FILTER(dir, size, stridea, strideb, maybe_inline) \
-static maybe_inline void vp8_ ## dir ## _loop_filter ## size ## _c(uint8_t *dst, ptrdiff_t stride,\
- int flim_E, int flim_I, int hev_thresh)\
-{\
- int i;\
-\
- for (i = 0; i < size; i++)\
- if (normal_limit(dst+i*stridea, strideb, flim_E, flim_I)) {\
- if (hev(dst+i*stridea, strideb, hev_thresh))\
- filter_common(dst+i*stridea, strideb, 1);\
- else\
- filter_mbedge(dst+i*stridea, strideb);\
- }\
-}\
-\
-static maybe_inline void vp8_ ## dir ## _loop_filter ## size ## _inner_c(uint8_t *dst, ptrdiff_t stride,\
- int flim_E, int flim_I, int hev_thresh)\
-{\
- int i;\
-\
- for (i = 0; i < size; i++)\
- if (normal_limit(dst+i*stridea, strideb, flim_E, flim_I)) {\
- int hv = hev(dst+i*stridea, strideb, hev_thresh);\
- if (hv) \
- filter_common(dst+i*stridea, strideb, 1);\
- else \
- filter_common(dst+i*stridea, strideb, 0);\
- }\
+#define LOOP_FILTER(dir, size, stridea, strideb, maybe_inline) \
+static maybe_inline \
+void vp8_ ## dir ## _loop_filter ## size ## _c(uint8_t *dst, \
+ ptrdiff_t stride, \
+ int flim_E, int flim_I, \
+ int hev_thresh) \
+{ \
+ int i; \
+ for (i = 0; i < size; i++) \
+ if (normal_limit(dst + i * stridea, strideb, flim_E, flim_I)) { \
+ if (hev(dst + i * stridea, strideb, hev_thresh)) \
+ filter_common(dst + i * stridea, strideb, 1); \
+ else \
+ filter_mbedge(dst + i * stridea, strideb); \
+ } \
+} \
+ \
+static maybe_inline \
+void vp8_ ## dir ## _loop_filter ## size ## _inner_c(uint8_t *dst, \
+ ptrdiff_t stride, \
+ int flim_E, int flim_I, \
+ int hev_thresh) \
+{ \
+ int i; \
+ for (i = 0; i < size; i++) \
+ if (normal_limit(dst + i * stridea, strideb, flim_E, flim_I)) { \
+ int hv = hev(dst + i * stridea, strideb, hev_thresh); \
+ if (hv) \
+ filter_common(dst + i * stridea, strideb, 1); \
+ else \
+ filter_common(dst + i * stridea, strideb, 0); \
+ } \
}
-LOOP_FILTER(v, 16, 1, stride,)
-LOOP_FILTER(h, 16, stride, 1,)
-
-#define UV_LOOP_FILTER(dir, stridea, strideb) \
-LOOP_FILTER(dir, 8, stridea, strideb, av_always_inline) \
-static void vp8_ ## dir ## _loop_filter8uv_c(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride,\
- int fE, int fI, int hev_thresh)\
-{\
- vp8_ ## dir ## _loop_filter8_c(dstU, stride, fE, fI, hev_thresh);\
- vp8_ ## dir ## _loop_filter8_c(dstV, stride, fE, fI, hev_thresh);\
-}\
-static void vp8_ ## dir ## _loop_filter8uv_inner_c(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride,\
- int fE, int fI, int hev_thresh)\
-{\
- vp8_ ## dir ## _loop_filter8_inner_c(dstU, stride, fE, fI, hev_thresh);\
- vp8_ ## dir ## _loop_filter8_inner_c(dstV, stride, fE, fI, hev_thresh);\
+LOOP_FILTER(v, 16, 1, stride, )
+LOOP_FILTER(h, 16, stride, 1, )
+
+#define UV_LOOP_FILTER(dir, stridea, strideb) \
+LOOP_FILTER(dir, 8, stridea, strideb, av_always_inline) \
+static void vp8_ ## dir ## _loop_filter8uv_c(uint8_t *dstU, uint8_t *dstV, \
+ ptrdiff_t stride, int fE, \
+ int fI, int hev_thresh) \
+{ \
+ vp8_ ## dir ## _loop_filter8_c(dstU, stride, fE, fI, hev_thresh); \
+ vp8_ ## dir ## _loop_filter8_c(dstV, stride, fE, fI, hev_thresh); \
+} \
+ \
+static void vp8_ ## dir ## _loop_filter8uv_inner_c(uint8_t *dstU, \
+ uint8_t *dstV, \
+ ptrdiff_t stride, int fE, \
+ int fI, int hev_thresh) \
+{ \
+ vp8_ ## dir ## _loop_filter8_inner_c(dstU, stride, fE, fI, hev_thresh); \
+ vp8_ ## dir ## _loop_filter8_inner_c(dstV, stride, fE, fI, hev_thresh); \
}
UV_LOOP_FILTER(v, 1, stride)
@@ -290,8 +307,8 @@ static void vp8_v_loop_filter_simple_c(uint8_t *dst, ptrdiff_t stride, int flim)
int i;
for (i = 0; i < 16; i++)
- if (simple_limit(dst+i, stride, flim))
- filter_common(dst+i, stride, 1);
+ if (simple_limit(dst + i, stride, flim))
+ filter_common(dst + i, stride, 1);
}
static void vp8_h_loop_filter_simple_c(uint8_t *dst, ptrdiff_t stride, int flim)
@@ -299,94 +316,110 @@ static void vp8_h_loop_filter_simple_c(uint8_t *dst, ptrdiff_t stride, int flim)
int i;
for (i = 0; i < 16; i++)
- if (simple_limit(dst+i*stride, 1, flim))
- filter_common(dst+i*stride, 1, 1);
+ if (simple_limit(dst + i * stride, 1, flim))
+ filter_common(dst + i * stride, 1, 1);
}
static const uint8_t subpel_filters[7][6] = {
- { 0, 6, 123, 12, 1, 0 },
- { 2, 11, 108, 36, 8, 1 },
- { 0, 9, 93, 50, 6, 0 },
- { 3, 16, 77, 77, 16, 3 },
- { 0, 6, 50, 93, 9, 0 },
- { 1, 8, 36, 108, 11, 2 },
- { 0, 1, 12, 123, 6, 0 },
+ { 0, 6, 123, 12, 1, 0 },
+ { 2, 11, 108, 36, 8, 1 },
+ { 0, 9, 93, 50, 6, 0 },
+ { 3, 16, 77, 77, 16, 3 },
+ { 0, 6, 50, 93, 9, 0 },
+ { 1, 8, 36, 108, 11, 2 },
+ { 0, 1, 12, 123, 6, 0 },
};
-#define PUT_PIXELS(WIDTH) \
-static void put_vp8_pixels ## WIDTH ##_c(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride, int h, int x, int y) { \
- int i; \
- for (i = 0; i < h; i++, dst+= dststride, src+= srcstride) { \
- memcpy(dst, src, WIDTH); \
- } \
+#define PUT_PIXELS(WIDTH) \
+static void put_vp8_pixels ## WIDTH ## _c(uint8_t *dst, ptrdiff_t dststride, \
+ uint8_t *src, ptrdiff_t srcstride, \
+ int h, int x, int y) \
+{ \
+ int i; \
+ for (i = 0; i < h; i++, dst += dststride, src += srcstride) \
+ memcpy(dst, src, WIDTH); \
}
PUT_PIXELS(16)
PUT_PIXELS(8)
PUT_PIXELS(4)
-#define FILTER_6TAP(src, F, stride) \
- cm[(F[2]*src[x+0*stride] - F[1]*src[x-1*stride] + F[0]*src[x-2*stride] + \
- F[3]*src[x+1*stride] - F[4]*src[x+2*stride] + F[5]*src[x+3*stride] + 64) >> 7]
-
-#define FILTER_4TAP(src, F, stride) \
- cm[(F[2]*src[x+0*stride] - F[1]*src[x-1*stride] + \
- F[3]*src[x+1*stride] - F[4]*src[x+2*stride] + 64) >> 7]
-
-#define VP8_EPEL_H(SIZE, TAPS) \
-static void put_vp8_epel ## SIZE ## _h ## TAPS ## _c(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride, int h, int mx, int my) \
-{ \
- const uint8_t *filter = subpel_filters[mx-1]; \
- const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
- int x, y; \
-\
- for (y = 0; y < h; y++) { \
- for (x = 0; x < SIZE; x++) \
- dst[x] = FILTER_ ## TAPS ## TAP(src, filter, 1); \
- dst += dststride; \
- src += srcstride; \
- } \
+#define FILTER_6TAP(src, F, stride) \
+ cm[(F[2] * src[x + 0 * stride] - F[1] * src[x - 1 * stride] + \
+ F[0] * src[x - 2 * stride] + F[3] * src[x + 1 * stride] - \
+ F[4] * src[x + 2 * stride] + F[5] * src[x + 3 * stride] + 64) >> 7]
+
+#define FILTER_4TAP(src, F, stride) \
+ cm[(F[2] * src[x + 0 * stride] - F[1] * src[x - 1 * stride] + \
+ F[3] * src[x + 1 * stride] - F[4] * src[x + 2 * stride] + 64) >> 7]
+
+#define VP8_EPEL_H(SIZE, TAPS) \
+static void put_vp8_epel ## SIZE ## _h ## TAPS ## _c(uint8_t *dst, \
+ ptrdiff_t dststride, \
+ uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, int my) \
+{ \
+ const uint8_t *filter = subpel_filters[mx - 1]; \
+ const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
+ int x, y; \
+ for (y = 0; y < h; y++) { \
+ for (x = 0; x < SIZE; x++) \
+ dst[x] = FILTER_ ## TAPS ## TAP(src, filter, 1); \
+ dst += dststride; \
+ src += srcstride; \
+ } \
}
-#define VP8_EPEL_V(SIZE, TAPS) \
-static void put_vp8_epel ## SIZE ## _v ## TAPS ## _c(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride, int h, int mx, int my) \
-{ \
- const uint8_t *filter = subpel_filters[my-1]; \
- const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
- int x, y; \
-\
- for (y = 0; y < h; y++) { \
- for (x = 0; x < SIZE; x++) \
- dst[x] = FILTER_ ## TAPS ## TAP(src, filter, srcstride); \
- dst += dststride; \
- src += srcstride; \
- } \
+
+#define VP8_EPEL_V(SIZE, TAPS) \
+static void put_vp8_epel ## SIZE ## _v ## TAPS ## _c(uint8_t *dst, \
+ ptrdiff_t dststride, \
+ uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, int my) \
+{ \
+ const uint8_t *filter = subpel_filters[my - 1]; \
+ const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
+ int x, y; \
+ for (y = 0; y < h; y++) { \
+ for (x = 0; x < SIZE; x++) \
+ dst[x] = FILTER_ ## TAPS ## TAP(src, filter, srcstride); \
+ dst += dststride; \
+ src += srcstride; \
+ } \
}
-#define VP8_EPEL_HV(SIZE, HTAPS, VTAPS) \
-static void put_vp8_epel ## SIZE ## _h ## HTAPS ## v ## VTAPS ## _c(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride, int h, int mx, int my) \
-{ \
- const uint8_t *filter = subpel_filters[mx-1]; \
- const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
- int x, y; \
- uint8_t tmp_array[(2*SIZE+VTAPS-1)*SIZE]; \
- uint8_t *tmp = tmp_array; \
- src -= (2-(VTAPS==4))*srcstride; \
-\
- for (y = 0; y < h+VTAPS-1; y++) { \
- for (x = 0; x < SIZE; x++) \
- tmp[x] = FILTER_ ## HTAPS ## TAP(src, filter, 1); \
- tmp += SIZE; \
- src += srcstride; \
- } \
-\
- tmp = tmp_array + (2-(VTAPS==4))*SIZE; \
- filter = subpel_filters[my-1]; \
-\
- for (y = 0; y < h; y++) { \
- for (x = 0; x < SIZE; x++) \
- dst[x] = FILTER_ ## VTAPS ## TAP(tmp, filter, SIZE); \
- dst += dststride; \
- tmp += SIZE; \
- } \
+
+#define VP8_EPEL_HV(SIZE, HTAPS, VTAPS) \
+static void \
+put_vp8_epel ## SIZE ## _h ## HTAPS ## v ## VTAPS ## _c(uint8_t *dst, \
+ ptrdiff_t dststride, \
+ uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, \
+ int my) \
+{ \
+ const uint8_t *filter = subpel_filters[mx - 1]; \
+ const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; \
+ int x, y; \
+ uint8_t tmp_array[(2 * SIZE + VTAPS - 1) * SIZE]; \
+ uint8_t *tmp = tmp_array; \
+ src -= (2 - (VTAPS == 4)) * srcstride; \
+ \
+ for (y = 0; y < h + VTAPS - 1; y++) { \
+ for (x = 0; x < SIZE; x++) \
+ tmp[x] = FILTER_ ## HTAPS ## TAP(src, filter, 1); \
+ tmp += SIZE; \
+ src += srcstride; \
+ } \
+ tmp = tmp_array + (2 - (VTAPS == 4)) * SIZE; \
+ filter = subpel_filters[my - 1]; \
+ \
+ for (y = 0; y < h; y++) { \
+ for (x = 0; x < SIZE; x++) \
+ dst[x] = FILTER_ ## VTAPS ## TAP(tmp, filter, SIZE); \
+ dst += dststride; \
+ tmp += SIZE; \
+ } \
}
VP8_EPEL_H(16, 4)
@@ -401,6 +434,7 @@ VP8_EPEL_V(4, 4)
VP8_EPEL_V(16, 6)
VP8_EPEL_V(8, 6)
VP8_EPEL_V(4, 6)
+
VP8_EPEL_HV(16, 4, 4)
VP8_EPEL_HV(8, 4, 4)
VP8_EPEL_HV(4, 4, 4)
@@ -414,73 +448,77 @@ VP8_EPEL_HV(16, 6, 6)
VP8_EPEL_HV(8, 6, 6)
VP8_EPEL_HV(4, 6, 6)
-#define VP8_BILINEAR(SIZE) \
-static void put_vp8_bilinear ## SIZE ## _h_c(uint8_t *dst, ptrdiff_t dstride, uint8_t *src, ptrdiff_t sstride, int h, int mx, int my) \
-{ \
- int a = 8-mx, b = mx; \
- int x, y; \
-\
- for (y = 0; y < h; y++) { \
- for (x = 0; x < SIZE; x++) \
- dst[x] = (a*src[x] + b*src[x+1] + 4) >> 3; \
- dst += dstride; \
- src += sstride; \
- } \
-} \
-static void put_vp8_bilinear ## SIZE ## _v_c(uint8_t *dst, ptrdiff_t dstride, uint8_t *src, ptrdiff_t sstride, int h, int mx, int my) \
-{ \
- int c = 8-my, d = my; \
- int x, y; \
-\
- for (y = 0; y < h; y++) { \
- for (x = 0; x < SIZE; x++) \
- dst[x] = (c*src[x] + d*src[x+sstride] + 4) >> 3; \
- dst += dstride; \
- src += sstride; \
- } \
-} \
-\
-static void put_vp8_bilinear ## SIZE ## _hv_c(uint8_t *dst, ptrdiff_t dstride, uint8_t *src, ptrdiff_t sstride, int h, int mx, int my) \
-{ \
- int a = 8-mx, b = mx; \
- int c = 8-my, d = my; \
- int x, y; \
- uint8_t tmp_array[(2*SIZE+1)*SIZE]; \
- uint8_t *tmp = tmp_array; \
-\
- for (y = 0; y < h+1; y++) { \
- for (x = 0; x < SIZE; x++) \
- tmp[x] = (a*src[x] + b*src[x+1] + 4) >> 3; \
- tmp += SIZE; \
- src += sstride; \
- } \
-\
- tmp = tmp_array; \
-\
- for (y = 0; y < h; y++) { \
- for (x = 0; x < SIZE; x++) \
- dst[x] = (c*tmp[x] + d*tmp[x+SIZE] + 4) >> 3; \
- dst += dstride; \
- tmp += SIZE; \
- } \
+#define VP8_BILINEAR(SIZE) \
+static void put_vp8_bilinear ## SIZE ## _h_c(uint8_t *dst, ptrdiff_t dstride, \
+ uint8_t *src, ptrdiff_t sstride, \
+ int h, int mx, int my) \
+{ \
+ int a = 8 - mx, b = mx; \
+ int x, y; \
+ for (y = 0; y < h; y++) { \
+ for (x = 0; x < SIZE; x++) \
+ dst[x] = (a * src[x] + b * src[x + 1] + 4) >> 3; \
+ dst += dstride; \
+ src += sstride; \
+ } \
+} \
+ \
+static void put_vp8_bilinear ## SIZE ## _v_c(uint8_t *dst, ptrdiff_t dstride, \
+ uint8_t *src, ptrdiff_t sstride, \
+ int h, int mx, int my) \
+{ \
+ int c = 8 - my, d = my; \
+ int x, y; \
+ for (y = 0; y < h; y++) { \
+ for (x = 0; x < SIZE; x++) \
+ dst[x] = (c * src[x] + d * src[x + sstride] + 4) >> 3; \
+ dst += dstride; \
+ src += sstride; \
+ } \
+} \
+ \
+static void put_vp8_bilinear ## SIZE ## _hv_c(uint8_t *dst, \
+ ptrdiff_t dstride, \
+ uint8_t *src, \
+ ptrdiff_t sstride, \
+ int h, int mx, int my) \
+{ \
+ int a = 8 - mx, b = mx; \
+ int c = 8 - my, d = my; \
+ int x, y; \
+ uint8_t tmp_array[(2 * SIZE + 1) * SIZE]; \
+ uint8_t *tmp = tmp_array; \
+ for (y = 0; y < h + 1; y++) { \
+ for (x = 0; x < SIZE; x++) \
+ tmp[x] = (a * src[x] + b * src[x + 1] + 4) >> 3; \
+ tmp += SIZE; \
+ src += sstride; \
+ } \
+ tmp = tmp_array; \
+ for (y = 0; y < h; y++) { \
+ for (x = 0; x < SIZE; x++) \
+ dst[x] = (c * tmp[x] + d * tmp[x + SIZE] + 4) >> 3; \
+ dst += dstride; \
+ tmp += SIZE; \
+ } \
}
VP8_BILINEAR(16)
VP8_BILINEAR(8)
VP8_BILINEAR(4)
-#define VP8_MC_FUNC(IDX, SIZE) \
- dsp->put_vp8_epel_pixels_tab[IDX][0][0] = put_vp8_pixels ## SIZE ## _c; \
- dsp->put_vp8_epel_pixels_tab[IDX][0][1] = put_vp8_epel ## SIZE ## _h4_c; \
- dsp->put_vp8_epel_pixels_tab[IDX][0][2] = put_vp8_epel ## SIZE ## _h6_c; \
- dsp->put_vp8_epel_pixels_tab[IDX][1][0] = put_vp8_epel ## SIZE ## _v4_c; \
+#define VP8_MC_FUNC(IDX, SIZE) \
+ dsp->put_vp8_epel_pixels_tab[IDX][0][0] = put_vp8_pixels ## SIZE ## _c; \
+ dsp->put_vp8_epel_pixels_tab[IDX][0][1] = put_vp8_epel ## SIZE ## _h4_c; \
+ dsp->put_vp8_epel_pixels_tab[IDX][0][2] = put_vp8_epel ## SIZE ## _h6_c; \
+ dsp->put_vp8_epel_pixels_tab[IDX][1][0] = put_vp8_epel ## SIZE ## _v4_c; \
dsp->put_vp8_epel_pixels_tab[IDX][1][1] = put_vp8_epel ## SIZE ## _h4v4_c; \
dsp->put_vp8_epel_pixels_tab[IDX][1][2] = put_vp8_epel ## SIZE ## _h6v4_c; \
- dsp->put_vp8_epel_pixels_tab[IDX][2][0] = put_vp8_epel ## SIZE ## _v6_c; \
+ dsp->put_vp8_epel_pixels_tab[IDX][2][0] = put_vp8_epel ## SIZE ## _v6_c; \
dsp->put_vp8_epel_pixels_tab[IDX][2][1] = put_vp8_epel ## SIZE ## _h4v6_c; \
dsp->put_vp8_epel_pixels_tab[IDX][2][2] = put_vp8_epel ## SIZE ## _h6v6_c
-#define VP8_BILINEAR_MC_FUNC(IDX, SIZE) \
+#define VP8_BILINEAR_MC_FUNC(IDX, SIZE) \
dsp->put_vp8_bilinear_pixels_tab[IDX][0][0] = put_vp8_pixels ## SIZE ## _c; \
dsp->put_vp8_bilinear_pixels_tab[IDX][0][1] = put_vp8_bilinear ## SIZE ## _h_c; \
dsp->put_vp8_bilinear_pixels_tab[IDX][0][2] = put_vp8_bilinear ## SIZE ## _h_c; \