author     Michael Niedermayer <michaelni@gmx.at>  2012-03-03 00:09:47 +0100
committer  Michael Niedermayer <michaelni@gmx.at>  2012-03-03 00:23:10 +0100
commit     268098d8b2a6e3dd84be788a2cd6fda10f7b3e71 (patch)
tree       afd9e17a980920b66edb38331e5a608308da711d /libavcodec
parent     689f65126be8a55e8a1e706cb56b19bb975c20ce (diff)
parent     9d87374ec0f382c8394ad511243db6980afa42af (diff)
Merge remote-tracking branch 'qatar/master'
* qatar/master: (29 commits)
  amrwb: remove duplicate arguments from extrapolate_isf().
  amrwb: error out early if mode is invalid.
  h264: change underread for 10bit QPEL to overread.
  matroska: check buffer size for RM-style byte reordering.
  vp8: disable mmx functions with sse/sse2 counterparts on x86-64.
  vp8: change int stride to ptrdiff_t stride.
  wma: fix invalid buffer size assumptions causing random overreads.
  Windows Media Audio Lossless decoder
  rv10/20: Fix slice overflow with checked bitstream reader.
  h263dec: Disallow width/height changing with frame threads.
  rv10/20: Fix a buffer overread caused by losing track of the remaining buffer size.
  rmdec: Honor .RMF tag size rather than assuming 18.
  g722: Fix the QMF scaling
  r3d: don't set codec timebase.
  electronicarts: set timebase for tgv video.
  electronicarts: parse the framerate for cmv video.
  ogg: don't set codec timebase
  electronicarts: don't set codec timebase
  avs: don't set codec timebase
  wavpack: Fix an integer overflow
  ...

Conflicts:
    libavcodec/arm/vp8dsp_init_arm.c
    libavcodec/fraps.c
    libavcodec/h264.c
    libavcodec/mpeg4videodec.c
    libavcodec/mpegvideo.c
    libavcodec/msmpeg4.c
    libavcodec/pnmdec.c
    libavcodec/qpeg.c
    libavcodec/rawenc.c
    libavcodec/ulti.c
    libavcodec/vcr1.c
    libavcodec/version.h
    libavcodec/wmalosslessdec.c
    libavformat/electronicarts.c
    libswscale/ppc/yuv2rgb_altivec.c
    tests/ref/acodec/g722
    tests/ref/fate/ea-cmv

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Diffstat (limited to 'libavcodec')
-rw-r--r--  libavcodec/Makefile                 |  12
-rw-r--r--  libavcodec/a64multienc.c            |   2
-rw-r--r--  libavcodec/amrwbdec.c               |  37
-rw-r--r--  libavcodec/arm/vp8dsp_init_arm.c    |  60
-rw-r--r--  libavcodec/asv1.c                   |   2
-rw-r--r--  libavcodec/avs.c                    |   4
-rw-r--r--  libavcodec/bmp.c                    |   4
-rw-r--r--  libavcodec/bmpenc.c                 |   6
-rw-r--r--  libavcodec/cavsdec.c                |  16
-rw-r--r--  libavcodec/error_resilience.c       |   6
-rw-r--r--  libavcodec/fraps.c                  |   4
-rw-r--r--  libavcodec/g722dec.c                |   4
-rw-r--r--  libavcodec/g722enc.c                |   4
-rw-r--r--  libavcodec/gif.c                    |   2
-rw-r--r--  libavcodec/h261dec.c                |   3
-rw-r--r--  libavcodec/h263dec.c                |   6
-rw-r--r--  libavcodec/h264.c                   |  27
-rw-r--r--  libavcodec/h264_direct.c            |   3
-rw-r--r--  libavcodec/indeo2.c                 |   4
-rw-r--r--  libavcodec/jpeglsenc.c              |   2
-rw-r--r--  libavcodec/ljpegenc.c               |   2
-rw-r--r--  libavcodec/loco.c                   |   2
-rw-r--r--  libavcodec/mdec.c                   |   2
-rw-r--r--  libavcodec/mjpegdec.c               |   2
-rw-r--r--  libavcodec/mpeg12.c                 |   6
-rw-r--r--  libavcodec/mpeg4videodec.c          |   4
-rw-r--r--  libavcodec/mpegvideo.c              |  42
-rw-r--r--  libavcodec/mpegvideo_enc.c          |  12
-rw-r--r--  libavcodec/msmpeg4.c                | 700
-rw-r--r--  libavcodec/msmpeg4.h                |  12
-rw-r--r--  libavcodec/msmpeg4data.c            |   4
-rw-r--r--  libavcodec/msmpeg4data.h            |   4
-rw-r--r--  libavcodec/msmpeg4enc.c             | 692
-rw-r--r--  libavcodec/pamenc.c                 |   2
-rw-r--r--  libavcodec/pnm.c                    |   4
-rw-r--r--  libavcodec/pnmdec.c                 |   4
-rw-r--r--  libavcodec/pnmenc.c                 |   2
-rw-r--r--  libavcodec/ppc/vp8dsp_altivec.c     |  16
-rw-r--r--  libavcodec/qdrw.c                   |   2
-rw-r--r--  libavcodec/qpeg.c                   |   8
-rw-r--r--  libavcodec/rawdec.c                 |   4
-rw-r--r--  libavcodec/rawenc.c                 |   2
-rw-r--r--  libavcodec/rv10.c                   |  38
-rw-r--r--  libavcodec/rv34.c                   |   6
-rw-r--r--  libavcodec/svq1dec.c                |   2
-rw-r--r--  libavcodec/svq1enc.c                |   4
-rw-r--r--  libavcodec/svq3.c                   |   6
-rw-r--r--  libavcodec/targa.c                  |   8
-rw-r--r--  libavcodec/tiff.c                   |   8
-rw-r--r--  libavcodec/tiffenc.c                |   2
-rw-r--r--  libavcodec/truemotion2.c            |   2
-rw-r--r--  libavcodec/ulti.c                   |   2
-rw-r--r--  libavcodec/utils.c                  |   6
-rw-r--r--  libavcodec/vc1dec.c                 |   6
-rw-r--r--  libavcodec/vcr1.c                   |   8
-rw-r--r--  libavcodec/version.h                |   2
-rw-r--r--  libavcodec/vp8dsp.c                 |  44
-rw-r--r--  libavcodec/vp8dsp.h                 |  45
-rw-r--r--  libavcodec/wma.h                    |   2
-rw-r--r--  libavcodec/wmadec.c                 |  13
-rw-r--r--  libavcodec/wmalosslessdec.c         | 982
-rw-r--r--  libavcodec/wnv1.c                   |   2
-rw-r--r--  libavcodec/x86/h264_qpel_10bit.asm  |   2
-rw-r--r--  libavcodec/x86/vp8dsp-init.c        | 213
-rw-r--r--  libavcodec/x86/vp8dsp.asm           |  15
-rw-r--r--  libavcodec/xl.c                     |   2
-rw-r--r--  libavcodec/zmbvenc.c                |   2
67 files changed, 1493 insertions, 1665 deletions
diff --git a/libavcodec/Makefile b/libavcodec/Makefile
index 880e30b07e..44dafb9626 100644
--- a/libavcodec/Makefile
+++ b/libavcodec/Makefile
@@ -299,12 +299,14 @@ OBJS-$(CONFIG_MPEG4_VAAPI_HWACCEL) += vaapi_mpeg4.o
OBJS-$(CONFIG_MSMPEG4V1_DECODER) += msmpeg4.o msmpeg4data.o
OBJS-$(CONFIG_MSMPEG4V2_DECODER) += msmpeg4.o msmpeg4data.o h263dec.o \
h263.o ituh263dec.o mpeg4videodec.o
-OBJS-$(CONFIG_MSMPEG4V2_ENCODER) += msmpeg4.o msmpeg4data.o h263dec.o \
- h263.o ituh263dec.o mpeg4videodec.o
+OBJS-$(CONFIG_MSMPEG4V2_ENCODER) += msmpeg4.o msmpeg4enc.o msmpeg4data.o \
+ h263dec.o h263.o ituh263dec.o \
+ mpeg4videodec.o
OBJS-$(CONFIG_MSMPEG4V3_DECODER) += msmpeg4.o msmpeg4data.o h263dec.o \
h263.o ituh263dec.o mpeg4videodec.o
-OBJS-$(CONFIG_MSMPEG4V3_ENCODER) += msmpeg4.o msmpeg4data.o h263dec.o \
- h263.o ituh263dec.o mpeg4videodec.o
+OBJS-$(CONFIG_MSMPEG4V3_ENCODER) += msmpeg4.o msmpeg4enc.o msmpeg4data.o \
+ h263dec.o h263.o ituh263dec.o \
+ mpeg4videodec.o
OBJS-$(CONFIG_MSRLE_DECODER) += msrle.o msrledec.o
OBJS-$(CONFIG_MSVIDEO1_DECODER) += msvideo1.o
OBJS-$(CONFIG_MSVIDEO1_ENCODER) += msvideo1enc.o elbg.o
@@ -471,7 +473,7 @@ OBJS-$(CONFIG_WMV2_DECODER) += wmv2dec.o wmv2.o \
msmpeg4.o msmpeg4data.o \
intrax8.o intrax8dsp.o
OBJS-$(CONFIG_WMV2_ENCODER) += wmv2enc.o wmv2.o \
- msmpeg4.o msmpeg4data.o \
+ msmpeg4.o msmpeg4enc.o msmpeg4data.o \
mpeg4videodec.o ituh263dec.o h263dec.o
OBJS-$(CONFIG_WNV1_DECODER) += wnv1.o
OBJS-$(CONFIG_WS_SND1_DECODER) += ws-snd1.o
diff --git a/libavcodec/a64multienc.c b/libavcodec/a64multienc.c
index 0d88f72e34..a432069d26 100644
--- a/libavcodec/a64multienc.c
+++ b/libavcodec/a64multienc.c
@@ -246,7 +246,7 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pict, int *got_packet)
{
A64Context *c = avctx->priv_data;
- AVFrame *const p = (AVFrame *) & c->picture;
+ AVFrame *const p = &c->picture;
int frame;
int x, y;
diff --git a/libavcodec/amrwbdec.c b/libavcodec/amrwbdec.c
index 524979d755..663fd0f2e9 100644
--- a/libavcodec/amrwbdec.c
+++ b/libavcodec/amrwbdec.c
@@ -898,10 +898,10 @@ static float auto_correlation(float *diff_isf, float mean, int lag)
* Extrapolate a ISF vector to the 16kHz range (20th order LP)
* used at mode 6k60 LP filter for the high frequency band.
*
- * @param[out] out Buffer for extrapolated isf
- * @param[in] isf Input isf vector
+ * @param[out] isf Buffer for extrapolated isf; contains LP_ORDER
+ * values on input
*/
-static void extrapolate_isf(float out[LP_ORDER_16k], float isf[LP_ORDER])
+static void extrapolate_isf(float isf[LP_ORDER_16k])
{
float diff_isf[LP_ORDER - 2], diff_mean;
float *diff_hi = diff_isf - LP_ORDER + 1; // diff array for extrapolated indexes
@@ -909,8 +909,7 @@ static void extrapolate_isf(float out[LP_ORDER_16k], float isf[LP_ORDER])
float est, scale;
int i, i_max_corr;
- memcpy(out, isf, (LP_ORDER - 1) * sizeof(float));
- out[LP_ORDER_16k - 1] = isf[LP_ORDER - 1];
+ isf[LP_ORDER_16k - 1] = isf[LP_ORDER - 1];
/* Calculate the difference vector */
for (i = 0; i < LP_ORDER - 2; i++)
@@ -931,16 +930,16 @@ static void extrapolate_isf(float out[LP_ORDER_16k], float isf[LP_ORDER])
i_max_corr++;
for (i = LP_ORDER - 1; i < LP_ORDER_16k - 1; i++)
- out[i] = isf[i - 1] + isf[i - 1 - i_max_corr]
+ isf[i] = isf[i - 1] + isf[i - 1 - i_max_corr]
- isf[i - 2 - i_max_corr];
/* Calculate an estimate for ISF(18) and scale ISF based on the error */
- est = 7965 + (out[2] - out[3] - out[4]) / 6.0;
- scale = 0.5 * (FFMIN(est, 7600) - out[LP_ORDER - 2]) /
- (out[LP_ORDER_16k - 2] - out[LP_ORDER - 2]);
+ est = 7965 + (isf[2] - isf[3] - isf[4]) / 6.0;
+ scale = 0.5 * (FFMIN(est, 7600) - isf[LP_ORDER - 2]) /
+ (isf[LP_ORDER_16k - 2] - isf[LP_ORDER - 2]);
for (i = LP_ORDER - 1; i < LP_ORDER_16k - 1; i++)
- diff_hi[i] = scale * (out[i] - out[i - 1]);
+ diff_hi[i] = scale * (isf[i] - isf[i - 1]);
/* Stability insurance */
for (i = LP_ORDER; i < LP_ORDER_16k - 1; i++)
@@ -952,11 +951,11 @@ static void extrapolate_isf(float out[LP_ORDER_16k], float isf[LP_ORDER])
}
for (i = LP_ORDER - 1; i < LP_ORDER_16k - 1; i++)
- out[i] = out[i - 1] + diff_hi[i] * (1.0f / (1 << 15));
+ isf[i] = isf[i - 1] + diff_hi[i] * (1.0f / (1 << 15));
/* Scale the ISF vector for 16000 Hz */
for (i = 0; i < LP_ORDER_16k - 1; i++)
- out[i] *= 0.8;
+ isf[i] *= 0.8;
}
/**
@@ -1003,7 +1002,7 @@ static void hb_synthesis(AMRWBContext *ctx, int subframe, float *samples,
ff_weighted_vector_sumf(e_isf, isf_past, isf, isfp_inter[subframe],
1.0 - isfp_inter[subframe], LP_ORDER);
- extrapolate_isf(e_isf, e_isf);
+ extrapolate_isf(e_isf);
e_isf[LP_ORDER_16k - 1] *= 2.0;
ff_acelp_lsf2lspd(e_isp, e_isf, LP_ORDER_16k);
@@ -1095,23 +1094,27 @@ static int amrwb_decode_frame(AVCodecContext *avctx, void *data,
buf_out = (float *)ctx->avframe.data[0];
header_size = decode_mime_header(ctx, buf);
+ if (ctx->fr_cur_mode > MODE_SID) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Invalid mode %d\n", ctx->fr_cur_mode);
+ return AVERROR_INVALIDDATA;
+ }
expected_fr_size = ((cf_sizes_wb[ctx->fr_cur_mode] + 7) >> 3) + 1;
if (buf_size < expected_fr_size) {
av_log(avctx, AV_LOG_ERROR,
"Frame too small (%d bytes). Truncated file?\n", buf_size);
*got_frame_ptr = 0;
- return buf_size;
+ return AVERROR_INVALIDDATA;
}
if (!ctx->fr_quality || ctx->fr_cur_mode > MODE_SID)
av_log(avctx, AV_LOG_ERROR, "Encountered a bad or corrupted frame\n");
- if (ctx->fr_cur_mode == MODE_SID) /* Comfort noise frame */
+ if (ctx->fr_cur_mode == MODE_SID) { /* Comfort noise frame */
av_log_missing_feature(avctx, "SID mode", 1);
-
- if (ctx->fr_cur_mode >= MODE_SID)
return -1;
+ }
ff_amr_bit_reorder((uint16_t *) &ctx->frame, sizeof(AMRWBFrame),
buf + header_size, amr_bit_orderings_by_mode[ctx->fr_cur_mode]);
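[Note] The amrwbdec.c hunks above do two things: extrapolate_isf() now works in place on a single LP_ORDER_16k-sized buffer, and out-of-range modes are rejected before fr_cur_mode is used to index cf_sizes_wb[]. A minimal, self-contained sketch of the validate-before-index half is below; MODE_SID and the table contents are stand-ins recalled from amrwbdata.h and should be treated as illustrative, not authoritative.

    #include <errno.h>
    #include <stdint.h>

    #define MODE_SID 9   /* highest valid mode index (stand-in for amrwbdata.h) */

    /* Hypothetical stand-in for cf_sizes_wb[]: per-mode payload size in bits. */
    static const uint16_t cf_sizes_wb[MODE_SID + 1] = {
        132, 177, 253, 285, 317, 365, 397, 461, 477, 40
    };

    /* Expected frame size in bytes for a mode read from the bitstream, or a
     * negative error if the mode is out of range.  Rejecting the mode before
     * the table lookup is exactly what the hunk above adds. */
    static int expected_frame_size(unsigned mode)
    {
        if (mode > MODE_SID)
            return -EINVAL;
        return ((cf_sizes_wb[mode] + 7) >> 3) + 1;  /* bits -> bytes, +1 header */
    }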
diff --git a/libavcodec/arm/vp8dsp_init_arm.c b/libavcodec/arm/vp8dsp_init_arm.c
index 14021c954d..91f0dc95b4 100644
--- a/libavcodec/arm/vp8dsp_init_arm.c
+++ b/libavcodec/arm/vp8dsp_init_arm.c
@@ -23,87 +23,87 @@ void ff_vp8_luma_dc_wht_dc_armv6(DCTELEM block[4][4][16], DCTELEM dc[16]);
#define idct_funcs(opt) \
void ff_vp8_luma_dc_wht_ ## opt(DCTELEM block[4][4][16], DCTELEM dc[16]); \
-void ff_vp8_idct_add_ ## opt(uint8_t *dst, DCTELEM block[16], int stride); \
-void ff_vp8_idct_dc_add_ ## opt(uint8_t *dst, DCTELEM block[16], int stride); \
-void ff_vp8_idct_dc_add4y_ ## opt(uint8_t *dst, DCTELEM block[4][16], int stride); \
-void ff_vp8_idct_dc_add4uv_ ## opt(uint8_t *dst, DCTELEM block[4][16], int stride)
+void ff_vp8_idct_add_ ## opt(uint8_t *dst, DCTELEM block[16], ptrdiff_t stride); \
+void ff_vp8_idct_dc_add_ ## opt(uint8_t *dst, DCTELEM block[16], ptrdiff_t stride); \
+void ff_vp8_idct_dc_add4y_ ## opt(uint8_t *dst, DCTELEM block[4][16], ptrdiff_t stride); \
+void ff_vp8_idct_dc_add4uv_ ## opt(uint8_t *dst, DCTELEM block[4][16], ptrdiff_t stride)
idct_funcs(neon);
idct_funcs(armv6);
-void ff_vp8_v_loop_filter16_neon(uint8_t *dst, int stride,
+void ff_vp8_v_loop_filter16_neon(uint8_t *dst, ptrdiff_t stride,
int flim_E, int flim_I, int hev_thresh);
-void ff_vp8_h_loop_filter16_neon(uint8_t *dst, int stride,
+void ff_vp8_h_loop_filter16_neon(uint8_t *dst, ptrdiff_t stride,
int flim_E, int flim_I, int hev_thresh);
-void ff_vp8_v_loop_filter8uv_neon(uint8_t *dstU, uint8_t *dstV, int stride,
+void ff_vp8_v_loop_filter8uv_neon(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride,
int flim_E, int flim_I, int hev_thresh);
-void ff_vp8_h_loop_filter8uv_neon(uint8_t *dstU, uint8_t *dstV, int stride,
+void ff_vp8_h_loop_filter8uv_neon(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride,
int flim_E, int flim_I, int hev_thresh);
-void ff_vp8_v_loop_filter16_inner_neon(uint8_t *dst, int stride,
+void ff_vp8_v_loop_filter16_inner_neon(uint8_t *dst, ptrdiff_t stride,
int flim_E, int flim_I, int hev_thresh);
-void ff_vp8_h_loop_filter16_inner_neon(uint8_t *dst, int stride,
+void ff_vp8_h_loop_filter16_inner_neon(uint8_t *dst, ptrdiff_t stride,
int flim_E, int flim_I, int hev_thresh);
void ff_vp8_v_loop_filter8uv_inner_neon(uint8_t *dstU, uint8_t *dstV,
- int stride, int flim_E, int flim_I,
+ ptrdiff_t stride, int flim_E, int flim_I,
int hev_thresh);
void ff_vp8_h_loop_filter8uv_inner_neon(uint8_t *dstU, uint8_t *dstV,
- int stride, int flim_E, int flim_I,
+ ptrdiff_t stride, int flim_E, int flim_I,
int hev_thresh);
-void ff_vp8_v_loop_filter_inner_armv6(uint8_t *dst, int stride,
+void ff_vp8_v_loop_filter_inner_armv6(uint8_t *dst, ptrdiff_t stride,
int flim_E, int flim_I,
int hev_thresh, int count);
-void ff_vp8_h_loop_filter_inner_armv6(uint8_t *dst, int stride,
+void ff_vp8_h_loop_filter_inner_armv6(uint8_t *dst, ptrdiff_t stride,
int flim_E, int flim_I,
int hev_thresh, int count);
-void ff_vp8_v_loop_filter_armv6(uint8_t *dst, int stride,
+void ff_vp8_v_loop_filter_armv6(uint8_t *dst, ptrdiff_t stride,
int flim_E, int flim_I,
int hev_thresh, int count);
-void ff_vp8_h_loop_filter_armv6(uint8_t *dst, int stride,
+void ff_vp8_h_loop_filter_armv6(uint8_t *dst, ptrdiff_t stride,
int flim_E, int flim_I,
int hev_thresh, int count);
-static void ff_vp8_v_loop_filter16_armv6(uint8_t *dst, int stride,
+static void ff_vp8_v_loop_filter16_armv6(uint8_t *dst, ptrdiff_t stride,
int flim_E, int flim_I, int hev_thresh)
{
ff_vp8_v_loop_filter_armv6(dst, stride, flim_E, flim_I, hev_thresh, 4);
}
-static void ff_vp8_h_loop_filter16_armv6(uint8_t *dst, int stride,
+static void ff_vp8_h_loop_filter16_armv6(uint8_t *dst, ptrdiff_t stride,
int flim_E, int flim_I, int hev_thresh)
{
ff_vp8_h_loop_filter_armv6(dst, stride, flim_E, flim_I, hev_thresh, 4);
}
-static void ff_vp8_v_loop_filter8uv_armv6(uint8_t *dstU, uint8_t *dstV, int stride,
+static void ff_vp8_v_loop_filter8uv_armv6(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride,
int flim_E, int flim_I, int hev_thresh)
{
ff_vp8_v_loop_filter_armv6(dstU, stride, flim_E, flim_I, hev_thresh, 2);
ff_vp8_v_loop_filter_armv6(dstV, stride, flim_E, flim_I, hev_thresh, 2);
}
-static void ff_vp8_h_loop_filter8uv_armv6(uint8_t *dstU, uint8_t *dstV, int stride,
+static void ff_vp8_h_loop_filter8uv_armv6(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride,
int flim_E, int flim_I, int hev_thresh)
{
ff_vp8_h_loop_filter_armv6(dstU, stride, flim_E, flim_I, hev_thresh, 2);
ff_vp8_h_loop_filter_armv6(dstV, stride, flim_E, flim_I, hev_thresh, 2);
}
-static void ff_vp8_v_loop_filter16_inner_armv6(uint8_t *dst, int stride,
+static void ff_vp8_v_loop_filter16_inner_armv6(uint8_t *dst, ptrdiff_t stride,
int flim_E, int flim_I, int hev_thresh)
{
ff_vp8_v_loop_filter_inner_armv6(dst, stride, flim_E, flim_I, hev_thresh, 4);
}
-static void ff_vp8_h_loop_filter16_inner_armv6(uint8_t *dst, int stride,
+static void ff_vp8_h_loop_filter16_inner_armv6(uint8_t *dst, ptrdiff_t stride,
int flim_E, int flim_I, int hev_thresh)
{
ff_vp8_h_loop_filter_inner_armv6(dst, stride, flim_E, flim_I, hev_thresh, 4);
}
static void ff_vp8_v_loop_filter8uv_inner_armv6(uint8_t *dstU, uint8_t *dstV,
- int stride, int flim_E, int flim_I,
+ ptrdiff_t stride, int flim_E, int flim_I,
int hev_thresh)
{
ff_vp8_v_loop_filter_inner_armv6(dstU, stride, flim_E, flim_I, hev_thresh, 2);
@@ -111,7 +111,7 @@ static void ff_vp8_v_loop_filter8uv_inner_armv6(uint8_t *dstU, uint8_t *dstV,
}
static void ff_vp8_h_loop_filter8uv_inner_armv6(uint8_t *dstU, uint8_t *dstV,
- int stride, int flim_E, int flim_I,
+ ptrdiff_t stride, int flim_E, int flim_I,
int hev_thresh)
{
ff_vp8_h_loop_filter_inner_armv6(dstU, stride, flim_E, flim_I, hev_thresh, 2);
@@ -119,16 +119,16 @@ static void ff_vp8_h_loop_filter8uv_inner_armv6(uint8_t *dstU, uint8_t *dstV,
}
#define simple_lf_funcs(opt) \
-void ff_vp8_v_loop_filter16_simple_ ## opt(uint8_t *dst, int stride, int flim); \
-void ff_vp8_h_loop_filter16_simple_ ## opt(uint8_t *dst, int stride, int flim)
+void ff_vp8_v_loop_filter16_simple_ ## opt(uint8_t *dst, ptrdiff_t stride, int flim); \
+void ff_vp8_h_loop_filter16_simple_ ## opt(uint8_t *dst, ptrdiff_t stride, int flim)
simple_lf_funcs(neon);
simple_lf_funcs(armv6);
-#define VP8_MC_OPT(n, opt) \
- void ff_put_vp8_##n##_##opt(uint8_t *dst, int dststride, \
- uint8_t *src, int srcstride, \
- int h, int x, int y)
+#define VP8_MC(n) \
+ void ff_put_vp8_##n##_neon(uint8_t *dst, ptrdiff_t dststride, \
+ uint8_t *src, ptrdiff_t srcstride, \
+ int h, int x, int y)
#define VP8_MC(n) \
VP8_MC_OPT(n, neon)
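[Note] These ARM prototype changes follow the "vp8: change int stride to ptrdiff_t stride" commit from the merge list: every DSP entry point now takes a signed, pointer-sized stride so that C fallbacks and assembly implementations share one signature. The sketch below only illustrates the shape of such a signature; the function-pointer type name and the rounding are assumptions, not the actual FFmpeg definitions.

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical function-pointer type mirroring the new prototypes:
     * stride is ptrdiff_t, so dst + y * stride is computed with a signed,
     * pointer-sized operand even when stride is negative. */
    typedef void (*vp8_idct_dc_add_fn)(uint8_t *dst, int16_t block[16],
                                       ptrdiff_t stride);

    /* Plain-C fallback matching that signature (illustrative arithmetic). */
    static void idct_dc_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
    {
        int dc = (block[0] + 4) >> 3;
        block[0] = 0;
        for (int y = 0; y < 4; y++, dst += stride)
            for (int x = 0; x < 4; x++) {
                int v = dst[x] + dc;
                dst[x] = v < 0 ? 0 : v > 255 ? 255 : v;
            }
    }

    /* At init time an optimized version could be slotted in instead: */
    static vp8_idct_dc_add_fn vp8_idct_dc_add = idct_dc_add_c;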
diff --git a/libavcodec/asv1.c b/libavcodec/asv1.c
index 4bcc8d72b1..c123125d46 100644
--- a/libavcodec/asv1.c
+++ b/libavcodec/asv1.c
@@ -454,7 +454,7 @@ static int decode_frame(AVCodecContext *avctx,
}
}
- *picture= *(AVFrame*)&a->picture;
+ *picture = a->picture;
*data_size = sizeof(AVPicture);
emms_c();
diff --git a/libavcodec/avs.c b/libavcodec/avs.c
index 05cb815fd8..4dcbb27fe0 100644
--- a/libavcodec/avs.c
+++ b/libavcodec/avs.c
@@ -51,7 +51,7 @@ avs_decode_frame(AVCodecContext * avctx,
int buf_size = avpkt->size;
AvsContext *const avs = avctx->priv_data;
AVFrame *picture = data;
- AVFrame *const p = (AVFrame *) & avs->picture;
+ AVFrame *const p = &avs->picture;
const uint8_t *table, *vect;
uint8_t *out;
int i, j, x, y, stride, vect_w = 3, vect_h = 3;
@@ -151,7 +151,7 @@ avs_decode_frame(AVCodecContext * avctx,
align_get_bits(&change_map);
}
- *picture = *(AVFrame *) & avs->picture;
+ *picture = avs->picture;
*data_size = sizeof(AVPicture);
return buf_size;
diff --git a/libavcodec/bmp.c b/libavcodec/bmp.c
index b7853d1093..b249f312d4 100644
--- a/libavcodec/bmp.c
+++ b/libavcodec/bmp.c
@@ -27,8 +27,8 @@
static av_cold int bmp_decode_init(AVCodecContext *avctx){
BMPContext *s = avctx->priv_data;
- avcodec_get_frame_defaults((AVFrame*)&s->picture);
- avctx->coded_frame = (AVFrame*)&s->picture;
+ avcodec_get_frame_defaults(&s->picture);
+ avctx->coded_frame = &s->picture;
return 0;
}
diff --git a/libavcodec/bmpenc.c b/libavcodec/bmpenc.c
index 4455af7d8c..1c40b13add 100644
--- a/libavcodec/bmpenc.c
+++ b/libavcodec/bmpenc.c
@@ -34,8 +34,8 @@ static const uint32_t rgb444_masks[] = { 0x0F00, 0x00F0, 0x000F };
static av_cold int bmp_encode_init(AVCodecContext *avctx){
BMPContext *s = avctx->priv_data;
- avcodec_get_frame_defaults((AVFrame*)&s->picture);
- avctx->coded_frame = (AVFrame*)&s->picture;
+ avcodec_get_frame_defaults(&s->picture);
+ avctx->coded_frame = &s->picture;
switch (avctx->pix_fmt) {
case PIX_FMT_BGRA:
@@ -72,7 +72,7 @@ static int bmp_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pict, int *got_packet)
{
BMPContext *s = avctx->priv_data;
- AVFrame * const p= (AVFrame*)&s->picture;
+ AVFrame * const p = &s->picture;
int n_bytes_image, n_bytes_per_row, n_bytes, i, n, hsize, ret;
const uint32_t *pal = NULL;
uint32_t palette256[256];
diff --git a/libavcodec/cavsdec.c b/libavcodec/cavsdec.c
index fb0ec841ce..b06bd53c00 100644
--- a/libavcodec/cavsdec.c
+++ b/libavcodec/cavsdec.c
@@ -501,9 +501,9 @@ static int decode_pic(AVSContext *h) {
}
/* release last B frame */
if(h->picture.f.data[0])
- s->avctx->release_buffer(s->avctx, (AVFrame *)&h->picture);
+ s->avctx->release_buffer(s->avctx, &h->picture.f);
- s->avctx->get_buffer(s->avctx, (AVFrame *)&h->picture);
+ s->avctx->get_buffer(s->avctx, &h->picture.f);
ff_cavs_init_pic(h);
h->picture.poc = get_bits(&s->gb,8)*2;
@@ -592,7 +592,7 @@ static int decode_pic(AVSContext *h) {
}
if(h->pic_type != AV_PICTURE_TYPE_B) {
if(h->DPB[1].f.data[0])
- s->avctx->release_buffer(s->avctx, (AVFrame *)&h->DPB[1]);
+ s->avctx->release_buffer(s->avctx, &h->DPB[1].f);
h->DPB[1] = h->DPB[0];
h->DPB[0] = h->picture;
memset(&h->picture,0,sizeof(Picture));
@@ -656,7 +656,7 @@ static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size,
if (buf_size == 0) {
if (!s->low_delay && h->DPB[0].f.data[0]) {
*data_size = sizeof(AVPicture);
- *picture = *(AVFrame *) &h->DPB[0];
+ *picture = h->DPB[0].f;
}
return 0;
}
@@ -676,9 +676,9 @@ static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size,
case PIC_I_START_CODE:
if(!h->got_keyframe) {
if(h->DPB[0].f.data[0])
- avctx->release_buffer(avctx, (AVFrame *)&h->DPB[0]);
+ avctx->release_buffer(avctx, &h->DPB[0].f);
if(h->DPB[1].f.data[0])
- avctx->release_buffer(avctx, (AVFrame *)&h->DPB[1]);
+ avctx->release_buffer(avctx, &h->DPB[1].f);
h->got_keyframe = 1;
}
case PIC_PB_START_CODE:
@@ -692,12 +692,12 @@ static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size,
*data_size = sizeof(AVPicture);
if(h->pic_type != AV_PICTURE_TYPE_B) {
if(h->DPB[1].f.data[0]) {
- *picture = *(AVFrame *) &h->DPB[1];
+ *picture = h->DPB[1].f;
} else {
*data_size = 0;
}
} else
- *picture = *(AVFrame *) &h->picture;
+ *picture = h->picture.f;
break;
case EXT_START_CODE:
//mpeg_decode_extension(avctx,buf_ptr, input_size);
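[Note] The casts removed in cavsdec.c (and in many of the files that follow) all relied on Picture embedding its AVFrame as the leading member f, so that (AVFrame *)&pic happened to alias pic.f; the replacement simply names the member and lets the compiler type-check it. A toy illustration of the pattern with mock types; AVFrameMock and PictureMock are stand-ins, not the real libavcodec structures.

    /* Simplified mock-ups; the real AVFrame/Picture carry many more fields. */
    typedef struct AVFrameMock {
        unsigned char *data[4];
        int            linesize[4];
    } AVFrameMock;

    typedef struct PictureMock {
        AVFrameMock f;   /* first member, so the old (AVFrame*)&pic cast
                            happened to work purely by struct layout */
        int         poc; /* codec-private bookkeeping follows */
    } PictureMock;

    /* What the hunks above switch to: take the embedded frame explicitly. */
    static AVFrameMock *picture_frame(PictureMock *pic)
    {
        return &pic->f;  /* type-checked, no reliance on member order */
    }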
diff --git a/libavcodec/error_resilience.c b/libavcodec/error_resilience.c
index 2391e57f90..1c1420f93c 100644
--- a/libavcodec/error_resilience.c
+++ b/libavcodec/error_resilience.c
@@ -613,7 +613,7 @@ skip_mean_and_median:
if (s->avctx->codec_id == CODEC_ID_H264) {
// FIXME
} else {
- ff_thread_await_progress((AVFrame *) s->last_picture_ptr,
+ ff_thread_await_progress(&s->last_picture_ptr->f,
mb_y, 0);
}
if (!s->last_picture.f.motion_val[0] ||
@@ -786,7 +786,7 @@ static int is_intra_more_likely(MpegEncContext *s)
if (s->avctx->codec_id == CODEC_ID_H264) {
// FIXME
} else {
- ff_thread_await_progress((AVFrame *) s->last_picture_ptr,
+ ff_thread_await_progress(&s->last_picture_ptr->f,
mb_y, 0);
}
is_intra_likely += s->dsp.sad[0](NULL, last_mb_ptr, mb_ptr , s->linesize, 16);
@@ -1170,7 +1170,7 @@ void ff_er_frame_end(MpegEncContext *s)
if (s->avctx->codec_id == CODEC_ID_H264) {
// FIXME
} else {
- ff_thread_await_progress((AVFrame *) s->next_picture_ptr, mb_y, 0);
+ ff_thread_await_progress(&s->next_picture_ptr->f, mb_y, 0);
}
s->mv[0][0][0] = s->next_picture.f.motion_val[0][xy][0] * time_pb / time_pp;
s->mv[0][0][1] = s->next_picture.f.motion_val[0][xy][1] * time_pb / time_pp;
diff --git a/libavcodec/fraps.c b/libavcodec/fraps.c
index 45d95c9c24..e8c0b18b5d 100644
--- a/libavcodec/fraps.c
+++ b/libavcodec/fraps.c
@@ -62,7 +62,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
FrapsContext * const s = avctx->priv_data;
avcodec_get_frame_defaults(&s->frame);
- avctx->coded_frame = (AVFrame*)&s->frame;
+ avctx->coded_frame = &s->frame;
s->avctx = avctx;
s->tmpbuf = NULL;
@@ -132,7 +132,7 @@ static int decode_frame(AVCodecContext *avctx,
int buf_size = avpkt->size;
FrapsContext * const s = avctx->priv_data;
AVFrame *frame = data;
- AVFrame * const f = (AVFrame*)&s->frame;
+ AVFrame * const f = &s->frame;
uint32_t header;
unsigned int version,header_size;
unsigned int x, y;
diff --git a/libavcodec/g722dec.c b/libavcodec/g722dec.c
index 50a224ba10..72bb0ef3c7 100644
--- a/libavcodec/g722dec.c
+++ b/libavcodec/g722dec.c
@@ -126,8 +126,8 @@ static int g722_decode_frame(AVCodecContext *avctx, void *data,
c->prev_samples[c->prev_samples_pos++] = rlow - rhigh;
ff_g722_apply_qmf(c->prev_samples + c->prev_samples_pos - 24,
&xout1, &xout2);
- *out_buf++ = av_clip_int16(xout1 >> 12);
- *out_buf++ = av_clip_int16(xout2 >> 12);
+ *out_buf++ = av_clip_int16(xout1 >> 11);
+ *out_buf++ = av_clip_int16(xout2 >> 11);
if (c->prev_samples_pos >= PREV_SAMPLES_BUF_SIZE) {
memmove(c->prev_samples, c->prev_samples + c->prev_samples_pos - 22,
22 * sizeof(c->prev_samples[0]));
diff --git a/libavcodec/g722enc.c b/libavcodec/g722enc.c
index a5ae0a5153..ba8ceeff86 100644
--- a/libavcodec/g722enc.c
+++ b/libavcodec/g722enc.c
@@ -136,8 +136,8 @@ static inline void filter_samples(G722Context *c, const int16_t *samples,
c->prev_samples[c->prev_samples_pos++] = samples[0];
c->prev_samples[c->prev_samples_pos++] = samples[1];
ff_g722_apply_qmf(c->prev_samples + c->prev_samples_pos - 24, &xout1, &xout2);
- *xlow = xout1 + xout2 >> 13;
- *xhigh = xout1 - xout2 >> 13;
+ *xlow = xout1 + xout2 >> 14;
+ *xhigh = xout1 - xout2 >> 14;
if (c->prev_samples_pos >= PREV_SAMPLES_BUF_SIZE) {
memmove(c->prev_samples,
c->prev_samples + c->prev_samples_pos - 22,
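[Note] The two G.722 hunks above implement "g722: Fix the QMF scaling" from the commit list: the decoder now shifts the output of the shared ff_g722_apply_qmf() helper right by 11 instead of 12, and the encoder shifts its subband samples by 14 instead of 13. A small sketch of just that shift bookkeeping, not the real QMF:

    #include <stdint.h>

    /* The total shift over an encode+decode round trip is unchanged
     * (11 + 14 == 12 + 13); the factor of two simply moves from the
     * encoder's analysis side to the decoder's synthesis side. */
    static inline int16_t g722_dec_scale(int xout)
    {
        int v = xout >> 11;                                   /* was >> 12 */
        return v < -32768 ? -32768 : v > 32767 ? 32767 : v;   /* av_clip_int16 */
    }

    static inline int g722_enc_scale(int xsum)
    {
        return xsum >> 14;                                    /* was >> 13 */
    }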
diff --git a/libavcodec/gif.c b/libavcodec/gif.c
index 875c5b15dc..2ae63865d2 100644
--- a/libavcodec/gif.c
+++ b/libavcodec/gif.c
@@ -160,7 +160,7 @@ static int gif_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pict, int *got_packet)
{
GIFContext *s = avctx->priv_data;
- AVFrame *const p = (AVFrame *)&s->picture;
+ AVFrame *const p = &s->picture;
uint8_t *outbuf_ptr, *end;
int ret;
diff --git a/libavcodec/h261dec.c b/libavcodec/h261dec.c
index 68e3212a19..e97c76dab4 100644
--- a/libavcodec/h261dec.c
+++ b/libavcodec/h261dec.c
@@ -628,7 +628,8 @@ retry:
assert(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type);
assert(s->current_picture.f.pict_type == s->pict_type);
- *pict= *(AVFrame*)s->current_picture_ptr;
+
+ *pict = s->current_picture_ptr->f;
ff_print_debug_info(s, pict);
*data_size = sizeof(AVFrame);
diff --git a/libavcodec/h263dec.c b/libavcodec/h263dec.c
index ac2aeaf424..b5b6d8c863 100644
--- a/libavcodec/h263dec.c
+++ b/libavcodec/h263dec.c
@@ -357,7 +357,7 @@ uint64_t time= rdtsc();
if (buf_size == 0) {
/* special case for last picture */
if (s->low_delay==0 && s->next_picture_ptr) {
- *pict= *(AVFrame*)s->next_picture_ptr;
+ *pict = s->next_picture_ptr->f;
s->next_picture_ptr= NULL;
*data_size = sizeof(AVFrame);
@@ -727,9 +727,9 @@ intrax8_decoded:
assert(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type);
assert(s->current_picture.f.pict_type == s->pict_type);
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
- *pict= *(AVFrame*)s->current_picture_ptr;
+ *pict = s->current_picture_ptr->f;
} else if (s->last_picture_ptr != NULL) {
- *pict= *(AVFrame*)s->last_picture_ptr;
+ *pict = s->last_picture_ptr->f;
}
if(s->last_picture_ptr || s->low_delay){
diff --git a/libavcodec/h264.c b/libavcodec/h264.c
index 20daa2e0a8..caf4b8ae5f 100644
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -367,14 +367,14 @@ static void await_references(H264Context *h){
nrefs[list]--;
if(!FIELD_PICTURE && ref_field_picture){ // frame referencing two fields
- ff_thread_await_progress((AVFrame*)ref_pic, FFMIN((row >> 1) - !(row&1), pic_height-1), 1);
- ff_thread_await_progress((AVFrame*)ref_pic, FFMIN((row >> 1) , pic_height-1), 0);
+ ff_thread_await_progress(&ref_pic->f, FFMIN((row >> 1) - !(row & 1), pic_height - 1), 1);
+ ff_thread_await_progress(&ref_pic->f, FFMIN((row >> 1), pic_height - 1), 0);
}else if(FIELD_PICTURE && !ref_field_picture){ // field referencing one field of a frame
- ff_thread_await_progress((AVFrame*)ref_pic, FFMIN(row*2 + ref_field , pic_height-1), 0);
+ ff_thread_await_progress(&ref_pic->f, FFMIN(row * 2 + ref_field, pic_height - 1), 0);
}else if(FIELD_PICTURE){
- ff_thread_await_progress((AVFrame*)ref_pic, FFMIN(row, pic_height-1), ref_field);
+ ff_thread_await_progress(&ref_pic->f, FFMIN(row, pic_height - 1), ref_field);
}else{
- ff_thread_await_progress((AVFrame*)ref_pic, FFMIN(row, pic_height-1), 0);
+ ff_thread_await_progress(&ref_pic->f, FFMIN(row, pic_height - 1), 0);
}
}
}
@@ -2507,8 +2507,9 @@ static int field_end(H264Context *h, int in_setup){
s->mb_y= 0;
if (!in_setup && !s->dropable)
- ff_thread_report_progress((AVFrame*)s->current_picture_ptr, (16*s->mb_height >> FIELD_PICTURE) - 1,
- s->picture_structure==PICT_BOTTOM_FIELD);
+ ff_thread_report_progress(&s->current_picture_ptr->f,
+ (16 * s->mb_height >> FIELD_PICTURE) - 1,
+ s->picture_structure == PICT_BOTTOM_FIELD);
if (CONFIG_H264_VDPAU_DECODER && s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
ff_vdpau_h264_set_reference_frames(s);
@@ -2906,8 +2907,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
h->prev_frame_num++;
h->prev_frame_num %= 1<<h->sps.log2_max_frame_num;
s->current_picture_ptr->frame_num= h->prev_frame_num;
- ff_thread_report_progress((AVFrame*)s->current_picture_ptr, INT_MAX, 0);
- ff_thread_report_progress((AVFrame*)s->current_picture_ptr, INT_MAX, 1);
+ ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
+ ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 1);
ff_generate_sliding_window_mmcos(h);
if (ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index) < 0 &&
(s->avctx->err_recognition & AV_EF_EXPLODE))
@@ -3577,8 +3578,8 @@ static void decode_finish_row(H264Context *h){
if (s->dropable) return;
- ff_thread_report_progress((AVFrame*)s->current_picture_ptr, top + height - 1,
- s->picture_structure==PICT_BOTTOM_FIELD);
+ ff_thread_report_progress(&s->current_picture_ptr->f, top + height - 1,
+ s->picture_structure == PICT_BOTTOM_FIELD);
}
static int decode_slice(struct AVCodecContext *avctx, void *arg){
@@ -4067,7 +4068,7 @@ static int decode_frame(AVCodecContext *avctx,
if(out){
*data_size = sizeof(AVFrame);
- *pict= *(AVFrame*)out;
+ *pict = out->f;
}
return buf_index;
@@ -4121,7 +4122,7 @@ not_extra:
*data_size = 0; /* Wait for second field. */
if (h->next_output_pic && (h->next_output_pic->sync || h->sync>1)) {
*data_size = sizeof(AVFrame);
- *pict = *(AVFrame*)h->next_output_pic;
+ *pict = h->next_output_pic->f;
}
}
diff --git a/libavcodec/h264_direct.c b/libavcodec/h264_direct.c
index 079c665509..263832d829 100644
--- a/libavcodec/h264_direct.c
+++ b/libavcodec/h264_direct.c
@@ -154,7 +154,8 @@ static void await_reference_mb_row(H264Context * const h, Picture *ref, int mb_y
//FIXME it can be safe to access mb stuff
//even if pixels aren't deblocked yet
- ff_thread_await_progress((AVFrame*)ref, FFMIN(16*mb_y >> ref_field_picture, ref_height-1),
+ ff_thread_await_progress(&ref->f,
+ FFMIN(16 * mb_y >> ref_field_picture, ref_height - 1),
ref_field_picture && ref_field);
}
diff --git a/libavcodec/indeo2.c b/libavcodec/indeo2.c
index eb58939338..ec5a86ad76 100644
--- a/libavcodec/indeo2.c
+++ b/libavcodec/indeo2.c
@@ -143,7 +143,7 @@ static int ir2_decode_frame(AVCodecContext *avctx,
int buf_size = avpkt->size;
Ir2Context * const s = avctx->priv_data;
AVFrame *picture = data;
- AVFrame * const p= (AVFrame*)&s->picture;
+ AVFrame * const p = &s->picture;
int start;
p->reference = 3;
@@ -188,7 +188,7 @@ static int ir2_decode_frame(AVCodecContext *avctx,
s->picture.data[1], s->picture.linesize[1], ir2_luma_table);
}
- *picture= *(AVFrame*)&s->picture;
+ *picture = s->picture;
*data_size = sizeof(AVPicture);
return buf_size;
diff --git a/libavcodec/jpeglsenc.c b/libavcodec/jpeglsenc.c
index d5135adb30..62a2328a0a 100644
--- a/libavcodec/jpeglsenc.c
+++ b/libavcodec/jpeglsenc.c
@@ -232,7 +232,7 @@ static int encode_picture_ls(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pict, int *got_packet)
{
JpeglsContext * const s = avctx->priv_data;
- AVFrame * const p= (AVFrame*)&s->picture;
+ AVFrame * const p = &s->picture;
const int near = avctx->prediction_method;
PutBitContext pb, pb2;
GetBitContext gb;
diff --git a/libavcodec/ljpegenc.c b/libavcodec/ljpegenc.c
index 00d34ecc9c..b5b443e7cc 100644
--- a/libavcodec/ljpegenc.c
+++ b/libavcodec/ljpegenc.c
@@ -45,7 +45,7 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
MJpegContext * const m = s->mjpeg_ctx;
const int width= s->width;
const int height= s->height;
- AVFrame * const p= (AVFrame*)&s->current_picture;
+ AVFrame * const p = &s->current_picture.f;
const int predictor= avctx->prediction_method+1;
const int mb_width = (width + s->mjpeg_hsample[0] - 1) / s->mjpeg_hsample[0];
const int mb_height = (height + s->mjpeg_vsample[0] - 1) / s->mjpeg_vsample[0];
diff --git a/libavcodec/loco.c b/libavcodec/loco.c
index 75701e970b..eaf7e81cd4 100644
--- a/libavcodec/loco.c
+++ b/libavcodec/loco.c
@@ -166,7 +166,7 @@ static int decode_frame(AVCodecContext *avctx,
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
LOCOContext * const l = avctx->priv_data;
- AVFrame * const p= (AVFrame*)&l->pic;
+ AVFrame * const p = &l->pic;
int decoded;
if(p->data[0])
diff --git a/libavcodec/mdec.c b/libavcodec/mdec.c
index 9a417ee45b..03361c7c79 100644
--- a/libavcodec/mdec.c
+++ b/libavcodec/mdec.c
@@ -243,7 +243,7 @@ static av_cold int decode_init(AVCodecContext *avctx){
static av_cold int decode_init_thread_copy(AVCodecContext *avctx){
MDECContext * const a = avctx->priv_data;
- AVFrame *p = (AVFrame*)&a->picture;
+ AVFrame *p = &a->picture;
avctx->coded_frame= p;
a->avctx= avctx;
diff --git a/libavcodec/mjpegdec.c b/libavcodec/mjpegdec.c
index 0a9d03061f..c5db0f1d41 100644
--- a/libavcodec/mjpegdec.c
+++ b/libavcodec/mjpegdec.c
@@ -1577,7 +1577,7 @@ int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
goto the_end;
} else if (unescaped_buf_size > (1U<<29)) {
av_log(avctx, AV_LOG_ERROR, "MJPEG packet 0x%x too big (0x%x/0x%x), corrupt data?\n",
- start_code, unescaped_buf_ptr, buf_size);
+ start_code, unescaped_buf_size, buf_size);
return AVERROR_INVALIDDATA;
} else {
av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%td\n",
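[Note] The one-line mjpegdec.c fix above matters because %x expects an unsigned int; passing the pointer unescaped_buf_ptr through the varargs call was undefined behaviour and logged a meaningless value. A trivial stand-alone version of the corrected call, with fprintf standing in for av_log:

    #include <stdio.h>

    static void report_oversized_packet(unsigned start_code,
                                        unsigned unescaped_buf_size,
                                        unsigned buf_size)
    {
        /* Sizes, not pointers, feed the %x conversions. */
        fprintf(stderr, "MJPEG packet 0x%x too big (0x%x/0x%x), corrupt data?\n",
                start_code, unescaped_buf_size, buf_size);
    }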
diff --git a/libavcodec/mpeg12.c b/libavcodec/mpeg12.c
index a4e4661d18..5c5e09ec2d 100644
--- a/libavcodec/mpeg12.c
+++ b/libavcodec/mpeg12.c
@@ -1952,7 +1952,7 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict)
ff_MPV_frame_end(s);
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
- *pict = *(AVFrame*)s->current_picture_ptr;
+ *pict = s->current_picture_ptr->f;
ff_print_debug_info(s, pict);
} else {
if (avctx->active_thread_type & FF_THREAD_FRAME)
@@ -1960,7 +1960,7 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict)
/* latency of 1 frame for I- and P-frames */
/* XXX: use another variable than picture_number */
if (s->last_picture_ptr != NULL) {
- *pict = *(AVFrame*)s->last_picture_ptr;
+ *pict = s->last_picture_ptr->f;
ff_print_debug_info(s, pict);
}
}
@@ -2256,7 +2256,7 @@ static int mpeg_decode_frame(AVCodecContext *avctx,
if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == SEQ_END_CODE)) {
/* special case for last picture */
if (s2->low_delay == 0 && s2->next_picture_ptr) {
- *picture = *(AVFrame*)s2->next_picture_ptr;
+ *picture = s2->next_picture_ptr->f;
s2->next_picture_ptr = NULL;
*data_size = sizeof(AVFrame);
diff --git a/libavcodec/mpeg4videodec.c b/libavcodec/mpeg4videodec.c
index a12ad99348..c87af8216e 100644
--- a/libavcodec/mpeg4videodec.c
+++ b/libavcodec/mpeg4videodec.c
@@ -1313,7 +1313,7 @@ static int mpeg4_decode_mb(MpegEncContext *s,
s->last_mv[i][1][1]= 0;
}
- ff_thread_await_progress((AVFrame*)s->next_picture_ptr, s->mb_y, 0);
+ ff_thread_await_progress(&s->next_picture_ptr->f, s->mb_y, 0);
}
/* if we skipped it in the future P Frame than skip it now too */
@@ -1500,7 +1500,7 @@ end:
if(s->pict_type==AV_PICTURE_TYPE_B){
const int delta= s->mb_x + 1 == s->mb_width ? 2 : 1;
- ff_thread_await_progress((AVFrame*)s->next_picture_ptr,
+ ff_thread_await_progress(&s->next_picture_ptr->f,
(s->mb_x + delta >= s->mb_width) ? FFMIN(s->mb_y+1, s->mb_height-1) : s->mb_y, 0);
if (s->next_picture.f.mbskip_table[xy + delta])
return SLICE_OK;
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index efe4cae762..9577c9d38f 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -232,9 +232,9 @@ static void free_frame_buffer(MpegEncContext *s, Picture *pic)
* dimensions; ignore user defined callbacks for these
*/
if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
- ff_thread_release_buffer(s->avctx, (AVFrame *) pic);
+ ff_thread_release_buffer(s->avctx, &pic->f);
else
- avcodec_default_release_buffer(s->avctx, (AVFrame *) pic);
+ avcodec_default_release_buffer(s->avctx, &pic->f);
av_freep(&pic->f.hwaccel_picture_private);
}
@@ -257,9 +257,9 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
}
if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
- r = ff_thread_get_buffer(s->avctx, (AVFrame *) pic);
+ r = ff_thread_get_buffer(s->avctx, &pic->f);
else
- r = avcodec_default_get_buffer(s->avctx, (AVFrame *) pic);
+ r = avcodec_default_get_buffer(s->avctx, &pic->f);
if (r < 0 || !pic->f.type || !pic->f.data[0]) {
av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
@@ -729,7 +729,7 @@ av_cold int ff_MPV_common_init(MpegEncContext *s)
s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
- s->avctx->coded_frame = (AVFrame*)&s->current_picture;
+ s->avctx->coded_frame = &s->current_picture.f;
FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
for (y = 0; y < s->mb_height; y++)
@@ -781,7 +781,7 @@ av_cold int ff_MPV_common_init(MpegEncContext *s)
FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
s->picture_count * sizeof(Picture), fail);
for (i = 0; i < s->picture_count; i++) {
- avcodec_get_frame_defaults((AVFrame *) &s->picture[i]);
+ avcodec_get_frame_defaults(&s->picture[i].f);
}
FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
@@ -1247,10 +1247,8 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
}
- ff_thread_report_progress((AVFrame *) s->last_picture_ptr,
- INT_MAX, 0);
- ff_thread_report_progress((AVFrame *) s->last_picture_ptr,
- INT_MAX, 1);
+ ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
+ ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
}
if ((s->next_picture_ptr == NULL ||
s->next_picture_ptr->f.data[0] == NULL) &&
@@ -1263,10 +1261,8 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
s->next_picture_ptr->f.key_frame = 0;
if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
return -1;
- ff_thread_report_progress((AVFrame *) s->next_picture_ptr,
- INT_MAX, 0);
- ff_thread_report_progress((AVFrame *) s->next_picture_ptr,
- INT_MAX, 1);
+ ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
+ ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
}
}
@@ -1391,10 +1387,10 @@ void ff_MPV_frame_end(MpegEncContext *s)
memset(&s->next_picture, 0, sizeof(Picture));
memset(&s->current_picture, 0, sizeof(Picture));
#endif
- s->avctx->coded_frame = (AVFrame *) s->current_picture_ptr;
+ s->avctx->coded_frame = &s->current_picture_ptr->f;
if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
- ff_thread_report_progress((AVFrame *) s->current_picture_ptr,
+ ff_thread_report_progress(&s->current_picture_ptr->f,
s->mb_height - 1, 0);
}
}
@@ -2346,10 +2342,14 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
if (s->mv_dir & MV_DIR_FORWARD) {
- ff_thread_await_progress((AVFrame*)s->last_picture_ptr, ff_MPV_lowest_referenced_row(s, 0), 0);
+ ff_thread_await_progress(&s->last_picture_ptr->f,
+ ff_MPV_lowest_referenced_row(s, 0),
+ 0);
}
if (s->mv_dir & MV_DIR_BACKWARD) {
- ff_thread_await_progress((AVFrame*)s->next_picture_ptr, ff_MPV_lowest_referenced_row(s, 1), 0);
+ ff_thread_await_progress(&s->next_picture_ptr->f,
+ ff_MPV_lowest_referenced_row(s, 1),
+ 0);
}
}
@@ -2556,9 +2556,9 @@ void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
int i;
if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
- src= (AVFrame*)s->current_picture_ptr;
+ src = &s->current_picture_ptr->f;
else if(s->last_picture_ptr)
- src= (AVFrame*)s->last_picture_ptr;
+ src = &s->last_picture_ptr->f;
else
return;
@@ -2867,5 +2867,5 @@ void ff_set_qscale(MpegEncContext * s, int qscale)
void ff_MPV_report_decode_progress(MpegEncContext *s)
{
if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
- ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);
+ ff_thread_report_progress(&s->current_picture_ptr->f, s->mb_y, 0);
}
diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c
index af57644626..9c7b889b97 100644
--- a/libavcodec/mpegvideo_enc.c
+++ b/libavcodec/mpegvideo_enc.c
@@ -981,7 +981,7 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg)
if (i < 0)
return i;
- pic = (AVFrame *) &s->picture[i];
+ pic = &s->picture[i].f;
pic->reference = 3;
for (i = 0; i < 4; i++) {
@@ -996,7 +996,7 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg)
if (i < 0)
return i;
- pic = (AVFrame *) &s->picture[i];
+ pic = &s->picture[i].f;
pic->reference = 3;
if (ff_alloc_picture(s, (Picture *) pic, 0) < 0) {
@@ -1252,7 +1252,7 @@ static int select_input_picture(MpegEncContext *s)
s->input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL);
s->avctx->release_buffer(s->avctx,
- (AVFrame *) s->input_picture[0]);
+ &s->input_picture[0]->f);
}
emms_c();
@@ -1385,13 +1385,13 @@ no_output_pic:
/* mark us unused / free shared pic */
if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL)
s->avctx->release_buffer(s->avctx,
- (AVFrame *) s->reordered_input_picture[0]);
+ &s->reordered_input_picture[0]->f);
for (i = 0; i < 4; i++)
s->reordered_input_picture[0]->f.data[i] = NULL;
s->reordered_input_picture[0]->f.type = 0;
- copy_picture_attributes(s, (AVFrame *) pic,
- (AVFrame *) s->reordered_input_picture[0]);
+ copy_picture_attributes(s, &pic->f,
+ &s->reordered_input_picture[0]->f);
s->current_picture_ptr = pic;
} else {
diff --git a/libavcodec/msmpeg4.c b/libavcodec/msmpeg4.c
index 232b82b219..b182da1f90 100644
--- a/libavcodec/msmpeg4.c
+++ b/libavcodec/msmpeg4.c
@@ -34,6 +34,7 @@
#include "libavutil/x86_cpu.h"
#include "h263.h"
#include "mpeg4video.h"
+#include "msmpeg4data.h"
#include "vc1data.h"
/*
@@ -52,22 +53,8 @@
#define V2_MV_VLC_BITS 9
#define TEX_VLC_BITS 9
-#define II_BITRATE 128*1024
-#define MBAC_BITRATE 50*1024
-
#define DEFAULT_INTER_INDEX 3
-static uint32_t v2_dc_lum_table[512][2];
-static uint32_t v2_dc_chroma_table[512][2];
-
-#include "msmpeg4data.h"
-
-#if CONFIG_ENCODERS //strangely gcc includes this even if it is not referenced
-static uint8_t rl_length[NB_RL_TABLES][MAX_LEVEL+1][MAX_RUN+1][2];
-#endif //CONFIG_ENCODERS
-
-static uint8_t static_rl_table_store[NB_RL_TABLES][2][2*MAX_RUN + MAX_LEVEL + 3];
-
/* This table is practically identical to the one from h263
* except that it is inverted. */
static av_cold void init_h263_dc_for_msmpeg4(void)
@@ -102,8 +89,8 @@ static av_cold void init_h263_dc_for_msmpeg4(void)
uni_len++;
}
}
- v2_dc_lum_table[level+256][0]= uni_code;
- v2_dc_lum_table[level+256][1]= uni_len;
+ ff_v2_dc_lum_table[level + 256][0] = uni_code;
+ ff_v2_dc_lum_table[level + 256][1] = uni_len;
/* chrominance h263 */
uni_code= ff_mpeg4_DCtab_chrom[size][0];
@@ -118,13 +105,13 @@ static av_cold void init_h263_dc_for_msmpeg4(void)
uni_len++;
}
}
- v2_dc_chroma_table[level+256][0]= uni_code;
- v2_dc_chroma_table[level+256][1]= uni_len;
+ ff_v2_dc_chroma_table[level + 256][0] = uni_code;
+ ff_v2_dc_chroma_table[level + 256][1] = uni_len;
}
}
-static av_cold void common_init(MpegEncContext * s)
+av_cold void ff_msmpeg4_common_init(MpegEncContext *s)
{
static int initialized=0;
@@ -173,251 +160,6 @@ static av_cold void common_init(MpegEncContext * s)
}
}
-#if CONFIG_ENCODERS
-
-/* build the table which associate a (x,y) motion vector to a vlc */
-static void init_mv_table(MVTable *tab)
-{
- int i, x, y;
-
- tab->table_mv_index = av_malloc(sizeof(uint16_t) * 4096);
- /* mark all entries as not used */
- for(i=0;i<4096;i++)
- tab->table_mv_index[i] = tab->n;
-
- for(i=0;i<tab->n;i++) {
- x = tab->table_mvx[i];
- y = tab->table_mvy[i];
- tab->table_mv_index[(x << 6) | y] = i;
- }
-}
-
-void ff_msmpeg4_code012(PutBitContext *pb, int n)
-{
- if (n == 0) {
- put_bits(pb, 1, 0);
- } else {
- put_bits(pb, 1, 1);
- put_bits(pb, 1, (n >= 2));
- }
-}
-
-static int get_size_of_code(MpegEncContext * s, RLTable *rl, int last, int run, int level, int intra){
- int size=0;
- int code;
- int run_diff= intra ? 0 : 1;
-
- code = get_rl_index(rl, last, run, level);
- size+= rl->table_vlc[code][1];
- if (code == rl->n) {
- int level1, run1;
-
- level1 = level - rl->max_level[last][run];
- if (level1 < 1)
- goto esc2;
- code = get_rl_index(rl, last, run, level1);
- if (code == rl->n) {
- esc2:
- size++;
- if (level > MAX_LEVEL)
- goto esc3;
- run1 = run - rl->max_run[last][level] - run_diff;
- if (run1 < 0)
- goto esc3;
- code = get_rl_index(rl, last, run1, level);
- if (code == rl->n) {
- esc3:
- /* third escape */
- size+=1+1+6+8;
- } else {
- /* second escape */
- size+= 1+1+ rl->table_vlc[code][1];
- }
- } else {
- /* first escape */
- size+= 1+1+ rl->table_vlc[code][1];
- }
- } else {
- size++;
- }
- return size;
-}
-
-av_cold void ff_msmpeg4_encode_init(MpegEncContext *s)
-{
- static int init_done=0;
- int i;
-
- common_init(s);
- if(s->msmpeg4_version>=4){
- s->min_qcoeff= -255;
- s->max_qcoeff= 255;
- }
-
- if (!init_done) {
- /* init various encoding tables */
- init_done = 1;
- init_mv_table(&ff_mv_tables[0]);
- init_mv_table(&ff_mv_tables[1]);
- for(i=0;i<NB_RL_TABLES;i++)
- ff_init_rl(&ff_rl_table[i], static_rl_table_store[i]);
-
- for(i=0; i<NB_RL_TABLES; i++){
- int level;
- for (level = 1; level <= MAX_LEVEL; level++) {
- int run;
- for(run=0; run<=MAX_RUN; run++){
- int last;
- for(last=0; last<2; last++){
- rl_length[i][level][run][last]= get_size_of_code(s, &ff_rl_table[ i], last, run, level, 0);
- }
- }
- }
- }
- }
-}
-
-static void find_best_tables(MpegEncContext * s)
-{
- int i;
- int best = 0, best_size = INT_MAX;
- int chroma_best = 0, best_chroma_size = INT_MAX;
-
- for(i=0; i<3; i++){
- int level;
- int chroma_size=0;
- int size=0;
-
- if(i>0){// ;)
- size++;
- chroma_size++;
- }
- for(level=0; level<=MAX_LEVEL; level++){
- int run;
- for(run=0; run<=MAX_RUN; run++){
- int last;
- const int last_size= size + chroma_size;
- for(last=0; last<2; last++){
- int inter_count = s->ac_stats[0][0][level][run][last] + s->ac_stats[0][1][level][run][last];
- int intra_luma_count = s->ac_stats[1][0][level][run][last];
- int intra_chroma_count= s->ac_stats[1][1][level][run][last];
-
- if(s->pict_type==AV_PICTURE_TYPE_I){
- size += intra_luma_count *rl_length[i ][level][run][last];
- chroma_size+= intra_chroma_count*rl_length[i+3][level][run][last];
- }else{
- size+= intra_luma_count *rl_length[i ][level][run][last]
- +intra_chroma_count*rl_length[i+3][level][run][last]
- +inter_count *rl_length[i+3][level][run][last];
- }
- }
- if(last_size == size+chroma_size) break;
- }
- }
- if(size<best_size){
- best_size= size;
- best= i;
- }
- if(chroma_size<best_chroma_size){
- best_chroma_size= chroma_size;
- chroma_best= i;
- }
- }
-
-// printf("type:%d, best:%d, qp:%d, var:%d, mcvar:%d, size:%d //\n",
-// s->pict_type, best, s->qscale, s->mb_var_sum, s->mc_mb_var_sum, best_size);
-
- if(s->pict_type==AV_PICTURE_TYPE_P) chroma_best= best;
-
- memset(s->ac_stats, 0, sizeof(int)*(MAX_LEVEL+1)*(MAX_RUN+1)*2*2*2);
-
- s->rl_table_index = best;
- s->rl_chroma_table_index= chroma_best;
-
- if(s->pict_type != s->last_non_b_pict_type){
- s->rl_table_index= 2;
- if(s->pict_type==AV_PICTURE_TYPE_I)
- s->rl_chroma_table_index= 1;
- else
- s->rl_chroma_table_index= 2;
- }
-
-}
-
-/* write MSMPEG4 compatible frame header */
-void ff_msmpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
-{
- find_best_tables(s);
-
- avpriv_align_put_bits(&s->pb);
- put_bits(&s->pb, 2, s->pict_type - 1);
-
- put_bits(&s->pb, 5, s->qscale);
- if(s->msmpeg4_version<=2){
- s->rl_table_index = 2;
- s->rl_chroma_table_index = 2;
- }
-
- s->dc_table_index = 1;
- s->mv_table_index = 1; /* only if P frame */
- s->use_skip_mb_code = 1; /* only if P frame */
- s->per_mb_rl_table = 0;
- if(s->msmpeg4_version==4)
- s->inter_intra_pred= (s->width*s->height < 320*240 && s->bit_rate<=II_BITRATE && s->pict_type==AV_PICTURE_TYPE_P);
-//printf("%d %d %d %d %d\n", s->pict_type, s->bit_rate, s->inter_intra_pred, s->width, s->height);
-
- if (s->pict_type == AV_PICTURE_TYPE_I) {
- s->slice_height= s->mb_height/1;
- put_bits(&s->pb, 5, 0x16 + s->mb_height/s->slice_height);
-
- if(s->msmpeg4_version==4){
- ff_msmpeg4_encode_ext_header(s);
- if(s->bit_rate>MBAC_BITRATE)
- put_bits(&s->pb, 1, s->per_mb_rl_table);
- }
-
- if(s->msmpeg4_version>2){
- if(!s->per_mb_rl_table){
- ff_msmpeg4_code012(&s->pb, s->rl_chroma_table_index);
- ff_msmpeg4_code012(&s->pb, s->rl_table_index);
- }
-
- put_bits(&s->pb, 1, s->dc_table_index);
- }
- } else {
- put_bits(&s->pb, 1, s->use_skip_mb_code);
-
- if(s->msmpeg4_version==4 && s->bit_rate>MBAC_BITRATE)
- put_bits(&s->pb, 1, s->per_mb_rl_table);
-
- if(s->msmpeg4_version>2){
- if(!s->per_mb_rl_table)
- ff_msmpeg4_code012(&s->pb, s->rl_table_index);
-
- put_bits(&s->pb, 1, s->dc_table_index);
-
- put_bits(&s->pb, 1, s->mv_table_index);
- }
- }
-
- s->esc3_level_length= 0;
- s->esc3_run_length= 0;
-}
-
-void ff_msmpeg4_encode_ext_header(MpegEncContext * s)
-{
- put_bits(&s->pb, 5, s->avctx->time_base.den / s->avctx->time_base.num); //yes 29.97 -> 29
-
- put_bits(&s->pb, 11, FFMIN(s->bit_rate/1024, 2047));
-
- if(s->msmpeg4_version>=3)
- put_bits(&s->pb, 1, s->flipflop_rounding);
- else
- assert(s->flipflop_rounding==0);
-}
-
-#endif //CONFIG_ENCODERS
-
/* predict coded block */
int ff_msmpeg4_coded_block_pred(MpegEncContext * s, int n, uint8_t **coded_block_ptr)
{
@@ -445,217 +187,6 @@ int ff_msmpeg4_coded_block_pred(MpegEncContext * s, int n, uint8_t **coded_block
return pred;
}
-#if CONFIG_ENCODERS
-
-void ff_msmpeg4_encode_motion(MpegEncContext * s,
- int mx, int my)
-{
- int code;
- MVTable *mv;
-
- /* modulo encoding */
- /* WARNING : you cannot reach all the MVs even with the modulo
- encoding. This is a somewhat strange compromise they took !!! */
- if (mx <= -64)
- mx += 64;
- else if (mx >= 64)
- mx -= 64;
- if (my <= -64)
- my += 64;
- else if (my >= 64)
- my -= 64;
-
- mx += 32;
- my += 32;
-#if 0
- if ((unsigned)mx >= 64 ||
- (unsigned)my >= 64)
- av_log(s->avctx, AV_LOG_ERROR, "error mx=%d my=%d\n", mx, my);
-#endif
- mv = &ff_mv_tables[s->mv_table_index];
-
- code = mv->table_mv_index[(mx << 6) | my];
- put_bits(&s->pb,
- mv->table_mv_bits[code],
- mv->table_mv_code[code]);
- if (code == mv->n) {
- /* escape : code literally */
- put_bits(&s->pb, 6, mx);
- put_bits(&s->pb, 6, my);
- }
-}
-
-void ff_msmpeg4_handle_slices(MpegEncContext *s){
- if (s->mb_x == 0) {
- if (s->slice_height && (s->mb_y % s->slice_height) == 0) {
- if(s->msmpeg4_version < 4){
- ff_mpeg4_clean_buffers(s);
- }
- s->first_slice_line = 1;
- } else {
- s->first_slice_line = 0;
- }
- }
-}
-
-static void msmpeg4v2_encode_motion(MpegEncContext * s, int val)
-{
- int range, bit_size, sign, code, bits;
-
- if (val == 0) {
- /* zero vector */
- code = 0;
- put_bits(&s->pb, ff_mvtab[code][1], ff_mvtab[code][0]);
- } else {
- bit_size = s->f_code - 1;
- range = 1 << bit_size;
- if (val <= -64)
- val += 64;
- else if (val >= 64)
- val -= 64;
-
- if (val >= 0) {
- sign = 0;
- } else {
- val = -val;
- sign = 1;
- }
- val--;
- code = (val >> bit_size) + 1;
- bits = val & (range - 1);
-
- put_bits(&s->pb, ff_mvtab[code][1] + 1, (ff_mvtab[code][0] << 1) | sign);
- if (bit_size > 0) {
- put_bits(&s->pb, bit_size, bits);
- }
- }
-}
-
-void ff_msmpeg4_encode_mb(MpegEncContext * s,
- DCTELEM block[6][64],
- int motion_x, int motion_y)
-{
- int cbp, coded_cbp, i;
- int pred_x, pred_y;
- uint8_t *coded_block;
-
- ff_msmpeg4_handle_slices(s);
-
- if (!s->mb_intra) {
- /* compute cbp */
- cbp = 0;
- for (i = 0; i < 6; i++) {
- if (s->block_last_index[i] >= 0)
- cbp |= 1 << (5 - i);
- }
- if (s->use_skip_mb_code && (cbp | motion_x | motion_y) == 0) {
- /* skip macroblock */
- put_bits(&s->pb, 1, 1);
- s->last_bits++;
- s->misc_bits++;
- s->skip_count++;
-
- return;
- }
- if (s->use_skip_mb_code)
- put_bits(&s->pb, 1, 0); /* mb coded */
-
- if(s->msmpeg4_version<=2){
- put_bits(&s->pb,
- ff_v2_mb_type[cbp&3][1],
- ff_v2_mb_type[cbp&3][0]);
- if((cbp&3) != 3) coded_cbp= cbp ^ 0x3C;
- else coded_cbp= cbp;
-
- put_bits(&s->pb,
- ff_h263_cbpy_tab[coded_cbp>>2][1],
- ff_h263_cbpy_tab[coded_cbp>>2][0]);
-
- s->misc_bits += get_bits_diff(s);
-
- ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
- msmpeg4v2_encode_motion(s, motion_x - pred_x);
- msmpeg4v2_encode_motion(s, motion_y - pred_y);
- }else{
- put_bits(&s->pb,
- ff_table_mb_non_intra[cbp + 64][1],
- ff_table_mb_non_intra[cbp + 64][0]);
-
- s->misc_bits += get_bits_diff(s);
-
- /* motion vector */
- ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
- ff_msmpeg4_encode_motion(s, motion_x - pred_x,
- motion_y - pred_y);
- }
-
- s->mv_bits += get_bits_diff(s);
-
- for (i = 0; i < 6; i++) {
- ff_msmpeg4_encode_block(s, block[i], i);
- }
- s->p_tex_bits += get_bits_diff(s);
- } else {
- /* compute cbp */
- cbp = 0;
- coded_cbp = 0;
- for (i = 0; i < 6; i++) {
- int val, pred;
- val = (s->block_last_index[i] >= 1);
- cbp |= val << (5 - i);
- if (i < 4) {
- /* predict value for close blocks only for luma */
- pred = ff_msmpeg4_coded_block_pred(s, i, &coded_block);
- *coded_block = val;
- val = val ^ pred;
- }
- coded_cbp |= val << (5 - i);
- }
-
- if(s->msmpeg4_version<=2){
- if (s->pict_type == AV_PICTURE_TYPE_I) {
- put_bits(&s->pb,
- ff_v2_intra_cbpc[cbp&3][1], ff_v2_intra_cbpc[cbp&3][0]);
- } else {
- if (s->use_skip_mb_code)
- put_bits(&s->pb, 1, 0); /* mb coded */
- put_bits(&s->pb,
- ff_v2_mb_type[(cbp&3) + 4][1],
- ff_v2_mb_type[(cbp&3) + 4][0]);
- }
- put_bits(&s->pb, 1, 0); /* no AC prediction yet */
- put_bits(&s->pb,
- ff_h263_cbpy_tab[cbp>>2][1],
- ff_h263_cbpy_tab[cbp>>2][0]);
- }else{
- if (s->pict_type == AV_PICTURE_TYPE_I) {
- put_bits(&s->pb,
- ff_msmp4_mb_i_table[coded_cbp][1], ff_msmp4_mb_i_table[coded_cbp][0]);
- } else {
- if (s->use_skip_mb_code)
- put_bits(&s->pb, 1, 0); /* mb coded */
- put_bits(&s->pb,
- ff_table_mb_non_intra[cbp][1],
- ff_table_mb_non_intra[cbp][0]);
- }
- put_bits(&s->pb, 1, 0); /* no AC prediction yet */
- if(s->inter_intra_pred){
- s->h263_aic_dir=0;
- put_bits(&s->pb, ff_table_inter_intra[s->h263_aic_dir][1], ff_table_inter_intra[s->h263_aic_dir][0]);
- }
- }
- s->misc_bits += get_bits_diff(s);
-
- for (i = 0; i < 6; i++) {
- ff_msmpeg4_encode_block(s, block[i], i);
- }
- s->i_tex_bits += get_bits_diff(s);
- s->i_count++;
- }
-}
-
-#endif //CONFIG_ENCODERS
-
static inline int msmpeg4v1_pred_dc(MpegEncContext * s, int n,
int32_t **dc_val_ptr)
{
@@ -685,8 +216,8 @@ static int get_dc(uint8_t *src, int stride, int scale)
}
/* dir = 0: left, dir = 1: top prediction */
-static inline int msmpeg4_pred_dc(MpegEncContext * s, int n,
- int16_t **dc_val_ptr, int *dir_ptr)
+int ff_msmpeg4_pred_dc(MpegEncContext *s, int n,
+ int16_t **dc_val_ptr, int *dir_ptr)
{
int a, b, c, wrap, pred, scale;
int16_t *dc_val;
@@ -832,207 +363,6 @@ static inline int msmpeg4_pred_dc(MpegEncContext * s, int n,
return pred;
}
-#define DC_MAX 119
-
-static void msmpeg4_encode_dc(MpegEncContext * s, int level, int n, int *dir_ptr)
-{
- int sign, code;
- int pred, extquant;
- int extrabits = 0;
-
- int16_t *dc_val;
- pred = msmpeg4_pred_dc(s, n, &dc_val, dir_ptr);
-
- /* update predictor */
- if (n < 4) {
- *dc_val = level * s->y_dc_scale;
- } else {
- *dc_val = level * s->c_dc_scale;
- }
-
- /* do the prediction */
- level -= pred;
-
- if(s->msmpeg4_version<=2){
- if (n < 4) {
- put_bits(&s->pb,
- v2_dc_lum_table[level+256][1],
- v2_dc_lum_table[level+256][0]);
- }else{
- put_bits(&s->pb,
- v2_dc_chroma_table[level+256][1],
- v2_dc_chroma_table[level+256][0]);
- }
- }else{
- sign = 0;
- if (level < 0) {
- level = -level;
- sign = 1;
- }
- code = level;
- if (code > DC_MAX)
- code = DC_MAX;
- else if( s->msmpeg4_version>=6 ) {
- if( s->qscale == 1 ) {
- extquant = (level + 3) & 0x3;
- code = ((level+3)>>2);
- } else if( s->qscale == 2 ) {
- extquant = (level + 1) & 0x1;
- code = ((level+1)>>1);
- }
- }
-
- if (s->dc_table_index == 0) {
- if (n < 4) {
- put_bits(&s->pb, ff_table0_dc_lum[code][1], ff_table0_dc_lum[code][0]);
- } else {
- put_bits(&s->pb, ff_table0_dc_chroma[code][1], ff_table0_dc_chroma[code][0]);
- }
- } else {
- if (n < 4) {
- put_bits(&s->pb, ff_table1_dc_lum[code][1], ff_table1_dc_lum[code][0]);
- } else {
- put_bits(&s->pb, ff_table1_dc_chroma[code][1], ff_table1_dc_chroma[code][0]);
- }
- }
-
- if(s->msmpeg4_version>=6 && s->qscale<=2)
- extrabits = 3 - s->qscale;
-
- if (code == DC_MAX)
- put_bits(&s->pb, 8 + extrabits, level);
- else if(extrabits > 0)//== VC1 && s->qscale<=2
- put_bits(&s->pb, extrabits, extquant);
-
- if (level != 0) {
- put_bits(&s->pb, 1, sign);
- }
- }
-}
-
-/* Encoding of a block. Very similar to MPEG4 except for a different
- escape coding (same as H263) and more vlc tables.
- */
-void ff_msmpeg4_encode_block(MpegEncContext * s, DCTELEM * block, int n)
-{
- int level, run, last, i, j, last_index;
- int last_non_zero, sign, slevel;
- int code, run_diff, dc_pred_dir;
- const RLTable *rl;
- const uint8_t *scantable;
-
- if (s->mb_intra) {
- msmpeg4_encode_dc(s, block[0], n, &dc_pred_dir);
- i = 1;
- if (n < 4) {
- rl = &ff_rl_table[s->rl_table_index];
- } else {
- rl = &ff_rl_table[3 + s->rl_chroma_table_index];
- }
- run_diff = s->msmpeg4_version>=4;
- scantable= s->intra_scantable.permutated;
- } else {
- i = 0;
- rl = &ff_rl_table[3 + s->rl_table_index];
- if(s->msmpeg4_version<=2)
- run_diff = 0;
- else
- run_diff = 1;
- scantable= s->inter_scantable.permutated;
- }
-
- /* recalculate block_last_index for M$ wmv1 */
- if(s->msmpeg4_version>=4 && s->msmpeg4_version<6 && s->block_last_index[n]>0){
- for(last_index=63; last_index>=0; last_index--){
- if(block[scantable[last_index]]) break;
- }
- s->block_last_index[n]= last_index;
- }else
- last_index = s->block_last_index[n];
- /* AC coefs */
- last_non_zero = i - 1;
- for (; i <= last_index; i++) {
- j = scantable[i];
- level = block[j];
- if (level) {
- run = i - last_non_zero - 1;
- last = (i == last_index);
- sign = 0;
- slevel = level;
- if (level < 0) {
- sign = 1;
- level = -level;
- }
-
- if(level<=MAX_LEVEL && run<=MAX_RUN){
- s->ac_stats[s->mb_intra][n>3][level][run][last]++;
- }
-
- s->ac_stats[s->mb_intra][n > 3][40][63][0]++; //esc3 like
-
- code = get_rl_index(rl, last, run, level);
- put_bits(&s->pb, rl->table_vlc[code][1], rl->table_vlc[code][0]);
- if (code == rl->n) {
- int level1, run1;
-
- level1 = level - rl->max_level[last][run];
- if (level1 < 1)
- goto esc2;
- code = get_rl_index(rl, last, run, level1);
- if (code == rl->n) {
- esc2:
- put_bits(&s->pb, 1, 0);
- if (level > MAX_LEVEL)
- goto esc3;
- run1 = run - rl->max_run[last][level] - run_diff;
- if (run1 < 0)
- goto esc3;
- code = get_rl_index(rl, last, run1+1, level);
- if (s->msmpeg4_version == 4 && code == rl->n)
- goto esc3;
- code = get_rl_index(rl, last, run1, level);
- if (code == rl->n) {
- esc3:
- /* third escape */
- put_bits(&s->pb, 1, 0);
- put_bits(&s->pb, 1, last);
- if(s->msmpeg4_version>=4){
- if(s->esc3_level_length==0){
- s->esc3_level_length=8;
- s->esc3_run_length= 6;
- //ESCLVLSZ + ESCRUNSZ
- if(s->qscale<8)
- put_bits(&s->pb, 6 + (s->msmpeg4_version>=6), 3);
- else
- put_bits(&s->pb, 8, 3);
- }
- put_bits(&s->pb, s->esc3_run_length, run);
- put_bits(&s->pb, 1, sign);
- put_bits(&s->pb, s->esc3_level_length, level);
- }else{
- put_bits(&s->pb, 6, run);
- put_sbits(&s->pb, 8, slevel);
- }
- } else {
- /* second escape */
- put_bits(&s->pb, 1, 1);
- put_bits(&s->pb, rl->table_vlc[code][1], rl->table_vlc[code][0]);
- put_bits(&s->pb, 1, sign);
- }
- } else {
- /* first escape */
- put_bits(&s->pb, 1, 1);
- put_bits(&s->pb, rl->table_vlc[code][1], rl->table_vlc[code][0]);
- put_bits(&s->pb, 1, sign);
- }
- } else {
- put_bits(&s->pb, 1, sign);
- }
- last_non_zero = i;
- }
- }
-}
-
/****************************************/
/* decoding stuff */
@@ -1263,13 +593,13 @@ av_cold int ff_msmpeg4_decode_init(AVCodecContext *avctx)
if (ff_h263_decode_init(avctx) < 0)
return -1;
- common_init(s);
+ ff_msmpeg4_common_init(s);
if (!done) {
done = 1;
for(i=0;i<NB_RL_TABLES;i++) {
- ff_init_rl(&ff_rl_table[i], static_rl_table_store[i]);
+ ff_init_rl(&ff_rl_table[i], ff_static_rl_table_store[i]);
}
INIT_VLC_RL(ff_rl_table[0], 642);
INIT_VLC_RL(ff_rl_table[1], 1104);
@@ -1301,11 +631,11 @@ av_cold int ff_msmpeg4_decode_init(AVCodecContext *avctx)
&ff_table1_dc_chroma[0][0], 8, 4, 1216);
INIT_VLC_STATIC(&v2_dc_lum_vlc, DC_VLC_BITS, 512,
- &v2_dc_lum_table[0][1], 8, 4,
- &v2_dc_lum_table[0][0], 8, 4, 1472);
+ &ff_v2_dc_lum_table[0][1], 8, 4,
+ &ff_v2_dc_lum_table[0][0], 8, 4, 1472);
INIT_VLC_STATIC(&v2_dc_chroma_vlc, DC_VLC_BITS, 512,
- &v2_dc_chroma_table[0][1], 8, 4,
- &v2_dc_chroma_table[0][0], 8, 4, 1506);
+ &ff_v2_dc_chroma_table[0][1], 8, 4,
+ &ff_v2_dc_chroma_table[0][0], 8, 4, 1506);
INIT_VLC_STATIC(&v2_intra_cbpc_vlc, V2_INTRA_CBPC_VLC_BITS, 4,
&ff_v2_intra_cbpc[0][1], 2, 1,
@@ -1588,7 +918,7 @@ static int msmpeg4_decode_dc(MpegEncContext * s, int n, int *dir_ptr)
*dc_val= level;
}else{
int16_t *dc_val;
- pred = msmpeg4_pred_dc(s, n, &dc_val, dir_ptr);
+ pred = ff_msmpeg4_pred_dc(s, n, &dc_val, dir_ptr);
level += pred;
/* update predictor */
diff --git a/libavcodec/msmpeg4.h b/libavcodec/msmpeg4.h
index 463c72fa49..abc414cfdd 100644
--- a/libavcodec/msmpeg4.h
+++ b/libavcodec/msmpeg4.h
@@ -22,19 +22,29 @@
#ifndef AVCODEC_MSMPEG4_H
#define AVCODEC_MSMPEG4_H
+#include <stdint.h>
+
#include "config.h"
#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"
+#include "msmpeg4data.h"
+#include "put_bits.h"
#define INTER_INTRA_VLC_BITS 3
#define MB_NON_INTRA_VLC_BITS 9
#define MB_INTRA_VLC_BITS 9
+#define II_BITRATE 128*1024
+#define MBAC_BITRATE 50*1024
+
+#define DC_MAX 119
+
extern VLC ff_mb_non_intra_vlc[4];
extern VLC ff_inter_intra_vlc;
void ff_msmpeg4_code012(PutBitContext *pb, int n);
+void ff_msmpeg4_common_init(MpegEncContext *s);
void ff_msmpeg4_encode_block(MpegEncContext * s, DCTELEM * block, int n);
void ff_msmpeg4_handle_slices(MpegEncContext *s);
void ff_msmpeg4_encode_motion(MpegEncContext * s, int mx, int my);
@@ -43,6 +53,8 @@ int ff_msmpeg4_coded_block_pred(MpegEncContext * s, int n,
int ff_msmpeg4_decode_motion(MpegEncContext * s, int *mx_ptr, int *my_ptr);
int ff_msmpeg4_decode_block(MpegEncContext * s, DCTELEM * block,
int n, int coded, const uint8_t *scan_table);
+int ff_msmpeg4_pred_dc(MpegEncContext *s, int n,
+ int16_t **dc_val_ptr, int *dir_ptr);
int ff_wmv2_decode_mb(MpegEncContext *s, DCTELEM block[6][64]);
#define CONFIG_MSMPEG4_DECODER (CONFIG_MSMPEG4V1_DECODER || \
diff --git a/libavcodec/msmpeg4data.c b/libavcodec/msmpeg4data.c
index 266ea2ec5d..50ba18c8cc 100644
--- a/libavcodec/msmpeg4data.c
+++ b/libavcodec/msmpeg4data.c
@@ -29,6 +29,10 @@
#include "msmpeg4data.h"
+uint32_t ff_v2_dc_lum_table[512][2];
+uint32_t ff_v2_dc_chroma_table[512][2];
+uint8_t ff_static_rl_table_store[NB_RL_TABLES][2][2 * MAX_RUN + MAX_LEVEL + 3];
+
VLC ff_msmp4_mb_i_vlc;
VLC ff_msmp4_dc_luma_vlc[2];
VLC ff_msmp4_dc_chroma_vlc[2];
diff --git a/libavcodec/msmpeg4data.h b/libavcodec/msmpeg4data.h
index c32c09e241..24a10d9f2e 100644
--- a/libavcodec/msmpeg4data.h
+++ b/libavcodec/msmpeg4data.h
@@ -59,6 +59,10 @@ extern const uint8_t ff_wmv1_scantable[WMV1_SCANTABLE_COUNT][64];
#define NB_RL_TABLES 6
extern RLTable ff_rl_table[NB_RL_TABLES];
+extern uint8_t ff_static_rl_table_store[NB_RL_TABLES][2][2 * MAX_RUN + MAX_LEVEL + 3];
+
+extern uint32_t ff_v2_dc_lum_table[512][2];
+extern uint32_t ff_v2_dc_chroma_table[512][2];
extern const uint8_t ff_wmv1_y_dc_scale_table[32];
extern const uint8_t ff_wmv1_c_dc_scale_table[32];
diff --git a/libavcodec/msmpeg4enc.c b/libavcodec/msmpeg4enc.c
new file mode 100644
index 0000000000..527d2840e1
--- /dev/null
+++ b/libavcodec/msmpeg4enc.c
@@ -0,0 +1,692 @@
+/*
+ * MSMPEG4 encoder backend
+ * Copyright (c) 2001 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * msmpeg4v1 & v2 stuff by Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * MSMPEG4 encoder backend
+ */
+
+#include <stdint.h>
+#include <string.h>
+
+#include "libavutil/avutil.h"
+#include "libavutil/mem.h"
+#include "mpegvideo.h"
+#include "msmpeg4.h"
+#include "h263.h"
+#include "mpeg4video.h"
+#include "msmpeg4.h"
+#include "msmpeg4data.h"
+#include "put_bits.h"
+#include "rl.h"
+#include "vc1data.h"
+
+static uint8_t rl_length[NB_RL_TABLES][MAX_LEVEL+1][MAX_RUN+1][2];
+
+/* build the table which associates a (x,y) motion vector to a vlc */
+static void init_mv_table(MVTable *tab)
+{
+ int i, x, y;
+
+ tab->table_mv_index = av_malloc(sizeof(uint16_t) * 4096);
+ /* mark all entries as not used */
+ for(i=0;i<4096;i++)
+ tab->table_mv_index[i] = tab->n;
+
+ for(i=0;i<tab->n;i++) {
+ x = tab->table_mvx[i];
+ y = tab->table_mvy[i];
+ tab->table_mv_index[(x << 6) | y] = i;
+ }
+}
+
+void ff_msmpeg4_code012(PutBitContext *pb, int n)
+{
+ if (n == 0) {
+ put_bits(pb, 1, 0);
+ } else {
+ put_bits(pb, 1, 1);
+ put_bits(pb, 1, (n >= 2));
+ }
+}
+
+static int get_size_of_code(MpegEncContext * s, RLTable *rl, int last, int run, int level, int intra){
+ int size=0;
+ int code;
+ int run_diff= intra ? 0 : 1;
+
+ code = get_rl_index(rl, last, run, level);
+ size+= rl->table_vlc[code][1];
+ if (code == rl->n) {
+ int level1, run1;
+
+ level1 = level - rl->max_level[last][run];
+ if (level1 < 1)
+ goto esc2;
+ code = get_rl_index(rl, last, run, level1);
+ if (code == rl->n) {
+ esc2:
+ size++;
+ if (level > MAX_LEVEL)
+ goto esc3;
+ run1 = run - rl->max_run[last][level] - run_diff;
+ if (run1 < 0)
+ goto esc3;
+ code = get_rl_index(rl, last, run1, level);
+ if (code == rl->n) {
+ esc3:
+ /* third escape */
+ size+=1+1+6+8;
+ } else {
+ /* second escape */
+ size+= 1+1+ rl->table_vlc[code][1];
+ }
+ } else {
+ /* first escape */
+ size+= 1+1+ rl->table_vlc[code][1];
+ }
+ } else {
+ size++;
+ }
+ return size;
+}
+
+av_cold void ff_msmpeg4_encode_init(MpegEncContext *s)
+{
+ static int init_done=0;
+ int i;
+
+ ff_msmpeg4_common_init(s);
+ if(s->msmpeg4_version>=4){
+ s->min_qcoeff= -255;
+ s->max_qcoeff= 255;
+ }
+
+ if (!init_done) {
+ /* init various encoding tables */
+ init_done = 1;
+ init_mv_table(&ff_mv_tables[0]);
+ init_mv_table(&ff_mv_tables[1]);
+ for(i=0;i<NB_RL_TABLES;i++)
+ ff_init_rl(&ff_rl_table[i], ff_static_rl_table_store[i]);
+
+ for(i=0; i<NB_RL_TABLES; i++){
+ int level;
+ for (level = 1; level <= MAX_LEVEL; level++) {
+ int run;
+ for(run=0; run<=MAX_RUN; run++){
+ int last;
+ for(last=0; last<2; last++){
+ rl_length[i][level][run][last]= get_size_of_code(s, &ff_rl_table[ i], last, run, level, 0);
+ }
+ }
+ }
+ }
+ }
+}
+
+static void find_best_tables(MpegEncContext * s)
+{
+ int i;
+ int best = 0, best_size = INT_MAX;
+ int chroma_best = 0, best_chroma_size = INT_MAX;
+
+ for(i=0; i<3; i++){
+ int level;
+ int chroma_size=0;
+ int size=0;
+
+ if(i>0){// ;)
+ size++;
+ chroma_size++;
+ }
+ for(level=0; level<=MAX_LEVEL; level++){
+ int run;
+ for(run=0; run<=MAX_RUN; run++){
+ int last;
+ const int last_size= size + chroma_size;
+ for(last=0; last<2; last++){
+ int inter_count = s->ac_stats[0][0][level][run][last] + s->ac_stats[0][1][level][run][last];
+ int intra_luma_count = s->ac_stats[1][0][level][run][last];
+ int intra_chroma_count= s->ac_stats[1][1][level][run][last];
+
+ if(s->pict_type==AV_PICTURE_TYPE_I){
+ size += intra_luma_count *rl_length[i ][level][run][last];
+ chroma_size+= intra_chroma_count*rl_length[i+3][level][run][last];
+ }else{
+ size+= intra_luma_count *rl_length[i ][level][run][last]
+ +intra_chroma_count*rl_length[i+3][level][run][last]
+ +inter_count *rl_length[i+3][level][run][last];
+ }
+ }
+ if(last_size == size+chroma_size) break;
+ }
+ }
+ if(size<best_size){
+ best_size= size;
+ best= i;
+ }
+ if(chroma_size<best_chroma_size){
+ best_chroma_size= chroma_size;
+ chroma_best= i;
+ }
+ }
+
+// printf("type:%d, best:%d, qp:%d, var:%d, mcvar:%d, size:%d //\n",
+// s->pict_type, best, s->qscale, s->mb_var_sum, s->mc_mb_var_sum, best_size);
+
+ if(s->pict_type==AV_PICTURE_TYPE_P) chroma_best= best;
+
+ memset(s->ac_stats, 0, sizeof(int)*(MAX_LEVEL+1)*(MAX_RUN+1)*2*2*2);
+
+ s->rl_table_index = best;
+ s->rl_chroma_table_index= chroma_best;
+
+ if(s->pict_type != s->last_non_b_pict_type){
+ s->rl_table_index= 2;
+ if(s->pict_type==AV_PICTURE_TYPE_I)
+ s->rl_chroma_table_index= 1;
+ else
+ s->rl_chroma_table_index= 2;
+ }
+
+}
+
+/* write MSMPEG4 compatible frame header */
+void ff_msmpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
+{
+ find_best_tables(s);
+
+ avpriv_align_put_bits(&s->pb);
+ put_bits(&s->pb, 2, s->pict_type - 1);
+
+ put_bits(&s->pb, 5, s->qscale);
+ if(s->msmpeg4_version<=2){
+ s->rl_table_index = 2;
+ s->rl_chroma_table_index = 2;
+ }
+
+ s->dc_table_index = 1;
+ s->mv_table_index = 1; /* only if P frame */
+ s->use_skip_mb_code = 1; /* only if P frame */
+ s->per_mb_rl_table = 0;
+ if(s->msmpeg4_version==4)
+ s->inter_intra_pred= (s->width*s->height < 320*240 && s->bit_rate<=II_BITRATE && s->pict_type==AV_PICTURE_TYPE_P);
+//printf("%d %d %d %d %d\n", s->pict_type, s->bit_rate, s->inter_intra_pred, s->width, s->height);
+
+ if (s->pict_type == AV_PICTURE_TYPE_I) {
+ s->slice_height= s->mb_height/1;
+ put_bits(&s->pb, 5, 0x16 + s->mb_height/s->slice_height);
+
+ if(s->msmpeg4_version==4){
+ ff_msmpeg4_encode_ext_header(s);
+ if(s->bit_rate>MBAC_BITRATE)
+ put_bits(&s->pb, 1, s->per_mb_rl_table);
+ }
+
+ if(s->msmpeg4_version>2){
+ if(!s->per_mb_rl_table){
+ ff_msmpeg4_code012(&s->pb, s->rl_chroma_table_index);
+ ff_msmpeg4_code012(&s->pb, s->rl_table_index);
+ }
+
+ put_bits(&s->pb, 1, s->dc_table_index);
+ }
+ } else {
+ put_bits(&s->pb, 1, s->use_skip_mb_code);
+
+ if(s->msmpeg4_version==4 && s->bit_rate>MBAC_BITRATE)
+ put_bits(&s->pb, 1, s->per_mb_rl_table);
+
+ if(s->msmpeg4_version>2){
+ if(!s->per_mb_rl_table)
+ ff_msmpeg4_code012(&s->pb, s->rl_table_index);
+
+ put_bits(&s->pb, 1, s->dc_table_index);
+
+ put_bits(&s->pb, 1, s->mv_table_index);
+ }
+ }
+
+ s->esc3_level_length= 0;
+ s->esc3_run_length= 0;
+}
+
+void ff_msmpeg4_encode_ext_header(MpegEncContext * s)
+{
+ put_bits(&s->pb, 5, s->avctx->time_base.den / s->avctx->time_base.num); //yes 29.97 -> 29
+
+ put_bits(&s->pb, 11, FFMIN(s->bit_rate/1024, 2047));
+
+ if(s->msmpeg4_version>=3)
+ put_bits(&s->pb, 1, s->flipflop_rounding);
+ else
+ assert(s->flipflop_rounding==0);
+}
+
+void ff_msmpeg4_encode_motion(MpegEncContext * s,
+ int mx, int my)
+{
+ int code;
+ MVTable *mv;
+
+ /* modulo encoding */
+ /* WARNING : you cannot reach all the MVs even with the modulo
+ encoding. This is a somewhat strange compromise they took !!! */
+ if (mx <= -64)
+ mx += 64;
+ else if (mx >= 64)
+ mx -= 64;
+ if (my <= -64)
+ my += 64;
+ else if (my >= 64)
+ my -= 64;
+
+ mx += 32;
+ my += 32;
+#if 0
+ if ((unsigned)mx >= 64 ||
+ (unsigned)my >= 64)
+ av_log(s->avctx, AV_LOG_ERROR, "error mx=%d my=%d\n", mx, my);
+#endif
+ mv = &ff_mv_tables[s->mv_table_index];
+
+ code = mv->table_mv_index[(mx << 6) | my];
+ put_bits(&s->pb,
+ mv->table_mv_bits[code],
+ mv->table_mv_code[code]);
+ if (code == mv->n) {
+ /* escape : code literally */
+ put_bits(&s->pb, 6, mx);
+ put_bits(&s->pb, 6, my);
+ }
+}
+
+void ff_msmpeg4_handle_slices(MpegEncContext *s){
+ if (s->mb_x == 0) {
+ if (s->slice_height && (s->mb_y % s->slice_height) == 0) {
+ if(s->msmpeg4_version < 4){
+ ff_mpeg4_clean_buffers(s);
+ }
+ s->first_slice_line = 1;
+ } else {
+ s->first_slice_line = 0;
+ }
+ }
+}
+
+static void msmpeg4v2_encode_motion(MpegEncContext * s, int val)
+{
+ int range, bit_size, sign, code, bits;
+
+ if (val == 0) {
+ /* zero vector */
+ code = 0;
+ put_bits(&s->pb, ff_mvtab[code][1], ff_mvtab[code][0]);
+ } else {
+ bit_size = s->f_code - 1;
+ range = 1 << bit_size;
+ if (val <= -64)
+ val += 64;
+ else if (val >= 64)
+ val -= 64;
+
+ if (val >= 0) {
+ sign = 0;
+ } else {
+ val = -val;
+ sign = 1;
+ }
+ val--;
+ code = (val >> bit_size) + 1;
+ bits = val & (range - 1);
+
+ put_bits(&s->pb, ff_mvtab[code][1] + 1, (ff_mvtab[code][0] << 1) | sign);
+ if (bit_size > 0) {
+ put_bits(&s->pb, bit_size, bits);
+ }
+ }
+}
+
+void ff_msmpeg4_encode_mb(MpegEncContext * s,
+ DCTELEM block[6][64],
+ int motion_x, int motion_y)
+{
+ int cbp, coded_cbp, i;
+ int pred_x, pred_y;
+ uint8_t *coded_block;
+
+ ff_msmpeg4_handle_slices(s);
+
+ if (!s->mb_intra) {
+ /* compute cbp */
+ cbp = 0;
+ for (i = 0; i < 6; i++) {
+ if (s->block_last_index[i] >= 0)
+ cbp |= 1 << (5 - i);
+ }
+ if (s->use_skip_mb_code && (cbp | motion_x | motion_y) == 0) {
+ /* skip macroblock */
+ put_bits(&s->pb, 1, 1);
+ s->last_bits++;
+ s->misc_bits++;
+ s->skip_count++;
+
+ return;
+ }
+ if (s->use_skip_mb_code)
+ put_bits(&s->pb, 1, 0); /* mb coded */
+
+ if(s->msmpeg4_version<=2){
+ put_bits(&s->pb,
+ ff_v2_mb_type[cbp&3][1],
+ ff_v2_mb_type[cbp&3][0]);
+ if((cbp&3) != 3) coded_cbp= cbp ^ 0x3C;
+ else coded_cbp= cbp;
+
+ put_bits(&s->pb,
+ ff_h263_cbpy_tab[coded_cbp>>2][1],
+ ff_h263_cbpy_tab[coded_cbp>>2][0]);
+
+ s->misc_bits += get_bits_diff(s);
+
+ ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
+ msmpeg4v2_encode_motion(s, motion_x - pred_x);
+ msmpeg4v2_encode_motion(s, motion_y - pred_y);
+ }else{
+ put_bits(&s->pb,
+ ff_table_mb_non_intra[cbp + 64][1],
+ ff_table_mb_non_intra[cbp + 64][0]);
+
+ s->misc_bits += get_bits_diff(s);
+
+ /* motion vector */
+ ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
+ ff_msmpeg4_encode_motion(s, motion_x - pred_x,
+ motion_y - pred_y);
+ }
+
+ s->mv_bits += get_bits_diff(s);
+
+ for (i = 0; i < 6; i++) {
+ ff_msmpeg4_encode_block(s, block[i], i);
+ }
+ s->p_tex_bits += get_bits_diff(s);
+ } else {
+ /* compute cbp */
+ cbp = 0;
+ coded_cbp = 0;
+ for (i = 0; i < 6; i++) {
+ int val, pred;
+ val = (s->block_last_index[i] >= 1);
+ cbp |= val << (5 - i);
+ if (i < 4) {
+ /* predict value for close blocks only for luma */
+ pred = ff_msmpeg4_coded_block_pred(s, i, &coded_block);
+ *coded_block = val;
+ val = val ^ pred;
+ }
+ coded_cbp |= val << (5 - i);
+ }
+
+ if(s->msmpeg4_version<=2){
+ if (s->pict_type == AV_PICTURE_TYPE_I) {
+ put_bits(&s->pb,
+ ff_v2_intra_cbpc[cbp&3][1], ff_v2_intra_cbpc[cbp&3][0]);
+ } else {
+ if (s->use_skip_mb_code)
+ put_bits(&s->pb, 1, 0); /* mb coded */
+ put_bits(&s->pb,
+ ff_v2_mb_type[(cbp&3) + 4][1],
+ ff_v2_mb_type[(cbp&3) + 4][0]);
+ }
+ put_bits(&s->pb, 1, 0); /* no AC prediction yet */
+ put_bits(&s->pb,
+ ff_h263_cbpy_tab[cbp>>2][1],
+ ff_h263_cbpy_tab[cbp>>2][0]);
+ }else{
+ if (s->pict_type == AV_PICTURE_TYPE_I) {
+ put_bits(&s->pb,
+ ff_msmp4_mb_i_table[coded_cbp][1], ff_msmp4_mb_i_table[coded_cbp][0]);
+ } else {
+ if (s->use_skip_mb_code)
+ put_bits(&s->pb, 1, 0); /* mb coded */
+ put_bits(&s->pb,
+ ff_table_mb_non_intra[cbp][1],
+ ff_table_mb_non_intra[cbp][0]);
+ }
+ put_bits(&s->pb, 1, 0); /* no AC prediction yet */
+ if(s->inter_intra_pred){
+ s->h263_aic_dir=0;
+ put_bits(&s->pb, ff_table_inter_intra[s->h263_aic_dir][1], ff_table_inter_intra[s->h263_aic_dir][0]);
+ }
+ }
+ s->misc_bits += get_bits_diff(s);
+
+ for (i = 0; i < 6; i++) {
+ ff_msmpeg4_encode_block(s, block[i], i);
+ }
+ s->i_tex_bits += get_bits_diff(s);
+ s->i_count++;
+ }
+}
+
+static void msmpeg4_encode_dc(MpegEncContext * s, int level, int n, int *dir_ptr)
+{
+ int sign, code;
+ int pred, extquant;
+ int extrabits = 0;
+
+ int16_t *dc_val;
+ pred = ff_msmpeg4_pred_dc(s, n, &dc_val, dir_ptr);
+
+ /* update predictor */
+ if (n < 4) {
+ *dc_val = level * s->y_dc_scale;
+ } else {
+ *dc_val = level * s->c_dc_scale;
+ }
+
+ /* do the prediction */
+ level -= pred;
+
+ if(s->msmpeg4_version<=2){
+ if (n < 4) {
+ put_bits(&s->pb,
+ ff_v2_dc_lum_table[level + 256][1],
+ ff_v2_dc_lum_table[level + 256][0]);
+ }else{
+ put_bits(&s->pb,
+ ff_v2_dc_chroma_table[level + 256][1],
+ ff_v2_dc_chroma_table[level + 256][0]);
+ }
+ }else{
+ sign = 0;
+ if (level < 0) {
+ level = -level;
+ sign = 1;
+ }
+ code = level;
+ if (code > DC_MAX)
+ code = DC_MAX;
+ else if( s->msmpeg4_version>=6 ) {
+ if( s->qscale == 1 ) {
+ extquant = (level + 3) & 0x3;
+ code = ((level+3)>>2);
+ } else if( s->qscale == 2 ) {
+ extquant = (level + 1) & 0x1;
+ code = ((level+1)>>1);
+ }
+ }
+
+ if (s->dc_table_index == 0) {
+ if (n < 4) {
+ put_bits(&s->pb, ff_table0_dc_lum[code][1], ff_table0_dc_lum[code][0]);
+ } else {
+ put_bits(&s->pb, ff_table0_dc_chroma[code][1], ff_table0_dc_chroma[code][0]);
+ }
+ } else {
+ if (n < 4) {
+ put_bits(&s->pb, ff_table1_dc_lum[code][1], ff_table1_dc_lum[code][0]);
+ } else {
+ put_bits(&s->pb, ff_table1_dc_chroma[code][1], ff_table1_dc_chroma[code][0]);
+ }
+ }
+
+ if(s->msmpeg4_version>=6 && s->qscale<=2)
+ extrabits = 3 - s->qscale;
+
+ if (code == DC_MAX)
+ put_bits(&s->pb, 8 + extrabits, level);
+ else if(extrabits > 0)//== VC1 && s->qscale<=2
+ put_bits(&s->pb, extrabits, extquant);
+
+ if (level != 0) {
+ put_bits(&s->pb, 1, sign);
+ }
+ }
+}
+
+/* Encoding of a block. Very similar to MPEG4 except for a different
+ escape coding (same as H263) and more vlc tables.
+ */
+void ff_msmpeg4_encode_block(MpegEncContext * s, DCTELEM * block, int n)
+{
+ int level, run, last, i, j, last_index;
+ int last_non_zero, sign, slevel;
+ int code, run_diff, dc_pred_dir;
+ const RLTable *rl;
+ const uint8_t *scantable;
+
+ if (s->mb_intra) {
+ msmpeg4_encode_dc(s, block[0], n, &dc_pred_dir);
+ i = 1;
+ if (n < 4) {
+ rl = &ff_rl_table[s->rl_table_index];
+ } else {
+ rl = &ff_rl_table[3 + s->rl_chroma_table_index];
+ }
+ run_diff = s->msmpeg4_version>=4;
+ scantable= s->intra_scantable.permutated;
+ } else {
+ i = 0;
+ rl = &ff_rl_table[3 + s->rl_table_index];
+ if(s->msmpeg4_version<=2)
+ run_diff = 0;
+ else
+ run_diff = 1;
+ scantable= s->inter_scantable.permutated;
+ }
+
+ /* recalculate block_last_index for M$ wmv1 */
+ if(s->msmpeg4_version>=4 && s->msmpeg4_version<6 && s->block_last_index[n]>0){
+ for(last_index=63; last_index>=0; last_index--){
+ if(block[scantable[last_index]]) break;
+ }
+ s->block_last_index[n]= last_index;
+ }else
+ last_index = s->block_last_index[n];
+ /* AC coefs */
+ last_non_zero = i - 1;
+ for (; i <= last_index; i++) {
+ j = scantable[i];
+ level = block[j];
+ if (level) {
+ run = i - last_non_zero - 1;
+ last = (i == last_index);
+ sign = 0;
+ slevel = level;
+ if (level < 0) {
+ sign = 1;
+ level = -level;
+ }
+
+ if(level<=MAX_LEVEL && run<=MAX_RUN){
+ s->ac_stats[s->mb_intra][n>3][level][run][last]++;
+ }
+
+ s->ac_stats[s->mb_intra][n > 3][40][63][0]++; //esc3 like
+
+ code = get_rl_index(rl, last, run, level);
+ put_bits(&s->pb, rl->table_vlc[code][1], rl->table_vlc[code][0]);
+ if (code == rl->n) {
+ int level1, run1;
+
+ level1 = level - rl->max_level[last][run];
+ if (level1 < 1)
+ goto esc2;
+ code = get_rl_index(rl, last, run, level1);
+ if (code == rl->n) {
+ esc2:
+ put_bits(&s->pb, 1, 0);
+ if (level > MAX_LEVEL)
+ goto esc3;
+ run1 = run - rl->max_run[last][level] - run_diff;
+ if (run1 < 0)
+ goto esc3;
+ code = get_rl_index(rl, last, run1+1, level);
+ if (s->msmpeg4_version == 4 && code == rl->n)
+ goto esc3;
+ code = get_rl_index(rl, last, run1, level);
+ if (code == rl->n) {
+ esc3:
+ /* third escape */
+ put_bits(&s->pb, 1, 0);
+ put_bits(&s->pb, 1, last);
+ if(s->msmpeg4_version>=4){
+ if(s->esc3_level_length==0){
+ s->esc3_level_length=8;
+ s->esc3_run_length= 6;
+ //ESCLVLSZ + ESCRUNSZ
+ if(s->qscale<8)
+ put_bits(&s->pb, 6 + (s->msmpeg4_version>=6), 3);
+ else
+ put_bits(&s->pb, 8, 3);
+ }
+ put_bits(&s->pb, s->esc3_run_length, run);
+ put_bits(&s->pb, 1, sign);
+ put_bits(&s->pb, s->esc3_level_length, level);
+ }else{
+ put_bits(&s->pb, 6, run);
+ put_sbits(&s->pb, 8, slevel);
+ }
+ } else {
+ /* second escape */
+ put_bits(&s->pb, 1, 1);
+ put_bits(&s->pb, rl->table_vlc[code][1], rl->table_vlc[code][0]);
+ put_bits(&s->pb, 1, sign);
+ }
+ } else {
+ /* first escape */
+ put_bits(&s->pb, 1, 1);
+ put_bits(&s->pb, rl->table_vlc[code][1], rl->table_vlc[code][0]);
+ put_bits(&s->pb, 1, sign);
+ }
+ } else {
+ put_bits(&s->pb, 1, sign);
+ }
+ last_non_zero = i;
+ }
+ }
+}
diff --git a/libavcodec/pamenc.c b/libavcodec/pamenc.c
index d1de7a0e33..7a3499e579 100644
--- a/libavcodec/pamenc.c
+++ b/libavcodec/pamenc.c
@@ -28,7 +28,7 @@ static int pam_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pict, int *got_packet)
{
PNMContext *s = avctx->priv_data;
- AVFrame * const p = (AVFrame*)&s->picture;
+ AVFrame * const p = &s->picture;
int i, h, w, n, linesize, depth, maxval, ret;
const char *tuple_type;
uint8_t *ptr;
diff --git a/libavcodec/pnm.c b/libavcodec/pnm.c
index 0e547deb5f..bfb4a25b7c 100644
--- a/libavcodec/pnm.c
+++ b/libavcodec/pnm.c
@@ -198,8 +198,8 @@ av_cold int ff_pnm_init(AVCodecContext *avctx)
{
PNMContext *s = avctx->priv_data;
- avcodec_get_frame_defaults((AVFrame*)&s->picture);
- avctx->coded_frame = (AVFrame*)&s->picture;
+ avcodec_get_frame_defaults(&s->picture);
+ avctx->coded_frame = &s->picture;
return 0;
}
diff --git a/libavcodec/pnmdec.c b/libavcodec/pnmdec.c
index 0be7ec9326..0c1fad868b 100644
--- a/libavcodec/pnmdec.c
+++ b/libavcodec/pnmdec.c
@@ -31,7 +31,7 @@ static int pnm_decode_frame(AVCodecContext *avctx, void *data,
int buf_size = avpkt->size;
PNMContext * const s = avctx->priv_data;
AVFrame *picture = data;
- AVFrame * const p = (AVFrame*)&s->picture;
+ AVFrame * const p = &s->picture;
int i, j, n, linesize, h, upgrade = 0, is_mono = 0;
unsigned char *ptr;
int components, sample_len;
@@ -184,7 +184,7 @@ static int pnm_decode_frame(AVCodecContext *avctx, void *data,
}
break;
}
- *picture = *(AVFrame*)&s->picture;
+ *picture = s->picture;
*data_size = sizeof(AVPicture);
return s->bytestream - s->bytestream_start;
diff --git a/libavcodec/pnmenc.c b/libavcodec/pnmenc.c
index 1f96db51b9..b1f27b4a3e 100644
--- a/libavcodec/pnmenc.c
+++ b/libavcodec/pnmenc.c
@@ -28,7 +28,7 @@ static int pnm_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pict, int *got_packet)
{
PNMContext *s = avctx->priv_data;
- AVFrame * const p = (AVFrame*)&s->picture;
+ AVFrame * const p = &s->picture;
int i, h, h1, c, n, linesize, ret;
uint8_t *ptr, *ptr1, *ptr2;
diff --git a/libavcodec/ppc/vp8dsp_altivec.c b/libavcodec/ppc/vp8dsp_altivec.c
index e9defbc67f..7f7d3bd35c 100644
--- a/libavcodec/ppc/vp8dsp_altivec.c
+++ b/libavcodec/ppc/vp8dsp_altivec.c
@@ -75,8 +75,8 @@ static const vec_s8 h_subpel_filters_outer[3] =
dstv = vec_sra(dstv, c7)
static av_always_inline
-void put_vp8_epel_h_altivec_core(uint8_t *dst, int dst_stride,
- uint8_t *src, int src_stride,
+void put_vp8_epel_h_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
int h, int mx, int w, int is6tap)
{
LOAD_H_SUBPEL_FILTER(mx-1);
@@ -161,8 +161,8 @@ static const vec_u8 v_subpel_filters[7] =
dstv = vec_sra(dstv, c7)
static av_always_inline
-void put_vp8_epel_v_altivec_core(uint8_t *dst, int dst_stride,
- uint8_t *src, int src_stride,
+void put_vp8_epel_v_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
int h, int my, int w, int is6tap)
{
LOAD_V_SUBPEL_FILTER(my-1);
@@ -226,19 +226,19 @@ void put_vp8_epel_v_altivec_core(uint8_t *dst, int dst_stride,
#define EPEL_FUNCS(WIDTH, TAPS) \
static av_noinline \
-void put_vp8_epel ## WIDTH ## _h ## TAPS ## _altivec(uint8_t *dst, int dst_stride, uint8_t *src, int src_stride, int h, int mx, int my) \
+void put_vp8_epel ## WIDTH ## _h ## TAPS ## _altivec(uint8_t *dst, ptrdiff_t dst_stride, uint8_t *src, ptrdiff_t src_stride, int h, int mx, int my) \
{ \
put_vp8_epel_h_altivec_core(dst, dst_stride, src, src_stride, h, mx, WIDTH, TAPS == 6); \
} \
\
static av_noinline \
-void put_vp8_epel ## WIDTH ## _v ## TAPS ## _altivec(uint8_t *dst, int dst_stride, uint8_t *src, int src_stride, int h, int mx, int my) \
+void put_vp8_epel ## WIDTH ## _v ## TAPS ## _altivec(uint8_t *dst, ptrdiff_t dst_stride, uint8_t *src, ptrdiff_t src_stride, int h, int mx, int my) \
{ \
put_vp8_epel_v_altivec_core(dst, dst_stride, src, src_stride, h, my, WIDTH, TAPS == 6); \
}
#define EPEL_HV(WIDTH, HTAPS, VTAPS) \
-static void put_vp8_epel ## WIDTH ## _h ## HTAPS ## v ## VTAPS ## _altivec(uint8_t *dst, int stride, uint8_t *src, int s, int h, int mx, int my) \
+static void put_vp8_epel ## WIDTH ## _h ## HTAPS ## v ## VTAPS ## _altivec(uint8_t *dst, ptrdiff_t stride, uint8_t *src, ptrdiff_t s, int h, int mx, int my) \
{ \
DECLARE_ALIGNED(16, uint8_t, tmp)[(2*WIDTH+5)*16]; \
if (VTAPS == 6) { \
@@ -266,7 +266,7 @@ EPEL_HV(4, 4,6)
EPEL_HV(4, 6,4)
EPEL_HV(4, 4,4)
-static void put_vp8_pixels16_altivec(uint8_t *dst, int stride, uint8_t *src, int s, int h, int mx, int my)
+static void put_vp8_pixels16_altivec(uint8_t *dst, ptrdiff_t stride, uint8_t *src, ptrdiff_t s, int h, int mx, int my)
{
ff_put_pixels16_altivec(dst, src, stride, h);
}
diff --git a/libavcodec/qdrw.c b/libavcodec/qdrw.c
index 54deec5c83..751e1a1909 100644
--- a/libavcodec/qdrw.c
+++ b/libavcodec/qdrw.c
@@ -40,7 +40,7 @@ static int decode_frame(AVCodecContext *avctx,
const uint8_t *buf_end = avpkt->data + avpkt->size;
int buf_size = avpkt->size;
QdrawContext * const a = avctx->priv_data;
- AVFrame * const p= (AVFrame*)&a->pic;
+ AVFrame * const p = &a->pic;
uint8_t* outdata;
int colors;
int i;
diff --git a/libavcodec/qpeg.c b/libavcodec/qpeg.c
index bbb9f71aae..ca48b6bcf3 100644
--- a/libavcodec/qpeg.c
+++ b/libavcodec/qpeg.c
@@ -259,8 +259,8 @@ static int decode_frame(AVCodecContext *avctx,
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
QpegContext * const a = avctx->priv_data;
- AVFrame * p= (AVFrame*)&a->pic;
- AVFrame * ref= (AVFrame*)&a->ref;
+ AVFrame * p = &a->pic;
+ AVFrame * ref= &a->ref;
uint8_t* outdata;
int delta, ret = 0;
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
@@ -311,8 +311,8 @@ static av_cold int decode_init(AVCodecContext *avctx){
static av_cold int decode_end(AVCodecContext *avctx){
QpegContext * const a = avctx->priv_data;
- AVFrame * const p= (AVFrame*)&a->pic;
- AVFrame * const ref= (AVFrame*)&a->ref;
+ AVFrame * const p = &a->pic;
+ AVFrame * const ref= &a->ref;
if(p->data[0])
avctx->release_buffer(avctx, p);
diff --git a/libavcodec/rawdec.c b/libavcodec/rawdec.c
index d912ca285d..71ab5df501 100644
--- a/libavcodec/rawdec.c
+++ b/libavcodec/rawdec.c
@@ -140,8 +140,8 @@ static int raw_decode(AVCodecContext *avctx,
int linesize_align = 4;
RawVideoContext *context = avctx->priv_data;
- AVFrame * frame = (AVFrame *) data;
- AVPicture * picture = (AVPicture *) data;
+ AVFrame *frame = data;
+ AVPicture *picture = data;
frame->pict_type = avctx->coded_frame->pict_type;
frame->interlaced_frame = avctx->coded_frame->interlaced_frame;
diff --git a/libavcodec/rawenc.c b/libavcodec/rawenc.c
index 577a8fc98a..cb491e9094 100644
--- a/libavcodec/rawenc.c
+++ b/libavcodec/rawenc.c
@@ -32,7 +32,7 @@
static av_cold int raw_init_encoder(AVCodecContext *avctx)
{
- avctx->coded_frame = (AVFrame *)avctx->priv_data;
+ avctx->coded_frame = avctx->priv_data;
avcodec_get_frame_defaults(avctx->coded_frame);
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
avctx->bits_per_coded_sample = av_get_bits_per_pixel(&av_pix_fmt_descriptors[avctx->pix_fmt]);
diff --git a/libavcodec/rv10.c b/libavcodec/rv10.c
index dd8715b300..bc86b69a2e 100644
--- a/libavcodec/rv10.c
+++ b/libavcodec/rv10.c
@@ -515,9 +515,10 @@ static int rv10_decode_packet(AVCodecContext *avctx,
const uint8_t *buf, int buf_size, int buf_size2)
{
MpegEncContext *s = avctx->priv_data;
- int mb_count, mb_pos, left, start_mb_x;
+ int mb_count, mb_pos, left, start_mb_x, active_bits_size;
- init_get_bits(&s->gb, buf, buf_size*8);
+ active_bits_size = buf_size * 8;
+ init_get_bits(&s->gb, buf, FFMAX(buf_size, buf_size2) * 8);
if(s->codec_id ==CODEC_ID_RV10)
mb_count = rv10_decode_picture_header(s);
else
@@ -601,13 +602,26 @@ static int rv10_decode_packet(AVCodecContext *avctx,
s->mv_type = MV_TYPE_16X16;
ret=ff_h263_decode_mb(s, s->block);
- if (ret != SLICE_ERROR && s->gb.size_in_bits < get_bits_count(&s->gb) && 8*buf_size2 >= get_bits_count(&s->gb)){
- av_log(avctx, AV_LOG_DEBUG, "update size from %d to %d\n", s->gb.size_in_bits, 8*buf_size2);
- s->gb.size_in_bits= 8*buf_size2;
+ // Repeat the slice end check from ff_h263_decode_mb with our active
+ // bitstream size
+ if (ret != SLICE_ERROR) {
+ int v = show_bits(&s->gb, 16);
+
+ if (get_bits_count(&s->gb) + 16 > active_bits_size)
+ v >>= get_bits_count(&s->gb) + 16 - active_bits_size;
+
+ if (!v)
+ ret = SLICE_END;
+ }
+ if (ret != SLICE_ERROR && active_bits_size < get_bits_count(&s->gb) &&
+ 8 * buf_size2 >= get_bits_count(&s->gb)) {
+ active_bits_size = buf_size2 * 8;
+ av_log(avctx, AV_LOG_DEBUG, "update size from %d to %d\n",
+ 8 * buf_size, active_bits_size);
ret= SLICE_OK;
}
- if (ret == SLICE_ERROR || s->gb.size_in_bits < get_bits_count(&s->gb)) {
+ if (ret == SLICE_ERROR || active_bits_size < get_bits_count(&s->gb)) {
av_log(s->avctx, AV_LOG_ERROR, "ERROR at MB %d %d\n", s->mb_x, s->mb_y);
return -1;
}
@@ -629,7 +643,7 @@ static int rv10_decode_packet(AVCodecContext *avctx,
ff_er_add_slice(s, start_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END);
- return s->gb.size_in_bits;
+ return active_bits_size;
}
static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n)
@@ -661,8 +675,12 @@ static int rv10_decode_frame(AVCodecContext *avctx,
if(!avctx->slice_count){
slice_count = (*buf++) + 1;
+ buf_size--;
slices_hdr = buf + 4;
buf += 8 * slice_count;
+ buf_size -= 8 * slice_count;
+ if (buf_size <= 0)
+ return AVERROR_INVALIDDATA;
}else
slice_count = avctx->slice_count;
@@ -696,9 +714,9 @@ static int rv10_decode_frame(AVCodecContext *avctx,
ff_MPV_frame_end(s);
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
- *pict= *(AVFrame*)s->current_picture_ptr;
+ *pict = s->current_picture_ptr->f;
} else if (s->last_picture_ptr != NULL) {
- *pict= *(AVFrame*)s->last_picture_ptr;
+ *pict = s->last_picture_ptr->f;
}
if(s->last_picture_ptr || s->low_delay){
@@ -708,7 +726,7 @@ static int rv10_decode_frame(AVCodecContext *avctx,
s->current_picture_ptr= NULL; //so we can detect if frame_end wasnt called (find some nicer solution...)
}
- return buf_size;
+ return avpkt->size;
}
AVCodec ff_rv10_decoder = {
diff --git a/libavcodec/rv34.c b/libavcodec/rv34.c
index 69273874c2..5550a74d0b 100644
--- a/libavcodec/rv34.c
+++ b/libavcodec/rv34.c
@@ -1660,7 +1660,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
if (buf_size == 0) {
/* special case for last picture */
if (s->low_delay==0 && s->next_picture_ptr) {
- *pict = *(AVFrame*)s->next_picture_ptr;
+ *pict = s->next_picture_ptr->f;
s->next_picture_ptr = NULL;
*data_size = sizeof(AVFrame);
@@ -1747,9 +1747,9 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
- *pict = *(AVFrame*)s->current_picture_ptr;
+ *pict = s->current_picture_ptr->f;
} else if (s->last_picture_ptr != NULL) {
- *pict = *(AVFrame*)s->last_picture_ptr;
+ *pict = s->last_picture_ptr->f;
}
if(s->last_picture_ptr || s->low_delay){
diff --git a/libavcodec/svq1dec.c b/libavcodec/svq1dec.c
index cb89b3f619..381cd63937 100644
--- a/libavcodec/svq1dec.c
+++ b/libavcodec/svq1dec.c
@@ -735,7 +735,7 @@ static int svq1_decode_frame(AVCodecContext *avctx,
}
}
- *pict = *(AVFrame*)&s->current_picture;
+ *pict = s->current_picture.f;
ff_MPV_frame_end(s);
diff --git a/libavcodec/svq1enc.c b/libavcodec/svq1enc.c
index 84ff0d8814..a3936a8b37 100644
--- a/libavcodec/svq1enc.c
+++ b/libavcodec/svq1enc.c
@@ -473,7 +473,7 @@ static av_cold int svq1_encode_init(AVCodecContext *avctx)
SVQ1Context * const s = avctx->priv_data;
ff_dsputil_init(&s->dsp, avctx);
- avctx->coded_frame= (AVFrame*)&s->picture;
+ avctx->coded_frame = &s->picture;
s->frame_width = avctx->width;
s->frame_height = avctx->height;
@@ -501,7 +501,7 @@ static int svq1_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pict, int *got_packet)
{
SVQ1Context * const s = avctx->priv_data;
- AVFrame * const p= (AVFrame*)&s->picture;
+ AVFrame * const p = &s->picture;
AVFrame temp;
int i, ret;
diff --git a/libavcodec/svq3.c b/libavcodec/svq3.c
index 3ebe95ef7f..8b112412cd 100644
--- a/libavcodec/svq3.c
+++ b/libavcodec/svq3.c
@@ -958,7 +958,7 @@ static int svq3_decode_frame(AVCodecContext *avctx,
/* special case for last picture */
if (buf_size == 0) {
if (s->next_picture_ptr && !s->low_delay) {
- *(AVFrame *) data = *(AVFrame *) &s->next_picture;
+ *(AVFrame *) data = s->next_picture.f;
s->next_picture_ptr = NULL;
*data_size = sizeof(AVFrame);
}
@@ -1101,9 +1101,9 @@ static int svq3_decode_frame(AVCodecContext *avctx,
ff_MPV_frame_end(s);
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
- *(AVFrame *) data = *(AVFrame *) &s->current_picture;
+ *(AVFrame *) data = s->current_picture.f;
} else {
- *(AVFrame *) data = *(AVFrame *) &s->last_picture;
+ *(AVFrame *) data = s->last_picture.f;
}
/* Do not output the last pic after seeking. */
diff --git a/libavcodec/targa.c b/libavcodec/targa.c
index 573992ff8a..5ddd21bfef 100644
--- a/libavcodec/targa.c
+++ b/libavcodec/targa.c
@@ -106,7 +106,7 @@ static int decode_frame(AVCodecContext *avctx,
const uint8_t *buf_end = avpkt->data + avpkt->size;
TargaContext * const s = avctx->priv_data;
AVFrame *picture = data;
- AVFrame * const p= (AVFrame*)&s->picture;
+ AVFrame * const p = &s->picture;
uint8_t *dst;
int stride;
int idlen, pal, compr, y, w, h, bpp, flags;
@@ -262,7 +262,7 @@ static int decode_frame(AVCodecContext *avctx,
}
}
- *picture= *(AVFrame*)&s->picture;
+ *picture = s->picture;
*data_size = sizeof(AVPicture);
return avpkt->size;
@@ -271,8 +271,8 @@ static int decode_frame(AVCodecContext *avctx,
static av_cold int targa_init(AVCodecContext *avctx){
TargaContext *s = avctx->priv_data;
- avcodec_get_frame_defaults((AVFrame*)&s->picture);
- avctx->coded_frame= (AVFrame*)&s->picture;
+ avcodec_get_frame_defaults(&s->picture);
+ avctx->coded_frame = &s->picture;
return 0;
}
diff --git a/libavcodec/tiff.c b/libavcodec/tiff.c
index 5d01e44ad8..5adf5a3b55 100644
--- a/libavcodec/tiff.c
+++ b/libavcodec/tiff.c
@@ -577,7 +577,7 @@ static int decode_frame(AVCodecContext *avctx,
int buf_size = avpkt->size;
TiffContext * const s = avctx->priv_data;
AVFrame *picture = data;
- AVFrame * const p= (AVFrame*)&s->picture;
+ AVFrame * const p = &s->picture;
const uint8_t *orig_buf = buf, *end_buf = buf + buf_size;
unsigned off;
int id, le, ret;
@@ -691,7 +691,7 @@ static int decode_frame(AVCodecContext *avctx,
dst += s->picture.linesize[0];
}
}
- *picture= *(AVFrame*)&s->picture;
+ *picture = s->picture;
*data_size = sizeof(AVPicture);
return buf_size;
@@ -703,8 +703,8 @@ static av_cold int tiff_init(AVCodecContext *avctx){
s->width = 0;
s->height = 0;
s->avctx = avctx;
- avcodec_get_frame_defaults((AVFrame*)&s->picture);
- avctx->coded_frame= (AVFrame*)&s->picture;
+ avcodec_get_frame_defaults(&s->picture);
+ avctx->coded_frame = &s->picture;
ff_lzw_decode_open(&s->lzw);
ff_ccitt_unpack_init();
diff --git a/libavcodec/tiffenc.c b/libavcodec/tiffenc.c
index a0fecf7b0f..c061ac985f 100644
--- a/libavcodec/tiffenc.c
+++ b/libavcodec/tiffenc.c
@@ -206,7 +206,7 @@ static int encode_frame(AVCodecContext * avctx, AVPacket *pkt,
const AVFrame *pict, int *got_packet)
{
TiffEncoderContext *s = avctx->priv_data;
- AVFrame *const p = (AVFrame *) & s->picture;
+ AVFrame *const p = &s->picture;
int i;
uint8_t *ptr;
uint8_t *offset;
diff --git a/libavcodec/truemotion2.c b/libavcodec/truemotion2.c
index 985823dc25..ead7a39dbf 100644
--- a/libavcodec/truemotion2.c
+++ b/libavcodec/truemotion2.c
@@ -767,7 +767,7 @@ static int decode_frame(AVCodecContext *avctx,
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
TM2Context * const l = avctx->priv_data;
- AVFrame * const p= (AVFrame*)&l->pic;
+ AVFrame * const p = &l->pic;
int i, skip, t;
av_fast_padded_malloc(&l->buffer, &l->buffer_size, buf_size);
diff --git a/libavcodec/ulti.c b/libavcodec/ulti.c
index cd6f2d24ab..22b932a7e7 100644
--- a/libavcodec/ulti.c
+++ b/libavcodec/ulti.c
@@ -50,7 +50,7 @@ static av_cold int ulti_decode_init(AVCodecContext *avctx)
s->height = avctx->height;
s->blocks = (s->width / 8) * (s->height / 8);
avctx->pix_fmt = PIX_FMT_YUV410P;
avcodec_get_frame_defaults(&s->frame);
- avctx->coded_frame = (AVFrame*) &s->frame;
+ avctx->coded_frame = &s->frame;
s->ulti_codebook = ulti_codebook;
diff --git a/libavcodec/utils.c b/libavcodec/utils.c
index b612ca1ace..2b6e30e5bf 100644
--- a/libavcodec/utils.c
+++ b/libavcodec/utils.c
@@ -1226,6 +1226,12 @@ int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx,
avpkt->data = new_data;
}
+ if (!user_packet && avpkt->data) {
+ uint8_t *new_data = av_realloc(avpkt->data, avpkt->size);
+ if (new_data)
+ avpkt->data = new_data;
+ }
+
avctx->frame_number++;
}
diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c
index 6fb6b17ff8..15e5adb5c5 100644
--- a/libavcodec/vc1dec.c
+++ b/libavcodec/vc1dec.c
@@ -5456,7 +5456,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
/* special case for last picture */
if (s->low_delay == 0 && s->next_picture_ptr) {
- *pict = *(AVFrame*)s->next_picture_ptr;
+ *pict = s->next_picture_ptr->f;
s->next_picture_ptr = NULL;
*data_size = sizeof(AVFrame);
@@ -5764,9 +5764,9 @@ image:
*data_size = sizeof(AVFrame);
} else {
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
- *pict = *(AVFrame*)s->current_picture_ptr;
+ *pict = s->current_picture_ptr->f;
} else if (s->last_picture_ptr != NULL) {
- *pict = *(AVFrame*)s->last_picture_ptr;
+ *pict = s->last_picture_ptr->f;
}
if (s->last_picture_ptr || s->low_delay) {
*data_size = sizeof(AVFrame);
diff --git a/libavcodec/vcr1.c b/libavcodec/vcr1.c
index 1526215314..7b0e7a53b4 100644
--- a/libavcodec/vcr1.c
+++ b/libavcodec/vcr1.c
@@ -49,7 +49,7 @@ static int decode_frame(AVCodecContext *avctx,
int buf_size = avpkt->size;
VCR1Context * const a = avctx->priv_data;
AVFrame *picture = data;
- AVFrame * const p= (AVFrame*)&a->picture;
+ AVFrame * const p = &a->picture;
const uint8_t *bytestream= buf;
int i, x, y;
@@ -116,7 +116,7 @@ static int decode_frame(AVCodecContext *avctx,
}
}
- *picture= *(AVFrame*)&a->picture;
+ *picture = a->picture;
*data_size = sizeof(AVPicture);
return buf_size;
@@ -126,7 +126,7 @@ static int decode_frame(AVCodecContext *avctx,
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
VCR1Context * const a = avctx->priv_data;
AVFrame *pict = data;
- AVFrame * const p= (AVFrame*)&a->picture;
+ AVFrame * const p = &a->picture;
int size;
*p = *pict;
@@ -146,7 +146,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
static av_cold void common_init(AVCodecContext *avctx){
VCR1Context * const a = avctx->priv_data;
- avctx->coded_frame= (AVFrame*)&a->picture;
+ avctx->coded_frame = &a->picture;
avcodec_get_frame_defaults(&a->picture);
a->avctx= avctx;
}
diff --git a/libavcodec/version.h b/libavcodec/version.h
index 1b72037572..1aa75eabbd 100644
--- a/libavcodec/version.h
+++ b/libavcodec/version.h
@@ -21,7 +21,7 @@
#define AVCODEC_VERSION_H
#define LIBAVCODEC_VERSION_MAJOR 54
-#define LIBAVCODEC_VERSION_MINOR 5
+#define LIBAVCODEC_VERSION_MINOR 6
#define LIBAVCODEC_VERSION_MICRO 100
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
diff --git a/libavcodec/vp8dsp.c b/libavcodec/vp8dsp.c
index ce90675d87..12f6988bff 100644
--- a/libavcodec/vp8dsp.c
+++ b/libavcodec/vp8dsp.c
@@ -77,7 +77,7 @@ static void vp8_luma_dc_wht_dc_c(DCTELEM block[4][4][16], DCTELEM dc[16])
#define MUL_20091(a) ((((a)*20091) >> 16) + (a))
#define MUL_35468(a) (((a)*35468) >> 16)
-static void vp8_idct_add_c(uint8_t *dst, DCTELEM block[16], int stride)
+static void vp8_idct_add_c(uint8_t *dst, DCTELEM block[16], ptrdiff_t stride)
{
int i, t0, t1, t2, t3;
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
@@ -113,7 +113,7 @@ static void vp8_idct_add_c(uint8_t *dst, DCTELEM block[16], int stride)
}
}
-static void vp8_idct_dc_add_c(uint8_t *dst, DCTELEM block[16], int stride)
+static void vp8_idct_dc_add_c(uint8_t *dst, DCTELEM block[16], ptrdiff_t stride)
{
int i, dc = (block[0] + 4) >> 3;
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP + dc;
@@ -128,7 +128,7 @@ static void vp8_idct_dc_add_c(uint8_t *dst, DCTELEM block[16], int stride)
}
}
-static void vp8_idct_dc_add4uv_c(uint8_t *dst, DCTELEM block[4][16], int stride)
+static void vp8_idct_dc_add4uv_c(uint8_t *dst, DCTELEM block[4][16], ptrdiff_t stride)
{
vp8_idct_dc_add_c(dst+stride*0+0, block[0], stride);
vp8_idct_dc_add_c(dst+stride*0+4, block[1], stride);
@@ -136,7 +136,7 @@ static void vp8_idct_dc_add4uv_c(uint8_t *dst, DCTELEM block[4][16], int stride)
vp8_idct_dc_add_c(dst+stride*4+4, block[3], stride);
}
-static void vp8_idct_dc_add4y_c(uint8_t *dst, DCTELEM block[4][16], int stride)
+static void vp8_idct_dc_add4y_c(uint8_t *dst, DCTELEM block[4][16], ptrdiff_t stride)
{
vp8_idct_dc_add_c(dst+ 0, block[0], stride);
vp8_idct_dc_add_c(dst+ 4, block[1], stride);
@@ -157,7 +157,7 @@ static void vp8_idct_dc_add4y_c(uint8_t *dst, DCTELEM block[4][16], int stride)
#define clip_int8(n) (cm[n+0x80]-0x80)
-static av_always_inline void filter_common(uint8_t *p, int stride, int is4tap)
+static av_always_inline void filter_common(uint8_t *p, ptrdiff_t stride, int is4tap)
{
LOAD_PIXELS
int a, f1, f2;
@@ -188,7 +188,7 @@ static av_always_inline void filter_common(uint8_t *p, int stride, int is4tap)
}
}
-static av_always_inline int simple_limit(uint8_t *p, int stride, int flim)
+static av_always_inline int simple_limit(uint8_t *p, ptrdiff_t stride, int flim)
{
LOAD_PIXELS
return 2*FFABS(p0-q0) + (FFABS(p1-q1) >> 1) <= flim;
@@ -198,7 +198,7 @@ static av_always_inline int simple_limit(uint8_t *p, int stride, int flim)
* E - limit at the macroblock edge
* I - limit for interior difference
*/
-static av_always_inline int normal_limit(uint8_t *p, int stride, int E, int I)
+static av_always_inline int normal_limit(uint8_t *p, ptrdiff_t stride, int E, int I)
{
LOAD_PIXELS
return simple_limit(p, stride, E)
@@ -207,13 +207,13 @@ static av_always_inline int normal_limit(uint8_t *p, int stride, int E, int I)
}
// high edge variance
-static av_always_inline int hev(uint8_t *p, int stride, int thresh)
+static av_always_inline int hev(uint8_t *p, ptrdiff_t stride, int thresh)
{
LOAD_PIXELS
return FFABS(p1-p0) > thresh || FFABS(q1-q0) > thresh;
}
-static av_always_inline void filter_mbedge(uint8_t *p, int stride)
+static av_always_inline void filter_mbedge(uint8_t *p, ptrdiff_t stride)
{
int a0, a1, a2, w;
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
@@ -236,7 +236,7 @@ static av_always_inline void filter_mbedge(uint8_t *p, int stride)
}
#define LOOP_FILTER(dir, size, stridea, strideb, maybe_inline) \
-static maybe_inline void vp8_ ## dir ## _loop_filter ## size ## _c(uint8_t *dst, int stride,\
+static maybe_inline void vp8_ ## dir ## _loop_filter ## size ## _c(uint8_t *dst, ptrdiff_t stride,\
int flim_E, int flim_I, int hev_thresh)\
{\
int i;\
@@ -250,7 +250,7 @@ static maybe_inline void vp8_ ## dir ## _loop_filter ## size ## _c(uint8_t *dst,
}\
}\
\
-static maybe_inline void vp8_ ## dir ## _loop_filter ## size ## _inner_c(uint8_t *dst, int stride,\
+static maybe_inline void vp8_ ## dir ## _loop_filter ## size ## _inner_c(uint8_t *dst, ptrdiff_t stride,\
int flim_E, int flim_I, int hev_thresh)\
{\
int i;\
@@ -270,13 +270,13 @@ LOOP_FILTER(h, 16, stride, 1,)
#define UV_LOOP_FILTER(dir, stridea, strideb) \
LOOP_FILTER(dir, 8, stridea, strideb, av_always_inline) \
-static void vp8_ ## dir ## _loop_filter8uv_c(uint8_t *dstU, uint8_t *dstV, int stride,\
+static void vp8_ ## dir ## _loop_filter8uv_c(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride,\
int fE, int fI, int hev_thresh)\
{\
vp8_ ## dir ## _loop_filter8_c(dstU, stride, fE, fI, hev_thresh);\
vp8_ ## dir ## _loop_filter8_c(dstV, stride, fE, fI, hev_thresh);\
}\
-static void vp8_ ## dir ## _loop_filter8uv_inner_c(uint8_t *dstU, uint8_t *dstV, int stride,\
+static void vp8_ ## dir ## _loop_filter8uv_inner_c(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride,\
int fE, int fI, int hev_thresh)\
{\
vp8_ ## dir ## _loop_filter8_inner_c(dstU, stride, fE, fI, hev_thresh);\
@@ -286,7 +286,7 @@ static void vp8_ ## dir ## _loop_filter8uv_inner_c(uint8_t *dstU, uint8_t *dstV,
UV_LOOP_FILTER(v, 1, stride)
UV_LOOP_FILTER(h, stride, 1)
-static void vp8_v_loop_filter_simple_c(uint8_t *dst, int stride, int flim)
+static void vp8_v_loop_filter_simple_c(uint8_t *dst, ptrdiff_t stride, int flim)
{
int i;
@@ -295,7 +295,7 @@ static void vp8_v_loop_filter_simple_c(uint8_t *dst, int stride, int flim)
filter_common(dst+i, stride, 1);
}
-static void vp8_h_loop_filter_simple_c(uint8_t *dst, int stride, int flim)
+static void vp8_h_loop_filter_simple_c(uint8_t *dst, ptrdiff_t stride, int flim)
{
int i;
@@ -315,7 +315,7 @@ static const uint8_t subpel_filters[7][6] = {
};
#define PUT_PIXELS(WIDTH) \
-static void put_vp8_pixels ## WIDTH ##_c(uint8_t *dst, int dststride, uint8_t *src, int srcstride, int h, int x, int y) { \
+static void put_vp8_pixels ## WIDTH ##_c(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride, int h, int x, int y) { \
int i; \
for (i = 0; i < h; i++, dst+= dststride, src+= srcstride) { \
memcpy(dst, src, WIDTH); \
@@ -335,7 +335,7 @@ PUT_PIXELS(4)
F[3]*src[x+1*stride] - F[4]*src[x+2*stride] + 64) >> 7]
#define VP8_EPEL_H(SIZE, TAPS) \
-static void put_vp8_epel ## SIZE ## _h ## TAPS ## _c(uint8_t *dst, int dststride, uint8_t *src, int srcstride, int h, int mx, int my) \
+static void put_vp8_epel ## SIZE ## _h ## TAPS ## _c(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride, int h, int mx, int my) \
{ \
const uint8_t *filter = subpel_filters[mx-1]; \
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; \
@@ -349,7 +349,7 @@ static void put_vp8_epel ## SIZE ## _h ## TAPS ## _c(uint8_t *dst, int dststride
} \
}
#define VP8_EPEL_V(SIZE, TAPS) \
-static void put_vp8_epel ## SIZE ## _v ## TAPS ## _c(uint8_t *dst, int dststride, uint8_t *src, int srcstride, int h, int mx, int my) \
+static void put_vp8_epel ## SIZE ## _v ## TAPS ## _c(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride, int h, int mx, int my) \
{ \
const uint8_t *filter = subpel_filters[my-1]; \
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; \
@@ -363,7 +363,7 @@ static void put_vp8_epel ## SIZE ## _v ## TAPS ## _c(uint8_t *dst, int dststride
} \
}
#define VP8_EPEL_HV(SIZE, HTAPS, VTAPS) \
-static void put_vp8_epel ## SIZE ## _h ## HTAPS ## v ## VTAPS ## _c(uint8_t *dst, int dststride, uint8_t *src, int srcstride, int h, int mx, int my) \
+static void put_vp8_epel ## SIZE ## _h ## HTAPS ## v ## VTAPS ## _c(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride, int h, int mx, int my) \
{ \
const uint8_t *filter = subpel_filters[mx-1]; \
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; \
@@ -416,7 +416,7 @@ VP8_EPEL_HV(8, 6, 6)
VP8_EPEL_HV(4, 6, 6)
#define VP8_BILINEAR(SIZE) \
-static void put_vp8_bilinear ## SIZE ## _h_c(uint8_t *dst, int stride, uint8_t *src, int s2, int h, int mx, int my) \
+static void put_vp8_bilinear ## SIZE ## _h_c(uint8_t *dst, ptrdiff_t stride, uint8_t *src, ptrdiff_t s2, int h, int mx, int my) \
{ \
int a = 8-mx, b = mx; \
int x, y; \
@@ -428,7 +428,7 @@ static void put_vp8_bilinear ## SIZE ## _h_c(uint8_t *dst, int stride, uint8_t *
src += stride; \
} \
} \
-static void put_vp8_bilinear ## SIZE ## _v_c(uint8_t *dst, int stride, uint8_t *src, int s2, int h, int mx, int my) \
+static void put_vp8_bilinear ## SIZE ## _v_c(uint8_t *dst, ptrdiff_t stride, uint8_t *src, ptrdiff_t s2, int h, int mx, int my) \
{ \
int c = 8-my, d = my; \
int x, y; \
@@ -441,7 +441,7 @@ static void put_vp8_bilinear ## SIZE ## _v_c(uint8_t *dst, int stride, uint8_t *
} \
} \
\
-static void put_vp8_bilinear ## SIZE ## _hv_c(uint8_t *dst, int stride, uint8_t *src, int s2, int h, int mx, int my) \
+static void put_vp8_bilinear ## SIZE ## _hv_c(uint8_t *dst, ptrdiff_t stride, uint8_t *src, ptrdiff_t s2, int h, int mx, int my) \
{ \
int a = 8-mx, b = mx; \
int c = 8-my, d = my; \
diff --git a/libavcodec/vp8dsp.h b/libavcodec/vp8dsp.h
index 951a5566a7..d0c7d4aacc 100644
--- a/libavcodec/vp8dsp.h
+++ b/libavcodec/vp8dsp.h
@@ -29,40 +29,44 @@
#include "dsputil.h"
-typedef void (*vp8_mc_func)(uint8_t *dst/*align 8*/, int dstStride,
- uint8_t *src/*align 1*/, int srcStride,
+typedef void (*vp8_mc_func)(uint8_t *dst/*align 8*/, ptrdiff_t dstStride,
+ uint8_t *src/*align 1*/, ptrdiff_t srcStride,
int h, int x, int y);
typedef struct VP8DSPContext {
void (*vp8_luma_dc_wht)(DCTELEM block[4][4][16], DCTELEM dc[16]);
void (*vp8_luma_dc_wht_dc)(DCTELEM block[4][4][16], DCTELEM dc[16]);
- void (*vp8_idct_add)(uint8_t *dst, DCTELEM block[16], int stride);
- void (*vp8_idct_dc_add)(uint8_t *dst, DCTELEM block[16], int stride);
- void (*vp8_idct_dc_add4y)(uint8_t *dst, DCTELEM block[4][16], int stride);
- void (*vp8_idct_dc_add4uv)(uint8_t *dst, DCTELEM block[4][16], int stride);
+ void (*vp8_idct_add)(uint8_t *dst, DCTELEM block[16], ptrdiff_t stride);
+ void (*vp8_idct_dc_add)(uint8_t *dst, DCTELEM block[16], ptrdiff_t stride);
+ void (*vp8_idct_dc_add4y)(uint8_t *dst, DCTELEM block[4][16],
+ ptrdiff_t stride);
+ void (*vp8_idct_dc_add4uv)(uint8_t *dst, DCTELEM block[4][16],
+ ptrdiff_t stride);
// loop filter applied to edges between macroblocks
- void (*vp8_v_loop_filter16y)(uint8_t *dst, int stride,
+ void (*vp8_v_loop_filter16y)(uint8_t *dst, ptrdiff_t stride,
int flim_E, int flim_I, int hev_thresh);
- void (*vp8_h_loop_filter16y)(uint8_t *dst, int stride,
+ void (*vp8_h_loop_filter16y)(uint8_t *dst, ptrdiff_t stride,
int flim_E, int flim_I, int hev_thresh);
- void (*vp8_v_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, int stride,
+ void (*vp8_v_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride,
int flim_E, int flim_I, int hev_thresh);
- void (*vp8_h_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, int stride,
+ void (*vp8_h_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride,
int flim_E, int flim_I, int hev_thresh);
// loop filter applied to inner macroblock edges
- void (*vp8_v_loop_filter16y_inner)(uint8_t *dst, int stride,
+ void (*vp8_v_loop_filter16y_inner)(uint8_t *dst, ptrdiff_t stride,
int flim_E, int flim_I, int hev_thresh);
- void (*vp8_h_loop_filter16y_inner)(uint8_t *dst, int stride,
+ void (*vp8_h_loop_filter16y_inner)(uint8_t *dst, ptrdiff_t stride,
int flim_E, int flim_I, int hev_thresh);
- void (*vp8_v_loop_filter8uv_inner)(uint8_t *dstU, uint8_t *dstV, int stride,
+ void (*vp8_v_loop_filter8uv_inner)(uint8_t *dstU, uint8_t *dstV,
+ ptrdiff_t stride,
int flim_E, int flim_I, int hev_thresh);
- void (*vp8_h_loop_filter8uv_inner)(uint8_t *dstU, uint8_t *dstV, int stride,
+ void (*vp8_h_loop_filter8uv_inner)(uint8_t *dstU, uint8_t *dstV,
+ ptrdiff_t stride,
int flim_E, int flim_I, int hev_thresh);
- void (*vp8_v_loop_filter_simple)(uint8_t *dst, int stride, int flim);
- void (*vp8_h_loop_filter_simple)(uint8_t *dst, int stride, int flim);
+ void (*vp8_v_loop_filter_simple)(uint8_t *dst, ptrdiff_t stride, int flim);
+ void (*vp8_h_loop_filter_simple)(uint8_t *dst, ptrdiff_t stride, int flim);
/**
* first dimension: width>>3, height is assumed equal to width
@@ -76,9 +80,12 @@ typedef struct VP8DSPContext {
vp8_mc_func put_vp8_bilinear_pixels_tab[3][3][3];
} VP8DSPContext;
-void ff_put_vp8_pixels16_c(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y);
-void ff_put_vp8_pixels8_c(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y);
-void ff_put_vp8_pixels4_c(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y);
+void ff_put_vp8_pixels16_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride,
+ int h, int x, int y);
+void ff_put_vp8_pixels8_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride,
+ int h, int x, int y);
+void ff_put_vp8_pixels4_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride,
+ int h, int x, int y);
void ff_vp8dsp_init(VP8DSPContext *c);
void ff_vp8dsp_init_x86(VP8DSPContext *c);
diff --git a/libavcodec/wma.h b/libavcodec/wma.h
index 6c8e944b79..954153fac0 100644
--- a/libavcodec/wma.h
+++ b/libavcodec/wma.h
@@ -124,7 +124,7 @@ typedef struct WMACodecContext {
/* output buffer for one frame and the last for IMDCT windowing */
DECLARE_ALIGNED(32, float, frame_out)[MAX_CHANNELS][BLOCK_MAX_SIZE * 2];
/* last frame info */
- uint8_t last_superframe[MAX_CODED_SUPERFRAME_SIZE + 4]; /* padding added */
+ uint8_t last_superframe[MAX_CODED_SUPERFRAME_SIZE + FF_INPUT_BUFFER_PADDING_SIZE]; /* padding added */
int last_bitoffset;
int last_superframe_len;
float noise_table[NOISE_TAB_SIZE];
diff --git a/libavcodec/wmadec.c b/libavcodec/wmadec.c
index 7dae379ba8..f26a1433a9 100644
--- a/libavcodec/wmadec.c
+++ b/libavcodec/wmadec.c
@@ -863,6 +863,12 @@ static int wma_decode_superframe(AVCodecContext *avctx, void *data,
if (s->use_bit_reservoir) {
bit_offset = get_bits(&s->gb, s->byte_offset_bits + 3);
+ if (bit_offset > get_bits_left(&s->gb)) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Invalid last frame bit offset %d > buf size %d (%d)\n",
+ bit_offset, get_bits_left(&s->gb), buf_size);
+ goto fail;
+ }
if (s->last_superframe_len > 0) {
// printf("skip=%d\n", s->last_bitoffset);
@@ -879,9 +885,10 @@ static int wma_decode_superframe(AVCodecContext *avctx, void *data,
if (len > 0) {
*q++ = (get_bits)(&s->gb, len) << (8 - len);
}
+ memset(q, 0, FF_INPUT_BUFFER_PADDING_SIZE);
/* XXX: bit_offset bits into last frame */
- init_get_bits(&s->gb, s->last_superframe, MAX_CODED_SUPERFRAME_SIZE*8);
+ init_get_bits(&s->gb, s->last_superframe, s->last_superframe_len * 8 + bit_offset);
/* skip unused bits */
if (s->last_bitoffset > 0)
skip_bits(&s->gb, s->last_bitoffset);
@@ -895,9 +902,9 @@ static int wma_decode_superframe(AVCodecContext *avctx, void *data,
/* read each frame starting from bit_offset */
pos = bit_offset + 4 + 4 + s->byte_offset_bits + 3;
- if (pos >= MAX_CODED_SUPERFRAME_SIZE * 8)
+ if (pos >= MAX_CODED_SUPERFRAME_SIZE * 8 || pos > buf_size * 8)
return AVERROR_INVALIDDATA;
- init_get_bits(&s->gb, buf + (pos >> 3), (MAX_CODED_SUPERFRAME_SIZE - (pos >> 3))*8);
+ init_get_bits(&s->gb, buf + (pos >> 3), (buf_size - (pos >> 3))*8);
len = pos & 7;
if (len > 0)
skip_bits(&s->gb, len);
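
The superframe parser now validates the transmitted bit offset against the bits actually left in the packet, and bounds the derived read position by the real buffer size instead of trusting MAX_CODED_SUPERFRAME_SIZE alone. A small standalone sketch of the position arithmetic — not part of the patch; split_bit_pos is a made-up helper:

    /* Illustrative only: reject a bit position outside the packet, then
     * split a valid one into a byte offset plus a sub-byte skip, as the
     * parser does before re-initialising its bitstream reader. */
    static int split_bit_pos(int pos, int buf_size,
                             int *byte_off, int *bit_skip)
    {
        if (pos < 0 || pos >= buf_size * 8)
            return -1;              /* would address bytes past the packet */
        *byte_off = pos >> 3;       /* buf + (pos >> 3) */
        *bit_skip = pos & 7;        /* bits to skip after init */
        return 0;
    }
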
diff --git a/libavcodec/wmalosslessdec.c b/libavcodec/wmalosslessdec.c
index b32731f96e..973edc438a 100644
--- a/libavcodec/wmalosslessdec.c
+++ b/libavcodec/wmalosslessdec.c
@@ -1,8 +1,9 @@
/*
- * Wmall compatible decoder
+ * Windows Media Audio Lossless decoder
* Copyright (c) 2007 Baptiste Coudurier, Benjamin Larsson, Ulion
* Copyright (c) 2008 - 2011 Sascha Sommer, Benjamin Larsson
* Copyright (c) 2011 Andreas Öman
+ * Copyright (c) 2011 - 2012 Mashiat Sarker Shakkhar
*
* This file is part of FFmpeg.
*
@@ -21,210 +22,94 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-/**
- * @file
- * @brief wmall decoder implementation
- * Wmall is an MDCT based codec comparable to wma standard or AAC.
- * The decoding therefore consists of the following steps:
- * - bitstream decoding
- * - reconstruction of per-channel data
- * - rescaling and inverse quantization
- * - IMDCT
- * - windowing and overlapp-add
- *
- * The compressed wmall bitstream is split into individual packets.
- * Every such packet contains one or more wma frames.
- * The compressed frames may have a variable length and frames may
- * cross packet boundaries.
- * Common to all wmall frames is the number of samples that are stored in
- * a frame.
- * The number of samples and a few other decode flags are stored
- * as extradata that has to be passed to the decoder.
- *
- * The wmall frames themselves are again split into a variable number of
- * subframes. Every subframe contains the data for 2^N time domain samples
- * where N varies between 7 and 12.
- *
- * Example wmall bitstream (in samples):
- *
- * || packet 0 || packet 1 || packet 2 packets
- * ---------------------------------------------------
- * || frame 0 || frame 1 || frame 2 || frames
- * ---------------------------------------------------
- * || | | || | | | || || subframes of channel 0
- * ---------------------------------------------------
- * || | | || | | | || || subframes of channel 1
- * ---------------------------------------------------
- *
- * The frame layouts for the individual channels of a wma frame does not need
- * to be the same.
- *
- * However, if the offsets and lengths of several subframes of a frame are the
- * same, the subframes of the channels can be grouped.
- * Every group may then use special coding techniques like M/S stereo coding
- * to improve the compression ratio. These channel transformations do not
- * need to be applied to a whole subframe. Instead, they can also work on
- * individual scale factor bands (see below).
- * The coefficients that carry the audio signal in the frequency domain
- * are transmitted as huffman-coded vectors with 4, 2 and 1 elements.
- * In addition to that, the encoder can switch to a runlevel coding scheme
- * by transmitting subframe_length / 128 zero coefficients.
- *
- * Before the audio signal can be converted to the time domain, the
- * coefficients have to be rescaled and inverse quantized.
- * A subframe is therefore split into several scale factor bands that get
- * scaled individually.
- * Scale factors are submitted for every frame but they might be shared
- * between the subframes of a channel. Scale factors are initially DPCM-coded.
- * Once scale factors are shared, the differences are transmitted as runlevel
- * codes.
- * Every subframe length and offset combination in the frame layout shares a
- * common quantization factor that can be adjusted for every channel by a
- * modifier.
- * After the inverse quantization, the coefficients get processed by an IMDCT.
- * The resulting values are then windowed with a sine window and the first half
- * of the values are added to the second half of the output from the previous
- * subframe in order to reconstruct the output samples.
- */
-
#include "avcodec.h"
#include "internal.h"
#include "get_bits.h"
#include "put_bits.h"
-#include "dsputil.h"
#include "wma.h"
/** current decoder limitations */
-#define WMALL_MAX_CHANNELS 8 ///< max number of handled channels
-#define MAX_SUBFRAMES 32 ///< max number of subframes per channel
-#define MAX_BANDS 29 ///< max number of scale factor bands
-#define MAX_FRAMESIZE 32768 ///< maximum compressed frame size
-
-#define WMALL_BLOCK_MIN_BITS 6 ///< log2 of min block size
-#define WMALL_BLOCK_MAX_BITS 12 ///< log2 of max block size
-#define WMALL_BLOCK_MAX_SIZE (1 << WMALL_BLOCK_MAX_BITS) ///< maximum block size
+#define WMALL_MAX_CHANNELS 8 ///< max number of handled channels
+#define MAX_SUBFRAMES 32 ///< max number of subframes per channel
+#define MAX_BANDS 29 ///< max number of scale factor bands
+#define MAX_FRAMESIZE 32768 ///< maximum compressed frame size
+
+#define WMALL_BLOCK_MIN_BITS 6 ///< log2 of min block size
+#define WMALL_BLOCK_MAX_BITS 12 ///< log2 of max block size
+#define WMALL_BLOCK_MAX_SIZE (1 << WMALL_BLOCK_MAX_BITS) ///< maximum block size
#define WMALL_BLOCK_SIZES (WMALL_BLOCK_MAX_BITS - WMALL_BLOCK_MIN_BITS + 1) ///< possible block sizes
-#define VLCBITS 9
-#define SCALEVLCBITS 8
-#define VEC4MAXDEPTH ((HUFF_VEC4_MAXBITS+VLCBITS-1)/VLCBITS)
-#define VEC2MAXDEPTH ((HUFF_VEC2_MAXBITS+VLCBITS-1)/VLCBITS)
-#define VEC1MAXDEPTH ((HUFF_VEC1_MAXBITS+VLCBITS-1)/VLCBITS)
-#define SCALEMAXDEPTH ((HUFF_SCALE_MAXBITS+SCALEVLCBITS-1)/SCALEVLCBITS)
-#define SCALERLMAXDEPTH ((HUFF_SCALE_RL_MAXBITS+VLCBITS-1)/VLCBITS)
-
-static float sin64[33]; ///< sinus table for decorrelation
-
/**
- * @brief frame specific decoder context for a single channel
+ * @brief frame-specific decoder context for a single channel
*/
typedef struct {
- int16_t prev_block_len; ///< length of the previous block
- uint8_t transmit_coefs;
- uint8_t num_subframes;
- uint16_t subframe_len[MAX_SUBFRAMES]; ///< subframe length in samples
- uint16_t subframe_offset[MAX_SUBFRAMES]; ///< subframe positions in the current frame
- uint8_t cur_subframe; ///< current subframe number
- uint16_t decoded_samples; ///< number of already processed samples
- uint8_t grouped; ///< channel is part of a group
- int quant_step; ///< quantization step for the current subframe
- int8_t reuse_sf; ///< share scale factors between subframes
- int8_t scale_factor_step; ///< scaling step for the current subframe
- int max_scale_factor; ///< maximum scale factor for the current subframe
- int saved_scale_factors[2][MAX_BANDS]; ///< resampled and (previously) transmitted scale factor values
- int8_t scale_factor_idx; ///< index for the transmitted scale factor values (used for resampling)
- int* scale_factors; ///< pointer to the scale factor values used for decoding
- uint8_t table_idx; ///< index in sf_offsets for the scale factor reference block
- float* coeffs; ///< pointer to the subframe decode buffer
- uint16_t num_vec_coeffs; ///< number of vector coded coefficients
- DECLARE_ALIGNED(16, float, out)[WMALL_BLOCK_MAX_SIZE + WMALL_BLOCK_MAX_SIZE / 2]; ///< output buffer
- int transient_counter; ///< number of transient samples from the beginning of transient zone
+ int16_t prev_block_len; ///< length of the previous block
+ uint8_t transmit_coefs;
+ uint8_t num_subframes;
+ uint16_t subframe_len[MAX_SUBFRAMES]; ///< subframe length in samples
+ uint16_t subframe_offsets[MAX_SUBFRAMES]; ///< subframe positions in the current frame
+ uint8_t cur_subframe; ///< current subframe number
+ uint16_t decoded_samples; ///< number of already processed samples
+ int quant_step; ///< quantization step for the current subframe
+ int transient_counter; ///< number of transient samples from the beginning of the transient zone
} WmallChannelCtx;
/**
- * @brief channel group for channel transformations
- */
-typedef struct {
- uint8_t num_channels; ///< number of channels in the group
- int8_t transform; ///< transform on / off
- int8_t transform_band[MAX_BANDS]; ///< controls if the transform is enabled for a certain band
- float decorrelation_matrix[WMALL_MAX_CHANNELS*WMALL_MAX_CHANNELS];
- float* channel_data[WMALL_MAX_CHANNELS]; ///< transformation coefficients
-} WmallChannelGrp;
-
-/**
* @brief main decoder context
*/
typedef struct WmallDecodeCtx {
/* generic decoder variables */
- AVCodecContext* avctx; ///< codec context for av_log
- DSPContext dsp; ///< accelerated DSP functions
- AVFrame frame;
- uint8_t frame_data[MAX_FRAMESIZE +
- FF_INPUT_BUFFER_PADDING_SIZE];///< compressed frame data
- PutBitContext pb; ///< context for filling the frame_data buffer
- FFTContext mdct_ctx[WMALL_BLOCK_SIZES]; ///< MDCT context per block size
- DECLARE_ALIGNED(16, float, tmp)[WMALL_BLOCK_MAX_SIZE]; ///< IMDCT output buffer
- float* windows[WMALL_BLOCK_SIZES]; ///< windows for the different block sizes
+ AVCodecContext *avctx;
+ AVFrame frame;
+ uint8_t frame_data[MAX_FRAMESIZE + FF_INPUT_BUFFER_PADDING_SIZE]; ///< compressed frame data
+ PutBitContext pb; ///< context for filling the frame_data buffer
/* frame size dependent frame information (set during initialization) */
- uint32_t decode_flags; ///< used compression features
- uint8_t len_prefix; ///< frame is prefixed with its length
- uint8_t dynamic_range_compression; ///< frame contains DRC data
- uint8_t bits_per_sample; ///< integer audio sample size for the unscaled IMDCT output (used to scale to [-1.0, 1.0])
- uint16_t samples_per_frame; ///< number of samples to output
- uint16_t log2_frame_size;
- int8_t num_channels; ///< number of channels in the stream (same as AVCodecContext.num_channels)
- int8_t lfe_channel; ///< lfe channel index
- uint8_t max_num_subframes;
- uint8_t subframe_len_bits; ///< number of bits used for the subframe length
- uint8_t max_subframe_len_bit; ///< flag indicating that the subframe is of maximum size when the first subframe length bit is 1
- uint16_t min_samples_per_subframe;
- int8_t num_sfb[WMALL_BLOCK_SIZES]; ///< scale factor bands per block size
- int16_t sfb_offsets[WMALL_BLOCK_SIZES][MAX_BANDS]; ///< scale factor band offsets (multiples of 4)
- int8_t sf_offsets[WMALL_BLOCK_SIZES][WMALL_BLOCK_SIZES][MAX_BANDS]; ///< scale factor resample matrix
- int16_t subwoofer_cutoffs[WMALL_BLOCK_SIZES]; ///< subwoofer cutoff values
+ uint32_t decode_flags; ///< used compression features
+ int len_prefix; ///< frame is prefixed with its length
+ int dynamic_range_compression; ///< frame contains DRC data
+ uint8_t bits_per_sample; ///< integer audio sample size for the unscaled IMDCT output (used to scale to [-1.0, 1.0])
+ uint16_t samples_per_frame; ///< number of samples to output
+ uint16_t log2_frame_size;
+ int8_t num_channels; ///< number of channels in the stream (same as AVCodecContext.num_channels)
+ int8_t lfe_channel; ///< lfe channel index
+ uint8_t max_num_subframes;
+ uint8_t subframe_len_bits; ///< number of bits used for the subframe length
+ uint8_t max_subframe_len_bit; ///< flag indicating that the subframe is of maximum size when the first subframe length bit is 1
+ uint16_t min_samples_per_subframe;
/* packet decode state */
- GetBitContext pgb; ///< bitstream reader context for the packet
- int next_packet_start; ///< start offset of the next wma packet in the demuxer packet
- uint8_t packet_offset; ///< frame offset in the packet
- uint8_t packet_sequence_number; ///< current packet number
- int num_saved_bits; ///< saved number of bits
- int frame_offset; ///< frame offset in the bit reservoir
- int subframe_offset; ///< subframe offset in the bit reservoir
- uint8_t packet_loss; ///< set in case of bitstream error
- uint8_t packet_done; ///< set when a packet is fully decoded
+ GetBitContext pgb; ///< bitstream reader context for the packet
+ int next_packet_start; ///< start offset of the next WMA packet in the demuxer packet
+ uint8_t packet_offset; ///< offset to the frame in the packet
+ uint8_t packet_sequence_number; ///< current packet number
+ int num_saved_bits; ///< saved number of bits
+ int frame_offset; ///< frame offset in the bit reservoir
+ int subframe_offset; ///< subframe offset in the bit reservoir
+ uint8_t packet_loss; ///< set in case of bitstream error
+ uint8_t packet_done; ///< set when a packet is fully decoded
/* frame decode state */
- uint32_t frame_num; ///< current frame number (not used for decoding)
- GetBitContext gb; ///< bitstream reader context
- int buf_bit_size; ///< buffer size in bits
- int16_t* samples_16; ///< current samplebuffer pointer (16-bit)
- int16_t* samples_16_end; ///< maximum samplebuffer pointer
+ uint32_t frame_num; ///< current frame number (not used for decoding)
+ GetBitContext gb; ///< bitstream reader context
+ int buf_bit_size; ///< buffer size in bits
+ int16_t *samples_16; ///< current samplebuffer pointer (16-bit)
+ int16_t *samples_16_end; ///< maximum samplebuffer pointer
int *samples_32; ///< current samplebuffer pointer (24-bit)
int *samples_32_end; ///< maximum samplebuffer pointer
- uint8_t drc_gain; ///< gain for the DRC tool
- int8_t skip_frame; ///< skip output step
- int8_t parsed_all_subframes; ///< all subframes decoded?
+ uint8_t drc_gain; ///< gain for the DRC tool
+ int8_t skip_frame; ///< skip output step
+ int8_t parsed_all_subframes; ///< all subframes decoded?
/* subframe/block decode state */
- int16_t subframe_len; ///< current subframe length
- int8_t channels_for_cur_subframe; ///< number of channels that contain the subframe
- int8_t channel_indexes_for_cur_subframe[WMALL_MAX_CHANNELS];
- int8_t num_bands; ///< number of scale factor bands
- int8_t transmit_num_vec_coeffs; ///< number of vector coded coefficients is part of the bitstream
- int16_t* cur_sfb_offsets; ///< sfb offsets for the current block
- uint8_t table_idx; ///< index for the num_sfb, sfb_offsets, sf_offsets and subwoofer_cutoffs tables
- int8_t esc_len; ///< length of escaped coefficients
-
- uint8_t num_chgroups; ///< number of channel groups
- WmallChannelGrp chgroup[WMALL_MAX_CHANNELS]; ///< channel group information
+ int16_t subframe_len; ///< current subframe length
+ int8_t channels_for_cur_subframe; ///< number of channels that contain the subframe
+ int8_t channel_indexes_for_cur_subframe[WMALL_MAX_CHANNELS];
WmallChannelCtx channel[WMALL_MAX_CHANNELS]; ///< per channel data
- // WMA lossless
+ // WMA Lossless-specific
uint8_t do_arith_coding;
uint8_t do_ac_filter;
@@ -232,41 +117,38 @@ typedef struct WmallDecodeCtx {
uint8_t do_mclms;
uint8_t do_lpc;
- int8_t acfilter_order;
- int8_t acfilter_scaling;
+ int8_t acfilter_order;
+ int8_t acfilter_scaling;
int64_t acfilter_coeffs[16];
- int acfilter_prevvalues[2][16];
+ int acfilter_prevvalues[2][16];
- int8_t mclms_order;
- int8_t mclms_scaling;
+ int8_t mclms_order;
+ int8_t mclms_scaling;
int16_t mclms_coeffs[128];
int16_t mclms_coeffs_cur[4];
- int mclms_prevvalues[64]; // FIXME: should be 32-bit / 16-bit depending on bit-depth
+ int16_t mclms_prevvalues[64];
int16_t mclms_updates[64];
- int mclms_recent;
+ int mclms_recent;
- int movave_scaling;
- int quant_stepsize;
+ int movave_scaling;
+ int quant_stepsize;
struct {
- int order;
- int scaling;
- int coefsend;
- int bitsend;
- int16_t coefs[256];
- int lms_prevvalues[512]; // FIXME: see above
- int16_t lms_updates[512]; // and here too
- int recent;
- } cdlms[2][9]; /* XXX: Here, 2 is the max. no. of channels allowed,
- 9 is the maximum no. of filters per channel.
- Question is, why 2 if WMALL_MAX_CHANNELS == 8 */
-
+ int order;
+ int scaling;
+ int coefsend;
+ int bitsend;
+ int16_t coefs[256];
+ int16_t lms_prevvalues[512];
+ int16_t lms_updates[512];
+ int recent;
+ } cdlms[2][9];
int cdlms_ttl[2];
int bV3RTM;
- int is_channel_coded[2]; // XXX: same question as above applies here too (and below)
+ int is_channel_coded[2];
int update_speed[2];
int transient[2];
@@ -277,139 +159,76 @@ typedef struct WmallDecodeCtx {
int channel_residues[2][2048];
-
int lpc_coefs[2][40];
int lpc_order;
int lpc_scaling;
int lpc_intbits;
- int channel_coeffs[2][2048]; // FIXME: should be 32-bit / 16-bit depending on bit-depth
-
+ int channel_coeffs[2][2048];
} WmallDecodeCtx;
-#undef dprintf
-#define dprintf(pctx, ...) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__)
-
-
-static int num_logged_tiles = 0;
-static int num_logged_subframes = 0;
-static int num_lms_update_call = 0;
-
-/**
- *@brief helper function to print the most important members of the context
- *@param s context
- */
-static void av_cold dump_context(WmallDecodeCtx *s)
-{
-#define PRINT(a, b) av_log(s->avctx, AV_LOG_DEBUG, " %s = %d\n", a, b);
-#define PRINT_HEX(a, b) av_log(s->avctx, AV_LOG_DEBUG, " %s = %x\n", a, b);
-
- PRINT("ed sample bit depth", s->bits_per_sample);
- PRINT_HEX("ed decode flags", s->decode_flags);
- PRINT("samples per frame", s->samples_per_frame);
- PRINT("log2 frame size", s->log2_frame_size);
- PRINT("max num subframes", s->max_num_subframes);
- PRINT("len prefix", s->len_prefix);
- PRINT("num channels", s->num_channels);
-}
-
-static void dump_int_buffer(uint8_t *buffer, int size, int length, int delimiter)
-{
- int i;
-
- for (i=0 ; i<length ; i++) {
- if (!(i%delimiter))
- av_log(0, 0, "\n[%d] ", i);
- av_log(0, 0, "%d, ", *(int16_t *)(buffer + i * size));
- }
- av_log(0, 0, "\n");
-}
-
-/**
- *@brief Uninitialize the decoder and free all resources.
- *@param avctx codec context
- *@return 0 on success, < 0 otherwise
- */
-static av_cold int decode_end(AVCodecContext *avctx)
-{
- WmallDecodeCtx *s = avctx->priv_data;
- int i;
-
- for (i = 0; i < WMALL_BLOCK_SIZES; i++)
- ff_mdct_end(&s->mdct_ctx[i]);
-
- return 0;
-}
-
-/**
- *@brief Initialize the decoder.
- *@param avctx codec context
- *@return 0 on success, -1 otherwise
- */
static av_cold int decode_init(AVCodecContext *avctx)
{
- WmallDecodeCtx *s = avctx->priv_data;
+ WmallDecodeCtx *s = avctx->priv_data;
uint8_t *edata_ptr = avctx->extradata;
unsigned int channel_mask;
- int i;
- int log2_max_num_subframes;
- int num_possible_block_sizes;
+ int i, log2_max_num_subframes, num_possible_block_sizes;
s->avctx = avctx;
- dsputil_init(&s->dsp, avctx);
init_put_bits(&s->pb, s->frame_data, MAX_FRAMESIZE);
if (avctx->extradata_size >= 18) {
- s->decode_flags = AV_RL16(edata_ptr+14);
- channel_mask = AV_RL32(edata_ptr+2);
+ s->decode_flags = AV_RL16(edata_ptr + 14);
+ channel_mask = AV_RL32(edata_ptr + 2);
s->bits_per_sample = AV_RL16(edata_ptr);
if (s->bits_per_sample == 16)
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
- else if (s->bits_per_sample == 24)
+ else if (s->bits_per_sample == 24) {
avctx->sample_fmt = AV_SAMPLE_FMT_S32;
- else {
+ av_log_missing_feature(avctx, "bit-depth higher than 16", 0);
+ return AVERROR_PATCHWELCOME;
+ } else {
av_log(avctx, AV_LOG_ERROR, "Unknown bit-depth: %d\n",
s->bits_per_sample);
return AVERROR_INVALIDDATA;
}
- /** dump the extradata */
+ /* dump the extradata */
for (i = 0; i < avctx->extradata_size; i++)
- dprintf(avctx, "[%x] ", avctx->extradata[i]);
- dprintf(avctx, "\n");
+ av_dlog(avctx, "[%x] ", avctx->extradata[i]);
+ av_dlog(avctx, "\n");
} else {
- av_log_ask_for_sample(avctx, "Unknown extradata size\n");
+ av_log_ask_for_sample(avctx, "Unsupported extradata size\n");
return AVERROR_INVALIDDATA;
}
- /** generic init */
+ /* generic init */
s->log2_frame_size = av_log2(avctx->block_align) + 4;
- /** frame info */
+ /* frame info */
s->skip_frame = 1; /* skip first frame */
s->packet_loss = 1;
- s->len_prefix = (s->decode_flags & 0x40);
+ s->len_prefix = s->decode_flags & 0x40;
- /** get frame len */
+ /* get frame len */
s->samples_per_frame = 1 << ff_wma_get_frame_len_bits(avctx->sample_rate,
3, s->decode_flags);
- /** init previous block len */
+ /* init previous block len */
for (i = 0; i < avctx->channels; i++)
s->channel[i].prev_block_len = s->samples_per_frame;
- /** subframe info */
- log2_max_num_subframes = ((s->decode_flags & 0x38) >> 3);
+ /* subframe info */
+ log2_max_num_subframes = (s->decode_flags & 0x38) >> 3;
s->max_num_subframes = 1 << log2_max_num_subframes;
s->max_subframe_len_bit = 0;
s->subframe_len_bits = av_log2(log2_max_num_subframes) + 1;
num_possible_block_sizes = log2_max_num_subframes + 1;
s->min_samples_per_subframe = s->samples_per_frame / s->max_num_subframes;
- s->dynamic_range_compression = (s->decode_flags & 0x80);
-
- s->bV3RTM = s->decode_flags & 0x100;
+ s->dynamic_range_compression = s->decode_flags & 0x80;
+ s->bV3RTM = s->decode_flags & 0x100;
if (s->max_num_subframes > MAX_SUBFRAMES) {
av_log(avctx, AV_LOG_ERROR, "invalid number of subframes %i\n",
@@ -419,19 +238,19 @@ static av_cold int decode_init(AVCodecContext *avctx)
s->num_channels = avctx->channels;
- /** extract lfe channel position */
+ /* extract lfe channel position */
s->lfe_channel = -1;
if (channel_mask & 8) {
unsigned int mask;
- for (mask = 1; mask < 16; mask <<= 1) {
+ for (mask = 1; mask < 16; mask <<= 1)
if (channel_mask & mask)
++s->lfe_channel;
- }
}
if (s->num_channels < 0) {
- av_log(avctx, AV_LOG_ERROR, "invalid number of channels %d\n", s->num_channels);
+ av_log(avctx, AV_LOG_ERROR, "invalid number of channels %d\n",
+ s->num_channels);
return AVERROR_INVALIDDATA;
} else if (s->num_channels > WMALL_MAX_CHANNELS) {
av_log_ask_for_sample(avctx, "unsupported number of channels\n");
@@ -439,33 +258,30 @@ static av_cold int decode_init(AVCodecContext *avctx)
}
avcodec_get_frame_defaults(&s->frame);
- avctx->coded_frame = &s->frame;
-
+ avctx->coded_frame = &s->frame;
avctx->channel_layout = channel_mask;
return 0;
}
/**
- *@brief Decode the subframe length.
- *@param s context
- *@param offset sample offset in the frame
- *@return decoded subframe length on success, < 0 in case of an error
+ * @brief Decode the subframe length.
+ * @param s context
+ * @param offset sample offset in the frame
+ * @return decoded subframe length on success, < 0 in case of an error
*/
static int decode_subframe_length(WmallDecodeCtx *s, int offset)
{
- int frame_len_ratio;
- int subframe_len, len;
+ int frame_len_ratio, subframe_len, len;
- /** no need to read from the bitstream when only one length is possible */
+ /* no need to read from the bitstream when only one length is possible */
if (offset == s->samples_per_frame - s->min_samples_per_subframe)
return s->min_samples_per_subframe;
- len = av_log2(s->max_num_subframes - 1) + 1;
+ len = av_log2(s->max_num_subframes - 1) + 1;
frame_len_ratio = get_bits(&s->gb, len);
+ subframe_len = s->min_samples_per_subframe * (frame_len_ratio + 1);
- subframe_len = s->min_samples_per_subframe * (frame_len_ratio + 1);
-
- /** sanity check the length */
+ /* sanity check the length */
if (subframe_len < s->min_samples_per_subframe ||
subframe_len > s->samples_per_frame) {
av_log(s->avctx, AV_LOG_ERROR, "broken frame: subframe_len %i\n",
@@ -476,7 +292,7 @@ static int decode_subframe_length(WmallDecodeCtx *s, int offset)
}
/**
- *@brief Decode how the data in the frame is split into subframes.
+ * @brief Decode how the data in the frame is split into subframes.
* Every WMA frame contains the encoded data for a fixed number of
* samples per channel. The data for every channel might be split
* into several subframes. This function will reconstruct the list of
@@ -492,38 +308,31 @@ static int decode_subframe_length(WmallDecodeCtx *s, int offset)
* The algorithm repeats these steps until the frame is properly divided
* between the individual channels.
*
- *@param s context
- *@return 0 on success, < 0 in case of an error
+ * @param s context
+ * @return 0 on success, < 0 in case of an error
*/
static int decode_tilehdr(WmallDecodeCtx *s)
{
- uint16_t num_samples[WMALL_MAX_CHANNELS]; /**< sum of samples for all currently known subframes of a channel */
- uint8_t contains_subframe[WMALL_MAX_CHANNELS]; /**< flag indicating if a channel contains the current subframe */
- int channels_for_cur_subframe = s->num_channels; /**< number of channels that contain the current subframe */
- int fixed_channel_layout = 0; /**< flag indicating that all channels use the same subfra2me offsets and sizes */
- int min_channel_len = 0; /**< smallest sum of samples (channels with this length will be processed first) */
- int c;
-
- /* Should never consume more than 3073 bits (256 iterations for the
- * while loop when always the minimum amount of 128 samples is substracted
- * from missing samples in the 8 channel case).
- * 1 + BLOCK_MAX_SIZE * MAX_CHANNELS / BLOCK_MIN_SIZE * (MAX_CHANNELS + 4)
- */
-
- /** reset tiling information */
+ uint16_t num_samples[WMALL_MAX_CHANNELS] = { 0 }; /* sum of samples for all currently known subframes of a channel */
+ uint8_t contains_subframe[WMALL_MAX_CHANNELS]; /* flag indicating if a channel contains the current subframe */
+ int channels_for_cur_subframe = s->num_channels; /* number of channels that contain the current subframe */
+ int fixed_channel_layout = 0; /* flag indicating that all channels use the same subframe offsets and sizes */
+ int min_channel_len = 0; /* smallest sum of samples (channels with this length will be processed first) */
+ int c, tile_aligned;
+
+ /* reset tiling information */
for (c = 0; c < s->num_channels; c++)
s->channel[c].num_subframes = 0;
- memset(num_samples, 0, sizeof(num_samples));
-
- if (s->max_num_subframes == 1 || get_bits1(&s->gb))
+ tile_aligned = get_bits1(&s->gb);
+ if (s->max_num_subframes == 1 || tile_aligned)
fixed_channel_layout = 1;
- /** loop until the frame data is split between the subframes */
+ /* loop until the frame data is split between the subframes */
do {
int subframe_len;
- /** check which channels contain the subframe */
+ /* check which channels contain the subframe */
for (c = 0; c < s->num_channels; c++) {
if (num_samples[c] == min_channel_len) {
if (fixed_channel_layout || channels_for_cur_subframe == 1 ||
@@ -536,13 +345,13 @@ static int decode_tilehdr(WmallDecodeCtx *s)
contains_subframe[c] = 0;
}
- /** get subframe length, subframe_len == 0 is not allowed */
+ /* get subframe length, subframe_len == 0 is not allowed */
if ((subframe_len = decode_subframe_length(s, min_channel_len)) <= 0)
return AVERROR_INVALIDDATA;
- /** add subframes to the individual channels and find new min_channel_len */
+ /* add subframes to the individual channels and find new min_channel_len */
min_channel_len += subframe_len;
for (c = 0; c < s->num_channels; c++) {
- WmallChannelCtx* chan = &s->channel[c];
+ WmallChannelCtx *chan = &s->channel[c];
if (contains_subframe[c]) {
if (chan->num_subframes >= MAX_SUBFRAMES) {
@@ -570,10 +379,9 @@ static int decode_tilehdr(WmallDecodeCtx *s)
} while (min_channel_len < s->samples_per_frame);
for (c = 0; c < s->num_channels; c++) {
- int i;
- int offset = 0;
+ int i, offset = 0;
for (i = 0; i < s->channel[c].num_subframes; i++) {
- s->channel[c].subframe_offset[i] = offset;
+ s->channel[c].subframe_offsets[i] = offset;
offset += s->channel[c].subframe_len[i];
}
}
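
decode_tilehdr() above keeps handing the next subframe length to whichever channels currently have the fewest decoded samples, until every channel reaches samples_per_frame. A standalone toy run of that loop for the fixed-layout case — not part of the patch; the hard-coded lengths replace decode_subframe_length() so the example is self-contained:

    #include <stdio.h>

    /* Illustrative only: the shape of the tiling loop when all channels
     * share the same subframe layout. */
    int main(void)
    {
        enum { CHANS = 2, SAMPLES_PER_FRAME = 2048 };
        static const int lengths[] = { 512, 512, 1024 };
        int decoded[CHANS] = { 0 };
        int min_len = 0, next = 0;
        int c;

        do {
            int len = lengths[next++];       /* decode_subframe_length() */
            for (c = 0; c < CHANS; c++)
                if (decoded[c] == min_len)   /* only the shortest channels grow */
                    decoded[c] += len;
            min_len += len;
        } while (min_len < SAMPLES_PER_FRAME);

        printf("channel 0: %d samples, channel 1: %d samples\n",
               decoded[0], decoded[1]);
        return 0;
    }
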
@@ -581,215 +389,175 @@ static int decode_tilehdr(WmallDecodeCtx *s)
return 0;
}
-
-static int my_log2(unsigned int i)
-{
- unsigned int iLog2 = 0;
- while ((i >> iLog2) > 1)
- iLog2++;
- return iLog2;
-}
-
-
-/**
- *
- */
static void decode_ac_filter(WmallDecodeCtx *s)
{
int i;
- s->acfilter_order = get_bits(&s->gb, 4) + 1;
+ s->acfilter_order = get_bits(&s->gb, 4) + 1;
s->acfilter_scaling = get_bits(&s->gb, 4);
- for(i = 0; i < s->acfilter_order; i++) {
+ for (i = 0; i < s->acfilter_order; i++)
s->acfilter_coeffs[i] = get_bits(&s->gb, s->acfilter_scaling) + 1;
- }
}
-
-/**
- *
- */
static void decode_mclms(WmallDecodeCtx *s)
{
- s->mclms_order = (get_bits(&s->gb, 4) + 1) * 2;
+ s->mclms_order = (get_bits(&s->gb, 4) + 1) * 2;
s->mclms_scaling = get_bits(&s->gb, 4);
- if(get_bits1(&s->gb)) {
- // mclms_send_coef
- int i;
- int send_coef_bits;
+ if (get_bits1(&s->gb)) {
+ int i, send_coef_bits;
int cbits = av_log2(s->mclms_scaling + 1);
assert(cbits == my_log2(s->mclms_scaling + 1));
- if(1 << cbits < s->mclms_scaling + 1)
+ if (1 << cbits < s->mclms_scaling + 1)
cbits++;
send_coef_bits = (cbits ? get_bits(&s->gb, cbits) : 0) + 2;
- for(i = 0; i < s->mclms_order * s->num_channels * s->num_channels; i++) {
+ for (i = 0; i < s->mclms_order * s->num_channels * s->num_channels; i++)
s->mclms_coeffs[i] = get_bits(&s->gb, send_coef_bits);
- }
- for(i = 0; i < s->num_channels; i++) {
+ for (i = 0; i < s->num_channels; i++) {
int c;
- for(c = 0; c < i; c++) {
+ for (c = 0; c < i; c++)
s->mclms_coeffs_cur[i * s->num_channels + c] = get_bits(&s->gb, send_coef_bits);
- }
}
}
}
-
-/**
- *
- */
static void decode_cdlms(WmallDecodeCtx *s)
{
int c, i;
int cdlms_send_coef = get_bits1(&s->gb);
- for(c = 0; c < s->num_channels; c++) {
+ for (c = 0; c < s->num_channels; c++) {
s->cdlms_ttl[c] = get_bits(&s->gb, 3) + 1;
- for(i = 0; i < s->cdlms_ttl[c]; i++) {
+ for (i = 0; i < s->cdlms_ttl[c]; i++)
s->cdlms[c][i].order = (get_bits(&s->gb, 7) + 1) * 8;
- }
- for(i = 0; i < s->cdlms_ttl[c]; i++) {
+ for (i = 0; i < s->cdlms_ttl[c]; i++)
s->cdlms[c][i].scaling = get_bits(&s->gb, 4);
- }
- if(cdlms_send_coef) {
- for(i = 0; i < s->cdlms_ttl[c]; i++) {
+ if (cdlms_send_coef) {
+ for (i = 0; i < s->cdlms_ttl[c]; i++) {
int cbits, shift_l, shift_r, j;
cbits = av_log2(s->cdlms[c][i].order);
- if(1 << cbits < s->cdlms[c][i].order)
+ if ((1 << cbits) < s->cdlms[c][i].order)
cbits++;
s->cdlms[c][i].coefsend = get_bits(&s->gb, cbits) + 1;
cbits = av_log2(s->cdlms[c][i].scaling + 1);
- if(1 << cbits < s->cdlms[c][i].scaling + 1)
+ if ((1 << cbits) < s->cdlms[c][i].scaling + 1)
cbits++;
s->cdlms[c][i].bitsend = get_bits(&s->gb, cbits) + 2;
shift_l = 32 - s->cdlms[c][i].bitsend;
- shift_r = 32 - 2 - s->cdlms[c][i].scaling;
- for(j = 0; j < s->cdlms[c][i].coefsend; j++) {
+ shift_r = 32 - s->cdlms[c][i].scaling - 2;
+ for (j = 0; j < s->cdlms[c][i].coefsend; j++)
s->cdlms[c][i].coefs[j] =
(get_bits(&s->gb, s->cdlms[c][i].bitsend) << shift_l) >> shift_r;
- }
}
}
}
}
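
decode_cdlms() reads each coefficient as a bitsend-wide code and turns it into a signed, scaled value with a pair of shifts. A standalone sketch of that trick — not part of the patch; sign_extend_and_scale is a made-up name, and like the decoder it assumes arithmetic right shifts of negative values:

    #include <stdint.h>

    /* Illustrative only: place the raw 'bits'-wide code in the top of a
     * 32-bit word, then arithmetic-shift it back down.  This both
     * sign-extends the code and applies the net 2^(scaling + 2 - bits)
     * weighting in a single step. */
    static int32_t sign_extend_and_scale(uint32_t raw, int bits, int scaling)
    {
        int shift_l = 32 - bits;          /* bits    == cdlms[c][i].bitsend */
        int shift_r = 32 - scaling - 2;   /* scaling == cdlms[c][i].scaling */

        return (int32_t)(raw << shift_l) >> shift_r;
    }
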
-/**
- *
- */
static int decode_channel_residues(WmallDecodeCtx *s, int ch, int tile_size)
{
int i = 0;
unsigned int ave_mean;
s->transient[ch] = get_bits1(&s->gb);
- if(s->transient[ch]) {
- s->transient_pos[ch] = get_bits(&s->gb, av_log2(tile_size));
+ if (s->transient[ch]) {
+ s->transient_pos[ch] = get_bits(&s->gb, av_log2(tile_size));
if (s->transient_pos[ch])
- s->transient[ch] = 0;
- s->channel[ch].transient_counter =
- FFMAX(s->channel[ch].transient_counter, s->samples_per_frame / 2);
- } else if (s->channel[ch].transient_counter)
- s->transient[ch] = 1;
+ s->transient[ch] = 0;
+ s->channel[ch].transient_counter =
+ FFMAX(s->channel[ch].transient_counter, s->samples_per_frame / 2);
+ } else if (s->channel[ch].transient_counter)
+ s->transient[ch] = 1;
- if(s->seekable_tile) {
+ if (s->seekable_tile) {
ave_mean = get_bits(&s->gb, s->bits_per_sample);
s->ave_sum[ch] = ave_mean << (s->movave_scaling + 1);
-// s->ave_sum[ch] *= 2;
}
- if(s->seekable_tile) {
- if(s->do_inter_ch_decorr)
+ if (s->seekable_tile) {
+ if (s->do_inter_ch_decorr)
s->channel_residues[ch][0] = get_sbits(&s->gb, s->bits_per_sample + 1);
else
s->channel_residues[ch][0] = get_sbits(&s->gb, s->bits_per_sample);
i++;
}
- //av_log(0, 0, "%8d: ", num_logged_tiles++);
- for(; i < tile_size; i++) {
+ for (; i < tile_size; i++) {
int quo = 0, rem, rem_bits, residue;
while(get_bits1(&s->gb)) {
quo++;
if (get_bits_left(&s->gb) <= 0)
return -1;
}
- if(quo >= 32)
+ if (quo >= 32)
quo += get_bits_long(&s->gb, get_bits(&s->gb, 5) + 1);
- ave_mean = (s->ave_sum[ch] + (1 << s->movave_scaling)) >> (s->movave_scaling + 1);
- if (ave_mean <= 1)
- residue = quo;
- else
- {
- rem_bits = av_ceil_log2(ave_mean);
- rem = rem_bits ? get_bits(&s->gb, rem_bits) : 0;
- residue = (quo << rem_bits) + rem;
- }
+ ave_mean = (s->ave_sum[ch] + (1 << s->movave_scaling)) >> (s->movave_scaling + 1);
+ if (ave_mean <= 1)
+ residue = quo;
+ else {
+ rem_bits = av_ceil_log2(ave_mean);
+ rem = rem_bits ? get_bits(&s->gb, rem_bits) : 0;
+ residue = (quo << rem_bits) + rem;
+ }
- s->ave_sum[ch] = residue + s->ave_sum[ch] - (s->ave_sum[ch] >> s->movave_scaling);
+ s->ave_sum[ch] = residue + s->ave_sum[ch] -
+ (s->ave_sum[ch] >> s->movave_scaling);
- if(residue & 1)
+ if (residue & 1)
residue = -(residue >> 1) - 1;
else
residue = residue >> 1;
s->channel_residues[ch][i] = residue;
}
- //dump_int_buffer(s->channel_residues[ch], 4, tile_size, 16);
return 0;
}
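
decode_channel_residues() reads each residue as a unary quotient (with an escape once it reaches 32), an optional binary remainder whose width follows a moving average of recent magnitudes, and a zig-zag fold back to a signed value. A standalone sketch of the arithmetic after the bitstream reads — not part of the patch; decode_residue and ceil_log2 are made-up names, the latter standing in for av_ceil_log2():

    /* Illustrative only: ceil(log2(v)) for v >= 1. */
    static int ceil_log2(unsigned v)
    {
        int n = 0;
        while ((1u << n) < v)
            n++;
        return n;
    }

    /* Illustrative only: combine an already-read quotient and remainder
     * into a signed residue and update the moving average that controls
     * the remainder width of the next code. */
    static int decode_residue(unsigned quo, unsigned rem,
                              unsigned *ave_sum, int movave_scaling)
    {
        unsigned ave_mean = (*ave_sum + (1u << movave_scaling)) >>
                            (movave_scaling + 1);
        unsigned residue;

        if (ave_mean <= 1) {
            residue = quo;                        /* no remainder bits coded */
        } else {
            int rem_bits = ceil_log2(ave_mean);
            residue = (quo << rem_bits) + rem;    /* rem is rem_bits wide */
        }

        *ave_sum = residue + *ave_sum - (*ave_sum >> movave_scaling);

        /* zig-zag: even codes map to >= 0, odd codes to < 0 */
        return (residue & 1) ? -(int)(residue >> 1) - 1 : (int)(residue >> 1);
    }
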
-
-/**
- *
- */
-static void
-decode_lpc(WmallDecodeCtx *s)
+static void decode_lpc(WmallDecodeCtx *s)
{
int ch, i, cbits;
- s->lpc_order = get_bits(&s->gb, 5) + 1;
+ s->lpc_order = get_bits(&s->gb, 5) + 1;
s->lpc_scaling = get_bits(&s->gb, 4);
s->lpc_intbits = get_bits(&s->gb, 3) + 1;
cbits = s->lpc_scaling + s->lpc_intbits;
- for(ch = 0; ch < s->num_channels; ch++) {
- for(i = 0; i < s->lpc_order; i++) {
+ for (ch = 0; ch < s->num_channels; ch++)
+ for (i = 0; i < s->lpc_order; i++)
s->lpc_coefs[ch][i] = get_sbits(&s->gb, cbits);
- }
- }
}
-
static void clear_codec_buffers(WmallDecodeCtx *s)
{
int ich, ilms;
- memset(s->acfilter_coeffs , 0, 16 * sizeof(int));
- memset(s->acfilter_prevvalues, 0, 16 * 2 * sizeof(int)); // may be wrong
- memset(s->lpc_coefs , 0, 40 * 2 * sizeof(int));
+ memset(s->acfilter_coeffs, 0, sizeof(s->acfilter_coeffs));
+ memset(s->acfilter_prevvalues, 0, sizeof(s->acfilter_prevvalues));
+ memset(s->lpc_coefs, 0, sizeof(s->lpc_coefs));
- memset(s->mclms_coeffs , 0, 128 * sizeof(int16_t));
- memset(s->mclms_coeffs_cur, 0, 4 * sizeof(int16_t));
- memset(s->mclms_prevvalues, 0, 64 * sizeof(int));
- memset(s->mclms_updates , 0, 64 * sizeof(int16_t));
+ memset(s->mclms_coeffs, 0, sizeof(s->mclms_coeffs));
+ memset(s->mclms_coeffs_cur, 0, sizeof(s->mclms_coeffs_cur));
+ memset(s->mclms_prevvalues, 0, sizeof(s->mclms_prevvalues));
+ memset(s->mclms_updates, 0, sizeof(s->mclms_updates));
for (ich = 0; ich < s->num_channels; ich++) {
for (ilms = 0; ilms < s->cdlms_ttl[ich]; ilms++) {
- memset(s->cdlms[ich][ilms].coefs , 0, 256 * sizeof(int16_t));
- memset(s->cdlms[ich][ilms].lms_prevvalues, 0, 512 * sizeof(int));
- memset(s->cdlms[ich][ilms].lms_updates , 0, 512 * sizeof(int16_t));
+ memset(s->cdlms[ich][ilms].coefs, 0,
+ sizeof(s->cdlms[ich][ilms].coefs));
+ memset(s->cdlms[ich][ilms].lms_prevvalues, 0,
+ sizeof(s->cdlms[ich][ilms].lms_prevvalues));
+ memset(s->cdlms[ich][ilms].lms_updates, 0,
+ sizeof(s->cdlms[ich][ilms].lms_updates));
}
s->ave_sum[ich] = 0;
}
}
/**
- *@brief Resets filter parameters and transient area at new seekable tile
+ * @brief Reset filter parameters and transient area at new seekable tile.
*/
static void reset_codec(WmallDecodeCtx *s)
{
@@ -799,23 +567,19 @@ static void reset_codec(WmallDecodeCtx *s)
for (ilms = 0; ilms < s->cdlms_ttl[ich]; ilms++)
s->cdlms[ich][ilms].recent = s->cdlms[ich][ilms].order;
/* first sample of a seekable subframe is considered as the starting of
- a transient area which is samples_per_frame samples long */
+ a transient area which is samples_per_frame samples long */
s->channel[ich].transient_counter = s->samples_per_frame;
- s->transient[ich] = 1;
+ s->transient[ich] = 1;
s->transient_pos[ich] = 0;
}
}
-
-
static void mclms_update(WmallDecodeCtx *s, int icoef, int *pred)
{
- int i, j, ich;
- int pred_error;
- int order = s->mclms_order;
+ int i, j, ich, pred_error;
+ int order = s->mclms_order;
int num_channels = s->num_channels;
- int range = 1 << (s->bits_per_sample - 1);
- //int bps = s->bits_per_sample > 16 ? 4 : 2; // bytes per sample
+ int range = 1 << (s->bits_per_sample - 1);
for (ich = 0; ich < num_channels; ich++) {
pred_error = s->channel_residues[ich][icoef] - pred[ich];
@@ -860,7 +624,7 @@ static void mclms_update(WmallDecodeCtx *s, int icoef, int *pred)
if (s->mclms_recent == 0) {
memcpy(&s->mclms_prevvalues[order * num_channels],
s->mclms_prevvalues,
- 4 * order * num_channels);
+ 2 * order * num_channels);
memcpy(&s->mclms_updates[order * num_channels],
s->mclms_updates,
2 * order * num_channels);
@@ -871,7 +635,7 @@ static void mclms_update(WmallDecodeCtx *s, int icoef, int *pred)
static void mclms_predict(WmallDecodeCtx *s, int icoef, int *pred)
{
int ich, i;
- int order = s->mclms_order;
+ int order = s->mclms_order;
int num_channels = s->num_channels;
for (ich = 0; ich < num_channels; ich++) {
@@ -892,7 +656,7 @@ static void mclms_predict(WmallDecodeCtx *s, int icoef, int *pred)
static void revert_mclms(WmallDecodeCtx *s, int tile_size)
{
- int icoef, pred[WMALL_MAX_CHANNELS] = {0};
+ int icoef, pred[WMALL_MAX_CHANNELS] = { 0 };
for (icoef = 0; icoef < tile_size; icoef++) {
mclms_predict(s, icoef, pred);
mclms_update(s, icoef, pred);
@@ -901,28 +665,22 @@ static void revert_mclms(WmallDecodeCtx *s, int tile_size)
static int lms_predict(WmallDecodeCtx *s, int ich, int ilms)
{
- int pred = 0;
- int icoef;
+ int pred = 0, icoef;
int recent = s->cdlms[ich][ilms].recent;
for (icoef = 0; icoef < s->cdlms[ich][ilms].order; icoef++)
pred += s->cdlms[ich][ilms].coefs[icoef] *
- s->cdlms[ich][ilms].lms_prevvalues[icoef + recent];
+ s->cdlms[ich][ilms].lms_prevvalues[icoef + recent];
- //pred += (1 << (s->cdlms[ich][ilms].scaling - 1));
- /* XXX: Table 29 has:
- iPred >= cdlms[iCh][ilms].scaling;
- seems to me like a missing > */
- //pred >>= s->cdlms[ich][ilms].scaling;
return pred;
}
-static void lms_update(WmallDecodeCtx *s, int ich, int ilms, int input, int residue)
+static void lms_update(WmallDecodeCtx *s, int ich, int ilms,
+ int input, int residue)
{
int icoef;
int recent = s->cdlms[ich][ilms].recent;
- int range = 1 << s->bits_per_sample - 1;
- //int bps = s->bits_per_sample > 16 ? 4 : 2; // bytes per sample
+ int range = 1 << s->bits_per_sample - 1;
if (residue < 0) {
for (icoef = 0; icoef < s->cdlms[ich][ilms].order; icoef++)
@@ -931,18 +689,15 @@ static void lms_update(WmallDecodeCtx *s, int ich, int ilms, int input, int resi
} else if (residue > 0) {
for (icoef = 0; icoef < s->cdlms[ich][ilms].order; icoef++)
s->cdlms[ich][ilms].coefs[icoef] +=
- s->cdlms[ich][ilms].lms_updates[icoef + recent]; /* spec mistakenly
- dropped the recent */
+ s->cdlms[ich][ilms].lms_updates[icoef + recent];
}
if (recent)
recent--;
else {
- /* XXX: This memcpy()s will probably fail if a fixed 32-bit buffer is used.
- follow kshishkov's suggestion of using a union. */
memcpy(&s->cdlms[ich][ilms].lms_prevvalues[s->cdlms[ich][ilms].order],
s->cdlms[ich][ilms].lms_prevvalues,
- 4 * s->cdlms[ich][ilms].order);
+ 2 * s->cdlms[ich][ilms].order);
memcpy(&s->cdlms[ich][ilms].lms_updates[s->cdlms[ich][ilms].order],
s->cdlms[ich][ilms].lms_updates,
2 * s->cdlms[ich][ilms].order);
@@ -957,14 +712,6 @@ static void lms_update(WmallDecodeCtx *s, int ich, int ilms, int input, int resi
else
s->cdlms[ich][ilms].lms_updates[recent] = s->update_speed[ich];
- /* XXX: spec says:
- cdlms[iCh][ilms].updates[iRecent + cdlms[iCh][ilms].order >> 4] >>= 2;
- lms_updates[iCh][ilms][iRecent + cdlms[iCh][ilms].order >> 3] >>= 1;
-
- Questions is - are cdlms[iCh][ilms].updates[] and lms_updates[][][] two
- seperate buffers? Here I've assumed that the two are same which makes
- more sense to me.
- */
s->cdlms[ich][ilms].lms_updates[recent + (s->cdlms[ich][ilms].order >> 4)] >>= 2;
s->cdlms[ich][ilms].lms_updates[recent + (s->cdlms[ich][ilms].order >> 3)] >>= 1;
s->cdlms[ich][ilms].recent = recent;
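
lms_predict() and lms_update() together form a sign-sign-style LMS stage per channel: the prediction is a dot product of the adaptive coefficients with recent reconstructed samples, and the coefficients are then nudged by the stored update terms in the direction given by the residue's sign. A condensed standalone sketch — not part of the patch; lms_step is a made-up helper that leaves out the ring-buffer and history maintenance, and it assumes scaling >= 1:

    #include <stdint.h>

    /* Illustrative only: one sample through a single LMS stage. */
    static int lms_step(int16_t *coefs, const int16_t *history,
                        const int16_t *updates, int order,
                        int scaling, int residue)
    {
        int pred = 1 << (scaling - 1);          /* rounding term */
        int i;

        for (i = 0; i < order; i++)
            pred += coefs[i] * history[i];      /* lms_predict() */
        pred >>= scaling;

        if (residue > 0) {                      /* lms_update(): move the */
            for (i = 0; i < order; i++)         /* coefficients by the    */
                coefs[i] += updates[i];         /* stored update terms,   */
        } else if (residue < 0) {               /* following the sign of  */
            for (i = 0; i < order; i++)         /* the residue            */
                coefs[i] -= updates[i];
        }

        return residue + pred;                  /* reconstructed sample */
    }
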
@@ -995,27 +742,23 @@ static void use_normal_update_speed(WmallDecodeCtx *s, int ich)
recent = s->cdlms[ich][ilms].recent;
if (s->update_speed[ich] == 8)
continue;
- if (s->bV3RTM) {
+ if (s->bV3RTM)
for (icoef = 0; icoef < s->cdlms[ich][ilms].order; icoef++)
s->cdlms[ich][ilms].lms_updates[icoef + recent] /= 2;
- } else {
+ else
for (icoef = 0; icoef < s->cdlms[ich][ilms].order; icoef++)
s->cdlms[ich][ilms].lms_updates[icoef] /= 2;
- }
}
s->update_speed[ich] = 8;
}
-static void revert_cdlms(WmallDecodeCtx *s, int ch, int coef_begin, int coef_end)
+static void revert_cdlms(WmallDecodeCtx *s, int ch,
+ int coef_begin, int coef_end)
{
- int icoef;
- int pred;
- int ilms, num_lms;
- int residue, input;
+ int icoef, pred, ilms, num_lms, residue, input;
num_lms = s->cdlms_ttl[ch];
for (ilms = num_lms - 1; ilms >= 0; ilms--) {
- //s->cdlms[ch][ilms].recent = s->cdlms[ch][ilms].order;
for (icoef = coef_begin; icoef < coef_end; icoef++) {
pred = 1 << (s->cdlms[ch][ilms].scaling - 1);
residue = s->channel_residues[ch][icoef];
@@ -1029,10 +772,10 @@ static void revert_cdlms(WmallDecodeCtx *s, int ch, int coef_begin, int coef_end
static void revert_inter_ch_decorr(WmallDecodeCtx *s, int tile_size)
{
- int icoef;
if (s->num_channels != 2)
return;
else if (s->is_channel_coded[0] && s->is_channel_coded[1]) {
+ int icoef;
for (icoef = 0; icoef < tile_size; icoef++) {
s->channel_residues[0][icoef] -= s->channel_residues[1][icoef] >> 1;
s->channel_residues[1][icoef] += s->channel_residues[0][icoef];
@@ -1042,12 +785,10 @@ static void revert_inter_ch_decorr(WmallDecodeCtx *s, int tile_size)
static void revert_acfilter(WmallDecodeCtx *s, int tile_size)
{
- int ich, icoef;
- int pred;
- int i, j;
+ int ich, pred, i, j;
int64_t *filter_coeffs = s->acfilter_coeffs;
- int scaling = s->acfilter_scaling;
- int order = s->acfilter_order;
+ int scaling = s->acfilter_scaling;
+ int order = s->acfilter_order;
for (ich = 0; ich < s->num_channels; ich++) {
int *prevvalues = s->acfilter_prevvalues[ich];
@@ -1074,28 +815,19 @@ static void revert_acfilter(WmallDecodeCtx *s, int tile_size)
}
}
-/**
- *@brief Decode a single subframe (block).
- *@param s codec context
- *@return 0 on success, < 0 when decoding failed
- */
static int decode_subframe(WmallDecodeCtx *s)
{
- int offset = s->samples_per_frame;
- int subframe_len = s->samples_per_frame;
- int i, j;
- int total_samples = s->samples_per_frame * s->num_channels;
- int rawpcm_tile;
- int padding_zeroes;
+ int offset = s->samples_per_frame;
+ int subframe_len = s->samples_per_frame;
+ int total_samples = s->samples_per_frame * s->num_channels;
+ int i, j, rawpcm_tile, padding_zeroes;
s->subframe_offset = get_bits_count(&s->gb);
- /** reset channel context and find the next block offset and size
+ /* reset channel context and find the next block offset and size
== the next block of the channel with the smallest number of
- decoded samples
- */
+ decoded samples */
for (i = 0; i < s->num_channels; i++) {
- s->channel[i].grouped = 0;
if (offset > s->channel[i].decoded_samples) {
offset = s->channel[i].decoded_samples;
subframe_len =
@@ -1103,14 +835,14 @@ static int decode_subframe(WmallDecodeCtx *s)
}
}
- /** get a list of all channels that contain the estimated block */
+ /* get a list of all channels that contain the estimated block */
s->channels_for_cur_subframe = 0;
for (i = 0; i < s->num_channels; i++) {
const int cur_subframe = s->channel[i].cur_subframe;
- /** substract already processed samples */
+ /* subtract already processed samples */
total_samples -= s->channel[i].decoded_samples;
- /** and count if there are multiple subframes that match our profile */
+ /* and count if there are multiple subframes that match our profile */
if (offset == s->channel[i].decoded_samples &&
subframe_len == s->channel[i].subframe_len[cur_subframe]) {
total_samples -= s->channel[i].subframe_len[cur_subframe];
@@ -1121,95 +853,89 @@ static int decode_subframe(WmallDecodeCtx *s)
}
}
- /** check if the frame will be complete after processing the
+ /* check if the frame will be complete after processing the
estimated block */
if (!total_samples)
s->parsed_all_subframes = 1;
s->seekable_tile = get_bits1(&s->gb);
- if(s->seekable_tile) {
+ if (s->seekable_tile) {
clear_codec_buffers(s);
s->do_arith_coding = get_bits1(&s->gb);
- if(s->do_arith_coding) {
- dprintf(s->avctx, "do_arith_coding == 1");
+ if (s->do_arith_coding) {
+ av_dlog(s->avctx, AV_LOG_DEBUG, "do_arith_coding == 1");
abort();
}
s->do_ac_filter = get_bits1(&s->gb);
s->do_inter_ch_decorr = get_bits1(&s->gb);
s->do_mclms = get_bits1(&s->gb);
- if(s->do_ac_filter)
+ if (s->do_ac_filter)
decode_ac_filter(s);
- if(s->do_mclms)
+ if (s->do_mclms)
decode_mclms(s);
decode_cdlms(s);
s->movave_scaling = get_bits(&s->gb, 3);
s->quant_stepsize = get_bits(&s->gb, 8) + 1;
- reset_codec(s);
+ reset_codec(s);
}
rawpcm_tile = get_bits1(&s->gb);
- for(i = 0; i < s->num_channels; i++) {
+ for (i = 0; i < s->num_channels; i++)
s->is_channel_coded[i] = 1;
- }
- if(!rawpcm_tile) {
-
- for(i = 0; i < s->num_channels; i++) {
+ if (!rawpcm_tile) {
+ for (i = 0; i < s->num_channels; i++)
s->is_channel_coded[i] = get_bits1(&s->gb);
- }
- if(s->bV3RTM) {
+ if (s->bV3RTM) {
// LPC
s->do_lpc = get_bits1(&s->gb);
- if(s->do_lpc) {
+ if (s->do_lpc) {
decode_lpc(s);
+ av_log_ask_for_sample(s->avctx, "Inverse LPC filter not "
+ "implemented. Expect wrong output.\n");
}
- } else {
+ } else
s->do_lpc = 0;
- }
}
- if(get_bits1(&s->gb)) {
+ if (get_bits1(&s->gb))
padding_zeroes = get_bits(&s->gb, 5);
- } else {
+ else
padding_zeroes = 0;
- }
-
- if(rawpcm_tile) {
+ if (rawpcm_tile) {
int bits = s->bits_per_sample - padding_zeroes;
- dprintf(s->avctx, "RAWPCM %d bits per sample. total %d bits, remain=%d\n", bits,
+ av_dlog(s->avctx, AV_LOG_DEBUG, "RAWPCM %d bits per sample. "
+ "total %d bits, remain=%d\n", bits,
bits * s->num_channels * subframe_len, get_bits_count(&s->gb));
- for(i = 0; i < s->num_channels; i++) {
- for(j = 0; j < subframe_len; j++) {
+ for (i = 0; i < s->num_channels; i++)
+ for (j = 0; j < subframe_len; j++)
s->channel_coeffs[i][j] = get_sbits(&s->gb, bits);
-// dprintf(s->avctx, "PCM[%d][%d] = 0x%04x\n", i, j, s->channel_coeffs[i][j]);
- }
- }
} else {
- for(i = 0; i < s->num_channels; i++)
- if(s->is_channel_coded[i]) {
- decode_channel_residues(s, i, subframe_len);
- if (s->seekable_tile)
- use_high_update_speed(s, i);
- else
- use_normal_update_speed(s, i);
- revert_cdlms(s, i, 0, subframe_len);
- }
+ for (i = 0; i < s->num_channels; i++)
+ if (s->is_channel_coded[i]) {
+ decode_channel_residues(s, i, subframe_len);
+ if (s->seekable_tile)
+ use_high_update_speed(s, i);
+ else
+ use_normal_update_speed(s, i);
+ revert_cdlms(s, i, 0, subframe_len);
+ }
}
if (s->do_mclms)
revert_mclms(s, subframe_len);
if (s->do_inter_ch_decorr)
revert_inter_ch_decorr(s, subframe_len);
- if(s->do_ac_filter)
+ if (s->do_ac_filter)
revert_acfilter(s, subframe_len);
/* Dequantize */
@@ -1218,7 +944,7 @@ static int decode_subframe(WmallDecodeCtx *s)
for (j = 0; j < subframe_len; j++)
s->channel_residues[i][j] *= s->quant_stepsize;
- // Write to proper output buffer depending on bit-depth
+ /* Write to proper output buffer depending on bit-depth */
for (i = 0; i < subframe_len; i++)
for (j = 0; j < s->num_channels; j++) {
if (s->bits_per_sample == 16)
@@ -1227,8 +953,7 @@ static int decode_subframe(WmallDecodeCtx *s)
*s->samples_32++ = s->channel_residues[j][i];
}
- /** handled one subframe */
-
+ /* handled one subframe */
for (i = 0; i < s->channels_for_cur_subframe; i++) {
int c = s->channel_indexes_for_cur_subframe[i];
if (s->channel[c].cur_subframe >= s->channel[c].num_subframes) {
@@ -1237,77 +962,72 @@ static int decode_subframe(WmallDecodeCtx *s)
}
++s->channel[c].cur_subframe;
}
- num_logged_subframes++;
return 0;
}
/**
- *@brief Decode one WMA frame.
- *@param s codec context
- *@return 0 if the trailer bit indicates that this is the last frame,
- * 1 if there are additional frames
+ * @brief Decode one WMA frame.
+ * @param s codec context
+ * @return 0 if the trailer bit indicates that this is the last frame,
+ * 1 if there are additional frames
*/
static int decode_frame(WmallDecodeCtx *s)
{
GetBitContext* gb = &s->gb;
- int more_frames = 0;
- int len = 0;
- int i, ret;
+ int more_frames = 0, len = 0, i, ret;
s->frame.nb_samples = s->samples_per_frame;
if ((ret = s->avctx->get_buffer(s->avctx, &s->frame)) < 0) {
- /** return an error if no frame could be decoded at all */
+ /* return an error if no frame could be decoded at all */
av_log(s->avctx, AV_LOG_ERROR,
"not enough space for the output samples\n");
s->packet_loss = 1;
- return 0;
+ return ret;
}
s->samples_16 = (int16_t *)s->frame.data[0];
s->samples_32 = (int32_t *)s->frame.data[0];
- /** get frame length */
+ /* get frame length */
if (s->len_prefix)
len = get_bits(gb, s->log2_frame_size);
- /** decode tile information */
+ /* decode tile information */
if (decode_tilehdr(s)) {
s->packet_loss = 1;
return 0;
}
- /** read drc info */
- if (s->dynamic_range_compression) {
+ /* read drc info */
+ if (s->dynamic_range_compression)
s->drc_gain = get_bits(gb, 8);
- }
- /** no idea what these are for, might be the number of samples
- that need to be skipped at the beginning or end of a stream */
+ /* no idea what these are for, might be the number of samples
+ that need to be skipped at the beginning or end of a stream */
if (get_bits1(gb)) {
int skip;
- /** usually true for the first frame */
+ /* usually true for the first frame */
if (get_bits1(gb)) {
skip = get_bits(gb, av_log2(s->samples_per_frame * 2));
- dprintf(s->avctx, "start skip: %i\n", skip);
+ av_dlog(s->avctx, AV_LOG_DEBUG, "start skip: %i\n", skip);
}
- /** sometimes true for the last frame */
+ /* sometimes true for the last frame */
if (get_bits1(gb)) {
skip = get_bits(gb, av_log2(s->samples_per_frame * 2));
- dprintf(s->avctx, "end skip: %i\n", skip);
+ av_dlog(s->avctx, AV_LOG_DEBUG, "end skip: %i\n", skip);
}
}
- /** reset subframe states */
+ /* reset subframe states */
s->parsed_all_subframes = 0;
for (i = 0; i < s->num_channels; i++) {
s->channel[i].decoded_samples = 0;
s->channel[i].cur_subframe = 0;
- s->channel[i].reuse_sf = 0;
}
- /** decode all subframes */
+ /* decode all subframes */
while (!s->parsed_all_subframes) {
if (decode_subframe(s) < 0) {
s->packet_loss = 1;
@@ -1315,15 +1035,14 @@ static int decode_frame(WmallDecodeCtx *s)
}
}
- dprintf(s->avctx, "Frame done\n");
+ av_dlog(s->avctx, AV_LOG_DEBUG, "Frame done\n");
- if (s->skip_frame) {
+ if (s->skip_frame)
s->skip_frame = 0;
- }
if (s->len_prefix) {
if (len != (get_bits_count(gb) - s->frame_offset) + 2) {
- /** FIXME: not sure if this is always an error */
+ /* FIXME: not sure if this is always an error */
av_log(s->avctx, AV_LOG_ERROR,
"frame[%i] would have to skip %i bits\n", s->frame_num,
len - (get_bits_count(gb) - s->frame_offset) - 1);
@@ -1331,27 +1050,21 @@ static int decode_frame(WmallDecodeCtx *s)
return 0;
}
- /** skip the rest of the frame data */
+ /* skip the rest of the frame data */
skip_bits_long(gb, len - (get_bits_count(gb) - s->frame_offset) - 1);
- } else {
-/*
- while (get_bits_count(gb) < s->num_saved_bits && get_bits1(gb) == 0) {
- dprintf(s->avctx, "skip1\n");
- }
-*/
}
- /** decode trailer bit */
+ /* decode trailer bit */
more_frames = get_bits1(gb);
++s->frame_num;
return more_frames;
}
/**
- *@brief Calculate remaining input buffer length.
- *@param s codec context
- *@param gb bitstream reader context
- *@return remaining size in bits
+ * @brief Calculate remaining input buffer length.
+ * @param s codec context
+ * @param gb bitstream reader context
+ * @return remaining size in bits
*/
static int remaining_bits(WmallDecodeCtx *s, GetBitContext *gb)
{
@@ -1359,23 +1072,24 @@ static int remaining_bits(WmallDecodeCtx *s, GetBitContext *gb)
}
/**
- *@brief Fill the bit reservoir with a (partial) frame.
- *@param s codec context
- *@param gb bitstream reader context
- *@param len length of the partial frame
- *@param append decides wether to reset the buffer or not
+ * @brief Fill the bit reservoir with a (partial) frame.
+ * @param s codec context
+ * @param gb bitstream reader context
+ * @param len length of the partial frame
+ * @param append decides whether to reset the buffer or not
*/
static void save_bits(WmallDecodeCtx *s, GetBitContext* gb, int len,
int append)
{
int buflen;
+ PutBitContext tmp;
- /** when the frame data does not need to be concatenated, the input buffer
- is resetted and additional bits from the previous frame are copyed
+ /* when the frame data does not need to be concatenated, the input buffer
+ is reset and additional bits from the previous frame are copied
and skipped later so that a fast byte copy is possible */
if (!append) {
- s->frame_offset = get_bits_count(gb) & 7;
+ s->frame_offset = get_bits_count(gb) & 7;
s->num_saved_bits = s->frame_offset;
init_put_bits(&s->pb, s->frame_data, MAX_FRAMESIZE);
}
@@ -1391,7 +1105,7 @@ static void save_bits(WmallDecodeCtx *s, GetBitContext* gb, int len,
s->num_saved_bits += len;
if (!append) {
avpriv_copy_bits(&s->pb, gb->buffer + (get_bits_count(gb) >> 3),
- s->num_saved_bits);
+ s->num_saved_bits);
} else {
int align = 8 - (get_bits_count(gb) & 7);
align = FFMIN(align, len);
@@ -1401,57 +1115,52 @@ static void save_bits(WmallDecodeCtx *s, GetBitContext* gb, int len,
}
skip_bits_long(gb, len);
- {
- PutBitContext tmp = s->pb;
- flush_put_bits(&tmp);
- }
+ tmp = s->pb;
+ flush_put_bits(&tmp);
init_get_bits(&s->gb, s->frame_data, s->num_saved_bits);
skip_bits(&s->gb, s->frame_offset);
}
/**
- *@brief Decode a single WMA packet.
- *@param avctx codec context
- *@param data the output buffer
- *@param data_size number of bytes that were written to the output buffer
- *@param avpkt input packet
- *@return number of bytes that were read from the input buffer
+ * @brief Decode a single WMA packet.
+ * @param avctx codec context
+ * @param data the output buffer
+ * @param data_size number of bytes that were written to the output buffer
+ * @param avpkt input packet
+ * @return number of bytes that were read from the input buffer
*/
-static int decode_packet(AVCodecContext *avctx,
- void *data, int *got_frame_ptr, AVPacket* avpkt)
+static int decode_packet(AVCodecContext *avctx, void *data, int *got_frame_ptr,
+ AVPacket* avpkt)
{
WmallDecodeCtx *s = avctx->priv_data;
GetBitContext* gb = &s->pgb;
const uint8_t* buf = avpkt->data;
int buf_size = avpkt->size;
- int num_bits_prev_frame;
- int packet_sequence_number;
- int seekable_frame_in_packet;
- int spliced_packet;
+ int num_bits_prev_frame, packet_sequence_number,
+ seekable_frame_in_packet, spliced_packet;
if (s->packet_done || s->packet_loss) {
- int seekable_frame_in_packet, spliced_packet;
s->packet_done = 0;
- /** sanity check for the buffer length */
+ /* sanity check for the buffer length */
if (buf_size < avctx->block_align)
return 0;
s->next_packet_start = buf_size - avctx->block_align;
- buf_size = avctx->block_align;
- s->buf_bit_size = buf_size << 3;
+ buf_size = avctx->block_align;
+ s->buf_bit_size = buf_size << 3;
- /** parse packet header */
+ /* parse packet header */
init_get_bits(gb, buf, s->buf_bit_size);
- packet_sequence_number = get_bits(gb, 4);
+ packet_sequence_number = get_bits(gb, 4);
seekable_frame_in_packet = get_bits1(gb);
- spliced_packet = get_bits1(gb);
+ spliced_packet = get_bits1(gb);
- /** get number of bits that need to be added to the previous frame */
+ /* get number of bits that need to be added to the previous frame */
num_bits_prev_frame = get_bits(gb, s->log2_frame_size);
- /** check for packet loss */
+ /* check for packet loss */
if (!s->packet_loss &&
((s->packet_sequence_number + 1) & 0xF) != packet_sequence_number) {
s->packet_loss = 1;
@@ -1467,24 +1176,23 @@ static int decode_packet(AVCodecContext *avctx,
s->packet_done = 1;
}
- /** append the previous frame data to the remaining data from the
- previous packet to create a full frame */
+ /* Append the previous frame data to the remaining data from the
+ * previous packet to create a full frame. */
save_bits(s, gb, num_bits_prev_frame, 1);
- /** decode the cross packet frame if it is valid */
+ /* decode the cross packet frame if it is valid */
if (!s->packet_loss)
decode_frame(s);
} else if (s->num_saved_bits - s->frame_offset) {
- dprintf(avctx, "ignoring %x previously saved bits\n",
+ av_dlog(avctx, AV_LOG_DEBUG, "ignoring %x previously saved bits\n",
s->num_saved_bits - s->frame_offset);
}
if (s->packet_loss) {
- /** reset number of saved bits so that the decoder
- does not start to decode incomplete frames in the
- s->len_prefix == 0 case */
+ /* Reset number of saved bits so that the decoder does not start
+ * to decode incomplete frames in the s->len_prefix == 0 case. */
s->num_saved_bits = 0;
- s->packet_loss = 0;
+ s->packet_loss = 0;
}
} else {
@@ -1501,13 +1209,12 @@ static int decode_packet(AVCodecContext *avctx,
s->packet_done = !decode_frame(s);
} else if (!s->len_prefix
&& s->num_saved_bits > get_bits_count(&s->gb)) {
- /** when the frames do not have a length prefix, we don't know
- the compressed length of the individual frames
- however, we know what part of a new packet belongs to the
- previous frame
- therefore we save the incoming packet first, then we append
- the "previous frame" data from the next packet so that
- we get a buffer that only contains full frames */
+            /* When the frames do not have a length prefix, we do not know
+             * the compressed length of the individual frames.  However, we
+             * do know which part of a new packet belongs to the previous
+             * frame, so we save the incoming packet first and then append
+             * the "previous frame" data from the next packet; this yields
+             * a buffer that contains only full frames. */
s->packet_done = !decode_frame(s);
} else {
s->packet_done = 1;
@@ -1516,47 +1223,26 @@ static int decode_packet(AVCodecContext *avctx,
if (s->packet_done && !s->packet_loss &&
remaining_bits(s, gb) > 0) {
- /** save the rest of the data so that it can be decoded
- with the next packet */
+ /* save the rest of the data so that it can be decoded
+ * with the next packet */
save_bits(s, gb, remaining_bits(s, gb), 0);
}
*(AVFrame *)data = s->frame;
- *got_frame_ptr = 1;
+ *got_frame_ptr = 1;
s->packet_offset = get_bits_count(gb) & 7;
return (s->packet_loss) ? AVERROR_INVALIDDATA : get_bits_count(gb) >> 3;
}
-/**
- *@brief Clear decoder buffers (for seeking).
- *@param avctx codec context
- */
-static void flush(AVCodecContext *avctx)
-{
- WmallDecodeCtx *s = avctx->priv_data;
- int i;
- /** reset output buffer as a part of it is used during the windowing of a
- new frame */
- for (i = 0; i < s->num_channels; i++)
- memset(s->channel[i].out, 0, s->samples_per_frame *
- sizeof(*s->channel[i].out));
- s->packet_loss = 1;
-}
-
-/**
- *@brief wmall decoder
- */
AVCodec ff_wmalossless_decoder = {
.name = "wmalossless",
.type = AVMEDIA_TYPE_AUDIO,
.id = CODEC_ID_WMALOSSLESS,
.priv_data_size = sizeof(WmallDecodeCtx),
.init = decode_init,
- .close = decode_end,
.decode = decode_packet,
- .flush = flush,
- .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_EXPERIMENTAL | CODEC_CAP_DR1,
- .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 9 Lossless"),
+ .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
+ .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio Lossless"),
};
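The decode_packet() logic above carries bits saved from the tail of one packet over to the head of the next (num_bits_prev_frame), so a frame that straddles a packet boundary can still be decoded whole. Below is a minimal, self-contained C sketch of that carry idea; the CarryCtx type, the buffer size, and the byte-aligned simplification are invented for illustration and are not the decoder's actual bit-granular save_bits() implementation.

#include <stdint.h>
#include <string.h>

#define MAX_SAVED_BYTES 4096

typedef struct CarryCtx {
    uint8_t saved[MAX_SAVED_BYTES];  /* data carried across packets      */
    int     num_saved_bits;          /* number of valid bits in 'saved'  */
} CarryCtx;

/* Append the first 'nbits' bits of a new packet to the carry buffer so the
 * previously started frame becomes complete.  For brevity this sketch only
 * handles byte-aligned carries. */
static int carry_append(CarryCtx *c, const uint8_t *pkt, int pkt_size, int nbits)
{
    int add_bytes  = (nbits + 7) >> 3;
    int have_bytes = (c->num_saved_bits + 7) >> 3;

    if (add_bytes > pkt_size || have_bytes + add_bytes > MAX_SAVED_BYTES)
        return -1;                               /* treat as packet loss */

    memcpy(c->saved + have_bytes, pkt, add_bytes);
    c->num_saved_bits += nbits;
    return 0;                     /* a full frame may now be decodable */
}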
diff --git a/libavcodec/wnv1.c b/libavcodec/wnv1.c
index 6429a5b748..5ce5e9db51 100644
--- a/libavcodec/wnv1.c
+++ b/libavcodec/wnv1.c
@@ -64,7 +64,7 @@ static int decode_frame(AVCodecContext *avctx,
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
WNV1Context * const l = avctx->priv_data;
- AVFrame * const p= (AVFrame*)&l->pic;
+ AVFrame * const p = &l->pic;
unsigned char *Y,*U,*V;
int i, j;
int prev_y = 0, prev_u = 0, prev_v = 0;
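The cast dropped in the hunk above is redundant, presumably because pic is declared directly as an AVFrame member of the context, so &l->pic already has type AVFrame *. A toy illustration with made-up struct names:

typedef struct ToyFrame   { int poc; }      ToyFrame;
typedef struct ToyContext { ToyFrame pic; } ToyContext;

static ToyFrame *toy_get_pic(ToyContext *ctx)
{
    return &ctx->pic;   /* already a ToyFrame *, no cast required */
}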
diff --git a/libavcodec/x86/h264_qpel_10bit.asm b/libavcodec/x86/h264_qpel_10bit.asm
index 51412e3977..bdacf9f472 100644
--- a/libavcodec/x86/h264_qpel_10bit.asm
+++ b/libavcodec/x86/h264_qpel_10bit.asm
@@ -619,7 +619,7 @@ MC MC33
%define PAD 12
%define COUNT 2
%else
-%define PAD 0
+%define PAD 4
%define COUNT 3
%endif
put_hv%2_10_%1:
diff --git a/libavcodec/x86/vp8dsp-init.c b/libavcodec/x86/vp8dsp-init.c
index a75fdf5bc5..e3b727d1b1 100644
--- a/libavcodec/x86/vp8dsp-init.c
+++ b/libavcodec/x86/vp8dsp-init.c
@@ -29,98 +29,98 @@
/*
* MC functions
*/
-extern void ff_put_vp8_epel4_h4_mmxext(uint8_t *dst, int dststride,
- uint8_t *src, int srcstride,
+extern void ff_put_vp8_epel4_h4_mmxext(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
-extern void ff_put_vp8_epel4_h6_mmxext(uint8_t *dst, int dststride,
- uint8_t *src, int srcstride,
+extern void ff_put_vp8_epel4_h6_mmxext(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
-extern void ff_put_vp8_epel4_v4_mmxext(uint8_t *dst, int dststride,
- uint8_t *src, int srcstride,
+extern void ff_put_vp8_epel4_v4_mmxext(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
-extern void ff_put_vp8_epel4_v6_mmxext(uint8_t *dst, int dststride,
- uint8_t *src, int srcstride,
+extern void ff_put_vp8_epel4_v6_mmxext(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
-extern void ff_put_vp8_epel8_h4_sse2 (uint8_t *dst, int dststride,
- uint8_t *src, int srcstride,
+extern void ff_put_vp8_epel8_h4_sse2 (uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
-extern void ff_put_vp8_epel8_h6_sse2 (uint8_t *dst, int dststride,
- uint8_t *src, int srcstride,
+extern void ff_put_vp8_epel8_h6_sse2 (uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
-extern void ff_put_vp8_epel8_v4_sse2 (uint8_t *dst, int dststride,
- uint8_t *src, int srcstride,
+extern void ff_put_vp8_epel8_v4_sse2 (uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
-extern void ff_put_vp8_epel8_v6_sse2 (uint8_t *dst, int dststride,
- uint8_t *src, int srcstride,
+extern void ff_put_vp8_epel8_v6_sse2 (uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
-extern void ff_put_vp8_epel4_h4_ssse3 (uint8_t *dst, int dststride,
- uint8_t *src, int srcstride,
+extern void ff_put_vp8_epel4_h4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
-extern void ff_put_vp8_epel4_h6_ssse3 (uint8_t *dst, int dststride,
- uint8_t *src, int srcstride,
+extern void ff_put_vp8_epel4_h6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
-extern void ff_put_vp8_epel4_v4_ssse3 (uint8_t *dst, int dststride,
- uint8_t *src, int srcstride,
+extern void ff_put_vp8_epel4_v4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
-extern void ff_put_vp8_epel4_v6_ssse3 (uint8_t *dst, int dststride,
- uint8_t *src, int srcstride,
+extern void ff_put_vp8_epel4_v6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
-extern void ff_put_vp8_epel8_h4_ssse3 (uint8_t *dst, int dststride,
- uint8_t *src, int srcstride,
+extern void ff_put_vp8_epel8_h4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
-extern void ff_put_vp8_epel8_h6_ssse3 (uint8_t *dst, int dststride,
- uint8_t *src, int srcstride,
+extern void ff_put_vp8_epel8_h6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
-extern void ff_put_vp8_epel8_v4_ssse3 (uint8_t *dst, int dststride,
- uint8_t *src, int srcstride,
+extern void ff_put_vp8_epel8_v4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
-extern void ff_put_vp8_epel8_v6_ssse3 (uint8_t *dst, int dststride,
- uint8_t *src, int srcstride,
+extern void ff_put_vp8_epel8_v6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
-extern void ff_put_vp8_bilinear4_h_mmxext(uint8_t *dst, int dststride,
- uint8_t *src, int srcstride,
+extern void ff_put_vp8_bilinear4_h_mmxext(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
-extern void ff_put_vp8_bilinear8_h_sse2 (uint8_t *dst, int dststride,
- uint8_t *src, int srcstride,
+extern void ff_put_vp8_bilinear8_h_sse2 (uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
-extern void ff_put_vp8_bilinear4_h_ssse3 (uint8_t *dst, int dststride,
- uint8_t *src, int srcstride,
+extern void ff_put_vp8_bilinear4_h_ssse3 (uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
-extern void ff_put_vp8_bilinear8_h_ssse3 (uint8_t *dst, int dststride,
- uint8_t *src, int srcstride,
+extern void ff_put_vp8_bilinear8_h_ssse3 (uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
-extern void ff_put_vp8_bilinear4_v_mmxext(uint8_t *dst, int dststride,
- uint8_t *src, int srcstride,
+extern void ff_put_vp8_bilinear4_v_mmxext(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
-extern void ff_put_vp8_bilinear8_v_sse2 (uint8_t *dst, int dststride,
- uint8_t *src, int srcstride,
+extern void ff_put_vp8_bilinear8_v_sse2 (uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
-extern void ff_put_vp8_bilinear4_v_ssse3 (uint8_t *dst, int dststride,
- uint8_t *src, int srcstride,
+extern void ff_put_vp8_bilinear4_v_ssse3 (uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
-extern void ff_put_vp8_bilinear8_v_ssse3 (uint8_t *dst, int dststride,
- uint8_t *src, int srcstride,
+extern void ff_put_vp8_bilinear8_v_ssse3 (uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
-extern void ff_put_vp8_pixels8_mmx (uint8_t *dst, int dststride,
- uint8_t *src, int srcstride,
+extern void ff_put_vp8_pixels8_mmx (uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
-extern void ff_put_vp8_pixels16_mmx(uint8_t *dst, int dststride,
- uint8_t *src, int srcstride,
+extern void ff_put_vp8_pixels16_mmx(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
-extern void ff_put_vp8_pixels16_sse(uint8_t *dst, int dststride,
- uint8_t *src, int srcstride,
+extern void ff_put_vp8_pixels16_sse(uint8_t *dst, ptrdiff_t dststride,
+ uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
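The prototypes above change the dst/src stride arguments from int to ptrdiff_t, presumably because a stride is added to pointers on every row and may be negative or wider than an int on 64-bit targets, so a pointer-sized signed type is the natural fit. A tiny illustrative C routine (not part of the VP8 DSP code) showing a ptrdiff_t stride in use:

#include <stddef.h>
#include <stdint.h>

/* Copy an 8-pixel-wide block; strides are pointer-sized and may be negative. */
static void copy_block8(uint8_t *dst, ptrdiff_t dststride,
                        const uint8_t *src, ptrdiff_t srcstride, int height)
{
    int x, y;

    for (y = 0; y < height; y++) {
        for (x = 0; x < 8; x++)
            dst[x] = src[x];
        dst += dststride;   /* row step as pointer arithmetic */
        src += srcstride;
    }
}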
#define TAP_W16(OPT, FILTERTYPE, TAPTYPE) \
static void ff_put_vp8_ ## FILTERTYPE ## 16_ ## TAPTYPE ## _ ## OPT( \
- uint8_t *dst, int dststride, uint8_t *src, \
- int srcstride, int height, int mx, int my) \
+ uint8_t *dst, ptrdiff_t dststride, uint8_t *src, \
+ ptrdiff_t srcstride, int height, int mx, int my) \
{ \
ff_put_vp8_ ## FILTERTYPE ## 8_ ## TAPTYPE ## _ ## OPT( \
dst, dststride, src, srcstride, height, mx, my); \
@@ -129,8 +129,8 @@ static void ff_put_vp8_ ## FILTERTYPE ## 16_ ## TAPTYPE ## _ ## OPT( \
}
#define TAP_W8(OPT, FILTERTYPE, TAPTYPE) \
static void ff_put_vp8_ ## FILTERTYPE ## 8_ ## TAPTYPE ## _ ## OPT( \
- uint8_t *dst, int dststride, uint8_t *src, \
- int srcstride, int height, int mx, int my) \
+ uint8_t *dst, ptrdiff_t dststride, uint8_t *src, \
+ ptrdiff_t srcstride, int height, int mx, int my) \
{ \
ff_put_vp8_ ## FILTERTYPE ## 4_ ## TAPTYPE ## _ ## OPT( \
dst, dststride, src, srcstride, height, mx, my); \
@@ -138,6 +138,7 @@ static void ff_put_vp8_ ## FILTERTYPE ## 8_ ## TAPTYPE ## _ ## OPT( \
dst + 4, dststride, src + 4, srcstride, height, mx, my); \
}
+#if ARCH_X86_32
TAP_W8 (mmxext, epel, h4)
TAP_W8 (mmxext, epel, h6)
TAP_W16(mmxext, epel, h6)
@@ -148,6 +149,7 @@ TAP_W8 (mmxext, bilinear, h)
TAP_W16(mmxext, bilinear, h)
TAP_W8 (mmxext, bilinear, v)
TAP_W16(mmxext, bilinear, v)
+#endif
TAP_W16(sse2, epel, h6)
TAP_W16(sse2, epel, v6)
@@ -161,8 +163,8 @@ TAP_W16(ssse3, bilinear, v)
#define HVTAP(OPT, ALIGN, TAPNUMX, TAPNUMY, SIZE, MAXHEIGHT) \
static void ff_put_vp8_epel ## SIZE ## _h ## TAPNUMX ## v ## TAPNUMY ## _ ## OPT( \
- uint8_t *dst, int dststride, uint8_t *src, \
- int srcstride, int height, int mx, int my) \
+ uint8_t *dst, ptrdiff_t dststride, uint8_t *src, \
+ ptrdiff_t srcstride, int height, int mx, int my) \
{ \
DECLARE_ALIGNED(ALIGN, uint8_t, tmp)[SIZE * (MAXHEIGHT + TAPNUMY - 1)]; \
uint8_t *tmpptr = tmp + SIZE * (TAPNUMY / 2 - 1); \
@@ -173,15 +175,21 @@ static void ff_put_vp8_epel ## SIZE ## _h ## TAPNUMX ## v ## TAPNUMY ## _ ## OPT
dst, dststride, tmpptr, SIZE, height, mx, my); \
}
+#if ARCH_X86_32
#define HVTAPMMX(x, y) \
HVTAP(mmxext, 8, x, y, 4, 8) \
HVTAP(mmxext, 8, x, y, 8, 16)
+HVTAP(mmxext, 8, 6, 6, 16, 16)
+#else
+#define HVTAPMMX(x, y) \
+HVTAP(mmxext, 8, x, y, 4, 8)
+#endif
+
HVTAPMMX(4, 4)
HVTAPMMX(4, 6)
HVTAPMMX(6, 4)
HVTAPMMX(6, 6)
-HVTAP(mmxext, 8, 6, 6, 16, 16)
#define HVTAPSSE2(x, y, w) \
HVTAP(sse2, 16, x, y, w, 16) \
@@ -200,8 +208,8 @@ HVTAP(ssse3, 16, 6, 6, 4, 8)
#define HVBILIN(OPT, ALIGN, SIZE, MAXHEIGHT) \
static void ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT( \
- uint8_t *dst, int dststride, uint8_t *src, \
- int srcstride, int height, int mx, int my) \
+ uint8_t *dst, ptrdiff_t dststride, uint8_t *src, \
+ ptrdiff_t srcstride, int height, int mx, int my) \
{ \
DECLARE_ALIGNED(ALIGN, uint8_t, tmp)[SIZE * (MAXHEIGHT + 2)]; \
ff_put_vp8_bilinear ## SIZE ## _h_ ## OPT( \
@@ -211,43 +219,68 @@ static void ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT( \
}
HVBILIN(mmxext, 8, 4, 8)
+#if ARCH_X86_32
HVBILIN(mmxext, 8, 8, 16)
HVBILIN(mmxext, 8, 16, 16)
+#endif
HVBILIN(sse2, 8, 8, 16)
HVBILIN(sse2, 8, 16, 16)
HVBILIN(ssse3, 8, 4, 8)
HVBILIN(ssse3, 8, 8, 16)
HVBILIN(ssse3, 8, 16, 16)
-extern void ff_vp8_idct_dc_add_mmx(uint8_t *dst, DCTELEM block[16], int stride);
-extern void ff_vp8_idct_dc_add_sse4(uint8_t *dst, DCTELEM block[16], int stride);
-extern void ff_vp8_idct_dc_add4y_mmx(uint8_t *dst, DCTELEM block[4][16], int stride);
-extern void ff_vp8_idct_dc_add4y_sse2(uint8_t *dst, DCTELEM block[4][16], int stride);
-extern void ff_vp8_idct_dc_add4uv_mmx(uint8_t *dst, DCTELEM block[2][16], int stride);
+extern void ff_vp8_idct_dc_add_mmx(uint8_t *dst, DCTELEM block[16],
+ ptrdiff_t stride);
+extern void ff_vp8_idct_dc_add_sse4(uint8_t *dst, DCTELEM block[16],
+ ptrdiff_t stride);
+extern void ff_vp8_idct_dc_add4y_mmx(uint8_t *dst, DCTELEM block[4][16],
+ ptrdiff_t stride);
+extern void ff_vp8_idct_dc_add4y_sse2(uint8_t *dst, DCTELEM block[4][16],
+ ptrdiff_t stride);
+extern void ff_vp8_idct_dc_add4uv_mmx(uint8_t *dst, DCTELEM block[2][16],
+ ptrdiff_t stride);
extern void ff_vp8_luma_dc_wht_mmx(DCTELEM block[4][4][16], DCTELEM dc[16]);
extern void ff_vp8_luma_dc_wht_sse(DCTELEM block[4][4][16], DCTELEM dc[16]);
-extern void ff_vp8_idct_add_mmx(uint8_t *dst, DCTELEM block[16], int stride);
-extern void ff_vp8_idct_add_sse(uint8_t *dst, DCTELEM block[16], int stride);
+extern void ff_vp8_idct_add_mmx(uint8_t *dst, DCTELEM block[16],
+ ptrdiff_t stride);
+extern void ff_vp8_idct_add_sse(uint8_t *dst, DCTELEM block[16],
+ ptrdiff_t stride);
#define DECLARE_LOOP_FILTER(NAME)\
-extern void ff_vp8_v_loop_filter_simple_ ## NAME(uint8_t *dst, int stride, int flim);\
-extern void ff_vp8_h_loop_filter_simple_ ## NAME(uint8_t *dst, int stride, int flim);\
-extern void ff_vp8_v_loop_filter16y_inner_ ## NAME (uint8_t *dst, int stride,\
+extern void ff_vp8_v_loop_filter_simple_ ## NAME(uint8_t *dst, \
+ ptrdiff_t stride, \
+ int flim);\
+extern void ff_vp8_h_loop_filter_simple_ ## NAME(uint8_t *dst, \
+ ptrdiff_t stride, \
+ int flim);\
+extern void ff_vp8_v_loop_filter16y_inner_ ## NAME (uint8_t *dst, \
+ ptrdiff_t stride,\
+ int e, int i, int hvt);\
+extern void ff_vp8_h_loop_filter16y_inner_ ## NAME (uint8_t *dst, \
+ ptrdiff_t stride,\
+ int e, int i, int hvt);\
+extern void ff_vp8_v_loop_filter8uv_inner_ ## NAME (uint8_t *dstU, \
+ uint8_t *dstV,\
+ ptrdiff_t s, \
+ int e, int i, int hvt);\
+extern void ff_vp8_h_loop_filter8uv_inner_ ## NAME (uint8_t *dstU, \
+ uint8_t *dstV,\
+ ptrdiff_t s, \
int e, int i, int hvt);\
-extern void ff_vp8_h_loop_filter16y_inner_ ## NAME (uint8_t *dst, int stride,\
+extern void ff_vp8_v_loop_filter16y_mbedge_ ## NAME(uint8_t *dst, \
+ ptrdiff_t stride,\
int e, int i, int hvt);\
-extern void ff_vp8_v_loop_filter8uv_inner_ ## NAME (uint8_t *dstU, uint8_t *dstV,\
- int s, int e, int i, int hvt);\
-extern void ff_vp8_h_loop_filter8uv_inner_ ## NAME (uint8_t *dstU, uint8_t *dstV,\
- int s, int e, int i, int hvt);\
-extern void ff_vp8_v_loop_filter16y_mbedge_ ## NAME(uint8_t *dst, int stride,\
+extern void ff_vp8_h_loop_filter16y_mbedge_ ## NAME(uint8_t *dst, \
+ ptrdiff_t stride,\
int e, int i, int hvt);\
-extern void ff_vp8_h_loop_filter16y_mbedge_ ## NAME(uint8_t *dst, int stride,\
+extern void ff_vp8_v_loop_filter8uv_mbedge_ ## NAME(uint8_t *dstU, \
+ uint8_t *dstV,\
+ ptrdiff_t s, \
int e, int i, int hvt);\
-extern void ff_vp8_v_loop_filter8uv_mbedge_ ## NAME(uint8_t *dstU, uint8_t *dstV,\
- int s, int e, int i, int hvt);\
-extern void ff_vp8_h_loop_filter8uv_mbedge_ ## NAME(uint8_t *dstU, uint8_t *dstV,\
- int s, int e, int i, int hvt);
+extern void ff_vp8_h_loop_filter8uv_mbedge_ ## NAME(uint8_t *dstU, \
+ uint8_t *dstV,\
+ ptrdiff_t s, \
+ int e, int i, int hvt);
DECLARE_LOOP_FILTER(mmx)
DECLARE_LOOP_FILTER(mmxext)
@@ -288,15 +321,18 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c)
if (mm_flags & AV_CPU_FLAG_MMX) {
c->vp8_idct_dc_add = ff_vp8_idct_dc_add_mmx;
- c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_mmx;
c->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_mmx;
+#if ARCH_X86_32
+ c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_mmx;
c->vp8_idct_add = ff_vp8_idct_add_mmx;
c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_mmx;
c->put_vp8_epel_pixels_tab[0][0][0] =
c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_mmx;
+#endif
c->put_vp8_epel_pixels_tab[1][0][0] =
c->put_vp8_bilinear_pixels_tab[1][0][0] = ff_put_vp8_pixels8_mmx;
+#if ARCH_X86_32
c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmx;
c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmx;
@@ -309,17 +345,19 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c)
c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmx;
c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmx;
c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmx;
+#endif
}
/* note that 4-tap width=16 functions are missing because w=16
* is only used for luma, and luma is always a copy or sixtap. */
if (mm_flags & AV_CPU_FLAG_MMX2) {
+ VP8_MC_FUNC(2, 4, mmxext);
+ VP8_BILINEAR_MC_FUNC(2, 4, mmxext);
+#if ARCH_X86_32
VP8_LUMA_MC_FUNC(0, 16, mmxext);
VP8_MC_FUNC(1, 8, mmxext);
- VP8_MC_FUNC(2, 4, mmxext);
VP8_BILINEAR_MC_FUNC(0, 16, mmxext);
VP8_BILINEAR_MC_FUNC(1, 8, mmxext);
- VP8_BILINEAR_MC_FUNC(2, 4, mmxext);
c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmxext;
c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmxext;
@@ -333,6 +371,7 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c)
c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmxext;
c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmxext;
c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmxext;
+#endif
}
if (mm_flags & AV_CPU_FLAG_SSE) {
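The ARCH_X86_32 guards added above keep the 64-bit build from assigning MMX routines that are compiled out because SSE/SSE2 versions cover them. The following toy sketch shows the same guarding pattern with invented ToyDSP/copy16 names, assuming the usual convention that ARCH_X86_32 evaluates to 1 only on 32-bit x86 builds:

#include <stddef.h>
#include <stdint.h>

typedef struct ToyDSP {
    void (*copy16)(uint8_t *dst, ptrdiff_t stride);
} ToyDSP;

static void copy16_c(uint8_t *dst, ptrdiff_t stride)   { (void)dst; (void)stride; }
#if ARCH_X86_32
static void copy16_mmx(uint8_t *dst, ptrdiff_t stride) { (void)dst; (void)stride; }
#endif

static void toy_dsp_init(ToyDSP *c, int have_mmx)
{
    c->copy16 = copy16_c;          /* portable fallback */
#if ARCH_X86_32
    if (have_mmx)
        c->copy16 = copy16_mmx;    /* MMX-only path exists in 32-bit builds */
#else
    (void)have_mmx;                /* 64-bit builds rely on SSE/SSE2 versions */
#endif
}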
diff --git a/libavcodec/x86/vp8dsp.asm b/libavcodec/x86/vp8dsp.asm
index 833c88a1a0..140597031f 100644
--- a/libavcodec/x86/vp8dsp.asm
+++ b/libavcodec/x86/vp8dsp.asm
@@ -865,6 +865,7 @@ cglobal put_vp8_pixels8_mmx, 5,5
jg .nextrow
REP_RET
+%if ARCH_X86_32
cglobal put_vp8_pixels16_mmx, 5,5
.nextrow:
movq mm0, [r2+r3*0+0]
@@ -880,6 +881,7 @@ cglobal put_vp8_pixels16_mmx, 5,5
sub r4d, 2
jg .nextrow
REP_RET
+%endif
cglobal put_vp8_pixels16_sse, 5,5,2
.nextrow:
@@ -973,6 +975,7 @@ cglobal vp8_idct_dc_add_sse4, 3, 3, 6
; void vp8_idct_dc_add4y_<opt>(uint8_t *dst, DCTELEM block[4][16], int stride);
;-----------------------------------------------------------------------------
+%if ARCH_X86_32
INIT_MMX
cglobal vp8_idct_dc_add4y_mmx, 3, 3
; load data
@@ -1007,6 +1010,7 @@ cglobal vp8_idct_dc_add4y_mmx, 3, 3
ADD_DC m0, m6, 0, mova
ADD_DC m1, m7, 8, mova
RET
+%endif
INIT_XMM
cglobal vp8_idct_dc_add4y_sse2, 3, 3, 6
@@ -1152,7 +1156,9 @@ cglobal vp8_idct_add_%1, 3, 3
RET
%endmacro
+%if ARCH_X86_32
VP8_IDCT_ADD mmx
+%endif
VP8_IDCT_ADD sse
;-----------------------------------------------------------------------------
@@ -1217,7 +1223,9 @@ cglobal vp8_luma_dc_wht_%1, 2,3
%endmacro
INIT_MMX
+%if ARCH_X86_32
VP8_DC_WHT mmx
+%endif
VP8_DC_WHT sse
;-----------------------------------------------------------------------------
@@ -1610,6 +1618,7 @@ cglobal vp8_%2_loop_filter_simple_%1, 3, %3, %4
%endif
%endmacro
+%if ARCH_X86_32
INIT_MMX
%define SPLATB_REG SPLATB_REG_MMX
SIMPLE_LOOPFILTER mmx, v, 4, 0
@@ -1617,6 +1626,8 @@ SIMPLE_LOOPFILTER mmx, h, 5, 0
%define SPLATB_REG SPLATB_REG_MMXEXT
SIMPLE_LOOPFILTER mmxext, v, 4, 0
SIMPLE_LOOPFILTER mmxext, h, 5, 0
+%endif
+
INIT_XMM
%define SPLATB_REG SPLATB_REG_SSE2
%define WRITE_8W WRITE_8W_SSE2
@@ -2118,6 +2129,7 @@ cglobal vp8_%2_loop_filter16y_inner_%1, 5, %3, %5
RET
%endmacro
+%if ARCH_X86_32
INIT_MMX
%define SPLATB_REG SPLATB_REG_MMX
INNER_LOOPFILTER mmx, v, 6, 16, 0
@@ -2130,6 +2142,7 @@ INNER_LOOPFILTER mmxext, v, 6, 16, 0
INNER_LOOPFILTER mmxext, h, 6, 16, 0
INNER_LOOPFILTER mmxext, v, 6, 8, 0
INNER_LOOPFILTER mmxext, h, 6, 8, 0
+%endif
INIT_XMM
%define SPLATB_REG SPLATB_REG_SSE2
@@ -2814,6 +2827,7 @@ cglobal vp8_%2_loop_filter16y_mbedge_%1, 5, %3, %5
RET
%endmacro
+%if ARCH_X86_32
INIT_MMX
%define SPLATB_REG SPLATB_REG_MMX
MBEDGE_LOOPFILTER mmx, v, 6, 16, 0
@@ -2826,6 +2840,7 @@ MBEDGE_LOOPFILTER mmxext, v, 6, 16, 0
MBEDGE_LOOPFILTER mmxext, h, 6, 16, 0
MBEDGE_LOOPFILTER mmxext, v, 6, 8, 0
MBEDGE_LOOPFILTER mmxext, h, 6, 8, 0
+%endif
INIT_XMM
%define SPLATB_REG SPLATB_REG_SSE2
diff --git a/libavcodec/xl.c b/libavcodec/xl.c
index 78f34afa4e..91a0df0683 100644
--- a/libavcodec/xl.c
+++ b/libavcodec/xl.c
@@ -45,7 +45,7 @@ static int decode_frame(AVCodecContext *avctx,
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
VideoXLContext * const a = avctx->priv_data;
- AVFrame * const p= (AVFrame*)&a->pic;
+ AVFrame * const p = &a->pic;
uint8_t *Y, *U, *V;
int i, j;
int stride;
diff --git a/libavcodec/zmbvenc.c b/libavcodec/zmbvenc.c
index abb34a9bdb..a7fdc280a5 100644
--- a/libavcodec/zmbvenc.c
+++ b/libavcodec/zmbvenc.c
@@ -313,7 +313,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
return -1;
}
- avctx->coded_frame = (AVFrame*)&c->pic;
+ avctx->coded_frame = &c->pic;
return 0;
}