author     Michael Niedermayer <michaelni@gmx.at>    2011-12-05 00:11:57 +0100
committer  Michael Niedermayer <michaelni@gmx.at>    2011-12-05 00:11:57 +0100
commit     707138593af5c4783035d0b9cc2d7c8cb2137dfa (patch)
tree       7ead2e3c73fd33764dede26546b0238bb40d484b /libavcodec
parent     2f8b6e909dd733d9b722a5266ca516a9a5ba67e9 (diff)
parent     dc6d0430503ecd7ed0d81276f977b26b4c4bd916 (diff)
Merge remote-tracking branch 'qatar/master'
* qatar/master:
  adpcmenc: cosmetics: pretty-printing
  ac3dec: cosmetics: pretty-printing
  yuv4mpeg: cosmetics: pretty-printing
  shorten: remove dead initialization
  roqvideodec: set AVFrame reference before reget_buffer.
  bmp: fix some 1bit samples.
  latmdec: add fate test for audio config change
  oma: PCM support
  oma: better format detection with small probe buffer
  oma: clearify ambiguous if condition
  wavpack: Properly clip samples during lossy decode
  Code clean-up for crc.c, lfg.c, log.c, random_see.d, rational.c and tree.c.
  Cleaned pixdesc.c file in libavutil
  zmbv.c: coding style clean-up.
  xan.c: coding style clean-up.
  mpegvideo.c: code cleanup - first 500 lines.

Conflicts:
	Changelog
	libavcodec/adpcmenc.c
	libavcodec/bmp.c
	libavcodec/zmbv.c
	libavutil/log.c
	libavutil/pixdesc.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
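The pretty-printing commits in this merge mostly normalize spacing to the project's K&R-style convention: a space after control keywords, spaces around binary operators, cuddled braces on else, and wrapped long argument lists. A minimal before/after sketch of that convention, shown on a hypothetical helper function that is not part of the patch:

/* Hypothetical helper, for illustration only; the trailing comments show
 * the pre-cleanup spelling that the commits in the diff below rewrite. */
static int sum_positive(const int *v, int n)
{
    int i, sum = 0;
    for (i = 0; i < n; i++) {        /* was: for(i=0; i<n; i++){ */
        if (v[i] > 0)                /* was: if(v[i]>0)          */
            sum += v[i];
    }
    return sum;
}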
Diffstat (limited to 'libavcodec')
-rw-r--r--  libavcodec/ac3dec.c     509
-rw-r--r--  libavcodec/adpcmenc.c   488
-rw-r--r--  libavcodec/bmp.c          4
-rw-r--r--  libavcodec/mpegvideo.c  333
-rw-r--r--  libavcodec/shorten.c      1
-rw-r--r--  libavcodec/wavpack.c     11
-rw-r--r--  libavcodec/xan.c         53
-rw-r--r--  libavcodec/zmbv.c       165
8 files changed, 849 insertions(+), 715 deletions(-)
diff --git a/libavcodec/ac3dec.c b/libavcodec/ac3dec.c
index c650881430..5e8b36404b 100644
--- a/libavcodec/ac3dec.c
+++ b/libavcodec/ac3dec.c
@@ -44,7 +44,6 @@
*/
static uint8_t ungroup_3_in_7_bits_tab[128][3];
-
/** tables for ungrouping mantissas */
static int b1_mantissas[32][3];
static int b2_mantissas[128][3];
@@ -124,7 +123,7 @@ static av_cold void ac3_tables_init(void)
/* generate table for ungrouping 3 values in 7 bits
reference: Section 7.1.3 Exponent Decoding */
- for(i=0; i<128; i++) {
+ for (i = 0; i < 128; i++) {
ungroup_3_in_7_bits_tab[i][0] = i / 25;
ungroup_3_in_7_bits_tab[i][1] = (i % 25) / 5;
ungroup_3_in_7_bits_tab[i][2] = (i % 25) % 5;
@@ -132,13 +131,13 @@ static av_cold void ac3_tables_init(void)
/* generate grouped mantissa tables
reference: Section 7.3.5 Ungrouping of Mantissas */
- for(i=0; i<32; i++) {
+ for (i = 0; i < 32; i++) {
/* bap=1 mantissas */
b1_mantissas[i][0] = symmetric_dequant(ff_ac3_ungroup_3_in_5_bits_tab[i][0], 3);
b1_mantissas[i][1] = symmetric_dequant(ff_ac3_ungroup_3_in_5_bits_tab[i][1], 3);
b1_mantissas[i][2] = symmetric_dequant(ff_ac3_ungroup_3_in_5_bits_tab[i][2], 3);
}
- for(i=0; i<128; i++) {
+ for (i = 0; i < 128; i++) {
/* bap=2 mantissas */
b2_mantissas[i][0] = symmetric_dequant(ungroup_3_in_7_bits_tab[i][0], 5);
b2_mantissas[i][1] = symmetric_dequant(ungroup_3_in_7_bits_tab[i][1], 5);
@@ -150,24 +149,23 @@ static av_cold void ac3_tables_init(void)
}
/* generate ungrouped mantissa tables
reference: Tables 7.21 and 7.23 */
- for(i=0; i<7; i++) {
+ for (i = 0; i < 7; i++) {
/* bap=3 mantissas */
b3_mantissas[i] = symmetric_dequant(i, 7);
}
- for(i=0; i<15; i++) {
+ for (i = 0; i < 15; i++) {
/* bap=5 mantissas */
b5_mantissas[i] = symmetric_dequant(i, 15);
}
/* generate dynamic range table
reference: Section 7.7.1 Dynamic Range Control */
- for(i=0; i<256; i++) {
+ for (i = 0; i < 256; i++) {
int v = (i >> 5) - ((i >> 7) << 3) - 5;
dynamic_range_tab[i] = powf(2.0f, v) * ((i & 0x1F) | 0x20);
}
}
-
/**
* AVCodec initialization
*/
@@ -250,7 +248,7 @@ static int ac3_parse_header(AC3DecodeContext *s)
i = get_bits(gbc, 6);
do {
skip_bits(gbc, 8);
- } while(i--);
+ } while (i--);
}
return 0;
@@ -265,7 +263,7 @@ static int parse_frame_header(AC3DecodeContext *s)
int err;
err = avpriv_ac3_parse_header(&s->gbc, &hdr);
- if(err)
+ if (err)
return err;
/* get decoding parameters from header info */
@@ -287,9 +285,9 @@ static int parse_frame_header(AC3DecodeContext *s)
s->frame_type = hdr.frame_type;
s->substreamid = hdr.substreamid;
- if(s->lfe_on) {
- s->start_freq[s->lfe_ch] = 0;
- s->end_freq[s->lfe_ch] = 7;
+ if (s->lfe_on) {
+ s->start_freq[s->lfe_ch] = 0;
+ s->end_freq[s->lfe_ch] = 7;
s->num_exp_groups[s->lfe_ch] = 2;
s->channel_in_cpl[s->lfe_ch] = 0;
}
@@ -326,38 +324,39 @@ static void set_downmix_coeffs(AC3DecodeContext *s)
float smix = gain_levels[surround_levels[s->surround_mix_level]];
float norm0, norm1;
- for(i=0; i<s->fbw_channels; i++) {
+ for (i = 0; i < s->fbw_channels; i++) {
s->downmix_coeffs[i][0] = gain_levels[ac3_default_coeffs[s->channel_mode][i][0]];
s->downmix_coeffs[i][1] = gain_levels[ac3_default_coeffs[s->channel_mode][i][1]];
}
- if(s->channel_mode > 1 && s->channel_mode & 1) {
+ if (s->channel_mode > 1 && s->channel_mode & 1) {
s->downmix_coeffs[1][0] = s->downmix_coeffs[1][1] = cmix;
}
- if(s->channel_mode == AC3_CHMODE_2F1R || s->channel_mode == AC3_CHMODE_3F1R) {
+ if (s->channel_mode == AC3_CHMODE_2F1R || s->channel_mode == AC3_CHMODE_3F1R) {
int nf = s->channel_mode - 2;
s->downmix_coeffs[nf][0] = s->downmix_coeffs[nf][1] = smix * LEVEL_MINUS_3DB;
}
- if(s->channel_mode == AC3_CHMODE_2F2R || s->channel_mode == AC3_CHMODE_3F2R) {
+ if (s->channel_mode == AC3_CHMODE_2F2R || s->channel_mode == AC3_CHMODE_3F2R) {
int nf = s->channel_mode - 4;
s->downmix_coeffs[nf][0] = s->downmix_coeffs[nf+1][1] = smix;
}
/* renormalize */
norm0 = norm1 = 0.0;
- for(i=0; i<s->fbw_channels; i++) {
+ for (i = 0; i < s->fbw_channels; i++) {
norm0 += s->downmix_coeffs[i][0];
norm1 += s->downmix_coeffs[i][1];
}
norm0 = 1.0f / norm0;
norm1 = 1.0f / norm1;
- for(i=0; i<s->fbw_channels; i++) {
+ for (i = 0; i < s->fbw_channels; i++) {
s->downmix_coeffs[i][0] *= norm0;
s->downmix_coeffs[i][1] *= norm1;
}
- if(s->output_mode == AC3_CHMODE_MONO) {
- for(i=0; i<s->fbw_channels; i++)
- s->downmix_coeffs[i][0] = (s->downmix_coeffs[i][0] + s->downmix_coeffs[i][1]) * LEVEL_MINUS_3DB;
+ if (s->output_mode == AC3_CHMODE_MONO) {
+ for (i = 0; i < s->fbw_channels; i++)
+ s->downmix_coeffs[i][0] = (s->downmix_coeffs[i][0] +
+ s->downmix_coeffs[i][1]) * LEVEL_MINUS_3DB;
}
}
@@ -374,7 +373,7 @@ static int decode_exponents(GetBitContext *gbc, int exp_strategy, int ngrps,
/* unpack groups */
group_size = exp_strategy + (exp_strategy == EXP_D45);
- for(grp=0,i=0; grp<ngrps; grp++) {
+ for (grp = 0, i = 0; grp < ngrps; grp++) {
expacc = get_bits(gbc, 7);
dexp[i++] = ungroup_3_in_7_bits_tab[expacc][0];
dexp[i++] = ungroup_3_in_7_bits_tab[expacc][1];
@@ -383,15 +382,15 @@ static int decode_exponents(GetBitContext *gbc, int exp_strategy, int ngrps,
/* convert to absolute exps and expand groups */
prevexp = absexp;
- for(i=0,j=0; i<ngrps*3; i++) {
+ for (i = 0, j = 0; i < ngrps * 3; i++) {
prevexp += dexp[i] - 2;
if (prevexp > 24U)
return -1;
switch (group_size) {
- case 4: dexps[j++] = prevexp;
- dexps[j++] = prevexp;
- case 2: dexps[j++] = prevexp;
- case 1: dexps[j++] = prevexp;
+ case 4: dexps[j++] = prevexp;
+ dexps[j++] = prevexp;
+ case 2: dexps[j++] = prevexp;
+ case 1: dexps[j++] = prevexp;
}
}
return 0;
@@ -414,7 +413,8 @@ static void calc_transform_coeffs_cpl(AC3DecodeContext *s)
if (s->channel_in_cpl[ch]) {
int cpl_coord = s->cpl_coords[ch][band] << 5;
for (bin = band_start; bin < band_end; bin++) {
- s->fixed_coeffs[ch][bin] = MULH(s->fixed_coeffs[CPL_CH][bin] << 4, cpl_coord);
+ s->fixed_coeffs[ch][bin] =
+ MULH(s->fixed_coeffs[CPL_CH][bin] << 4, cpl_coord);
}
if (ch == 2 && s->phase_flags[band]) {
for (bin = band_start; bin < band_end; bin++)
@@ -445,73 +445,70 @@ typedef struct {
static void ac3_decode_transform_coeffs_ch(AC3DecodeContext *s, int ch_index, mant_groups *m)
{
int start_freq = s->start_freq[ch_index];
- int end_freq = s->end_freq[ch_index];
- uint8_t *baps = s->bap[ch_index];
- int8_t *exps = s->dexps[ch_index];
- int *coeffs = s->fixed_coeffs[ch_index];
- int dither = (ch_index == CPL_CH) || s->dither_flag[ch_index];
+ int end_freq = s->end_freq[ch_index];
+ uint8_t *baps = s->bap[ch_index];
+ int8_t *exps = s->dexps[ch_index];
+ int *coeffs = s->fixed_coeffs[ch_index];
+ int dither = (ch_index == CPL_CH) || s->dither_flag[ch_index];
GetBitContext *gbc = &s->gbc;
int freq;
- for(freq = start_freq; freq < end_freq; freq++){
+ for (freq = start_freq; freq < end_freq; freq++) {
int bap = baps[freq];
int mantissa;
- switch(bap){
- case 0:
- if (dither)
- mantissa = (av_lfg_get(&s->dith_state) & 0x7FFFFF) - 0x400000;
- else
- mantissa = 0;
- break;
- case 1:
- if(m->b1){
- m->b1--;
- mantissa = m->b1_mant[m->b1];
- }
- else{
- int bits = get_bits(gbc, 5);
- mantissa = b1_mantissas[bits][0];
- m->b1_mant[1] = b1_mantissas[bits][1];
- m->b1_mant[0] = b1_mantissas[bits][2];
- m->b1 = 2;
- }
- break;
- case 2:
- if(m->b2){
- m->b2--;
- mantissa = m->b2_mant[m->b2];
- }
- else{
- int bits = get_bits(gbc, 7);
- mantissa = b2_mantissas[bits][0];
- m->b2_mant[1] = b2_mantissas[bits][1];
- m->b2_mant[0] = b2_mantissas[bits][2];
- m->b2 = 2;
- }
- break;
- case 3:
- mantissa = b3_mantissas[get_bits(gbc, 3)];
- break;
- case 4:
- if(m->b4){
- m->b4 = 0;
- mantissa = m->b4_mant;
- }
- else{
- int bits = get_bits(gbc, 7);
- mantissa = b4_mantissas[bits][0];
- m->b4_mant = b4_mantissas[bits][1];
- m->b4 = 1;
- }
- break;
- case 5:
- mantissa = b5_mantissas[get_bits(gbc, 4)];
- break;
- default: /* 6 to 15 */
- /* Shift mantissa and sign-extend it. */
- mantissa = get_sbits(gbc, quantization_tab[bap]);
- mantissa <<= 24 - quantization_tab[bap];
- break;
+ switch (bap) {
+ case 0:
+ if (dither)
+ mantissa = (av_lfg_get(&s->dith_state) & 0x7FFFFF) - 0x400000;
+ else
+ mantissa = 0;
+ break;
+ case 1:
+ if (m->b1) {
+ m->b1--;
+ mantissa = m->b1_mant[m->b1];
+ } else {
+ int bits = get_bits(gbc, 5);
+ mantissa = b1_mantissas[bits][0];
+ m->b1_mant[1] = b1_mantissas[bits][1];
+ m->b1_mant[0] = b1_mantissas[bits][2];
+ m->b1 = 2;
+ }
+ break;
+ case 2:
+ if (m->b2) {
+ m->b2--;
+ mantissa = m->b2_mant[m->b2];
+ } else {
+ int bits = get_bits(gbc, 7);
+ mantissa = b2_mantissas[bits][0];
+ m->b2_mant[1] = b2_mantissas[bits][1];
+ m->b2_mant[0] = b2_mantissas[bits][2];
+ m->b2 = 2;
+ }
+ break;
+ case 3:
+ mantissa = b3_mantissas[get_bits(gbc, 3)];
+ break;
+ case 4:
+ if (m->b4) {
+ m->b4 = 0;
+ mantissa = m->b4_mant;
+ } else {
+ int bits = get_bits(gbc, 7);
+ mantissa = b4_mantissas[bits][0];
+ m->b4_mant = b4_mantissas[bits][1];
+ m->b4 = 1;
+ }
+ break;
+ case 5:
+ mantissa = b5_mantissas[get_bits(gbc, 4)];
+ break;
+ default: /* 6 to 15 */
+ /* Shift mantissa and sign-extend it. */
+ mantissa = get_sbits(gbc, quantization_tab[bap]);
+ mantissa <<= 24 - quantization_tab[bap];
+ break;
}
coeffs[freq] = mantissa >> exps[freq];
}
@@ -525,10 +522,10 @@ static void ac3_decode_transform_coeffs_ch(AC3DecodeContext *s, int ch_index, ma
static void remove_dithering(AC3DecodeContext *s) {
int ch, i;
- for(ch=1; ch<=s->fbw_channels; ch++) {
- if(!s->dither_flag[ch] && s->channel_in_cpl[ch]) {
- for(i = s->start_freq[CPL_CH]; i<s->end_freq[CPL_CH]; i++) {
- if(!s->bap[CPL_CH][i])
+ for (ch = 1; ch <= s->fbw_channels; ch++) {
+ if (!s->dither_flag[ch] && s->channel_in_cpl[ch]) {
+ for (i = s->start_freq[CPL_CH]; i < s->end_freq[CPL_CH]; i++) {
+ if (!s->bap[CPL_CH][i])
s->fixed_coeffs[ch][i] = 0;
}
}
@@ -536,7 +533,7 @@ static void remove_dithering(AC3DecodeContext *s) {
}
static void decode_transform_coeffs_ch(AC3DecodeContext *s, int blk, int ch,
- mant_groups *m)
+ mant_groups *m)
{
if (!s->channel_uses_aht[ch]) {
ac3_decode_transform_coeffs_ch(s, ch, m);
@@ -580,7 +577,7 @@ static void decode_transform_coeffs(AC3DecodeContext *s, int blk)
}
do
s->fixed_coeffs[ch][end] = 0;
- while(++end < 256);
+ while (++end < 256);
}
/* zero the dithered coefficients for appropriate channels */
@@ -598,10 +595,10 @@ static void do_rematrixing(AC3DecodeContext *s)
end = FFMIN(s->end_freq[1], s->end_freq[2]);
- for(bnd=0; bnd<s->num_rematrixing_bands; bnd++) {
- if(s->rematrixing_flags[bnd]) {
- bndend = FFMIN(end, ff_ac3_rematrix_band_tab[bnd+1]);
- for(i=ff_ac3_rematrix_band_tab[bnd]; i<bndend; i++) {
+ for (bnd = 0; bnd < s->num_rematrixing_bands; bnd++) {
+ if (s->rematrixing_flags[bnd]) {
+ bndend = FFMIN(end, ff_ac3_rematrix_band_tab[bnd + 1]);
+ for (i = ff_ac3_rematrix_band_tab[bnd]; i < bndend; i++) {
int tmp0 = s->fixed_coeffs[1][i];
s->fixed_coeffs[1][i] += s->fixed_coeffs[2][i];
s->fixed_coeffs[2][i] = tmp0 - s->fixed_coeffs[2][i];
@@ -619,21 +616,23 @@ static inline void do_imdct(AC3DecodeContext *s, int channels)
{
int ch;
- for (ch=1; ch<=channels; ch++) {
+ for (ch = 1; ch <= channels; ch++) {
if (s->block_switch[ch]) {
int i;
- float *x = s->tmp_output+128;
- for(i=0; i<128; i++)
- x[i] = s->transform_coeffs[ch][2*i];
+ float *x = s->tmp_output + 128;
+ for (i = 0; i < 128; i++)
+ x[i] = s->transform_coeffs[ch][2 * i];
s->imdct_256.imdct_half(&s->imdct_256, s->tmp_output, x);
- s->dsp.vector_fmul_window(s->output[ch-1], s->delay[ch-1], s->tmp_output, s->window, 128);
- for(i=0; i<128; i++)
- x[i] = s->transform_coeffs[ch][2*i+1];
- s->imdct_256.imdct_half(&s->imdct_256, s->delay[ch-1], x);
+ s->dsp.vector_fmul_window(s->output[ch - 1], s->delay[ch - 1],
+ s->tmp_output, s->window, 128);
+ for (i = 0; i < 128; i++)
+ x[i] = s->transform_coeffs[ch][2 * i + 1];
+ s->imdct_256.imdct_half(&s->imdct_256, s->delay[ch - 1], x);
} else {
s->imdct_512.imdct_half(&s->imdct_512, s->tmp_output, s->transform_coeffs[ch]);
- s->dsp.vector_fmul_window(s->output[ch-1], s->delay[ch-1], s->tmp_output, s->window, 128);
- memcpy(s->delay[ch-1], s->tmp_output+128, 128*sizeof(float));
+ s->dsp.vector_fmul_window(s->output[ch - 1], s->delay[ch - 1],
+ s->tmp_output, s->window, 128);
+ memcpy(s->delay[ch - 1], s->tmp_output + 128, 128 * sizeof(float));
}
}
}
@@ -641,24 +640,25 @@ static inline void do_imdct(AC3DecodeContext *s, int channels)
/**
* Downmix the output to mono or stereo.
*/
-void ff_ac3_downmix_c(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len)
+void ff_ac3_downmix_c(float (*samples)[256], float (*matrix)[2],
+ int out_ch, int in_ch, int len)
{
int i, j;
float v0, v1;
- if(out_ch == 2) {
- for(i=0; i<len; i++) {
+ if (out_ch == 2) {
+ for (i = 0; i < len; i++) {
v0 = v1 = 0.0f;
- for(j=0; j<in_ch; j++) {
+ for (j = 0; j < in_ch; j++) {
v0 += samples[j][i] * matrix[j][0];
v1 += samples[j][i] * matrix[j][1];
}
samples[0][i] = v0;
samples[1][i] = v1;
}
- } else if(out_ch == 1) {
- for(i=0; i<len; i++) {
+ } else if (out_ch == 1) {
+ for (i = 0; i < len; i++) {
v0 = 0.0f;
- for(j=0; j<in_ch; j++)
+ for (j = 0; j < in_ch; j++)
v0 += samples[j][i] * matrix[j][0];
samples[0][i] = v0;
}
@@ -671,25 +671,25 @@ void ff_ac3_downmix_c(float (*samples)[256], float (*matrix)[2], int out_ch, int
static void ac3_upmix_delay(AC3DecodeContext *s)
{
int channel_data_size = sizeof(s->delay[0]);
- switch(s->channel_mode) {
- case AC3_CHMODE_DUALMONO:
- case AC3_CHMODE_STEREO:
- /* upmix mono to stereo */
- memcpy(s->delay[1], s->delay[0], channel_data_size);
- break;
- case AC3_CHMODE_2F2R:
- memset(s->delay[3], 0, channel_data_size);
- case AC3_CHMODE_2F1R:
- memset(s->delay[2], 0, channel_data_size);
- break;
- case AC3_CHMODE_3F2R:
- memset(s->delay[4], 0, channel_data_size);
- case AC3_CHMODE_3F1R:
- memset(s->delay[3], 0, channel_data_size);
- case AC3_CHMODE_3F:
- memcpy(s->delay[2], s->delay[1], channel_data_size);
- memset(s->delay[1], 0, channel_data_size);
- break;
+ switch (s->channel_mode) {
+ case AC3_CHMODE_DUALMONO:
+ case AC3_CHMODE_STEREO:
+ /* upmix mono to stereo */
+ memcpy(s->delay[1], s->delay[0], channel_data_size);
+ break;
+ case AC3_CHMODE_2F2R:
+ memset(s->delay[3], 0, channel_data_size);
+ case AC3_CHMODE_2F1R:
+ memset(s->delay[2], 0, channel_data_size);
+ break;
+ case AC3_CHMODE_3F2R:
+ memset(s->delay[4], 0, channel_data_size);
+ case AC3_CHMODE_3F1R:
+ memset(s->delay[3], 0, channel_data_size);
+ case AC3_CHMODE_3F:
+ memcpy(s->delay[2], s->delay[1], channel_data_size);
+ memset(s->delay[1], 0, channel_data_size);
+ break;
}
}
@@ -742,7 +742,7 @@ static void decode_band_structure(GetBitContext *gbc, int blk, int eac3,
bnd_sz[0] = ecpl ? 6 : 12;
for (bnd = 0, subbnd = 1; subbnd < n_subbands; subbnd++) {
int subbnd_size = (ecpl && subbnd < 4) ? 6 : 12;
- if (band_struct[subbnd-1]) {
+ if (band_struct[subbnd - 1]) {
n_bands--;
bnd_sz[bnd] += subbnd_size;
} else {
@@ -779,7 +779,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
if (s->block_switch_syntax) {
for (ch = 1; ch <= fbw_channels; ch++) {
s->block_switch[ch] = get_bits1(gbc);
- if(ch > 1 && s->block_switch[ch] != s->block_switch[1])
+ if (ch > 1 && s->block_switch[ch] != s->block_switch[1])
different_transforms = 1;
}
}
@@ -794,13 +794,13 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
/* dynamic range */
i = !(s->channel_mode);
do {
- if(get_bits1(gbc)) {
- s->dynamic_range[i] = ((dynamic_range_tab[get_bits(gbc, 8)]-1.0) *
- s->drc_scale)+1.0;
- } else if(blk == 0) {
+ if (get_bits1(gbc)) {
+ s->dynamic_range[i] = ((dynamic_range_tab[get_bits(gbc, 8)] - 1.0) *
+ s->drc_scale) + 1.0;
+ } else if (blk == 0) {
s->dynamic_range[i] = 1.0f;
}
- } while(i--);
+ } while (i--);
/* spectral extension strategy */
if (s->eac3 && (!blk || get_bits1(gbc))) {
@@ -881,7 +881,8 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
bandsize = s->spx_band_sizes[bnd];
nratio = ((float)((bin + (bandsize >> 1))) / s->spx_dst_end_freq) - spx_blend;
nratio = av_clipf(nratio, 0.0f, 1.0f);
- nblend = sqrtf(3.0f * nratio); // noise is scaled by sqrt(3) to give unity variance
+ nblend = sqrtf(3.0f * nratio); // noise is scaled by sqrt(3)
+ // to give unity variance
sblend = sqrtf(1.0f - nratio);
bin += bandsize;
@@ -891,7 +892,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
if (spx_coord_exp == 15) spx_coord_mant <<= 1;
else spx_coord_mant += 4;
spx_coord_mant <<= (25 - spx_coord_exp - master_spx_coord);
- spx_coord = spx_coord_mant * (1.0f/(1<<23));
+ spx_coord = spx_coord_mant * (1.0f / (1 << 23));
/* multiply noise and signal blending factors by spx coordinate */
s->spx_noise_blend [ch][bnd] = nblend * spx_coord;
@@ -964,8 +965,9 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
s->phase_flags_in_use = 0;
}
} else if (!s->eac3) {
- if(!blk) {
- av_log(s->avctx, AV_LOG_ERROR, "new coupling strategy must be present in block 0\n");
+ if (!blk) {
+ av_log(s->avctx, AV_LOG_ERROR, "new coupling strategy must "
+ "be present in block 0\n");
return -1;
} else {
s->cpl_in_use[blk] = s->cpl_in_use[blk-1];
@@ -994,7 +996,8 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
s->cpl_coords[ch][bnd] >>= (cpl_coord_exp + master_cpl_coord);
}
} else if (!blk) {
- av_log(s->avctx, AV_LOG_ERROR, "new coupling coordinates must be present in block 0\n");
+ av_log(s->avctx, AV_LOG_ERROR, "new coupling coordinates must "
+ "be present in block 0\n");
return -1;
}
} else {
@@ -1019,10 +1022,11 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
} else if (s->spx_in_use && s->spx_src_start_freq <= 61) {
s->num_rematrixing_bands--;
}
- for(bnd=0; bnd<s->num_rematrixing_bands; bnd++)
+ for (bnd = 0; bnd < s->num_rematrixing_bands; bnd++)
s->rematrixing_flags[bnd] = get_bits1(gbc);
} else if (!blk) {
- av_log(s->avctx, AV_LOG_WARNING, "Warning: new rematrixing strategy not present in block 0\n");
+ av_log(s->avctx, AV_LOG_WARNING, "Warning: "
+ "new rematrixing strategy not present in block 0\n");
s->num_rematrixing_bands = 0;
}
}
@@ -1031,7 +1035,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
for (ch = !cpl_in_use; ch <= s->channels; ch++) {
if (!s->eac3)
s->exp_strategy[blk][ch] = get_bits(gbc, 2 - (ch == s->lfe_ch));
- if(s->exp_strategy[blk][ch] != EXP_REUSE)
+ if (s->exp_strategy[blk][ch] != EXP_REUSE)
bit_alloc_stages[ch] = 3;
}
@@ -1054,8 +1058,8 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
s->end_freq[ch] = bandwidth_code * 3 + 73;
}
group_size = 3 << (s->exp_strategy[blk][ch] - 1);
- s->num_exp_groups[ch] = (s->end_freq[ch]+group_size-4) / group_size;
- if(blk > 0 && s->end_freq[ch] != prev)
+ s->num_exp_groups[ch] = (s->end_freq[ch] + group_size-4) / group_size;
+ if (blk > 0 && s->end_freq[ch] != prev)
memset(bit_alloc_stages, 3, AC3_MAX_CHANNELS);
}
}
@@ -1074,7 +1078,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
av_log(s->avctx, AV_LOG_ERROR, "exponent out-of-range\n");
return -1;
}
- if(ch != CPL_CH && ch != s->lfe_ch)
+ if (ch != CPL_CH && ch != s->lfe_ch)
skip_bits(gbc, 2); /* skip gainrng */
}
}
@@ -1087,17 +1091,18 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
s->bit_alloc_params.slow_gain = ff_ac3_slow_gain_tab[get_bits(gbc, 2)];
s->bit_alloc_params.db_per_bit = ff_ac3_db_per_bit_tab[get_bits(gbc, 2)];
s->bit_alloc_params.floor = ff_ac3_floor_tab[get_bits(gbc, 3)];
- for(ch=!cpl_in_use; ch<=s->channels; ch++)
+ for (ch = !cpl_in_use; ch <= s->channels; ch++)
bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2);
} else if (!blk) {
- av_log(s->avctx, AV_LOG_ERROR, "new bit allocation info must be present in block 0\n");
+ av_log(s->avctx, AV_LOG_ERROR, "new bit allocation info must "
+ "be present in block 0\n");
return -1;
}
}
/* signal-to-noise ratio offsets and fast gains (signal-to-mask ratios) */
- if(!s->eac3 || !blk){
- if(s->snr_offset_strategy && get_bits1(gbc)) {
+ if (!s->eac3 || !blk) {
+ if (s->snr_offset_strategy && get_bits1(gbc)) {
int snr = 0;
int csnr;
csnr = (get_bits(gbc, 6) - 15) << 4;
@@ -1106,7 +1111,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
if (ch == i || s->snr_offset_strategy == 2)
snr = (csnr + get_bits(gbc, 4)) << 2;
/* run at least last bit allocation stage if snr offset changes */
- if(blk && s->snr_offset[ch] != snr) {
+ if (blk && s->snr_offset[ch] != snr) {
bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 1);
}
s->snr_offset[ch] = snr;
@@ -1116,7 +1121,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
int prev = s->fast_gain[ch];
s->fast_gain[ch] = ff_ac3_fast_gain_tab[get_bits(gbc, 3)];
/* run last 2 bit allocation stages if fast gain changes */
- if(blk && prev != s->fast_gain[ch])
+ if (blk && prev != s->fast_gain[ch])
bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2);
}
}
@@ -1132,7 +1137,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
int prev = s->fast_gain[ch];
s->fast_gain[ch] = ff_ac3_fast_gain_tab[get_bits(gbc, 3)];
/* run last 2 bit allocation stages if fast gain changes */
- if(blk && prev != s->fast_gain[ch])
+ if (blk && prev != s->fast_gain[ch])
bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2);
}
} else if (s->eac3 && !blk) {
@@ -1152,14 +1157,15 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
int sl = get_bits(gbc, 3);
/* run last 2 bit allocation stages for coupling channel if
coupling leak changes */
- if(blk && (fl != s->bit_alloc_params.cpl_fast_leak ||
- sl != s->bit_alloc_params.cpl_slow_leak)) {
+ if (blk && (fl != s->bit_alloc_params.cpl_fast_leak ||
+ sl != s->bit_alloc_params.cpl_slow_leak)) {
bit_alloc_stages[CPL_CH] = FFMAX(bit_alloc_stages[CPL_CH], 2);
}
s->bit_alloc_params.cpl_fast_leak = fl;
s->bit_alloc_params.cpl_slow_leak = sl;
} else if (!s->eac3 && !blk) {
- av_log(s->avctx, AV_LOG_ERROR, "new coupling leak info must be present in block 0\n");
+ av_log(s->avctx, AV_LOG_ERROR, "new coupling leak info must "
+ "be present in block 0\n");
return -1;
}
s->first_cpl_leak = 0;
@@ -1183,40 +1189,40 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
for (seg = 0; seg < s->dba_nsegs[ch]; seg++) {
s->dba_offsets[ch][seg] = get_bits(gbc, 5);
s->dba_lengths[ch][seg] = get_bits(gbc, 4);
- s->dba_values[ch][seg] = get_bits(gbc, 3);
+ s->dba_values[ch][seg] = get_bits(gbc, 3);
}
/* run last 2 bit allocation stages if new dba values */
bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2);
}
}
- } else if(blk == 0) {
- for(ch=0; ch<=s->channels; ch++) {
+ } else if (blk == 0) {
+ for (ch = 0; ch <= s->channels; ch++) {
s->dba_mode[ch] = DBA_NONE;
}
}
/* Bit allocation */
- for(ch=!cpl_in_use; ch<=s->channels; ch++) {
- if(bit_alloc_stages[ch] > 2) {
+ for (ch = !cpl_in_use; ch <= s->channels; ch++) {
+ if (bit_alloc_stages[ch] > 2) {
/* Exponent mapping into PSD and PSD integration */
ff_ac3_bit_alloc_calc_psd(s->dexps[ch],
s->start_freq[ch], s->end_freq[ch],
s->psd[ch], s->band_psd[ch]);
}
- if(bit_alloc_stages[ch] > 1) {
+ if (bit_alloc_stages[ch] > 1) {
/* Compute excitation function, Compute masking curve, and
Apply delta bit allocation */
if (ff_ac3_bit_alloc_calc_mask(&s->bit_alloc_params, s->band_psd[ch],
- s->start_freq[ch], s->end_freq[ch],
- s->fast_gain[ch], (ch == s->lfe_ch),
- s->dba_mode[ch], s->dba_nsegs[ch],
+ s->start_freq[ch], s->end_freq[ch],
+ s->fast_gain[ch], (ch == s->lfe_ch),
+ s->dba_mode[ch], s->dba_nsegs[ch],
s->dba_offsets[ch], s->dba_lengths[ch],
- s->dba_values[ch], s->mask[ch])) {
+ s->dba_values[ch], s->mask[ch])) {
av_log(s->avctx, AV_LOG_ERROR, "error in bit allocation\n");
return -1;
}
}
- if(bit_alloc_stages[ch] > 0) {
+ if (bit_alloc_stages[ch] > 0) {
/* Compute bit allocation */
const uint8_t *bap_tab = s->channel_uses_aht[ch] ?
ff_eac3_hebap_tab : ff_ac3_bap_tab;
@@ -1231,7 +1237,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
/* unused dummy data */
if (s->skip_syntax && get_bits1(gbc)) {
int skipl = get_bits(gbc, 9);
- while(skipl--)
+ while (skipl--)
skip_bits(gbc, 8);
}
@@ -1242,18 +1248,19 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
/* TODO: generate enhanced coupling coordinates and uncouple */
/* recover coefficients if rematrixing is in use */
- if(s->channel_mode == AC3_CHMODE_STEREO)
+ if (s->channel_mode == AC3_CHMODE_STEREO)
do_rematrixing(s);
/* apply scaling to coefficients (headroom, dynrng) */
- for(ch=1; ch<=s->channels; ch++) {
+ for (ch = 1; ch <= s->channels; ch++) {
float gain = s->mul_bias / 4194304.0f;
- if(s->channel_mode == AC3_CHMODE_DUALMONO) {
- gain *= s->dynamic_range[2-ch];
+ if (s->channel_mode == AC3_CHMODE_DUALMONO) {
+ gain *= s->dynamic_range[2 - ch];
} else {
gain *= s->dynamic_range[0];
}
- s->fmt_conv.int32_to_float_fmul_scalar(s->transform_coeffs[ch], s->fixed_coeffs[ch], gain, 256);
+ s->fmt_conv.int32_to_float_fmul_scalar(s->transform_coeffs[ch],
+ s->fixed_coeffs[ch], gain, 256);
}
/* apply spectral extension to high frequency bins */
@@ -1267,27 +1274,30 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
downmix_output = s->channels != s->out_channels &&
!((s->output_mode & AC3_OUTPUT_LFEON) &&
s->fbw_channels == s->out_channels);
- if(different_transforms) {
+ if (different_transforms) {
/* the delay samples have already been downmixed, so we upmix the delay
samples in order to reconstruct all channels before downmixing. */
- if(s->downmixed) {
+ if (s->downmixed) {
s->downmixed = 0;
ac3_upmix_delay(s);
}
do_imdct(s, s->channels);
- if(downmix_output) {
- s->dsp.ac3_downmix(s->output, s->downmix_coeffs, s->out_channels, s->fbw_channels, 256);
+ if (downmix_output) {
+ s->dsp.ac3_downmix(s->output, s->downmix_coeffs,
+ s->out_channels, s->fbw_channels, 256);
}
} else {
- if(downmix_output) {
- s->dsp.ac3_downmix(s->transform_coeffs+1, s->downmix_coeffs, s->out_channels, s->fbw_channels, 256);
+ if (downmix_output) {
+ s->dsp.ac3_downmix(s->transform_coeffs + 1, s->downmix_coeffs,
+ s->out_channels, s->fbw_channels, 256);
}
- if(downmix_output && !s->downmixed) {
+ if (downmix_output && !s->downmixed) {
s->downmixed = 1;
- s->dsp.ac3_downmix(s->delay, s->downmix_coeffs, s->out_channels, s->fbw_channels, 128);
+ s->dsp.ac3_downmix(s->delay, s->downmix_coeffs, s->out_channels,
+ s->fbw_channels, 128);
}
do_imdct(s, s->out_channels);
@@ -1327,33 +1337,34 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
err = parse_frame_header(s);
if (err) {
- switch(err) {
- case AAC_AC3_PARSE_ERROR_SYNC:
- av_log(avctx, AV_LOG_ERROR, "frame sync error\n");
- return -1;
- case AAC_AC3_PARSE_ERROR_BSID:
- av_log(avctx, AV_LOG_ERROR, "invalid bitstream id\n");
- break;
- case AAC_AC3_PARSE_ERROR_SAMPLE_RATE:
- av_log(avctx, AV_LOG_ERROR, "invalid sample rate\n");
- break;
- case AAC_AC3_PARSE_ERROR_FRAME_SIZE:
- av_log(avctx, AV_LOG_ERROR, "invalid frame size\n");
- break;
- case AAC_AC3_PARSE_ERROR_FRAME_TYPE:
- /* skip frame if CRC is ok. otherwise use error concealment. */
- /* TODO: add support for substreams and dependent frames */
- if(s->frame_type == EAC3_FRAME_TYPE_DEPENDENT || s->substreamid) {
- av_log(avctx, AV_LOG_ERROR, "unsupported frame type : skipping frame\n");
- *got_frame_ptr = 0;
- return s->frame_size;
- } else {
- av_log(avctx, AV_LOG_ERROR, "invalid frame type\n");
- }
- break;
- default:
- av_log(avctx, AV_LOG_ERROR, "invalid header\n");
- break;
+ switch (err) {
+ case AAC_AC3_PARSE_ERROR_SYNC:
+ av_log(avctx, AV_LOG_ERROR, "frame sync error\n");
+ return -1;
+ case AAC_AC3_PARSE_ERROR_BSID:
+ av_log(avctx, AV_LOG_ERROR, "invalid bitstream id\n");
+ break;
+ case AAC_AC3_PARSE_ERROR_SAMPLE_RATE:
+ av_log(avctx, AV_LOG_ERROR, "invalid sample rate\n");
+ break;
+ case AAC_AC3_PARSE_ERROR_FRAME_SIZE:
+ av_log(avctx, AV_LOG_ERROR, "invalid frame size\n");
+ break;
+ case AAC_AC3_PARSE_ERROR_FRAME_TYPE:
+ /* skip frame if CRC is ok. otherwise use error concealment. */
+ /* TODO: add support for substreams and dependent frames */
+ if (s->frame_type == EAC3_FRAME_TYPE_DEPENDENT || s->substreamid) {
+ av_log(avctx, AV_LOG_ERROR, "unsupported frame type : "
+ "skipping frame\n");
+ *got_frame_ptr = 0;
+ return s->frame_size;
+ } else {
+ av_log(avctx, AV_LOG_ERROR, "invalid frame type\n");
+ }
+ break;
+ default:
+ av_log(avctx, AV_LOG_ERROR, "invalid header\n");
+ break;
}
} else {
/* check that reported frame size fits in input buffer */
@@ -1362,7 +1373,8 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
err = AAC_AC3_PARSE_ERROR_FRAME_SIZE;
} else if (avctx->err_recognition & AV_EF_CRCCHECK) {
/* check for crc mismatch */
- if (av_crc(av_crc_get_table(AV_CRC_16_ANSI), 0, &buf[2], s->frame_size-2)) {
+ if (av_crc(av_crc_get_table(AV_CRC_16_ANSI), 0, &buf[2],
+ s->frame_size - 2)) {
av_log(avctx, AV_LOG_ERROR, "frame CRC mismatch\n");
err = AAC_AC3_PARSE_ERROR_CRC;
}
@@ -1372,12 +1384,12 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
/* if frame is ok, set audio parameters */
if (!err) {
avctx->sample_rate = s->sample_rate;
- avctx->bit_rate = s->bit_rate;
+ avctx->bit_rate = s->bit_rate;
/* channel config */
s->out_channels = s->channels;
- s->output_mode = s->channel_mode;
- if(s->lfe_on)
+ s->output_mode = s->channel_mode;
+ if (s->lfe_on)
s->output_mode |= AC3_OUTPUT_LFEON;
if (avctx->request_channels > 0 && avctx->request_channels <= 2 &&
avctx->request_channels < s->channels) {
@@ -1385,7 +1397,7 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
s->output_mode = avctx->request_channels == 1 ? AC3_CHMODE_MONO : AC3_CHMODE_STEREO;
s->channel_layout = ff_ac3_channel_layout_tab[s->output_mode];
}
- avctx->channels = s->out_channels;
+ avctx->channels = s->out_channels;
avctx->channel_layout = s->channel_layout;
s->loro_center_mix_level = gain_levels[ center_levels[s-> center_mix_level]];
@@ -1393,13 +1405,13 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
s->ltrt_center_mix_level = LEVEL_MINUS_3DB;
s->ltrt_surround_mix_level = LEVEL_MINUS_3DB;
/* set downmixing coefficients if needed */
- if(s->channels != s->out_channels && !((s->output_mode & AC3_OUTPUT_LFEON) &&
+ if (s->channels != s->out_channels && !((s->output_mode & AC3_OUTPUT_LFEON) &&
s->fbw_channels == s->out_channels)) {
set_downmix_coeffs(s);
}
} else if (!s->out_channels) {
s->out_channels = avctx->channels;
- if(s->out_channels < s->channels)
+ if (s->out_channels < s->channels)
s->output_mode = s->out_channels == 1 ? AC3_CHMODE_MONO : AC3_CHMODE_STEREO;
}
/* set audio service type based on bitstream mode for AC-3 */
@@ -1476,19 +1488,19 @@ static const AVClass ac3_decoder_class = {
};
AVCodec ff_ac3_decoder = {
- .name = "ac3",
- .type = AVMEDIA_TYPE_AUDIO,
- .id = CODEC_ID_AC3,
+ .name = "ac3",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = CODEC_ID_AC3,
.priv_data_size = sizeof (AC3DecodeContext),
- .init = ac3_decode_init,
- .close = ac3_decode_end,
- .decode = ac3_decode_frame,
- .capabilities = CODEC_CAP_DR1,
- .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"),
- .sample_fmts = (const enum AVSampleFormat[]) {
- AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
- },
- .priv_class = &ac3_decoder_class,
+ .init = ac3_decode_init,
+ .close = ac3_decode_end,
+ .decode = ac3_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
+ .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"),
+ .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLT,
+ AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_NONE },
+ .priv_class = &ac3_decoder_class,
};
#if CONFIG_EAC3_DECODER
@@ -1498,19 +1510,20 @@ static const AVClass eac3_decoder_class = {
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};
+
AVCodec ff_eac3_decoder = {
- .name = "eac3",
- .type = AVMEDIA_TYPE_AUDIO,
- .id = CODEC_ID_EAC3,
+ .name = "eac3",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = CODEC_ID_EAC3,
.priv_data_size = sizeof (AC3DecodeContext),
- .init = ac3_decode_init,
- .close = ac3_decode_end,
- .decode = ac3_decode_frame,
- .capabilities = CODEC_CAP_DR1,
- .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52B (AC-3, E-AC-3)"),
- .sample_fmts = (const enum AVSampleFormat[]) {
- AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
- },
- .priv_class = &eac3_decoder_class,
+ .init = ac3_decode_init,
+ .close = ac3_decode_end,
+ .decode = ac3_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
+ .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52B (AC-3, E-AC-3)"),
+ .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLT,
+ AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_NONE },
+ .priv_class = &eac3_decoder_class,
};
#endif
diff --git a/libavcodec/adpcmenc.c b/libavcodec/adpcmenc.c
index c193f5c7ef..e500a1cdbf 100644
--- a/libavcodec/adpcmenc.c
+++ b/libavcodec/adpcmenc.c
@@ -66,37 +66,45 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx)
if (avctx->channels > 2)
return -1; /* only stereo or mono =) */
- if(avctx->trellis && (unsigned)avctx->trellis > 16U){
+ if (avctx->trellis && (unsigned)avctx->trellis > 16U) {
av_log(avctx, AV_LOG_ERROR, "invalid trellis size\n");
return -1;
}
if (avctx->trellis) {
- int frontier = 1 << avctx->trellis;
+ int frontier = 1 << avctx->trellis;
int max_paths = frontier * FREEZE_INTERVAL;
- FF_ALLOC_OR_GOTO(avctx, s->paths, max_paths * sizeof(*s->paths), error);
- FF_ALLOC_OR_GOTO(avctx, s->node_buf, 2 * frontier * sizeof(*s->node_buf), error);
- FF_ALLOC_OR_GOTO(avctx, s->nodep_buf, 2 * frontier * sizeof(*s->nodep_buf), error);
- FF_ALLOC_OR_GOTO(avctx, s->trellis_hash, 65536 * sizeof(*s->trellis_hash), error);
+ FF_ALLOC_OR_GOTO(avctx, s->paths,
+ max_paths * sizeof(*s->paths), error);
+ FF_ALLOC_OR_GOTO(avctx, s->node_buf,
+ 2 * frontier * sizeof(*s->node_buf), error);
+ FF_ALLOC_OR_GOTO(avctx, s->nodep_buf,
+ 2 * frontier * sizeof(*s->nodep_buf), error);
+ FF_ALLOC_OR_GOTO(avctx, s->trellis_hash,
+ 65536 * sizeof(*s->trellis_hash), error);
}
avctx->bits_per_coded_sample = av_get_bits_per_sample(avctx->codec->id);
- switch(avctx->codec->id) {
+ switch (avctx->codec->id) {
case CODEC_ID_ADPCM_IMA_WAV:
- avctx->frame_size = (BLKSIZE - 4 * avctx->channels) * 8 / (4 * avctx->channels) + 1; /* each 16 bits sample gives one nibble */
- /* and we have 4 bytes per channel overhead */
+ /* each 16 bits sample gives one nibble
+ and we have 4 bytes per channel overhead */
+ avctx->frame_size = (BLKSIZE - 4 * avctx->channels) * 8 /
+ (4 * avctx->channels) + 1;
+ /* seems frame_size isn't taken into account...
+ have to buffer the samples :-( */
avctx->block_align = BLKSIZE;
avctx->bits_per_coded_sample = 4;
- /* seems frame_size isn't taken into account... have to buffer the samples :-( */
break;
case CODEC_ID_ADPCM_IMA_QT:
- avctx->frame_size = 64;
+ avctx->frame_size = 64;
avctx->block_align = 34 * avctx->channels;
break;
case CODEC_ID_ADPCM_MS:
- avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 / avctx->channels + 2; /* each 16 bits sample gives one nibble */
- /* and we have 7 bytes per channel overhead */
+ /* each 16 bits sample gives one nibble
+ and we have 7 bytes per channel overhead */
+ avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 / avctx->channels + 2;
avctx->block_align = BLKSIZE;
avctx->bits_per_coded_sample = 4;
avctx->extradata_size = 32;
@@ -111,14 +119,15 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx)
}
break;
case CODEC_ID_ADPCM_YAMAHA:
- avctx->frame_size = BLKSIZE * avctx->channels;
+ avctx->frame_size = BLKSIZE * avctx->channels;
avctx->block_align = BLKSIZE;
break;
case CODEC_ID_ADPCM_SWF:
if (avctx->sample_rate != 11025 &&
avctx->sample_rate != 22050 &&
avctx->sample_rate != 44100) {
- av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, 22050 or 44100\n");
+ av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, "
+ "22050 or 44100\n");
goto error;
}
avctx->frame_size = 512 * (avctx->sample_rate / 11025);
@@ -127,7 +136,7 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx)
goto error;
}
- avctx->coded_frame= avcodec_alloc_frame();
+ avctx->coded_frame = avcodec_alloc_frame();
avctx->coded_frame->key_frame= 1;
return 0;
@@ -152,19 +161,23 @@ static av_cold int adpcm_encode_close(AVCodecContext *avctx)
}
-static inline unsigned char adpcm_ima_compress_sample(ADPCMChannelStatus *c, short sample)
+static inline unsigned char adpcm_ima_compress_sample(ADPCMChannelStatus *c,
+ short sample)
{
- int delta = sample - c->prev_sample;
- int nibble = FFMIN(7, abs(delta)*4/ff_adpcm_step_table[c->step_index]) + (delta<0)*8;
- c->prev_sample += ((ff_adpcm_step_table[c->step_index] * ff_adpcm_yamaha_difflookup[nibble]) / 8);
+ int delta = sample - c->prev_sample;
+ int nibble = FFMIN(7, abs(delta) * 4 /
+ ff_adpcm_step_table[c->step_index]) + (delta < 0) * 8;
+ c->prev_sample += ((ff_adpcm_step_table[c->step_index] *
+ ff_adpcm_yamaha_difflookup[nibble]) / 8);
c->prev_sample = av_clip_int16(c->prev_sample);
- c->step_index = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88);
+ c->step_index = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88);
return nibble;
}
-static inline unsigned char adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c, short sample)
+static inline unsigned char adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c,
+ short sample)
{
- int delta = sample - c->prev_sample;
+ int delta = sample - c->prev_sample;
int diff, step = ff_adpcm_step_table[c->step_index];
int nibble = 8*(delta < 0);
@@ -173,17 +186,17 @@ static inline unsigned char adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c,
if (delta >= step) {
nibble |= 4;
- delta -= step;
+ delta -= step;
}
step >>= 1;
if (delta >= step) {
nibble |= 2;
- delta -= step;
+ delta -= step;
}
step >>= 1;
if (delta >= step) {
nibble |= 1;
- delta -= step;
+ delta -= step;
}
diff -= delta;
@@ -193,47 +206,53 @@ static inline unsigned char adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c,
c->prev_sample += diff;
c->prev_sample = av_clip_int16(c->prev_sample);
- c->step_index = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88);
+ c->step_index = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88);
return nibble;
}
-static inline unsigned char adpcm_ms_compress_sample(ADPCMChannelStatus *c, short sample)
+static inline unsigned char adpcm_ms_compress_sample(ADPCMChannelStatus *c,
+ short sample)
{
int predictor, nibble, bias;
- predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
+ predictor = (((c->sample1) * (c->coeff1)) +
+ (( c->sample2) * (c->coeff2))) / 64;
- nibble= sample - predictor;
- if(nibble>=0) bias= c->idelta/2;
- else bias=-c->idelta/2;
+ nibble = sample - predictor;
+ if (nibble >= 0)
+ bias = c->idelta / 2;
+ else
+ bias = -c->idelta / 2;
- nibble= (nibble + bias) / c->idelta;
- nibble= av_clip(nibble, -8, 7)&0x0F;
+ nibble = (nibble + bias) / c->idelta;
+ nibble = av_clip(nibble, -8, 7) & 0x0F;
- predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
+ predictor += (signed)((nibble & 0x08) ? (nibble - 0x10) : nibble) * c->idelta;
c->sample2 = c->sample1;
c->sample1 = av_clip_int16(predictor);
c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8;
- if (c->idelta < 16) c->idelta = 16;
+ if (c->idelta < 16)
+ c->idelta = 16;
return nibble;
}
-static inline unsigned char adpcm_yamaha_compress_sample(ADPCMChannelStatus *c, short sample)
+static inline unsigned char adpcm_yamaha_compress_sample(ADPCMChannelStatus *c,
+ short sample)
{
int nibble, delta;
- if(!c->step) {
+ if (!c->step) {
c->predictor = 0;
- c->step = 127;
+ c->step = 127;
}
delta = sample - c->predictor;
- nibble = FFMIN(7, abs(delta)*4/c->step) + (delta<0)*8;
+ nibble = FFMIN(7, abs(delta) * 4 / c->step) + (delta < 0) * 8;
c->predictor += ((c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8);
c->predictor = av_clip_int16(c->predictor);
@@ -249,57 +268,61 @@ static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples,
//FIXME 6% faster if frontier is a compile-time constant
ADPCMEncodeContext *s = avctx->priv_data;
const int frontier = 1 << avctx->trellis;
- const int stride = avctx->channels;
- const int version = avctx->codec->id;
- TrellisPath *paths = s->paths, *p;
- TrellisNode *node_buf = s->node_buf;
- TrellisNode **nodep_buf = s->nodep_buf;
- TrellisNode **nodes = nodep_buf; // nodes[] is always sorted by .ssd
+ const int stride = avctx->channels;
+ const int version = avctx->codec->id;
+ TrellisPath *paths = s->paths, *p;
+ TrellisNode *node_buf = s->node_buf;
+ TrellisNode **nodep_buf = s->nodep_buf;
+ TrellisNode **nodes = nodep_buf; // nodes[] is always sorted by .ssd
TrellisNode **nodes_next = nodep_buf + frontier;
int pathn = 0, froze = -1, i, j, k, generation = 0;
uint8_t *hash = s->trellis_hash;
memset(hash, 0xff, 65536 * sizeof(*hash));
memset(nodep_buf, 0, 2 * frontier * sizeof(*nodep_buf));
- nodes[0] = node_buf + frontier;
- nodes[0]->ssd = 0;
- nodes[0]->path = 0;
- nodes[0]->step = c->step_index;
+ nodes[0] = node_buf + frontier;
+ nodes[0]->ssd = 0;
+ nodes[0]->path = 0;
+ nodes[0]->step = c->step_index;
nodes[0]->sample1 = c->sample1;
nodes[0]->sample2 = c->sample2;
- if((version == CODEC_ID_ADPCM_IMA_WAV) || (version == CODEC_ID_ADPCM_IMA_QT) || (version == CODEC_ID_ADPCM_SWF))
+ if (version == CODEC_ID_ADPCM_IMA_WAV ||
+ version == CODEC_ID_ADPCM_IMA_QT ||
+ version == CODEC_ID_ADPCM_SWF)
nodes[0]->sample1 = c->prev_sample;
- if(version == CODEC_ID_ADPCM_MS)
+ if (version == CODEC_ID_ADPCM_MS)
nodes[0]->step = c->idelta;
- if(version == CODEC_ID_ADPCM_YAMAHA) {
- if(c->step == 0) {
- nodes[0]->step = 127;
+ if (version == CODEC_ID_ADPCM_YAMAHA) {
+ if (c->step == 0) {
+ nodes[0]->step = 127;
nodes[0]->sample1 = 0;
} else {
- nodes[0]->step = c->step;
+ nodes[0]->step = c->step;
nodes[0]->sample1 = c->predictor;
}
}
- for(i=0; i<n; i++) {
+ for (i = 0; i < n; i++) {
TrellisNode *t = node_buf + frontier*(i&1);
TrellisNode **u;
- int sample = samples[i*stride];
+ int sample = samples[i * stride];
int heap_pos = 0;
- memset(nodes_next, 0, frontier*sizeof(TrellisNode*));
- for(j=0; j<frontier && nodes[j]; j++) {
- // higher j have higher ssd already, so they're likely to yield a suboptimal next sample too
- const int range = (j < frontier/2) ? 1 : 0;
- const int step = nodes[j]->step;
+ memset(nodes_next, 0, frontier * sizeof(TrellisNode*));
+ for (j = 0; j < frontier && nodes[j]; j++) {
+ // higher j have higher ssd already, so they're likely
+ // to yield a suboptimal next sample too
+ const int range = (j < frontier / 2) ? 1 : 0;
+ const int step = nodes[j]->step;
int nidx;
- if(version == CODEC_ID_ADPCM_MS) {
- const int predictor = ((nodes[j]->sample1 * c->coeff1) + (nodes[j]->sample2 * c->coeff2)) / 64;
- const int div = (sample - predictor) / step;
+ if (version == CODEC_ID_ADPCM_MS) {
+ const int predictor = ((nodes[j]->sample1 * c->coeff1) +
+ (nodes[j]->sample2 * c->coeff2)) / 64;
+ const int div = (sample - predictor) / step;
const int nmin = av_clip(div-range, -8, 6);
const int nmax = av_clip(div+range, -7, 7);
- for(nidx=nmin; nidx<=nmax; nidx++) {
+ for (nidx = nmin; nidx <= nmax; nidx++) {
const int nibble = nidx & 0xf;
- int dec_sample = predictor + nidx * step;
+ int dec_sample = predictor + nidx * step;
#define STORE_NODE(NAME, STEP_INDEX)\
int d;\
uint32_t ssd;\
@@ -334,25 +357,26 @@ static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples,
} else {\
/* Try to replace one of the leaf nodes with the new \
* one, but try a different slot each time. */\
- pos = (frontier >> 1) + (heap_pos & ((frontier >> 1) - 1));\
+ pos = (frontier >> 1) +\
+ (heap_pos & ((frontier >> 1) - 1));\
if (ssd > nodes_next[pos]->ssd)\
goto next_##NAME;\
heap_pos++;\
}\
*h = generation;\
- u = nodes_next[pos];\
- if(!u) {\
- assert(pathn < FREEZE_INTERVAL<<avctx->trellis);\
+ u = nodes_next[pos];\
+ if (!u) {\
+ assert(pathn < FREEZE_INTERVAL << avctx->trellis);\
u = t++;\
nodes_next[pos] = u;\
u->path = pathn++;\
}\
- u->ssd = ssd;\
+ u->ssd = ssd;\
u->step = STEP_INDEX;\
u->sample2 = nodes[j]->sample1;\
u->sample1 = dec_sample;\
paths[u->path].nibble = nibble;\
- paths[u->path].prev = nodes[j]->path;\
+ paths[u->path].prev = nodes[j]->path;\
/* Sift the newly inserted node up in the heap to \
* restore the heap property. */\
while (pos > 0) {\
@@ -363,24 +387,34 @@ static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples,
pos = parent;\
}\
next_##NAME:;
- STORE_NODE(ms, FFMAX(16, (ff_adpcm_AdaptationTable[nibble] * step) >> 8));
+ STORE_NODE(ms, FFMAX(16,
+ (ff_adpcm_AdaptationTable[nibble] * step) >> 8));
}
- } else if((version == CODEC_ID_ADPCM_IMA_WAV)|| (version == CODEC_ID_ADPCM_IMA_QT)|| (version == CODEC_ID_ADPCM_SWF)) {
+ } else if (version == CODEC_ID_ADPCM_IMA_WAV ||
+ version == CODEC_ID_ADPCM_IMA_QT ||
+ version == CODEC_ID_ADPCM_SWF) {
#define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
const int predictor = nodes[j]->sample1;\
const int div = (sample - predictor) * 4 / STEP_TABLE;\
- int nmin = av_clip(div-range, -7, 6);\
- int nmax = av_clip(div+range, -6, 7);\
- if(nmin<=0) nmin--; /* distinguish -0 from +0 */\
- if(nmax<0) nmax--;\
- for(nidx=nmin; nidx<=nmax; nidx++) {\
- const int nibble = nidx<0 ? 7-nidx : nidx;\
- int dec_sample = predictor + (STEP_TABLE * ff_adpcm_yamaha_difflookup[nibble]) / 8;\
+ int nmin = av_clip(div - range, -7, 6);\
+ int nmax = av_clip(div + range, -6, 7);\
+ if (nmin <= 0)\
+ nmin--; /* distinguish -0 from +0 */\
+ if (nmax < 0)\
+ nmax--;\
+ for (nidx = nmin; nidx <= nmax; nidx++) {\
+ const int nibble = nidx < 0 ? 7 - nidx : nidx;\
+ int dec_sample = predictor +\
+ (STEP_TABLE *\
+ ff_adpcm_yamaha_difflookup[nibble]) / 8;\
STORE_NODE(NAME, STEP_INDEX);\
}
- LOOP_NODES(ima, ff_adpcm_step_table[step], av_clip(step + ff_adpcm_index_table[nibble], 0, 88));
+ LOOP_NODES(ima, ff_adpcm_step_table[step],
+ av_clip(step + ff_adpcm_index_table[nibble], 0, 88));
} else { //CODEC_ID_ADPCM_YAMAHA
- LOOP_NODES(yamaha, step, av_clip((step * ff_adpcm_yamaha_indexscale[nibble]) >> 8, 127, 24567));
+ LOOP_NODES(yamaha, step,
+ av_clip((step * ff_adpcm_yamaha_indexscale[nibble]) >> 8,
+ 127, 24567));
#undef LOOP_NODES
#undef STORE_NODE
}
@@ -397,16 +431,16 @@ static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples,
}
// prevent overflow
- if(nodes[0]->ssd > (1<<28)) {
- for(j=1; j<frontier && nodes[j]; j++)
+ if (nodes[0]->ssd > (1 << 28)) {
+ for (j = 1; j < frontier && nodes[j]; j++)
nodes[j]->ssd -= nodes[0]->ssd;
nodes[0]->ssd = 0;
}
// merge old paths to save memory
- if(i == froze + FREEZE_INTERVAL) {
+ if (i == froze + FREEZE_INTERVAL) {
p = &paths[nodes[0]->path];
- for(k=i; k>froze; k--) {
+ for (k = i; k > froze; k--) {
dst[k] = p->nibble;
p = &paths[p->prev];
}
@@ -415,26 +449,26 @@ static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples,
// other nodes might use paths that don't coincide with the frozen one.
// checking which nodes do so is too slow, so just kill them all.
// this also slightly improves quality, but I don't know why.
- memset(nodes+1, 0, (frontier-1)*sizeof(TrellisNode*));
+ memset(nodes + 1, 0, (frontier - 1) * sizeof(TrellisNode*));
}
}
p = &paths[nodes[0]->path];
- for(i=n-1; i>froze; i--) {
+ for (i = n - 1; i > froze; i--) {
dst[i] = p->nibble;
p = &paths[p->prev];
}
- c->predictor = nodes[0]->sample1;
- c->sample1 = nodes[0]->sample1;
- c->sample2 = nodes[0]->sample2;
+ c->predictor = nodes[0]->sample1;
+ c->sample1 = nodes[0]->sample1;
+ c->sample2 = nodes[0]->sample2;
c->step_index = nodes[0]->step;
- c->step = nodes[0]->step;
- c->idelta = nodes[0]->step;
+ c->step = nodes[0]->step;
+ c->idelta = nodes[0]->step;
}
static int adpcm_encode_frame(AVCodecContext *avctx,
- unsigned char *frame, int buf_size, void *data)
+ unsigned char *frame, int buf_size, void *data)
{
int n, i, st;
short *samples;
@@ -444,98 +478,96 @@ static int adpcm_encode_frame(AVCodecContext *avctx,
dst = frame;
samples = (short *)data;
- st= avctx->channels == 2;
-/* n = (BLKSIZE - 4 * avctx->channels) / (2 * 8 * avctx->channels); */
+ st = avctx->channels == 2;
+ /* n = (BLKSIZE - 4 * avctx->channels) / (2 * 8 * avctx->channels); */
switch(avctx->codec->id) {
case CODEC_ID_ADPCM_IMA_WAV:
n = avctx->frame_size / 8;
- c->status[0].prev_sample = (signed short)samples[0]; /* XXX */
-/* c->status[0].step_index = 0; *//* XXX: not sure how to init the state machine */
- bytestream_put_le16(&dst, c->status[0].prev_sample);
- *dst++ = (unsigned char)c->status[0].step_index;
- *dst++ = 0; /* unknown */
+ c->status[0].prev_sample = (signed short)samples[0]; /* XXX */
+ /* c->status[0].step_index = 0;
+ XXX: not sure how to init the state machine */
+ bytestream_put_le16(&dst, c->status[0].prev_sample);
+ *dst++ = (unsigned char)c->status[0].step_index;
+ *dst++ = 0; /* unknown */
+ samples++;
+ if (avctx->channels == 2) {
+ c->status[1].prev_sample = (signed short)samples[0];
+ /* c->status[1].step_index = 0; */
+ bytestream_put_le16(&dst, c->status[1].prev_sample);
+ *dst++ = (unsigned char)c->status[1].step_index;
+ *dst++ = 0;
samples++;
- if (avctx->channels == 2) {
- c->status[1].prev_sample = (signed short)samples[0];
-/* c->status[1].step_index = 0; */
- bytestream_put_le16(&dst, c->status[1].prev_sample);
- *dst++ = (unsigned char)c->status[1].step_index;
- *dst++ = 0;
- samples++;
- }
+ }
- /* stereo: 4 bytes (8 samples) for left, 4 bytes for right, 4 bytes left, ... */
- if(avctx->trellis > 0) {
- FF_ALLOC_OR_GOTO(avctx, buf, 2*n*8, error);
- adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n*8);
- if(avctx->channels == 2)
- adpcm_compress_trellis(avctx, samples+1, buf + n*8, &c->status[1], n*8);
- for(i=0; i<n; i++) {
- *dst++ = buf[8*i+0] | (buf[8*i+1] << 4);
- *dst++ = buf[8*i+2] | (buf[8*i+3] << 4);
- *dst++ = buf[8*i+4] | (buf[8*i+5] << 4);
- *dst++ = buf[8*i+6] | (buf[8*i+7] << 4);
- if (avctx->channels == 2) {
- uint8_t *buf1 = buf + n*8;
- *dst++ = buf1[8*i+0] | (buf1[8*i+1] << 4);
- *dst++ = buf1[8*i+2] | (buf1[8*i+3] << 4);
- *dst++ = buf1[8*i+4] | (buf1[8*i+5] << 4);
- *dst++ = buf1[8*i+6] | (buf1[8*i+7] << 4);
- }
+ /* stereo: 4 bytes (8 samples) for left,
+ 4 bytes for right, 4 bytes left, ... */
+ if (avctx->trellis > 0) {
+ FF_ALLOC_OR_GOTO(avctx, buf, 2 * n * 8, error);
+ adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n * 8);
+ if (avctx->channels == 2)
+ adpcm_compress_trellis(avctx, samples + 1, buf + n * 8,
+ &c->status[1], n * 8);
+ for (i = 0; i < n; i++) {
+ *dst++ = buf[8 * i + 0] | (buf[8 * i + 1] << 4);
+ *dst++ = buf[8 * i + 2] | (buf[8 * i + 3] << 4);
+ *dst++ = buf[8 * i + 4] | (buf[8 * i + 5] << 4);
+ *dst++ = buf[8 * i + 6] | (buf[8 * i + 7] << 4);
+ if (avctx->channels == 2) {
+ uint8_t *buf1 = buf + n * 8;
+ *dst++ = buf1[8 * i + 0] | (buf1[8 * i + 1] << 4);
+ *dst++ = buf1[8 * i + 2] | (buf1[8 * i + 3] << 4);
+ *dst++ = buf1[8 * i + 4] | (buf1[8 * i + 5] << 4);
+ *dst++ = buf1[8 * i + 6] | (buf1[8 * i + 7] << 4);
}
- av_free(buf);
- } else
- for (; n>0; n--) {
- *dst = adpcm_ima_compress_sample(&c->status[0], samples[0]);
- *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels]) << 4;
- dst++;
- *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 2]);
- *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 3]) << 4;
- dst++;
- *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 4]);
- *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 5]) << 4;
- dst++;
- *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 6]);
- *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 7]) << 4;
- dst++;
+ }
+ av_free(buf);
+ } else {
+ for (; n > 0; n--) {
+ *dst = adpcm_ima_compress_sample(&c->status[0], samples[0]);
+ *dst++ |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels ]) << 4;
+ *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 2]);
+ *dst++ |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 3]) << 4;
+ *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 4]);
+ *dst++ |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 5]) << 4;
+ *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 6]);
+ *dst++ |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 7]) << 4;
/* right channel */
if (avctx->channels == 2) {
- *dst = adpcm_ima_compress_sample(&c->status[1], samples[1]);
- *dst |= adpcm_ima_compress_sample(&c->status[1], samples[3]) << 4;
- dst++;
- *dst = adpcm_ima_compress_sample(&c->status[1], samples[5]);
- *dst |= adpcm_ima_compress_sample(&c->status[1], samples[7]) << 4;
- dst++;
- *dst = adpcm_ima_compress_sample(&c->status[1], samples[9]);
- *dst |= adpcm_ima_compress_sample(&c->status[1], samples[11]) << 4;
- dst++;
- *dst = adpcm_ima_compress_sample(&c->status[1], samples[13]);
- *dst |= adpcm_ima_compress_sample(&c->status[1], samples[15]) << 4;
- dst++;
+ *dst = adpcm_ima_compress_sample(&c->status[1], samples[1 ]);
+ *dst++ |= adpcm_ima_compress_sample(&c->status[1], samples[3 ]) << 4;
+ *dst = adpcm_ima_compress_sample(&c->status[1], samples[5 ]);
+ *dst++ |= adpcm_ima_compress_sample(&c->status[1], samples[7 ]) << 4;
+ *dst = adpcm_ima_compress_sample(&c->status[1], samples[9 ]);
+ *dst++ |= adpcm_ima_compress_sample(&c->status[1], samples[11]) << 4;
+ *dst = adpcm_ima_compress_sample(&c->status[1], samples[13]);
+ *dst++ |= adpcm_ima_compress_sample(&c->status[1], samples[15]) << 4;
}
samples += 8 * avctx->channels;
}
+ }
break;
case CODEC_ID_ADPCM_IMA_QT:
{
int ch, i;
PutBitContext pb;
- init_put_bits(&pb, dst, buf_size*8);
+ init_put_bits(&pb, dst, buf_size * 8);
- for(ch=0; ch<avctx->channels; ch++){
+ for (ch = 0; ch < avctx->channels; ch++) {
put_bits(&pb, 9, (c->status[ch].prev_sample + 0x10000) >> 7);
- put_bits(&pb, 7, c->status[ch].step_index);
- if(avctx->trellis > 0) {
+ put_bits(&pb, 7, c->status[ch].step_index);
+ if (avctx->trellis > 0) {
uint8_t buf[64];
adpcm_compress_trellis(avctx, samples+ch, buf, &c->status[ch], 64);
- for(i=0; i<64; i++)
- put_bits(&pb, 4, buf[i^1]);
+ for (i = 0; i < 64; i++)
+ put_bits(&pb, 4, buf[i ^ 1]);
} else {
- for (i=0; i<64; i+=2){
+ for (i = 0; i < 64; i += 2) {
int t1, t2;
- t1 = adpcm_ima_qt_compress_sample(&c->status[ch], samples[avctx->channels*(i+0)+ch]);
- t2 = adpcm_ima_qt_compress_sample(&c->status[ch], samples[avctx->channels*(i+1)+ch]);
+ t1 = adpcm_ima_qt_compress_sample(&c->status[ch],
+ samples[avctx->channels * (i + 0) + ch]);
+ t2 = adpcm_ima_qt_compress_sample(&c->status[ch],
+ samples[avctx->channels * (i + 1) + ch]);
put_bits(&pb, 4, t2);
put_bits(&pb, 4, t1);
}
@@ -543,119 +575,120 @@ static int adpcm_encode_frame(AVCodecContext *avctx,
}
flush_put_bits(&pb);
- dst += put_bits_count(&pb)>>3;
+ dst += put_bits_count(&pb) >> 3;
break;
}
case CODEC_ID_ADPCM_SWF:
{
int i;
PutBitContext pb;
- init_put_bits(&pb, dst, buf_size*8);
+ init_put_bits(&pb, dst, buf_size * 8);
- n = avctx->frame_size-1;
+ n = avctx->frame_size - 1;
- //Store AdpcmCodeSize
- put_bits(&pb, 2, 2); //Set 4bits flash adpcm format
+ // store AdpcmCodeSize
+ put_bits(&pb, 2, 2); // set 4-bit flash adpcm format
- //Init the encoder state
- for(i=0; i<avctx->channels; i++){
- c->status[i].step_index = av_clip(c->status[i].step_index, 0, 63); // clip step so it fits 6 bits
+ // init the encoder state
+ for (i = 0; i < avctx->channels; i++) {
+ // clip step so it fits 6 bits
+ c->status[i].step_index = av_clip(c->status[i].step_index, 0, 63);
put_sbits(&pb, 16, samples[i]);
put_bits(&pb, 6, c->status[i].step_index);
c->status[i].prev_sample = (signed short)samples[i];
}
- if(avctx->trellis > 0) {
- FF_ALLOC_OR_GOTO(avctx, buf, 2*n, error);
- adpcm_compress_trellis(avctx, samples+2, buf, &c->status[0], n);
+ if (avctx->trellis > 0) {
+ FF_ALLOC_OR_GOTO(avctx, buf, 2 * n, error);
+ adpcm_compress_trellis(avctx, samples + 2, buf, &c->status[0], n);
if (avctx->channels == 2)
- adpcm_compress_trellis(avctx, samples+3, buf+n, &c->status[1], n);
- for(i=0; i<n; i++) {
+ adpcm_compress_trellis(avctx, samples + 3, buf + n,
+ &c->status[1], n);
+ for (i = 0; i < n; i++) {
put_bits(&pb, 4, buf[i]);
if (avctx->channels == 2)
- put_bits(&pb, 4, buf[n+i]);
+ put_bits(&pb, 4, buf[n + i]);
}
av_free(buf);
} else {
- for (i=1; i<avctx->frame_size; i++) {
- put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels*i]));
+ for (i = 1; i < avctx->frame_size; i++) {
+ put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0],
+ samples[avctx->channels * i]));
if (avctx->channels == 2)
- put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1], samples[2*i+1]));
+ put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1],
+ samples[2 * i + 1]));
}
}
flush_put_bits(&pb);
- dst += put_bits_count(&pb)>>3;
+ dst += put_bits_count(&pb) >> 3;
break;
}
case CODEC_ID_ADPCM_MS:
- for(i=0; i<avctx->channels; i++){
- int predictor=0;
-
+ for (i = 0; i < avctx->channels; i++) {
+ int predictor = 0;
*dst++ = predictor;
c->status[i].coeff1 = ff_adpcm_AdaptCoeff1[predictor];
c->status[i].coeff2 = ff_adpcm_AdaptCoeff2[predictor];
}
- for(i=0; i<avctx->channels; i++){
+ for (i = 0; i < avctx->channels; i++) {
if (c->status[i].idelta < 16)
c->status[i].idelta = 16;
-
bytestream_put_le16(&dst, c->status[i].idelta);
}
- for(i=0; i<avctx->channels; i++){
+ for (i = 0; i < avctx->channels; i++)
c->status[i].sample2= *samples++;
- }
- for(i=0; i<avctx->channels; i++){
- c->status[i].sample1= *samples++;
-
+ for (i = 0; i < avctx->channels; i++) {
+ c->status[i].sample1 = *samples++;
bytestream_put_le16(&dst, c->status[i].sample1);
}
- for(i=0; i<avctx->channels; i++)
+ for (i = 0; i < avctx->channels; i++)
bytestream_put_le16(&dst, c->status[i].sample2);
- if(avctx->trellis > 0) {
- int n = avctx->block_align - 7*avctx->channels;
- FF_ALLOC_OR_GOTO(avctx, buf, 2*n, error);
- if(avctx->channels == 1) {
+ if (avctx->trellis > 0) {
+ int n = avctx->block_align - 7 * avctx->channels;
+ FF_ALLOC_OR_GOTO(avctx, buf, 2 * n, error);
+ if (avctx->channels == 1) {
adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
- for(i=0; i<n; i+=2)
- *dst++ = (buf[i] << 4) | buf[i+1];
+ for (i = 0; i < n; i += 2)
+ *dst++ = (buf[i] << 4) | buf[i + 1];
} else {
- adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
- adpcm_compress_trellis(avctx, samples+1, buf+n, &c->status[1], n);
- for(i=0; i<n; i++)
- *dst++ = (buf[i] << 4) | buf[n+i];
+ adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
+ adpcm_compress_trellis(avctx, samples + 1, buf + n, &c->status[1], n);
+ for (i = 0; i < n; i++)
+ *dst++ = (buf[i] << 4) | buf[n + i];
}
av_free(buf);
- } else
- for(i=7*avctx->channels; i<avctx->block_align; i++) {
- int nibble;
- nibble = adpcm_ms_compress_sample(&c->status[ 0], *samples++)<<4;
- nibble|= adpcm_ms_compress_sample(&c->status[st], *samples++);
- *dst++ = nibble;
+ } else {
+ for (i = 7 * avctx->channels; i < avctx->block_align; i++) {
+ int nibble;
+ nibble = adpcm_ms_compress_sample(&c->status[ 0], *samples++) << 4;
+ nibble |= adpcm_ms_compress_sample(&c->status[st], *samples++);
+ *dst++ = nibble;
+ }
}
break;
case CODEC_ID_ADPCM_YAMAHA:
n = avctx->frame_size / 2;
- if(avctx->trellis > 0) {
- FF_ALLOC_OR_GOTO(avctx, buf, 2*n*2, error);
+ if (avctx->trellis > 0) {
+ FF_ALLOC_OR_GOTO(avctx, buf, 2 * n * 2, error);
n *= 2;
- if(avctx->channels == 1) {
+ if (avctx->channels == 1) {
adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
- for(i=0; i<n; i+=2)
- *dst++ = buf[i] | (buf[i+1] << 4);
+ for (i = 0; i < n; i += 2)
+ *dst++ = buf[i] | (buf[i + 1] << 4);
} else {
- adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
- adpcm_compress_trellis(avctx, samples+1, buf+n, &c->status[1], n);
- for(i=0; i<n; i++)
- *dst++ = buf[i] | (buf[n+i] << 4);
+ adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
+ adpcm_compress_trellis(avctx, samples + 1, buf + n, &c->status[1], n);
+ for (i = 0; i < n; i++)
+ *dst++ = buf[i] | (buf[n + i] << 4);
}
av_free(buf);
} else
- for (n *= avctx->channels; n>0; n--) {
+ for (n *= avctx->channels; n > 0; n--) {
int nibble;
nibble = adpcm_yamaha_compress_sample(&c->status[ 0], *samples++);
nibble |= adpcm_yamaha_compress_sample(&c->status[st], *samples++) << 4;
- *dst++ = nibble;
+ *dst++ = nibble;
}
break;
default:
@@ -675,12 +708,13 @@ AVCodec ff_ ## name_ ## _encoder = { \
.init = adpcm_encode_init, \
.encode = adpcm_encode_frame, \
.close = adpcm_encode_close, \
- .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE}, \
+ .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16, \
+ AV_SAMPLE_FMT_NONE}, \
.long_name = NULL_IF_CONFIG_SMALL(long_name_), \
}
-ADPCM_ENCODER(CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt, "ADPCM IMA QuickTime");
+ADPCM_ENCODER(CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt, "ADPCM IMA QuickTime");
ADPCM_ENCODER(CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, "ADPCM IMA WAV");
-ADPCM_ENCODER(CODEC_ID_ADPCM_MS, adpcm_ms, "ADPCM Microsoft");
-ADPCM_ENCODER(CODEC_ID_ADPCM_SWF, adpcm_swf, "ADPCM Shockwave Flash");
-ADPCM_ENCODER(CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha, "ADPCM Yamaha");
+ADPCM_ENCODER(CODEC_ID_ADPCM_MS, adpcm_ms, "ADPCM Microsoft");
+ADPCM_ENCODER(CODEC_ID_ADPCM_SWF, adpcm_swf, "ADPCM Shockwave Flash");
+ADPCM_ENCODER(CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha, "ADPCM Yamaha");
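The IMA WAV hunk above packs two 4-bit ADPCM codes into each output byte: the code for the earlier sample goes into the low nibble, and the next code is OR'ed in as the high nibble before the pointer advances. A minimal sketch of that packing pattern, assuming a hypothetical compress_sample() stand-in rather than the real adpcm_ima_compress_sample():

    /* Two 4-bit codes per byte, low nibble first; compress_sample() is a
     * placeholder quantiser, not FFmpeg's IMA encoder. */
    #include <stddef.h>
    #include <stdint.h>

    static uint8_t compress_sample(int16_t s)
    {
        return (uint8_t)((s >> 12) & 0x0F); /* illustrative 4-bit code */
    }

    static size_t pack_nibbles(const int16_t *samples, size_t n, uint8_t *dst)
    {
        size_t i, out = 0;

        for (i = 0; i + 1 < n; i += 2) {
            dst[out]    = compress_sample(samples[i]);          /* low nibble  */
            dst[out++] |= compress_sample(samples[i + 1]) << 4; /* high nibble */
        }
        return out; /* bytes written */
    }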
diff --git a/libavcodec/bmp.c b/libavcodec/bmp.c
index 81ecf6e50b..5971145401 100644
--- a/libavcodec/bmp.c
+++ b/libavcodec/bmp.c
@@ -263,9 +263,9 @@ static int bmp_decode_frame(AVCodecContext *avctx,
}else{
switch(depth){
case 1:
- for(i = 0; i < avctx->height; i++){
+ for (i = 0; i < avctx->height; i++) {
int j;
- for(j = 0; j < n; j++){
+ for (j = 0; j < n; j++) {
ptr[j*8+0] = buf[j] >> 7;
ptr[j*8+1] = (buf[j] >> 6) & 1;
ptr[j*8+2] = (buf[j] >> 5) & 1;
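The 1-bit branch in this bmp.c hunk expands each source byte into eight palette indices, most significant bit first. A standalone sketch of the same expansion, with illustrative names rather than the decoder's own variables:

    #include <stdint.h>

    /* Expand one row of 1 bpp data: every input byte becomes eight
     * palette indices, MSB first. */
    static void expand_1bpp_row(const uint8_t *buf, uint8_t *ptr, int nbytes)
    {
        int j, b;

        for (j = 0; j < nbytes; j++)
            for (b = 0; b < 8; b++)
                ptr[j * 8 + b] = (buf[j] >> (7 - b)) & 1;
    }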
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index 1d3bb0cdf2..d72e6d23de 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -66,44 +66,61 @@ static void dct_unquantize_h263_inter_c(MpegEncContext *s,
//#define DEBUG
-static const uint8_t ff_default_chroma_qscale_table[32]={
-// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+static const uint8_t ff_default_chroma_qscale_table[32] = {
+// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
};
-const uint8_t ff_mpeg1_dc_scale_table[128]={
-// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
- 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
- 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
- 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
- 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+const uint8_t ff_mpeg1_dc_scale_table[128] = {
+// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};
-static const uint8_t mpeg2_dc_scale_table1[128]={
-// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
- 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
- 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
- 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
- 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+static const uint8_t mpeg2_dc_scale_table1[128] = {
+// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};
-static const uint8_t mpeg2_dc_scale_table2[128]={
-// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+static const uint8_t mpeg2_dc_scale_table2[128] = {
+// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};
-static const uint8_t mpeg2_dc_scale_table3[128]={
-// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+static const uint8_t mpeg2_dc_scale_table3[128] = {
+// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};
-const uint8_t * const ff_mpeg2_dc_scale_table[4]={
+const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
ff_mpeg1_dc_scale_table,
mpeg2_dc_scale_table1,
mpeg2_dc_scale_table2,
@@ -123,34 +140,37 @@ const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
PIX_FMT_NONE
};
-const uint8_t *avpriv_mpv_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
+const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
+ const uint8_t *end,
+ uint32_t * restrict state)
+{
int i;
- assert(p<=end);
- if(p>=end)
+ assert(p <= end);
+ if (p >= end)
return end;
- for(i=0; i<3; i++){
- uint32_t tmp= *state << 8;
- *state= tmp + *(p++);
- if(tmp == 0x100 || p==end)
+ for (i = 0; i < 3; i++) {
+ uint32_t tmp = *state << 8;
+ *state = tmp + *(p++);
+ if (tmp == 0x100 || p == end)
return p;
}
- while(p<end){
- if (p[-1] > 1 ) p+= 3;
- else if(p[-2] ) p+= 2;
- else if(p[-3]|(p[-1]-1)) p++;
- else{
+ while (p < end) {
+ if (p[-1] > 1 ) p += 3;
+ else if (p[-2] ) p += 2;
+ else if (p[-3]|(p[-1]-1)) p++;
+ else {
p++;
break;
}
}
- p= FFMIN(p, end)-4;
- *state= AV_RB32(p);
+ p = FFMIN(p, end) - 4;
+ *state = AV_RB32(p);
- return p+4;
+ return p + 4;
}
/* init common dct for both encoder and decoder */
@@ -163,11 +183,11 @@ av_cold int ff_dct_common_init(MpegEncContext *s)
s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
- if(s->flags & CODEC_FLAG_BITEXACT)
+ if (s->flags & CODEC_FLAG_BITEXACT)
s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
-#if HAVE_MMX
+#if HAVE_MMX
MPV_common_init_mmx(s);
#elif ARCH_ALPHA
MPV_common_init_axp(s);
@@ -184,12 +204,12 @@ av_cold int ff_dct_common_init(MpegEncContext *s)
#endif
/* load & permutate scantables
- note: only wmv uses different ones
- */
- if(s->alternate_scan){
+ * note: only wmv uses different ones
+ */
+ if (s->alternate_scan) {
ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
- }else{
+ } else {
ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
}
@@ -199,9 +219,10 @@ av_cold int ff_dct_common_init(MpegEncContext *s)
return 0;
}
-void ff_copy_picture(Picture *dst, Picture *src){
+void ff_copy_picture(Picture *dst, Picture *src)
+{
*dst = *src;
- dst->f.type= FF_BUFFER_TYPE_COPY;
+ dst->f.type = FF_BUFFER_TYPE_COPY;
}
/**
@@ -210,11 +231,12 @@ void ff_copy_picture(Picture *dst, Picture *src){
static void free_frame_buffer(MpegEncContext *s, Picture *pic)
{
/* Windows Media Image codecs allocate internal buffers with different
- dimensions; ignore user defined callbacks for these */
+ * dimensions; ignore user defined callbacks for these
+ */
if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
- ff_thread_release_buffer(s->avctx, (AVFrame*)pic);
+ ff_thread_release_buffer(s->avctx, (AVFrame *) pic);
else
- avcodec_default_release_buffer(s->avctx, (AVFrame*)pic);
+ avcodec_default_release_buffer(s->avctx, (AVFrame *) pic);
av_freep(&pic->f.hwaccel_picture_private);
}
@@ -237,9 +259,9 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
}
if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
- r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic);
+ r = ff_thread_get_buffer(s->avctx, (AVFrame *) pic);
else
- r = avcodec_default_get_buffer(s->avctx, (AVFrame*)pic);
+ r = avcodec_default_get_buffer(s->avctx, (AVFrame *) pic);
if (r < 0 || !pic->f.age || !pic->f.type || !pic->f.data[0]) {
av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n",
@@ -248,14 +270,17 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
return -1;
}
- if (s->linesize && (s->linesize != pic->f.linesize[0] || s->uvlinesize != pic->f.linesize[1])) {
- av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
+ if (s->linesize && (s->linesize != pic->f.linesize[0] ||
+ s->uvlinesize != pic->f.linesize[1])) {
+ av_log(s->avctx, AV_LOG_ERROR,
+ "get_buffer() failed (stride changed)\n");
free_frame_buffer(s, pic);
return -1;
}
if (pic->f.linesize[1] != pic->f.linesize[2]) {
- av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
+ av_log(s->avctx, AV_LOG_ERROR,
+ "get_buffer() failed (uv stride mismatch)\n");
free_frame_buffer(s, pic);
return -1;
}
@@ -265,21 +290,25 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/**
* allocates a Picture
- * The pixels are allocated/set by calling get_buffer() if shared=0
+ * The pixels are allocated/set by calling get_buffer() if shared = 0
*/
-int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
- const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
- const int mb_array_size= s->mb_stride*s->mb_height;
- const int b8_array_size= s->b8_stride*s->mb_height*2;
- const int b4_array_size= s->b4_stride*s->mb_height*4;
+int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
+{
+ const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
+
+ // the + 1 is needed so memset(,,stride*height) does not sig11
+
+ const int mb_array_size = s->mb_stride * s->mb_height;
+ const int b8_array_size = s->b8_stride * s->mb_height * 2;
+ const int b4_array_size = s->b4_stride * s->mb_height * 4;
int i;
- int r= -1;
+ int r = -1;
- if(shared){
+ if (shared) {
assert(pic->f.data[0]);
assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
pic->f.type = FF_BUFFER_TYPE_SHARED;
- }else{
+ } else {
assert(!pic->f.data[0]);
if (alloc_frame_buffer(s, pic) < 0)
@@ -291,49 +320,69 @@ int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
if (pic->f.qscale_table == NULL) {
if (s->encoding) {
- FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var , mb_array_size * sizeof(int16_t) , fail)
- FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t) , fail)
- FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean , mb_array_size * sizeof(int8_t ) , fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
+ mb_array_size * sizeof(int16_t), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
+ mb_array_size * sizeof(int16_t), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
+ mb_array_size * sizeof(int8_t ), fail)
}
- FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table, mb_array_size * sizeof(uint8_t) + 2, fail) //the +2 is for the slice end check
- FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base , (big_mb_num + s->mb_stride) * sizeof(uint8_t) , fail)
- FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
- pic->f.mb_type = pic->mb_type_base + 2*s->mb_stride + 1;
- pic->f.qscale_table = pic->qscale_table_base + 2*s->mb_stride + 1;
- if(s->out_format == FMT_H264){
- for(i=0; i<2; i++){
- FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4) * sizeof(int16_t), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
+ mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
+ FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
+ (big_mb_num + s->mb_stride) * sizeof(uint8_t),
+ fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
+ (big_mb_num + s->mb_stride) * sizeof(uint32_t),
+ fail)
+ pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
+ pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
+ if (s->out_format == FMT_H264) {
+ for (i = 0; i < 2; i++) {
+ FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
+ 2 * (b4_array_size + 4) * sizeof(int16_t),
+ fail)
pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
- FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
+ 4 * mb_array_size * sizeof(uint8_t), fail)
}
pic->f.motion_subsample_log2 = 2;
- }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
- for(i=0; i<2; i++){
- FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
+ } else if (s->out_format == FMT_H263 || s->encoding ||
+ (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
+ for (i = 0; i < 2; i++) {
+ FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
+ 2 * (b8_array_size + 4) * sizeof(int16_t),
+ fail)
pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
- FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
+ 4 * mb_array_size * sizeof(uint8_t), fail)
}
pic->f.motion_subsample_log2 = 3;
}
- if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
- FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff, 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
+        if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
+ FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
+ 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
}
pic->f.qstride = s->mb_stride;
- FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan , 1 * sizeof(AVPanScan), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
+ 1 * sizeof(AVPanScan), fail)
}
/* It might be nicer if the application would keep track of these
* but it would require an API change. */
- memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
- s->prev_pict_types[0]= s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
- if (pic->f.age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->f.age] == AV_PICTURE_TYPE_B)
- pic->f.age = INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
+ memmove(s->prev_pict_types + 1, s->prev_pict_types,
+ PREV_PICT_TYPES_BUFFER_SIZE-1);
+ s->prev_pict_types[0] = s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
+ if (pic->f.age < PREV_PICT_TYPES_BUFFER_SIZE &&
+ s->prev_pict_types[pic->f.age] == AV_PICTURE_TYPE_B)
+ pic->f.age = INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2
+ // and it is a bit tricky to skip them anyway.
pic->owner2 = s;
return 0;
-fail: //for the FF_ALLOCZ_OR_GOTO macro
- if(r>=0)
+fail: // for the FF_ALLOCZ_OR_GOTO macro
+ if (r >= 0)
free_frame_buffer(s, pic);
return -1;
}
@@ -341,7 +390,8 @@ fail: //for the FF_ALLOCZ_OR_GOTO macro
/**
* deallocates a picture
*/
-static void free_picture(MpegEncContext *s, Picture *pic){
+static void free_picture(MpegEncContext *s, Picture *pic)
+{
int i;
if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
@@ -357,13 +407,13 @@ static void free_picture(MpegEncContext *s, Picture *pic){
av_freep(&pic->f.dct_coeff);
av_freep(&pic->f.pan_scan);
pic->f.mb_type = NULL;
- for(i=0; i<2; i++){
+ for (i = 0; i < 2; i++) {
av_freep(&pic->motion_val_base[i]);
av_freep(&pic->f.ref_index[i]);
}
if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
- for(i=0; i<4; i++){
+ for (i = 0; i < 4; i++) {
pic->f.base[i] =
pic->f.data[i] = NULL;
}
@@ -371,38 +421,47 @@ static void free_picture(MpegEncContext *s, Picture *pic){
}
}
-static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
+static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
+{
int y_size = s->b8_stride * (2 * s->mb_height + 1);
int c_size = s->mb_stride * (s->mb_height + 1);
int yc_size = y_size + 2 * c_size;
int i;
- // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
- FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance
-
- //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
- FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, (s->width+64)*4*16*2*sizeof(uint8_t), fail)
- s->me.temp= s->me.scratchpad;
- s->rd_scratchpad= s->me.scratchpad;
- s->b_scratchpad= s->me.scratchpad;
- s->obmc_scratchpad= s->me.scratchpad + 16;
+ // edge emu needs blocksize + filter length - 1
+ // (= 17x17 for halfpel / 21x21 for h264)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
+ (s->width + 64) * 2 * 21 * 2, fail); // (width + edge + align)*interlaced*MBsize*tolerance
+
+ // FIXME should be linesize instead of s->width * 2
+ // but that is not known before get_buffer()
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
+ (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
+ s->me.temp = s->me.scratchpad;
+ s->rd_scratchpad = s->me.scratchpad;
+ s->b_scratchpad = s->me.scratchpad;
+ s->obmc_scratchpad = s->me.scratchpad + 16;
if (s->encoding) {
- FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map , ME_MAP_SIZE*sizeof(uint32_t), fail)
- FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
- if(s->avctx->noise_reduction){
- FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
+ ME_MAP_SIZE * sizeof(uint32_t), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
+ ME_MAP_SIZE * sizeof(uint32_t), fail)
+ if (s->avctx->noise_reduction) {
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
+ 2 * 64 * sizeof(int), fail)
}
}
- FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
- s->block= s->blocks[0];
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
+ s->block = s->blocks[0];
- for(i=0;i<12;i++){
+ for (i = 0; i < 12; i++) {
s->pblocks[i] = &s->block[i];
}
if (s->out_format == FMT_H263) {
/* ac values */
- FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
+ yc_size * sizeof(int16_t) * 16, fail);
s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
s->ac_val[2] = s->ac_val[1] + c_size;
@@ -410,29 +469,32 @@ static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
return 0;
fail:
- return -1; //free() through MPV_common_end()
+ return -1; // free() through MPV_common_end()
}
-static void free_duplicate_context(MpegEncContext *s){
- if(s==NULL) return;
+static void free_duplicate_context(MpegEncContext *s)
+{
+ if (s == NULL)
+ return;
av_freep(&s->edge_emu_buffer);
av_freep(&s->me.scratchpad);
- s->me.temp=
- s->rd_scratchpad=
- s->b_scratchpad=
- s->obmc_scratchpad= NULL;
+ s->me.temp =
+ s->rd_scratchpad =
+ s->b_scratchpad =
+ s->obmc_scratchpad = NULL;
av_freep(&s->dct_error_sum);
av_freep(&s->me.map);
av_freep(&s->me.score_map);
av_freep(&s->blocks);
av_freep(&s->ac_val_base);
- s->block= NULL;
+ s->block = NULL;
}
-static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
-#define COPY(a) bak->a= src->a
+static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
+{
+#define COPY(a) bak->a = src->a
COPY(edge_emu_buffer);
COPY(me.scratchpad);
COPY(me.temp);
@@ -457,28 +519,33 @@ static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
#undef COPY
}
-void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
+void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
+{
MpegEncContext bak;
int i;
- //FIXME copy only needed parts
-//START_TIMER
+ // FIXME copy only needed parts
+ // START_TIMER
backup_duplicate_context(&bak, dst);
memcpy(dst, src, sizeof(MpegEncContext));
backup_duplicate_context(dst, &bak);
- for(i=0;i<12;i++){
+ for (i = 0; i < 12; i++) {
dst->pblocks[i] = &dst->block[i];
}
-//STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
+ // STOP_TIMER("update_duplicate_context")
+ // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
}
-int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
+int ff_mpeg_update_thread_context(AVCodecContext *dst,
+ const AVCodecContext *src)
{
MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
- if(dst == src || !s1->context_initialized) return 0;
+ if (dst == src || !s1->context_initialized)
+ return 0;
- //FIXME can parameters change on I-frames? in that case dst may need a reinit
- if(!s->context_initialized){
+ // FIXME can parameters change on I-frames?
+ // in that case dst may need a reinit
+ if (!s->context_initialized) {
memcpy(s, s1, sizeof(MpegEncContext));
s->avctx = dst;
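The largest reformatted function above, avpriv_mpv_find_start_code(), scans for MPEG 00 00 01 xx start codes while keeping the last four bytes in *state, so a code split across two buffers is still found. A simplified byte-at-a-time sketch of that contract; it omits the skip-ahead optimisation and is not a drop-in replacement:

    #include <stdint.h>

    /* Roll each byte into a 32-bit state and return just past the first
     * 00 00 01 xx sequence; *state then holds the full start code. */
    static const uint8_t *find_start_code_simple(const uint8_t *p,
                                                 const uint8_t *end,
                                                 uint32_t *state)
    {
        while (p < end) {
            *state = (*state << 8) | *p++;
            if ((*state & 0xFFFFFF00) == 0x00000100)
                return p;
        }
        return end;
    }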
diff --git a/libavcodec/shorten.c b/libavcodec/shorten.c
index 26ce6fe885..f0a173cc7e 100644
--- a/libavcodec/shorten.c
+++ b/libavcodec/shorten.c
@@ -331,7 +331,6 @@ static int read_header(ShortenContext *s)
s->lpcqoffset = 0;
s->blocksize = DEFAULT_BLOCK_SIZE;
- s->channels = 1;
s->nmean = -1;
s->version = get_bits(&s->gb, 8);
s->internal_ftype = get_uint(s, TYPESIZE);
diff --git a/libavcodec/wavpack.c b/libavcodec/wavpack.c
index 1ccaec665d..1a8c25943f 100644
--- a/libavcodec/wavpack.c
+++ b/libavcodec/wavpack.c
@@ -110,7 +110,7 @@ typedef struct WavpackFrameContext {
int extra_bits;
int and, or, shift;
int post_shift;
- int hybrid, hybrid_bitrate;
+ int hybrid, hybrid_bitrate, hybrid_maxclip;
int float_flag;
int float_shift;
int float_max_exp;
@@ -403,8 +403,14 @@ static inline int wv_get_value_integer(WavpackFrameContext *s, uint32_t *crc, in
*crc = *crc * 9 + (S&0xffff) * 3 + ((unsigned)S>>16);
}
}
+
bit = (S & s->and) | s->or;
- return (((S + bit) << s->shift) - bit) << s->post_shift;
+ bit = (((S + bit) << s->shift) - bit);
+
+ if(s->hybrid)
+ bit = av_clip(bit, -s->hybrid_maxclip, s->hybrid_maxclip - 1);
+
+ return bit << s->post_shift;
}
static float wv_get_value_float(WavpackFrameContext *s, uint32_t *crc, int S)
@@ -792,6 +798,7 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
s->joint = s->frame_flags & WV_JOINT_STEREO;
s->hybrid = s->frame_flags & WV_HYBRID_MODE;
s->hybrid_bitrate = s->frame_flags & WV_HYBRID_BITRATE;
+ s->hybrid_maxclip = 1 << ((((s->frame_flags & 0x03) + 1) << 3) - 1);
s->post_shift = 8 * (bpp-1-(s->frame_flags&0x03)) + ((s->frame_flags >> 13) & 0x1f);
s->CRC = AV_RL32(buf); buf += 4;
if(wc->mkv_mode)
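Unlike most of this merge, the wavpack.c hunks are functional: in hybrid (lossy) mode the reconstructed value is now clipped to the signed range of the stream's sample size before the post-shift is applied. A sketch of that clamp with illustrative names; bits 0..1 of the frame flags give bytes-per-sample minus one, as in the hybrid_maxclip expression above:

    #include <stdint.h>

    /* Clip a decoded hybrid-mode sample to the signed range implied by the
     * sample size, then apply the post-shift, mirroring the logic added to
     * wv_get_value_integer(). */
    static int clip_hybrid_sample(int value, uint32_t frame_flags, int post_shift)
    {
        int bytes_per_sample = (frame_flags & 0x03) + 1;
        int maxclip          = 1 << (bytes_per_sample * 8 - 1); /* 32768 for 16-bit */

        if (value < -maxclip)
            value = -maxclip;
        else if (value > maxclip - 1)
            value = maxclip - 1;

        return value << post_shift;
    }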
diff --git a/libavcodec/xan.c b/libavcodec/xan.c
index edd4fe8197..c469594e34 100644
--- a/libavcodec/xan.c
+++ b/libavcodec/xan.c
@@ -113,13 +113,13 @@ static int xan_huffman_decode(unsigned char *dest, int dest_len,
init_get_bits(&gb, ptr, ptr_len * 8);
- while ( val != 0x16 ) {
+ while (val != 0x16) {
unsigned idx = val - 0x17 + get_bits1(&gb) * byte;
if (idx >= 2 * byte)
return -1;
val = src[idx];
- if ( val < 0x16 ) {
+ if (val < 0x16) {
if (dest >= dest_end)
return 0;
*dest++ = val;
@@ -149,27 +149,23 @@ static void xan_unpack(unsigned char *dest, int dest_len,
if (opcode < 0xe0) {
int size2, back;
- if ( (opcode & 0x80) == 0 ) {
-
+ if ((opcode & 0x80) == 0) {
size = opcode & 3;
back = ((opcode & 0x60) << 3) + *src++ + 1;
size2 = ((opcode & 0x1c) >> 2) + 3;
-
- } else if ( (opcode & 0x40) == 0 ) {
-
+ } else if ((opcode & 0x40) == 0) {
size = *src >> 6;
back = (bytestream_get_be16(&src) & 0x3fff) + 1;
size2 = (opcode & 0x3f) + 4;
-
} else {
-
size = opcode & 3;
back = ((opcode & 0x10) << 12) + bytestream_get_be16(&src) + 1;
size2 = ((opcode & 0x0c) << 6) + *src++ + 5;
}
+
if (dest_end - dest < size + size2 ||
dest + size - dest_org < back ||
src_end - src < size)
@@ -205,7 +201,7 @@ static inline void xan_wc3_output_pixel_run(XanContext *s,
line_inc = stride - width;
index = y * stride + x;
current_x = x;
- while(pixel_count && (index < s->frame_size)) {
+ while (pixel_count && index < s->frame_size) {
int count = FFMIN(pixel_count, width - current_x);
memcpy(palette_plane + index, pixel_buffer, count);
pixel_count -= count;
@@ -220,8 +216,9 @@ static inline void xan_wc3_output_pixel_run(XanContext *s,
}
}
-static inline void xan_wc3_copy_pixel_run(XanContext *s,
- int x, int y, int pixel_count, int motion_x, int motion_y)
+static inline void xan_wc3_copy_pixel_run(XanContext *s, int x, int y,
+ int pixel_count, int motion_x,
+ int motion_y)
{
int stride;
int line_inc;
@@ -230,8 +227,8 @@ static inline void xan_wc3_copy_pixel_run(XanContext *s,
int width = s->avctx->width;
unsigned char *palette_plane, *prev_palette_plane;
- if ( y + motion_y < 0 || y + motion_y >= s->avctx->height ||
- x + motion_x < 0 || x + motion_x >= s->avctx->width)
+ if (y + motion_y < 0 || y + motion_y >= s->avctx->height ||
+ x + motion_x < 0 || x + motion_x >= s->avctx->width)
return;
palette_plane = s->current_frame.data[0];
@@ -244,12 +241,14 @@ static inline void xan_wc3_copy_pixel_run(XanContext *s,
curframe_x = x;
prevframe_index = (y + motion_y) * stride + x + motion_x;
prevframe_x = x + motion_x;
- while(pixel_count &&
- curframe_index < s->frame_size &&
- prevframe_index < s->frame_size) {
- int count = FFMIN3(pixel_count, width - curframe_x, width - prevframe_x);
-
- memcpy(palette_plane + curframe_index, prev_palette_plane + prevframe_index, count);
+ while (pixel_count &&
+ curframe_index < s->frame_size &&
+ prevframe_index < s->frame_size) {
+ int count = FFMIN3(pixel_count, width - curframe_x,
+ width - prevframe_x);
+
+ memcpy(palette_plane + curframe_index,
+ prev_palette_plane + prevframe_index, count);
pixel_count -= count;
curframe_index += count;
prevframe_index += count;
@@ -270,7 +269,7 @@ static inline void xan_wc3_copy_pixel_run(XanContext *s,
static int xan_wc3_decode_frame(XanContext *s) {
- int width = s->avctx->width;
+ int width = s->avctx->width;
int height = s->avctx->height;
int total_pixels = width * height;
unsigned char opcode;
@@ -289,7 +288,8 @@ static int xan_wc3_decode_frame(XanContext *s) {
const unsigned char *size_segment;
const unsigned char *vector_segment;
const unsigned char *imagedata_segment;
- int huffman_offset, size_offset, vector_offset, imagedata_offset, imagedata_size;
+ int huffman_offset, size_offset, vector_offset, imagedata_offset,
+ imagedata_size;
if (s->size < 8)
return AVERROR_INVALIDDATA;
@@ -374,6 +374,7 @@ static int xan_wc3_decode_frame(XanContext *s) {
size_segment += 3;
break;
}
+
if (size > total_pixels)
break;
@@ -518,7 +519,8 @@ static int xan_decode_frame(AVCodecContext *avctx,
return AVERROR_INVALIDDATA;
if (s->palettes_count >= PALETTES_MAX)
return AVERROR_INVALIDDATA;
- tmpptr = av_realloc(s->palettes, (s->palettes_count + 1) * AVPALETTE_SIZE);
+ tmpptr = av_realloc(s->palettes,
+ (s->palettes_count + 1) * AVPALETTE_SIZE);
if (!tmpptr)
return AVERROR(ENOMEM);
s->palettes = tmpptr;
@@ -569,7 +571,8 @@ static int xan_decode_frame(AVCodecContext *avctx,
if (!s->frame_size)
s->frame_size = s->current_frame.linesize[0] * s->avctx->height;
- memcpy(s->current_frame.data[1], s->palettes + s->cur_palette * AVPALETTE_COUNT, AVPALETTE_SIZE);
+ memcpy(s->current_frame.data[1],
+ s->palettes + s->cur_palette * AVPALETTE_COUNT, AVPALETTE_SIZE);
s->buf = buf;
s->size = buf_size;
@@ -617,5 +620,5 @@ AVCodec ff_xan_wc3_decoder = {
.close = xan_decode_end,
.decode = xan_decode_frame,
.capabilities = CODEC_CAP_DR1,
- .long_name = NULL_IF_CONFIG_SMALL("Wing Commander III / Xan"),
+ .long_name = NULL_IF_CONFIG_SMALL("Wing Commander III / Xan"),
};
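The xan.c cleanup mostly re-spaces xan_unpack(), whose opcodes below 0xe0 encode a literal count, a back-reference distance and a match length in one of three layouts. A field-extraction sketch of those three forms, with invented struct and function names and without the copy loops or bounds checks of the real decoder:

    #include <stdint.h>

    typedef struct {
        int literals; /* bytes copied verbatim ("size" above)   */
        int distance; /* back-reference distance ("back" above) */
        int match;    /* back-reference length ("size2" above)  */
    } XanOp;

    /* Parse one opcode below 0xe0 and return the advanced source pointer. */
    static const uint8_t *parse_xan_opcode(const uint8_t *src, XanOp *op)
    {
        int opcode = *src++;

        if ((opcode & 0x80) == 0) {            /* short form */
            op->literals = opcode & 3;
            op->distance = ((opcode & 0x60) << 3) + *src++ + 1;
            op->match    = ((opcode & 0x1c) >> 2) + 3;
        } else if ((opcode & 0x40) == 0) {     /* medium form */
            op->literals = src[0] >> 6;
            op->distance = (((src[0] << 8) | src[1]) & 0x3fff) + 1;
            src += 2;
            op->match    = (opcode & 0x3f) + 4;
        } else {                               /* long form */
            op->literals = opcode & 3;
            op->distance = ((opcode & 0x10) << 12) + ((src[0] << 8) | src[1]) + 1;
            src += 2;
            op->match    = ((opcode & 0x0c) << 6) + *src++ + 5;
        }
        return src;
    }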
diff --git a/libavcodec/zmbv.c b/libavcodec/zmbv.c
index 2eb12e8031..6f89c7e3ad 100644
--- a/libavcodec/zmbv.c
+++ b/libavcodec/zmbv.c
@@ -88,8 +88,8 @@ static int zmbv_decode_xor_8(ZmbvContext *c)
output = c->cur;
prev = c->prev;
- if(c->flags & ZMBV_DELTAPAL){
- for(i = 0; i < 768; i++)
+ if (c->flags & ZMBV_DELTAPAL) {
+ for (i = 0; i < 768; i++)
c->pal[i] ^= *src++;
}
@@ -97,9 +97,9 @@ static int zmbv_decode_xor_8(ZmbvContext *c)
src += ((c->bx * c->by * 2 + 3) & ~3);
block = 0;
- for(y = 0; y < c->height; y += c->bh) {
+ for (y = 0; y < c->height; y += c->bh) {
bh2 = ((c->height - y) > c->bh) ? c->bh : (c->height - y);
- for(x = 0; x < c->width; x += c->bw) {
+ for (x = 0; x < c->width; x += c->bw) {
uint8_t *out, *tprev;
d = mvec[block] & 1;
@@ -114,12 +114,12 @@ static int zmbv_decode_xor_8(ZmbvContext *c)
tprev = prev + x + dx + dy * c->width;
mx = x + dx;
my = y + dy;
- for(j = 0; j < bh2; j++){
- if((my + j < 0) || (my + j >= c->height)) {
+ for (j = 0; j < bh2; j++) {
+ if (my + j < 0 || my + j >= c->height) {
memset(out, 0, bw2);
} else {
- for(i = 0; i < bw2; i++){
- if((mx + i < 0) || (mx + i >= c->width))
+ for (i = 0; i < bw2; i++) {
+ if (mx + i < 0 || mx + i >= c->width)
out[i] = 0;
else
out[i] = tprev[i];
@@ -129,10 +129,10 @@ static int zmbv_decode_xor_8(ZmbvContext *c)
tprev += c->width;
}
- if(d) { /* apply XOR'ed difference */
+ if (d) { /* apply XOR'ed difference */
out = output + x;
- for(j = 0; j < bh2; j++){
- for(i = 0; i < bw2; i++)
+ for (j = 0; j < bh2; j++) {
+ for (i = 0; i < bw2; i++)
out[i] ^= *src++;
out += c->width;
}
@@ -141,8 +141,9 @@ static int zmbv_decode_xor_8(ZmbvContext *c)
output += c->width * c->bh;
prev += c->width * c->bh;
}
- if(src - c->decomp_buf != c->decomp_len)
- av_log(c->avctx, AV_LOG_ERROR, "Used %ti of %i bytes\n", src-c->decomp_buf, c->decomp_len);
+ if (src - c->decomp_buf != c->decomp_len)
+ av_log(c->avctx, AV_LOG_ERROR, "Used %ti of %i bytes\n",
+               src - c->decomp_buf, c->decomp_len);
return 0;
}
@@ -168,9 +169,9 @@ static int zmbv_decode_xor_16(ZmbvContext *c)
src += ((c->bx * c->by * 2 + 3) & ~3);
block = 0;
- for(y = 0; y < c->height; y += c->bh) {
+ for (y = 0; y < c->height; y += c->bh) {
bh2 = ((c->height - y) > c->bh) ? c->bh : (c->height - y);
- for(x = 0; x < c->width; x += c->bw) {
+ for (x = 0; x < c->width; x += c->bw) {
uint16_t *out, *tprev;
d = mvec[block] & 1;
@@ -185,12 +186,12 @@ static int zmbv_decode_xor_16(ZmbvContext *c)
tprev = prev + x + dx + dy * c->width;
mx = x + dx;
my = y + dy;
- for(j = 0; j < bh2; j++){
- if((my + j < 0) || (my + j >= c->height)) {
+ for (j = 0; j < bh2; j++) {
+ if (my + j < 0 || my + j >= c->height) {
memset(out, 0, bw2 * 2);
} else {
- for(i = 0; i < bw2; i++){
- if((mx + i < 0) || (mx + i >= c->width))
+ for (i = 0; i < bw2; i++) {
+ if (mx + i < 0 || mx + i >= c->width)
out[i] = 0;
else
out[i] = tprev[i];
@@ -200,10 +201,10 @@ static int zmbv_decode_xor_16(ZmbvContext *c)
tprev += c->width;
}
- if(d) { /* apply XOR'ed difference */
+ if (d) { /* apply XOR'ed difference */
out = output + x;
- for(j = 0; j < bh2; j++){
- for(i = 0; i < bw2; i++) {
+            for (j = 0; j < bh2; j++) {
+ for (i = 0; i < bw2; i++) {
out[i] ^= *((uint16_t*)src);
src += 2;
}
@@ -214,8 +215,9 @@ static int zmbv_decode_xor_16(ZmbvContext *c)
output += c->width * c->bh;
prev += c->width * c->bh;
}
- if(src - c->decomp_buf != c->decomp_len)
- av_log(c->avctx, AV_LOG_ERROR, "Used %ti of %i bytes\n", src-c->decomp_buf, c->decomp_len);
+ if (src - c->decomp_buf != c->decomp_len)
+ av_log(c->avctx, AV_LOG_ERROR, "Used %ti of %i bytes\n",
+               src - c->decomp_buf, c->decomp_len);
return 0;
}
@@ -244,9 +246,9 @@ static int zmbv_decode_xor_24(ZmbvContext *c)
src += ((c->bx * c->by * 2 + 3) & ~3);
block = 0;
- for(y = 0; y < c->height; y += c->bh) {
+ for (y = 0; y < c->height; y += c->bh) {
bh2 = ((c->height - y) > c->bh) ? c->bh : (c->height - y);
- for(x = 0; x < c->width; x += c->bw) {
+ for (x = 0; x < c->width; x += c->bw) {
uint8_t *out, *tprev;
d = mvec[block] & 1;
@@ -261,12 +263,12 @@ static int zmbv_decode_xor_24(ZmbvContext *c)
tprev = prev + (x + dx) * 3 + dy * stride;
mx = x + dx;
my = y + dy;
- for(j = 0; j < bh2; j++){
- if((my + j < 0) || (my + j >= c->height)) {
+ for (j = 0; j < bh2; j++) {
+ if (my + j < 0 || my + j >= c->height) {
memset(out, 0, bw2 * 3);
} else {
- for(i = 0; i < bw2; i++){
- if((mx + i < 0) || (mx + i >= c->width)) {
+                for (i = 0; i < bw2; i++) {
+ if (mx + i < 0 || mx + i >= c->width) {
out[i * 3 + 0] = 0;
out[i * 3 + 1] = 0;
out[i * 3 + 2] = 0;
@@ -281,10 +283,10 @@ static int zmbv_decode_xor_24(ZmbvContext *c)
tprev += stride;
}
- if(d) { /* apply XOR'ed difference */
+ if (d) { /* apply XOR'ed difference */
out = output + x * 3;
- for(j = 0; j < bh2; j++){
- for(i = 0; i < bw2; i++) {
+ for (j = 0; j < bh2; j++) {
+ for (i = 0; i < bw2; i++) {
out[i * 3 + 0] ^= *src++;
out[i * 3 + 1] ^= *src++;
out[i * 3 + 2] ^= *src++;
@@ -296,8 +298,9 @@ static int zmbv_decode_xor_24(ZmbvContext *c)
output += stride * c->bh;
prev += stride * c->bh;
}
- if(src - c->decomp_buf != c->decomp_len)
- av_log(c->avctx, AV_LOG_ERROR, "Used %i of %i bytes\n", src-c->decomp_buf, c->decomp_len);
+ if (src - c->decomp_buf != c->decomp_len)
+ av_log(c->avctx, AV_LOG_ERROR, "Used %i of %i bytes\n",
+               src - c->decomp_buf, c->decomp_len);
return 0;
}
#endif //ZMBV_ENABLE_24BPP
@@ -324,9 +327,9 @@ static int zmbv_decode_xor_32(ZmbvContext *c)
src += ((c->bx * c->by * 2 + 3) & ~3);
block = 0;
- for(y = 0; y < c->height; y += c->bh) {
+ for (y = 0; y < c->height; y += c->bh) {
bh2 = ((c->height - y) > c->bh) ? c->bh : (c->height - y);
- for(x = 0; x < c->width; x += c->bw) {
+ for (x = 0; x < c->width; x += c->bw) {
uint32_t *out, *tprev;
d = mvec[block] & 1;
@@ -341,12 +344,12 @@ static int zmbv_decode_xor_32(ZmbvContext *c)
tprev = prev + x + dx + dy * c->width;
mx = x + dx;
my = y + dy;
- for(j = 0; j < bh2; j++){
- if((my + j < 0) || (my + j >= c->height)) {
+ for (j = 0; j < bh2; j++) {
+ if (my + j < 0 || my + j >= c->height) {
memset(out, 0, bw2 * 4);
} else {
- for(i = 0; i < bw2; i++){
- if((mx + i < 0) || (mx + i >= c->width))
+                for (i = 0; i < bw2; i++) {
+ if (mx + i < 0 || mx + i >= c->width)
out[i] = 0;
else
out[i] = tprev[i];
@@ -356,11 +359,11 @@ static int zmbv_decode_xor_32(ZmbvContext *c)
tprev += c->width;
}
- if(d) { /* apply XOR'ed difference */
+ if (d) { /* apply XOR'ed difference */
out = output + x;
- for(j = 0; j < bh2; j++){
- for(i = 0; i < bw2; i++) {
- out[i] ^= *((uint32_t*)src);
+            for (j = 0; j < bh2; j++) {
+ for (i = 0; i < bw2; i++) {
+ out[i] ^= *((uint32_t *) src);
src += 4;
}
out += c->width;
@@ -368,10 +371,11 @@ static int zmbv_decode_xor_32(ZmbvContext *c)
}
}
output += c->width * c->bh;
- prev += c->width * c->bh;
+ prev += c->width * c->bh;
}
- if(src - c->decomp_buf != c->decomp_len)
- av_log(c->avctx, AV_LOG_ERROR, "Used %ti of %i bytes\n", src-c->decomp_buf, c->decomp_len);
+ if (src - c->decomp_buf != c->decomp_len)
+ av_log(c->avctx, AV_LOG_ERROR, "Used %ti of %i bytes\n",
+               src - c->decomp_buf, c->decomp_len);
return 0;
}
@@ -401,12 +405,12 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
int len = buf_size;
int hi_ver, lo_ver;
- if(c->pic.data[0])
+ if (c->pic.data[0])
avctx->release_buffer(avctx, &c->pic);
c->pic.reference = 3;
c->pic.buffer_hints = FF_BUFFER_HINTS_VALID;
- if(avctx->get_buffer(avctx, &c->pic) < 0){
+ if (avctx->get_buffer(avctx, &c->pic) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
@@ -414,7 +418,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
/* parse header */
c->flags = buf[0];
buf++; len--;
- if(c->flags & ZMBV_KEYFRAME) {
+ if (c->flags & ZMBV_KEYFRAME) {
void *decode_intra = NULL;
c->decode_intra= NULL;
hi_ver = buf[0];
@@ -426,21 +430,26 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
buf += 6;
len -= 6;
- av_log(avctx, AV_LOG_DEBUG, "Flags=%X ver=%i.%i comp=%i fmt=%i blk=%ix%i\n",c->flags,hi_ver,lo_ver,c->comp,c->fmt,c->bw,c->bh);
- if(hi_ver != 0 || lo_ver != 1) {
- av_log(avctx, AV_LOG_ERROR, "Unsupported version %i.%i\n", hi_ver, lo_ver);
+ av_log(avctx, AV_LOG_DEBUG,
+ "Flags=%X ver=%i.%i comp=%i fmt=%i blk=%ix%i\n",
+               c->flags, hi_ver, lo_ver, c->comp, c->fmt, c->bw, c->bh);
+ if (hi_ver != 0 || lo_ver != 1) {
+ av_log(avctx, AV_LOG_ERROR, "Unsupported version %i.%i\n",
+ hi_ver, lo_ver);
return -1;
}
- if(c->bw == 0 || c->bh == 0) {
- av_log(avctx, AV_LOG_ERROR, "Unsupported block size %ix%i\n", c->bw, c->bh);
+ if (c->bw == 0 || c->bh == 0) {
+ av_log(avctx, AV_LOG_ERROR, "Unsupported block size %ix%i\n",
+ c->bw, c->bh);
return -1;
}
- if(c->comp != 0 && c->comp != 1) {
- av_log(avctx, AV_LOG_ERROR, "Unsupported compression type %i\n", c->comp);
+ if (c->comp != 0 && c->comp != 1) {
+ av_log(avctx, AV_LOG_ERROR, "Unsupported compression type %i\n",
+ c->comp);
return -1;
}
- switch(c->fmt) {
+ switch (c->fmt) {
case ZMBV_FMT_8BPP:
c->bpp = 8;
decode_intra = zmbv_decode_intra;
@@ -466,7 +475,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
break;
default:
c->decode_xor = NULL;
- av_log(avctx, AV_LOG_ERROR, "Unsupported (for now) format %i\n", c->fmt);
+ av_log(avctx, AV_LOG_ERROR,
+ "Unsupported (for now) format %i\n", c->fmt);
return -1;
}
@@ -476,21 +486,21 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
return -1;
}
- c->cur = av_realloc_f(c->cur, avctx->width * avctx->height, (c->bpp / 8));
+ c->cur = av_realloc_f(c->cur, avctx->width * avctx->height, (c->bpp / 8));
c->prev = av_realloc_f(c->prev, avctx->width * avctx->height, (c->bpp / 8));
c->bx = (c->width + c->bw - 1) / c->bw;
c->by = (c->height+ c->bh - 1) / c->bh;
- if(!c->cur || !c->prev)
+ if (!c->cur || !c->prev)
return -1;
c->decode_intra= decode_intra;
}
- if(c->decode_intra == NULL) {
+ if (c->decode_intra == NULL) {
av_log(avctx, AV_LOG_ERROR, "Error! Got no format or no keyframe!\n");
return -1;
}
- if(c->comp == 0) { //Uncompressed data
+ if (c->comp == 0) { //Uncompressed data
memcpy(c->decomp_buf, buf, len);
c->decomp_size = 1;
} else { // ZLIB-compressed data
@@ -502,14 +512,14 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
inflate(&c->zstream, Z_FINISH);
c->decomp_len = c->zstream.total_out;
}
- if(c->flags & ZMBV_KEYFRAME) {
+ if (c->flags & ZMBV_KEYFRAME) {
c->pic.key_frame = 1;
c->pic.pict_type = AV_PICTURE_TYPE_I;
c->decode_intra(c);
} else {
c->pic.key_frame = 0;
c->pic.pict_type = AV_PICTURE_TYPE_P;
- if(c->decomp_len)
+ if (c->decomp_len)
c->decode_xor(c);
}
@@ -520,10 +530,10 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
out = c->pic.data[0];
src = c->cur;
- switch(c->fmt) {
+ switch (c->fmt) {
case ZMBV_FMT_8BPP:
- for(j = 0; j < c->height; j++) {
- for(i = 0; i < c->width; i++) {
+ for (j = 0; j < c->height; j++) {
+ for (i = 0; i < c->width; i++) {
out[i * 3 + 0] = c->pal[(*src) * 3 + 0];
out[i * 3 + 1] = c->pal[(*src) * 3 + 1];
out[i * 3 + 2] = c->pal[(*src) * 3 + 2];
@@ -533,8 +543,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
}
break;
case ZMBV_FMT_15BPP:
- for(j = 0; j < c->height; j++) {
- for(i = 0; i < c->width; i++) {
+ for (j = 0; j < c->height; j++) {
+ for (i = 0; i < c->width; i++) {
uint16_t tmp = AV_RL16(src);
src += 2;
out[i * 3 + 0] = (tmp & 0x7C00) >> 7;
@@ -545,8 +555,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
}
break;
case ZMBV_FMT_16BPP:
- for(j = 0; j < c->height; j++) {
- for(i = 0; i < c->width; i++) {
+ for (j = 0; j < c->height; j++) {
+ for (i = 0; i < c->width; i++) {
uint16_t tmp = AV_RL16(src);
src += 2;
out[i * 3 + 0] = (tmp & 0xF800) >> 8;
@@ -558,7 +568,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
break;
#ifdef ZMBV_ENABLE_24BPP
case ZMBV_FMT_24BPP:
- for(j = 0; j < c->height; j++) {
+ for (j = 0; j < c->height; j++) {
memcpy(out, src, c->width * 3);
src += c->width * 3;
out += c->pic.linesize[0];
@@ -566,8 +576,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
break;
#endif //ZMBV_ENABLE_24BPP
case ZMBV_FMT_32BPP:
- for(j = 0; j < c->height; j++) {
- for(i = 0; i < c->width; i++) {
+ for (j = 0; j < c->height; j++) {
+ for (i = 0; i < c->width; i++) {
uint32_t tmp = AV_RL32(src);
src += 4;
AV_WB24(out+(i*3), tmp);
@@ -616,7 +626,8 @@ static av_cold int decode_init(AVCodecContext *avctx)
/* Allocate decompression buffer */
if (c->decomp_size) {
if ((c->decomp_buf = av_malloc(c->decomp_size)) == NULL) {
- av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
+ av_log(avctx, AV_LOG_ERROR,
+ "Can't allocate decompression buffer.\n");
return 1;
}
}
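The zmbv.c hunks in this merge are cosmetic only; the scheme they touch predicts each block from the previous frame at a small motion offset and, when the block's flag bit is set, XORs in residual bytes from the (optionally zlib-compressed) payload. A minimal 8 bpp sketch of that per-block step, restricted to a zero motion vector and using illustrative names:

    #include <stdint.h>
    #include <string.h>

    /* Copy the co-located block from the previous frame, then XOR in the
     * residual if this block carries a delta.  Returns the advanced
     * residual pointer. */
    static const uint8_t *apply_xor_block(uint8_t *cur, const uint8_t *prev,
                                          int width, int x, int y,
                                          int bw, int bh, int apply_delta,
                                          const uint8_t *src)
    {
        int i, j;

        for (j = 0; j < bh; j++)
            memcpy(cur  + (y + j) * width + x,
                   prev + (y + j) * width + x, bw);

        if (apply_delta)
            for (j = 0; j < bh; j++)
                for (i = 0; i < bw; i++)
                    cur[(y + j) * width + x + i] ^= *src++;

        return src;
    }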