From 115329f16062074e11ccf3b89ead6176606c9696 Mon Sep 17 00:00:00 2001
From: Diego Biurrun
Date: Sat, 17 Dec 2005 18:14:38 +0000
Subject: COSMETICS: Remove all trailing whitespace.

Originally committed as revision 4749 to svn://svn.ffmpeg.org/ffmpeg/trunk
---
 libavcodec/adpcm.c | 76 +++++++++++++++++++++++++++---------------------------
 1 file changed, 38 insertions(+), 38 deletions(-)

(limited to 'libavcodec/adpcm.c')

diff --git a/libavcodec/adpcm.c b/libavcodec/adpcm.c
index 3c67242f41..b22442e28c 100644
--- a/libavcodec/adpcm.c
+++ b/libavcodec/adpcm.c
@@ -59,7 +59,7 @@ static const int index_table[16] = {
     -1, -1, -1, -1, 2, 4, 6, 8,
 };
 
-/** 
+/**
  * This is the step table. Note that many programs use slight deviations from
  * this table, but such deviations are negligible:
  */
@@ -205,7 +205,7 @@ static inline unsigned char adpcm_ima_compress_sample(ADPCMChannelStatus *c, sho
 {
     int step_index;
     unsigned char nibble;
-    
+
     int sign = 0; /* sign bit of the nibble (MSB) */
     int delta, predicted_delta;
 
@@ -241,7 +241,7 @@
 
     CLAMP_TO_SHORT(c->prev_sample);
 
-    nibble += sign << 3; /* sign * 8 */ 
+    nibble += sign << 3; /* sign * 8 */
 
     /* save back */
     c->step_index = step_index;
@@ -254,14 +254,14 @@ static inline unsigned char adpcm_ms_compress_sample(ADPCMChannelStatus *c, shor
 {
     int predictor, nibble, bias;
 
     predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 256;
-    
+
     nibble= sample - predictor;
     if(nibble>=0) bias= c->idelta/2;
     else          bias=-c->idelta/2;
-    
+
     nibble= (nibble + bias) / c->idelta;
     nibble= clip(nibble, -8, 7)&0x0F;
-    
+
     predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
     CLAMP_TO_SHORT(predictor);
@@ -333,7 +333,7 @@ static int adpcm_encode_frame(AVCodecContext *avctx,
             *dst++ = 0;
             samples++;
         }
-        
+
         /* stereo: 4 bytes (8 samples) for left, 4 bytes for right, 4 bytes left, ... */
         for (; n>0; n--) {
             *dst = adpcm_ima_compress_sample(&c->status[0], samples[0]) & 0x0F;
@@ -375,9 +375,9 @@ static int adpcm_encode_frame(AVCodecContext *avctx,
             c->status[i].coeff2 = AdaptCoeff2[predictor];
         }
         for(i=0; i<avctx->channels; i++){
-            if (c->status[i].idelta < 16) 
+            if (c->status[i].idelta < 16)
                 c->status[i].idelta = 16;
-            
+
             *dst++ = c->status[i].idelta & 0xFF;
             *dst++ = c->status[i].idelta >> 8;
         }
@@ -528,7 +528,7 @@ static inline short adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, unsigned c
     return c->predictor;
 }
 
-static void xa_decode(short *out, const unsigned char *in, 
+static void xa_decode(short *out, const unsigned char *in,
     ADPCMChannelStatus *left, ADPCMChannelStatus *right, int inc)
 {
     int i, j;
@@ -770,7 +770,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
         c->status[0].coeff2 = AdaptCoeff2[block_predictor[0]];
         c->status[1].coeff1 = AdaptCoeff1[block_predictor[1]];
         c->status[1].coeff2 = AdaptCoeff2[block_predictor[1]];
-        
+
         c->status[0].sample1 = ((*src & 0xFF) | ((src[1] << 8) & 0xFF00));
         src+=2;
         if (st) c->status[1].sample1 = ((*src & 0xFF) | ((src[1] << 8) & 0xFF00));
@@ -807,16 +807,16 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
         while (src < buf + buf_size) {
 
             /* take care of the top nibble (always left or mono channel) */
-            *samples++ = adpcm_ima_expand_nibble(&c->status[0], 
+            *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                 (src[0] >> 4) & 0x0F, 3);
 
             /* take care of the bottom nibble, which is right sample for
              * stereo, or another mono sample */
             if (st)
-                *samples++ = adpcm_ima_expand_nibble(&c->status[1], 
+                *samples++ = adpcm_ima_expand_nibble(&c->status[1],
                     src[0] & 0x0F, 3);
             else
-                *samples++ = adpcm_ima_expand_nibble(&c->status[0], 
+                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                     src[0] & 0x0F, 3);
 
             src++;
@@ -869,14 +869,14 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
         while (src < buf + buf_size) {
 
             if (st) {
-                *samples++ = adpcm_ima_expand_nibble(&c->status[0], 
+                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                     (src[0] >> 4) & 0x0F, 3);
-                *samples++ = adpcm_ima_expand_nibble(&c->status[1], 
+                *samples++ = adpcm_ima_expand_nibble(&c->status[1],
                     src[0] & 0x0F, 3);
             } else {
-                *samples++ = adpcm_ima_expand_nibble(&c->status[0], 
+                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                     (src[0] >> 4) & 0x0F, 3);
-                *samples++ = adpcm_ima_expand_nibble(&c->status[0], 
+                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                     src[0] & 0x0F, 3);
             }
 
@@ -884,10 +884,10 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
         }
         break;
     case CODEC_ID_ADPCM_XA:
-        c->status[0].sample1 = c->status[0].sample2 = 
+        c->status[0].sample1 = c->status[0].sample2 =
             c->status[1].sample1 = c->status[1].sample2 = 0;
         while (buf_size >= 128) {
-            xa_decode(samples, src, &c->status[0], &c->status[1], 
+            xa_decode(samples, src, &c->status[0], &c->status[1],
                 avctx->channels);
             src += 128;
             samples += 28 * 8;
@@ -926,11 +926,11 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
             next_right_sample = (((*src & 0x0F) << 28) >> shift_right);
             src++;
 
-            next_left_sample = (next_left_sample + 
-                (current_left_sample * coeff1l) + 
+            next_left_sample = (next_left_sample +
+                (current_left_sample * coeff1l) +
                 (previous_left_sample * coeff2l) + 0x80) >> 8;
-            next_right_sample = (next_right_sample + 
-                (current_right_sample * coeff1r) + 
+            next_right_sample = (next_right_sample +
+                (current_right_sample * coeff1r) +
                 (previous_right_sample * coeff2r) + 0x80) >> 8;
             CLAMP_TO_SHORT(next_left_sample);
             CLAMP_TO_SHORT(next_right_sample);
@@ -960,14 +960,14 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
     case CODEC_ID_ADPCM_CT:
         while (src < buf + buf_size) {
             if (st) {
-                *samples++ = adpcm_ct_expand_nibble(&c->status[0], 
+                *samples++ = adpcm_ct_expand_nibble(&c->status[0],
                     (src[0] >> 4) & 0x0F);
-                *samples++ = adpcm_ct_expand_nibble(&c->status[1], 
+                *samples++ = adpcm_ct_expand_nibble(&c->status[1],
                     src[0] & 0x0F);
             } else {
-                *samples++ = adpcm_ct_expand_nibble(&c->status[0], 
+                *samples++ = adpcm_ct_expand_nibble(&c->status[0],
                     (src[0] >> 4) & 0x0F);
-                *samples++ = adpcm_ct_expand_nibble(&c->status[0], 
+                *samples++ = adpcm_ct_expand_nibble(&c->status[0],
                     src[0] & 0x0F);
             }
             src++;
@@ -979,7 +979,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
         const int *table;
         int k0, signmask;
         int size = buf_size*8;
-        
+
         init_get_bits(&gb, buf, size);
 
         // first frame, read bits & inital values
@@ -988,11 +988,11 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
             c->nb_bits = get_bits(&gb, 2)+2;
 //            av_log(NULL,AV_LOG_INFO,"nb_bits: %d\n", c->nb_bits);
         }
-        
+
         table = swf_index_tables[c->nb_bits-2];
         k0 = 1 << (c->nb_bits-2);
         signmask = 1 << (c->nb_bits-1);
-        
+
         while (get_bits_count(&gb) <= size) {
             int i;
 
@@ -1015,7 +1015,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
                 int step = step_table[c->status[i].step_index];
                 long vpdiff = 0; // vpdiff = (delta+0.5)*step/4
                 int k = k0;
-                
+
                 do {
                     if (delta & k)
                         vpdiff += step;
@@ -1023,24 +1023,24 @@ static int adpcm_decode_frame(AVCodecContext *avctx,
                     k >>= 1;
                 } while(k);
                 vpdiff += step;
-                
+
                 if (delta & signmask)
                     c->status[i].predictor -= vpdiff;
                 else
                     c->status[i].predictor += vpdiff;
-                
+
                 c->status[i].step_index += table[delta & (~signmask)];
-                
+
                 c->status[i].step_index = clip(c->status[i].step_index, 0, 88);
                 c->status[i].predictor = clip(c->status[i].predictor, -32768, 32767);
-                
+
                 *samples++ = c->status[i].predictor;
             }
         }
-        
+
         // src += get_bits_count(&gb)*8;
         src += size;
-        
+
         break;
     }
     case CODEC_ID_ADPCM_YAMAHA:
--
cgit v1.2.3
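
For context on the code being touched: most of the hunks above fall inside FFmpeg's IMA ADPCM routines (adpcm_ima_compress_sample, adpcm_ima_expand_nibble), which are driven by the index_table/step_table shown in the first hunk. The minimal, self-contained C sketch below illustrates the standard IMA/DVI ADPCM nibble expansion that this family of decoders is based on. It is an illustration only, not FFmpeg's code: the names ima_state, expand_nibble and clip_int are invented for this example, and FFmpeg's adpcm_ima_expand_nibble additionally takes a shift parameter and clamps through its CLAMP_TO_SHORT macro.

/* Sketch of standard IMA ADPCM nibble expansion (decoder side).
 * Illustrative only; not FFmpeg's adpcm_ima_expand_nibble. */
#include <stdio.h>

/* Standard IMA/DVI ADPCM tables (same values as in adpcm.c). */
static const int index_table[16] = {
    -1, -1, -1, -1, 2, 4, 6, 8,
    -1, -1, -1, -1, 2, 4, 6, 8,
};

static const int step_table[89] = {
        7,     8,     9,    10,    11,    12,    13,    14,    16,    17,
       19,    21,    23,    25,    28,    31,    34,    37,    41,    45,
       50,    55,    60,    66,    73,    80,    88,    97,   107,   118,
      130,   143,   157,   173,   190,   209,   230,   253,   279,   307,
      337,   371,   408,   449,   494,   544,   598,   658,   724,   796,
      876,   963,  1060,  1166,  1282,  1411,  1552,  1707,  1878,  2066,
     2272,  2499,  2749,  3024,  3327,  3660,  4026,  4428,  4871,  5358,
     5894,  6484,  7132,  7845,  8630,  9493, 10442, 11487, 12635, 13899,
    15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
};

typedef struct {
    int predictor;   /* last decoded sample */
    int step_index;  /* index into step_table, kept in 0..88 */
} ima_state;

static int clip_int(int v, int lo, int hi)
{
    return v < lo ? lo : (v > hi ? hi : v);
}

/* Expand one 4-bit ADPCM code into a 16-bit PCM sample. */
static short expand_nibble(ima_state *s, int nibble)
{
    int step = step_table[s->step_index];

    /* diff = (magnitude + 1/2) * step / 4, built bit by bit */
    int diff = step >> 3;
    if (nibble & 4) diff += step;
    if (nibble & 2) diff += step >> 1;
    if (nibble & 1) diff += step >> 2;

    if (nibble & 8)                 /* bit 3 is the sign */
        s->predictor -= diff;
    else
        s->predictor += diff;

    s->predictor  = clip_int(s->predictor, -32768, 32767);
    s->step_index = clip_int(s->step_index + index_table[nibble], 0, 88);

    return (short)s->predictor;
}

int main(void)
{
    ima_state s = { 0, 0 };
    const unsigned char codes[] = { 0x7, 0x3, 0x8, 0xF, 0x1 };

    for (unsigned i = 0; i < sizeof(codes); i++)
        printf("code 0x%X -> sample %d\n",
               (unsigned)codes[i], expand_nibble(&s, codes[i]));
    return 0;
}

Decoding a real IMA-coded block amounts to feeding each 4-bit code through such an expansion with per-channel state, which is what adpcm_decode_frame does by keeping one ADPCMChannelStatus per channel.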