author    Michael Niedermayer <michaelni@gmx.at>  2012-01-04 01:12:34 +0100
committer Michael Niedermayer <michaelni@gmx.at>  2012-01-04 01:12:34 +0100
commit    ad1c8dd6734f0aa7a7a87b4669a166715c114b46
tree      c03c08f30bf1a7e8d9859abc48de3391bb05c6a0 /libavcodec
parent    d6da16dca5a64ed7ab2db54710a0c703f179d3ba
parent    fd16f567987524a769d5d4f1f69089f000386ac2
Merge remote-tracking branch 'qatar/master'
* qatar/master:
  fate: add dxtory test
  adx_parser: rewrite.
  adxdec: Validate channel count to fix a division by zero.
  adxdec: Do not require extradata.
  cmdutils: K&R reformatting cosmetics
  alacdec: implement the 2-pass prediction type.
  alacenc: implement the 2-pass prediction type.
  alacenc: do not generate invalid multi-channel ALAC files
  alacdec: fill in missing or guessed info about the extradata format.
  utvideo: proper median prediction for interlaced videos
  lavu: bump lavu minor for av_popcount64
  dca: K&R formatting cosmetics
  dct: K&R formatting cosmetics
  lavf: flush decoders in avformat_find_stream_info().
  win32: detect number of CPUs using affinity
  Add av_popcount64
  snow: Restore three mistakenly removed casts.

Conflicts:
    cmdutils.c
    doc/APIchanges
    libavcodec/adx_parser.c
    libavcodec/adxdec.c
    libavcodec/alacenc.c
    libavutil/avutil.h
    tests/fate/screen.mak

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Diffstat (limited to 'libavcodec')
-rw-r--r--   libavcodec/adx.c         |   2
-rw-r--r--   libavcodec/adx_parser.c  |  32
-rw-r--r--   libavcodec/adxdec.c      |  17
-rw-r--r--   libavcodec/alac.c        |  75
-rw-r--r--   libavcodec/alacenc.c     |  18
-rw-r--r--   libavcodec/dca.c         | 546
-rw-r--r--   libavcodec/dct.c         | 122
-rw-r--r--   libavcodec/pthread.c     |  11
-rw-r--r--   libavcodec/snow.c        |   6
-rw-r--r--   libavcodec/utvideo.c     | 101
10 files changed, 534 insertions(+), 396 deletions(-)
diff --git a/libavcodec/adx.c b/libavcodec/adx.c
index aa90fd89c3..1e5d89c991 100644
--- a/libavcodec/adx.c
+++ b/libavcodec/adx.c
@@ -58,7 +58,7 @@ int avpriv_adx_decode_header(AVCodecContext *avctx, const uint8_t *buf,
/* channels */
avctx->channels = buf[7];
- if (avctx->channels > 2)
+ if (avctx->channels <= 0 || avctx->channels > 2)
return AVERROR_INVALIDDATA;
/* sample rate */
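For context on the added check: the decoder later derives the number of blocks in a packet by dividing the payload size by the per-block size, which scales with the channel count, so an unvalidated zero channel count would divide by zero. A minimal sketch of that hazard, with illustrative names rather than the decoder's actual variables:

    #include <stdio.h>

    #define BLOCK_SIZE 18   /* bytes per ADX block per channel */

    /* Illustrative sketch: computing a block count the way a decoder would. */
    static int blocks_in_packet(int buf_size, int channels)
    {
        if (channels <= 0)          /* without this guard ...            */
            return -1;              /* ... the division below traps on 0 */
        return buf_size / (BLOCK_SIZE * channels);
    }

    int main(void)
    {
        printf("%d\n", blocks_in_packet(1024, 2));  /* 28 */
        printf("%d\n", blocks_in_packet(1024, 0));  /* guarded: -1 */
        return 0;
    }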
diff --git a/libavcodec/adx_parser.c b/libavcodec/adx_parser.c
index bbd865454e..de3b1b073f 100644
--- a/libavcodec/adx_parser.c
+++ b/libavcodec/adx_parser.c
@@ -45,27 +45,31 @@ static int adx_parse(AVCodecParserContext *s1,
ParseContext *pc = &s->pc;
int next = END_NOT_FOUND;
int i;
- uint64_t state= pc->state64;
+ uint64_t state = pc->state64;
- if(!s->header_size){
- for(i=0; i<buf_size; i++){
- state= (state<<8) | buf[i];
- if((state&0xFFFF0000FFFFFF00) == 0x8000000003120400ULL && (state&0xFF) && ((state>>32)&0xFFFF)>=4){
- s->header_size= ((state>>32)&0xFFFF) + 4;
- s->block_size = BLOCK_SIZE * (state&0xFF);
- s->remaining = i - 7 + s->header_size + s->block_size;
- break;
+ if (!s->header_size) {
+ for (i = 0; i < buf_size; i++) {
+ state = (state << 8) | buf[i];
+ /* check for fixed fields in ADX header for possible match */
+ if ((state & 0xFFFF0000FFFFFF00) == 0x8000000003120400ULL) {
+ int channels = state & 0xFF;
+ int header_size = ((state >> 32) & 0xFFFF) + 4;
+ if (channels > 0 && header_size >= 8) {
+ s->header_size = header_size;
+ s->block_size = BLOCK_SIZE * channels;
+ s->remaining = i - 7 + s->header_size + s->block_size;
+ break;
+ }
}
}
- pc->state64= state;
+ pc->state64 = state;
}
if (s->header_size) {
- if (!s->remaining) {
+ if (!s->remaining)
s->remaining = s->block_size;
- }
- if (s->remaining<=buf_size) {
- next= s->remaining;
+ if (s->remaining <= buf_size) {
+ next = s->remaining;
s->remaining = 0;
} else
s->remaining -= buf_size;
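The 64-bit rolling state above holds the last eight bytes seen; the mask pins down the constant header fields (0x8000 sync word, encoding type 3, 18-byte blocks, 4 bits per sample) while the copyright offset and channel count stay free. A self-contained sketch of the same field extraction, assuming an 8-byte window packed the same way:

    #include <stdint.h>
    #include <stdio.h>

    #define BLOCK_SIZE 18

    /* Extract ADX header fields from the last 8 bytes held in a rolling state,
     * mirroring the mask/shift logic used by the parser above. */
    static int parse_adx_state(uint64_t state, int *header_size, int *block_size)
    {
        /* fixed fields: 0x8000 magic, encoding type 3, block size 18, 4 bits/sample */
        if ((state & 0xFFFF0000FFFFFF00ULL) != 0x8000000003120400ULL)
            return 0;
        int channels = state & 0xFF;                  /* last byte            */
        int hdr      = ((state >> 32) & 0xFFFF) + 4;  /* copyright offset + 4 */
        if (channels <= 0 || hdr < 8)
            return 0;
        *header_size = hdr;
        *block_size  = BLOCK_SIZE * channels;
        return 1;
    }

    int main(void)
    {
        int hs, bs;
        /* 0x80 0x00, offset 0x0020, type 0x03, blk 0x12, 4 bits, 2 channels */
        if (parse_adx_state(0x8000002003120402ULL, &hs, &bs))
            printf("header %d bytes, block %d bytes\n", hs, bs);  /* 36, 36 */
        return 0;
    }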
diff --git a/libavcodec/adxdec.c b/libavcodec/adxdec.c
index cf494c12d4..fdff6875e1 100644
--- a/libavcodec/adxdec.c
+++ b/libavcodec/adxdec.c
@@ -45,7 +45,8 @@ static av_cold int adx_decode_init(AVCodecContext *avctx)
av_log(avctx, AV_LOG_ERROR, "error parsing ADX header\n");
return AVERROR_INVALIDDATA;
}
- c->channels = avctx->channels;
+ c->channels = avctx->channels;
+ c->header_parsed = 1;
}
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
@@ -106,21 +107,21 @@ static int adx_decode_frame(AVCodecContext *avctx, void *data,
return buf_size;
}
- if(AV_RB16(buf) == 0x8000){
+ if (!c->header_parsed && buf_size >= 2 && AV_RB16(buf) == 0x8000) {
int header_size;
- if ((ret = avpriv_adx_decode_header(avctx, buf,
- buf_size, &header_size,
+ if ((ret = avpriv_adx_decode_header(avctx, buf, buf_size, &header_size,
c->coeff)) < 0) {
av_log(avctx, AV_LOG_ERROR, "error parsing ADX header\n");
return AVERROR_INVALIDDATA;
}
- c->channels = avctx->channels;
- if(buf_size < header_size)
+ c->channels = avctx->channels;
+ c->header_parsed = 1;
+ if (buf_size < header_size)
return AVERROR_INVALIDDATA;
- buf += header_size;
+ buf += header_size;
buf_size -= header_size;
}
- if(c->channels <= 0)
+ if (!c->header_parsed)
return AVERROR_INVALIDDATA;
/* calculate number of blocks in the packet */
diff --git a/libavcodec/alac.c b/libavcodec/alac.c
index c08d2848ed..70e1a6438b 100644
--- a/libavcodec/alac.c
+++ b/libavcodec/alac.c
@@ -25,27 +25,23 @@
* @author 2005 David Hammerton
* @see http://crazney.net/programs/itunes/alac.html
*
- * Note: This decoder expects a 36- (0x24-)byte QuickTime atom to be
+ * Note: This decoder expects a 36-byte QuickTime atom to be
* passed through the extradata[_size] fields. This atom is tacked onto
* the end of an 'alac' stsd atom and has the following format:
- * bytes 0-3 atom size (0x24), big-endian
- * bytes 4-7 atom type ('alac', not the 'alac' tag from start of stsd)
- * bytes 8-35 data bytes needed by decoder
*
- * Extradata:
- * 32bit size
- * 32bit tag (=alac)
- * 32bit zero?
- * 32bit max sample per frame
- * 8bit ?? (zero?)
+ * 32bit atom size
+ * 32bit tag ("alac")
+ * 32bit tag version (0)
+ * 32bit samples per frame (used when not set explicitly in the frames)
+ * 8bit compatible version (0)
* 8bit sample size
- * 8bit history mult
- * 8bit initial history
- * 8bit kmodifier
- * 8bit channels?
- * 16bit ??
- * 32bit max coded frame size
- * 32bit bitrate?
+ * 8bit history mult (40)
+ * 8bit initial history (14)
+ * 8bit kmodifier (10)
+ * 8bit channels
+ * 16bit maxRun (255)
+ * 32bit max coded frame size (0 means unknown)
+ * 32bit average bitrate (0 means unknown)
* 32bit samplerate
*/
@@ -464,24 +460,29 @@ static int alac_decode_frame(AVCodecContext *avctx, void *data,
if(ret<0)
return ret;
- if (prediction_type[ch] == 0) {
- /* adaptive fir */
- predictor_decompress_fir_adapt(alac->predicterror_buffer[ch],
- alac->outputsamples_buffer[ch],
- outputsamples,
- readsamplesize,
- predictor_coef_table[ch],
- predictor_coef_num[ch],
- prediction_quantitization[ch]);
- } else {
- av_log(avctx, AV_LOG_ERROR, "FIXME: unhandled prediction type: %i\n", prediction_type[ch]);
- /* I think the only other prediction type (or perhaps this is
- * just a boolean?) runs adaptive fir twice.. like:
- * predictor_decompress_fir_adapt(predictor_error, tempout, ...)
- * predictor_decompress_fir_adapt(predictor_error, outputsamples ...)
- * little strange..
+ /* adaptive FIR filter */
+ if (prediction_type[ch] == 15) {
+ /* Prediction type 15 runs the adaptive FIR twice.
+ * The first pass uses the special-case coef_num = 31, while
+ * the second pass uses the coefs from the bitstream.
+ *
+ * However, this prediction type is not currently used by the
+ * reference encoder.
*/
+ predictor_decompress_fir_adapt(alac->predicterror_buffer[ch],
+ alac->predicterror_buffer[ch],
+ outputsamples, readsamplesize,
+ NULL, 31, 0);
+ } else if (prediction_type[ch] > 0) {
+ av_log(avctx, AV_LOG_WARNING, "unknown prediction type: %i\n",
+ prediction_type[ch]);
}
+ predictor_decompress_fir_adapt(alac->predicterror_buffer[ch],
+ alac->outputsamples_buffer[ch],
+ outputsamples, readsamplesize,
+ predictor_coef_table[ch],
+ predictor_coef_num[ch],
+ prediction_quantitization[ch]);
}
} else {
/* not compressed, easy case */
@@ -584,7 +585,7 @@ static int alac_set_info(ALACContext *alac)
ptr += 4; /* size */
ptr += 4; /* alac */
- ptr += 4; /* 0 ? */
+ ptr += 4; /* version */
if(AV_RB32(ptr) >= UINT_MAX/4){
av_log(alac->avctx, AV_LOG_ERROR, "setinfo_max_samples_per_frame too large\n");
@@ -593,15 +594,15 @@ static int alac_set_info(ALACContext *alac)
/* buffer size / 2 ? */
alac->setinfo_max_samples_per_frame = bytestream_get_be32(&ptr);
- ptr++; /* ??? */
+ ptr++; /* compatible version */
alac->setinfo_sample_size = *ptr++;
alac->setinfo_rice_historymult = *ptr++;
alac->setinfo_rice_initialhistory = *ptr++;
alac->setinfo_rice_kmodifier = *ptr++;
alac->numchannels = *ptr++;
- bytestream_get_be16(&ptr); /* ??? */
+ bytestream_get_be16(&ptr); /* maxRun */
bytestream_get_be32(&ptr); /* max coded frame size */
- bytestream_get_be32(&ptr); /* bitrate ? */
+ bytestream_get_be32(&ptr); /* average bitrate */
bytestream_get_be32(&ptr); /* samplerate */
return 0;
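The field list documented above maps directly onto 36 bytes of big-endian data. As a sketch, walking such a blob with plain big-endian reads (illustrative helper names, not the decoder's bytestream API):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t rb32(const uint8_t *p) { return (uint32_t)p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3]; }
    static uint16_t rb16(const uint8_t *p) { return (uint16_t)(p[0] << 8 | p[1]); }

    /* Walk the 36-byte 'alac' atom in the field order documented above. */
    void dump_alac_extradata(const uint8_t *p)
    {
        printf("atom size        %u\n",   rb32(p));          p += 4;
        printf("tag              %.4s\n", (const char *)p);  p += 4;
        p += 4;                                   /* tag version (0)        */
        printf("samples/frame    %u\n",   rb32(p));           p += 4;
        p += 1;                                   /* compatible version (0) */
        printf("sample size      %u\n", *p++);
        printf("history mult     %u\n", *p++);    /* typically 40  */
        printf("initial history  %u\n", *p++);    /* typically 14  */
        printf("k modifier       %u\n", *p++);    /* typically 10  */
        printf("channels         %u\n", *p++);
        printf("maxRun           %u\n",   rb16(p));           p += 2;
        printf("max frame size   %u\n",   rb32(p));           p += 4;
        printf("avg bitrate      %u\n",   rb32(p));           p += 4;
        printf("sample rate      %u\n",   rb32(p));
    }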
diff --git a/libavcodec/alacenc.c b/libavcodec/alacenc.c
index 305a5b825b..e8d1bc03f2 100644
--- a/libavcodec/alacenc.c
+++ b/libavcodec/alacenc.c
@@ -348,6 +348,7 @@ static void alac_entropy_coder(AlacEncodeContext *s)
static void write_compressed_frame(AlacEncodeContext *s)
{
int i, j;
+ int prediction_type = 0;
if (s->avctx->channels == 2)
alac_stereo_decorrelation(s);
@@ -358,7 +359,7 @@ static void write_compressed_frame(AlacEncodeContext *s)
calc_predictor_params(s, i);
- put_bits(&s->pbctx, 4, 0); // prediction type : currently only type 0 has been RE'd
+ put_bits(&s->pbctx, 4, prediction_type);
put_bits(&s->pbctx, 4, s->lpc[i].lpc_quant);
put_bits(&s->pbctx, 3, s->rc.rice_modifier);
@@ -373,6 +374,14 @@ static void write_compressed_frame(AlacEncodeContext *s)
for (i = 0; i < s->avctx->channels; i++) {
alac_linear_predictor(s, i);
+
+ // TODO: determine when this will actually help. for now it's not used.
+ if (prediction_type == 15) {
+ // 2nd pass 1st order filter
+ for (j = s->avctx->frame_size - 1; j > 0; j--)
+ s->predictor_buf[j] -= s->predictor_buf[j - 1];
+ }
+
alac_entropy_coder(s);
}
}
@@ -391,8 +400,11 @@ static av_cold int alac_encode_init(AVCodecContext *avctx)
return -1;
}
- if(avctx->channels > 2) {
- av_log(avctx, AV_LOG_ERROR, "channels > 2 not supported\n");
+ /* TODO: Correctly implement multi-channel ALAC.
+ It is similar to multi-channel AAC, in that it has a series of
+ single-channel (SCE), channel-pair (CPE), and LFE elements. */
+ if (avctx->channels > 2) {
+ av_log(avctx, AV_LOG_ERROR, "only mono or stereo input is currently supported\n");
return AVERROR_PATCHWELCOME;
}
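The encoder's extra pass for prediction type 15 is a first-order difference over the residual buffer, and the decoder's special 31-tap FIR pass is essentially the matching running sum. A standalone sketch of that pair of operations on a toy buffer (not the codec's actual buffers):

    #include <stdio.h>

    /* Encoder side: second-pass first-order filter over the residuals. */
    static void first_order_diff(int *buf, int n)
    {
        for (int j = n - 1; j > 0; j--)
            buf[j] -= buf[j - 1];
    }

    /* Decoder side: the inverse, a running sum restoring the residuals. */
    static void first_order_undiff(int *buf, int n)
    {
        for (int j = 1; j < n; j++)
            buf[j] += buf[j - 1];
    }

    int main(void)
    {
        int buf[6] = { 3, 5, 4, 4, 7, 2 };
        first_order_diff(buf, 6);    /* 3, 2, -1, 0, 3, -5 */
        first_order_undiff(buf, 6);  /* back to 3, 5, 4, 4, 7, 2 */
        for (int j = 0; j < 6; j++)
            printf("%d ", buf[j]);
        printf("\n");
        return 0;
    }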
diff --git a/libavcodec/dca.c b/libavcodec/dca.c
index 7c43b922b1..8622587efe 100644
--- a/libavcodec/dca.c
+++ b/libavcodec/dca.c
@@ -48,13 +48,13 @@
//#define TRACE
-#define DCA_PRIM_CHANNELS_MAX (7)
-#define DCA_SUBBANDS (32)
-#define DCA_ABITS_MAX (32) /* Should be 28 */
-#define DCA_SUBSUBFRAMES_MAX (4)
-#define DCA_SUBFRAMES_MAX (16)
-#define DCA_BLOCKS_MAX (16)
-#define DCA_LFE_MAX (3)
+#define DCA_PRIM_CHANNELS_MAX (7)
+#define DCA_SUBBANDS (32)
+#define DCA_ABITS_MAX (32) /* Should be 28 */
+#define DCA_SUBSUBFRAMES_MAX (4)
+#define DCA_SUBFRAMES_MAX (16)
+#define DCA_BLOCKS_MAX (16)
+#define DCA_LFE_MAX (3)
enum DCAMode {
DCA_MONO = 0,
@@ -127,28 +127,45 @@ static const int dca_ext_audio_descr_mask[] = {
* OV -> center back
* All 2 channel configurations -> AV_CH_LAYOUT_STEREO
*/
-
static const uint64_t dca_core_channel_layout[] = {
- AV_CH_FRONT_CENTER, ///< 1, A
- AV_CH_LAYOUT_STEREO, ///< 2, A + B (dual mono)
- AV_CH_LAYOUT_STEREO, ///< 2, L + R (stereo)
- AV_CH_LAYOUT_STEREO, ///< 2, (L+R) + (L-R) (sum-difference)
- AV_CH_LAYOUT_STEREO, ///< 2, LT +RT (left and right total)
- AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER, ///< 3, C+L+R
- AV_CH_LAYOUT_STEREO|AV_CH_BACK_CENTER, ///< 3, L+R+S
- AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER, ///< 4, C + L + R+ S
- AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, ///< 4, L + R +SL+ SR
- AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, ///< 5, C + L + R+ SL+SR
- AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER, ///< 6, CL + CR + L + R + SL + SR
- AV_CH_LAYOUT_STEREO|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER, ///< 6, C + L + R+ LR + RR + OV
- AV_CH_FRONT_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_BACK_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT, ///< 6, CF+ CR+LF+ RF+LR + RR
- AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER|AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, ///< 7, CL + C + CR + L + R + SL + SR
- AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER|AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT, ///< 8, CL + CR + L + R + SL1 + SL2+ SR1 + SR2
- AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER|AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_BACK_CENTER|AV_CH_SIDE_RIGHT, ///< 8, CL + C+ CR + L + R + SL + S+ SR
+ AV_CH_FRONT_CENTER, ///< 1, A
+ AV_CH_LAYOUT_STEREO, ///< 2, A + B (dual mono)
+ AV_CH_LAYOUT_STEREO, ///< 2, L + R (stereo)
+ AV_CH_LAYOUT_STEREO, ///< 2, (L + R) + (L - R) (sum-difference)
+ AV_CH_LAYOUT_STEREO, ///< 2, LT + RT (left and right total)
+ AV_CH_LAYOUT_STEREO | AV_CH_FRONT_CENTER, ///< 3, C + L + R
+ AV_CH_LAYOUT_STEREO | AV_CH_BACK_CENTER, ///< 3, L + R + S
+ AV_CH_LAYOUT_STEREO | AV_CH_FRONT_CENTER | AV_CH_BACK_CENTER, ///< 4, C + L + R + S
+ AV_CH_LAYOUT_STEREO | AV_CH_SIDE_LEFT | AV_CH_SIDE_RIGHT, ///< 4, L + R + SL + SR
+
+ AV_CH_LAYOUT_STEREO | AV_CH_FRONT_CENTER | AV_CH_SIDE_LEFT |
+ AV_CH_SIDE_RIGHT, ///< 5, C + L + R + SL + SR
+
+ AV_CH_LAYOUT_STEREO | AV_CH_SIDE_LEFT | AV_CH_SIDE_RIGHT |
+ AV_CH_FRONT_LEFT_OF_CENTER | AV_CH_FRONT_RIGHT_OF_CENTER, ///< 6, CL + CR + L + R + SL + SR
+
+ AV_CH_LAYOUT_STEREO | AV_CH_BACK_LEFT | AV_CH_BACK_RIGHT |
+ AV_CH_FRONT_CENTER | AV_CH_BACK_CENTER, ///< 6, C + L + R + LR + RR + OV
+
+ AV_CH_FRONT_CENTER | AV_CH_FRONT_RIGHT_OF_CENTER |
+ AV_CH_FRONT_LEFT_OF_CENTER | AV_CH_BACK_CENTER |
+ AV_CH_BACK_LEFT | AV_CH_BACK_RIGHT, ///< 6, CF + CR + LF + RF + LR + RR
+
+ AV_CH_FRONT_LEFT_OF_CENTER | AV_CH_FRONT_CENTER |
+ AV_CH_FRONT_RIGHT_OF_CENTER | AV_CH_LAYOUT_STEREO |
+ AV_CH_SIDE_LEFT | AV_CH_SIDE_RIGHT, ///< 7, CL + C + CR + L + R + SL + SR
+
+ AV_CH_FRONT_LEFT_OF_CENTER | AV_CH_FRONT_RIGHT_OF_CENTER |
+ AV_CH_LAYOUT_STEREO | AV_CH_SIDE_LEFT | AV_CH_SIDE_RIGHT |
+ AV_CH_BACK_LEFT | AV_CH_BACK_RIGHT, ///< 8, CL + CR + L + R + SL1 + SL2 + SR1 + SR2
+
+ AV_CH_FRONT_LEFT_OF_CENTER | AV_CH_FRONT_CENTER |
+ AV_CH_FRONT_RIGHT_OF_CENTER | AV_CH_LAYOUT_STEREO |
+ AV_CH_SIDE_LEFT | AV_CH_BACK_CENTER | AV_CH_SIDE_RIGHT, ///< 8, CL + C + CR + L + R + SL + S + SR
};
static const int8_t dca_lfe_index[] = {
- 1,2,2,2,2,3,2,3,2,3,2,3,1,3,2,3
+ 1, 2, 2, 2, 2, 3, 2, 3, 2, 3, 2, 3, 1, 3, 2, 3
};
static const int8_t dca_channel_reorder_lfe[][9] = {
@@ -227,19 +244,19 @@ static const int8_t dca_channel_reorder_nolfe_xch[][9] = {
{ 3, 2, 4, 0, 1, 5, 8, 7, 6},
};
-#define DCA_DOLBY 101 /* FIXME */
+#define DCA_DOLBY 101 /* FIXME */
-#define DCA_CHANNEL_BITS 6
-#define DCA_CHANNEL_MASK 0x3F
+#define DCA_CHANNEL_BITS 6
+#define DCA_CHANNEL_MASK 0x3F
-#define DCA_LFE 0x80
+#define DCA_LFE 0x80
-#define HEADER_SIZE 14
+#define HEADER_SIZE 14
-#define DCA_MAX_FRAME_SIZE 16384
-#define DCA_MAX_EXSS_HEADER_SIZE 4096
+#define DCA_MAX_FRAME_SIZE 16384
+#define DCA_MAX_EXSS_HEADER_SIZE 4096
-#define DCA_BUFFER_PADDING_SIZE 1024
+#define DCA_BUFFER_PADDING_SIZE 1024
/** Bit allocation */
typedef struct {
@@ -254,9 +271,11 @@ static BitAlloc dca_tmode; ///< transition mode VLCs
static BitAlloc dca_scalefactor; ///< scalefactor VLCs
static BitAlloc dca_smpl_bitalloc[11]; ///< samples VLCs
-static av_always_inline int get_bitalloc(GetBitContext *gb, BitAlloc *ba, int idx)
+static av_always_inline int get_bitalloc(GetBitContext *gb, BitAlloc *ba,
+ int idx)
{
- return get_vlc2(gb, ba->vlc[idx].table, ba->vlc[idx].bits, ba->wrap) + ba->offset;
+ return get_vlc2(gb, ba->vlc[idx].table, ba->vlc[idx].bits, ba->wrap) +
+ ba->offset;
}
typedef struct {
@@ -306,8 +325,8 @@ typedef struct {
float scalefactor_adj[DCA_PRIM_CHANNELS_MAX][DCA_ABITS_MAX]; ///< scale factor adjustment
/* Primary audio coding side information */
- int subsubframes[DCA_SUBFRAMES_MAX]; ///< number of subsubframes
- int partial_samples[DCA_SUBFRAMES_MAX]; ///< partial subsubframe samples count
+ int subsubframes[DCA_SUBFRAMES_MAX]; ///< number of subsubframes
+ int partial_samples[DCA_SUBFRAMES_MAX]; ///< partial subsubframe samples count
int prediction_mode[DCA_PRIM_CHANNELS_MAX][DCA_SUBBANDS]; ///< prediction mode (ADPCM used or not)
int prediction_vq[DCA_PRIM_CHANNELS_MAX][DCA_SUBBANDS]; ///< prediction VQ coefs
int bitalloc[DCA_PRIM_CHANNELS_MAX][DCA_SUBBANDS]; ///< bit allocation index
@@ -334,13 +353,13 @@ typedef struct {
float scale_bias; ///< output scale
DECLARE_ALIGNED(32, float, subband_samples)[DCA_BLOCKS_MAX][DCA_PRIM_CHANNELS_MAX][DCA_SUBBANDS][8];
- DECLARE_ALIGNED(32, float, samples)[(DCA_PRIM_CHANNELS_MAX+1)*256];
- const float *samples_chanptr[DCA_PRIM_CHANNELS_MAX+1];
+ DECLARE_ALIGNED(32, float, samples)[(DCA_PRIM_CHANNELS_MAX + 1) * 256];
+ const float *samples_chanptr[DCA_PRIM_CHANNELS_MAX + 1];
uint8_t dca_buffer[DCA_MAX_FRAME_SIZE + DCA_MAX_EXSS_HEADER_SIZE + DCA_BUFFER_PADDING_SIZE];
int dca_buffer_size; ///< how much data is in the dca_buffer
- const int8_t* channel_order_tab; ///< channel reordering table, lfe and non lfe
+ const int8_t *channel_order_tab; ///< channel reordering table, lfe and non lfe
GetBitContext gb;
/* Current position in DCA frame */
int current_subframe;
@@ -415,13 +434,15 @@ static av_cold void dca_init_vlcs(void)
}
for (i = 0; i < 10; i++)
- for (j = 0; j < 7; j++){
- if (!bitalloc_codes[i][j]) break;
- dca_smpl_bitalloc[i+1].offset = bitalloc_offsets[i];
- dca_smpl_bitalloc[i+1].wrap = 1 + (j > 4);
- dca_smpl_bitalloc[i+1].vlc[j].table = &dca_table[dca_vlc_offs[c]];
- dca_smpl_bitalloc[i+1].vlc[j].table_allocated = dca_vlc_offs[c + 1] - dca_vlc_offs[c];
- init_vlc(&dca_smpl_bitalloc[i+1].vlc[j], bitalloc_maxbits[i][j],
+ for (j = 0; j < 7; j++) {
+ if (!bitalloc_codes[i][j])
+ break;
+ dca_smpl_bitalloc[i + 1].offset = bitalloc_offsets[i];
+ dca_smpl_bitalloc[i + 1].wrap = 1 + (j > 4);
+ dca_smpl_bitalloc[i + 1].vlc[j].table = &dca_table[dca_vlc_offs[c]];
+ dca_smpl_bitalloc[i + 1].vlc[j].table_allocated = dca_vlc_offs[c + 1] - dca_vlc_offs[c];
+
+ init_vlc(&dca_smpl_bitalloc[i + 1].vlc[j], bitalloc_maxbits[i][j],
bitalloc_sizes[i],
bitalloc_bits[i][j], 1, 1,
bitalloc_codes[i][j], 2, 2, INIT_VLC_USE_NEW_STATIC);
@@ -432,19 +453,19 @@ static av_cold void dca_init_vlcs(void)
static inline void get_array(GetBitContext *gb, int *dst, int len, int bits)
{
- while(len--)
+ while (len--)
*dst++ = get_bits(gb, bits);
}
-static int dca_parse_audio_coding_header(DCAContext * s, int base_channel)
+static int dca_parse_audio_coding_header(DCAContext *s, int base_channel)
{
int i, j;
static const float adj_table[4] = { 1.0, 1.1250, 1.2500, 1.4375 };
static const int bitlen[11] = { 0, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3 };
- static const int thr[11] = { 0, 1, 3, 3, 3, 3, 7, 7, 7, 7, 7 };
+ static const int thr[11] = { 0, 1, 3, 3, 3, 3, 7, 7, 7, 7, 7 };
- s->total_channels = get_bits(&s->gb, 3) + 1 + base_channel;
- s->prim_channels = s->total_channels;
+ s->total_channels = get_bits(&s->gb, 3) + 1 + base_channel;
+ s->prim_channels = s->total_channels;
if (s->prim_channels > DCA_PRIM_CHANNELS_MAX)
s->prim_channels = DCA_PRIM_CHANNELS_MAX;
@@ -487,23 +508,28 @@ static int dca_parse_audio_coding_header(DCAContext * s, int base_channel)
get_bits(&s->gb, 16);
}
- s->current_subframe = 0;
+ s->current_subframe = 0;
s->current_subsubframe = 0;
#ifdef TRACE
av_log(s->avctx, AV_LOG_DEBUG, "subframes: %i\n", s->subframes);
av_log(s->avctx, AV_LOG_DEBUG, "prim channels: %i\n", s->prim_channels);
- for (i = base_channel; i < s->prim_channels; i++){
- av_log(s->avctx, AV_LOG_DEBUG, "subband activity: %i\n", s->subband_activity[i]);
- av_log(s->avctx, AV_LOG_DEBUG, "vq start subband: %i\n", s->vq_start_subband[i]);
- av_log(s->avctx, AV_LOG_DEBUG, "joint intensity: %i\n", s->joint_intensity[i]);
- av_log(s->avctx, AV_LOG_DEBUG, "transient mode codebook: %i\n", s->transient_huffman[i]);
- av_log(s->avctx, AV_LOG_DEBUG, "scale factor codebook: %i\n", s->scalefactor_huffman[i]);
- av_log(s->avctx, AV_LOG_DEBUG, "bit allocation quantizer: %i\n", s->bitalloc_huffman[i]);
+ for (i = base_channel; i < s->prim_channels; i++) {
+ av_log(s->avctx, AV_LOG_DEBUG, "subband activity: %i\n",
+ s->subband_activity[i]);
+ av_log(s->avctx, AV_LOG_DEBUG, "vq start subband: %i\n",
+ s->vq_start_subband[i]);
+ av_log(s->avctx, AV_LOG_DEBUG, "joint intensity: %i\n",
+ s->joint_intensity[i]);
+ av_log(s->avctx, AV_LOG_DEBUG, "transient mode codebook: %i\n",
+ s->transient_huffman[i]);
+ av_log(s->avctx, AV_LOG_DEBUG, "scale factor codebook: %i\n",
+ s->scalefactor_huffman[i]);
+ av_log(s->avctx, AV_LOG_DEBUG, "bit allocation quantizer: %i\n",
+ s->bitalloc_huffman[i]);
av_log(s->avctx, AV_LOG_DEBUG, "quant index huff:");
for (j = 0; j < 11; j++)
- av_log(s->avctx, AV_LOG_DEBUG, " %i",
- s->quant_index_huffman[i][j]);
+ av_log(s->avctx, AV_LOG_DEBUG, " %i", s->quant_index_huffman[i][j]);
av_log(s->avctx, AV_LOG_DEBUG, "\n");
av_log(s->avctx, AV_LOG_DEBUG, "scalefac adj:");
for (j = 0; j < 11; j++)
@@ -512,10 +538,10 @@ static int dca_parse_audio_coding_header(DCAContext * s, int base_channel)
}
#endif
- return 0;
+ return 0;
}
-static int dca_parse_frame_header(DCAContext * s)
+static int dca_parse_frame_header(DCAContext *s)
{
init_get_bits(&s->gb, s->dca_buffer, s->dca_buffer_size * 8);
@@ -564,7 +590,8 @@ static int dca_parse_frame_header(DCAContext * s)
/* FIXME: channels mixing levels */
s->output = s->amode;
- if (s->lfe) s->output |= DCA_LFE;
+ if (s->lfe)
+ s->output |= DCA_LFE;
#ifdef TRACE
av_log(s->avctx, AV_LOG_DEBUG, "frame type: %i\n", s->frame_type);
@@ -613,15 +640,15 @@ static int dca_parse_frame_header(DCAContext * s)
static inline int get_scale(GetBitContext *gb, int level, int value)
{
- if (level < 5) {
- /* huffman encoded */
- value += get_bitalloc(gb, &dca_scalefactor, level);
- } else if (level < 8)
- value = get_bits(gb, level + 1);
- return value;
+ if (level < 5) {
+ /* huffman encoded */
+ value += get_bitalloc(gb, &dca_scalefactor, level);
+ } else if (level < 8)
+ value = get_bits(gb, level + 1);
+ return value;
}
-static int dca_subframe_header(DCAContext * s, int base_channel, int block_index)
+static int dca_subframe_header(DCAContext *s, int base_channel, int block_index)
{
/* Primary audio coding side information */
int j, k;
@@ -630,7 +657,7 @@ static int dca_subframe_header(DCAContext * s, int base_channel, int block_index
return AVERROR_INVALIDDATA;
if (!base_channel) {
- s->subsubframes[s->current_subframe] = get_bits(&s->gb, 2) + 1;
+ s->subsubframes[s->current_subframe] = get_bits(&s->gb, 2) + 1;
s->partial_samples[s->current_subframe] = get_bits(&s->gb, 3);
}
@@ -666,8 +693,8 @@ static int dca_subframe_header(DCAContext * s, int base_channel, int block_index
}
if (s->bitalloc[j][k] > 26) {
-// av_log(s->avctx,AV_LOG_DEBUG,"bitalloc index [%i][%i] too big (%i)\n",
-// j, k, s->bitalloc[j][k]);
+ // av_log(s->avctx, AV_LOG_DEBUG, "bitalloc index [%i][%i] too big (%i)\n",
+ // j, k, s->bitalloc[j][k]);
return AVERROR_INVALIDDATA;
}
}
@@ -692,7 +719,8 @@ static int dca_subframe_header(DCAContext * s, int base_channel, int block_index
const uint32_t *scale_table;
int scale_sum;
- memset(s->scale_factor[j], 0, s->subband_activity[j] * sizeof(s->scale_factor[0][0][0]) * 2);
+ memset(s->scale_factor[j], 0,
+ s->subband_activity[j] * sizeof(s->scale_factor[0][0][0]) * 2);
if (s->scalefactor_huffman[j] == 6)
scale_table = scale_factor_quant7;
@@ -810,9 +838,11 @@ static int dca_subframe_header(DCAContext * s, int base_channel, int block_index
}
#ifdef TRACE
- av_log(s->avctx, AV_LOG_DEBUG, "subsubframes: %i\n", s->subsubframes[s->current_subframe]);
+ av_log(s->avctx, AV_LOG_DEBUG, "subsubframes: %i\n",
+ s->subsubframes[s->current_subframe]);
av_log(s->avctx, AV_LOG_DEBUG, "partial samples: %i\n",
s->partial_samples[s->current_subframe]);
+
for (j = base_channel; j < s->prim_channels; j++) {
av_log(s->avctx, AV_LOG_DEBUG, "prediction mode:");
for (k = 0; k < s->subband_activity[j]; k++)
@@ -821,12 +851,12 @@ static int dca_subframe_header(DCAContext * s, int base_channel, int block_index
}
for (j = base_channel; j < s->prim_channels; j++) {
for (k = 0; k < s->subband_activity[j]; k++)
- av_log(s->avctx, AV_LOG_DEBUG,
- "prediction coefs: %f, %f, %f, %f\n",
- (float) adpcm_vb[s->prediction_vq[j][k]][0] / 8192,
- (float) adpcm_vb[s->prediction_vq[j][k]][1] / 8192,
- (float) adpcm_vb[s->prediction_vq[j][k]][2] / 8192,
- (float) adpcm_vb[s->prediction_vq[j][k]][3] / 8192);
+ av_log(s->avctx, AV_LOG_DEBUG,
+ "prediction coefs: %f, %f, %f, %f\n",
+ (float) adpcm_vb[s->prediction_vq[j][k]][0] / 8192,
+ (float) adpcm_vb[s->prediction_vq[j][k]][1] / 8192,
+ (float) adpcm_vb[s->prediction_vq[j][k]][2] / 8192,
+ (float) adpcm_vb[s->prediction_vq[j][k]][3] / 8192);
}
for (j = base_channel; j < s->prim_channels; j++) {
av_log(s->avctx, AV_LOG_DEBUG, "bitalloc index: ");
@@ -862,8 +892,10 @@ static int dca_subframe_header(DCAContext * s, int base_channel, int block_index
if (!base_channel && s->prim_channels > 2 && s->downmix) {
av_log(s->avctx, AV_LOG_DEBUG, "Downmix coeffs:\n");
for (j = 0; j < s->prim_channels; j++) {
- av_log(s->avctx, AV_LOG_DEBUG, "Channel 0,%d = %f\n", j, dca_downmix_coeffs[s->downmix_coef[j][0]]);
- av_log(s->avctx, AV_LOG_DEBUG, "Channel 1,%d = %f\n", j, dca_downmix_coeffs[s->downmix_coef[j][1]]);
+ av_log(s->avctx, AV_LOG_DEBUG, "Channel 0, %d = %f\n", j,
+ dca_downmix_coeffs[s->downmix_coef[j][0]]);
+ av_log(s->avctx, AV_LOG_DEBUG, "Channel 1, %d = %f\n", j,
+ dca_downmix_coeffs[s->downmix_coef[j][1]]);
}
av_log(s->avctx, AV_LOG_DEBUG, "\n");
}
@@ -884,7 +916,7 @@ static int dca_subframe_header(DCAContext * s, int base_channel, int block_index
return 0;
}
-static void qmf_32_subbands(DCAContext * s, int chans,
+static void qmf_32_subbands(DCAContext *s, int chans,
float samples_in[32][8], float *samples_out,
float scale)
{
@@ -894,7 +926,7 @@ static void qmf_32_subbands(DCAContext * s, int chans,
int sb_act = s->subband_activity[chans];
int subindex;
- scale *= sqrt(1/8.0);
+ scale *= sqrt(1 / 8.0);
/* Select filter */
if (!s->multirate_inter) /* Non-perfect reconstruction */
@@ -908,18 +940,18 @@ static void qmf_32_subbands(DCAContext * s, int chans,
/* Reconstructed channel sample index */
for (subindex = 0; subindex < 8; subindex++) {
/* Load in one sample from each subband and clear inactive subbands */
- for (i = 0; i < sb_act; i++){
+ for (i = 0; i < sb_act; i++) {
unsigned sign = (i - 1) & 2;
- uint32_t v = AV_RN32A(&samples_in[i][subindex]) ^ sign << 30;
+ uint32_t v = AV_RN32A(&samples_in[i][subindex]) ^ sign << 30;
AV_WN32A(&s->raXin[i], v);
}
s->synth.synth_filter_float(&s->imdct,
- s->subband_fir_hist[chans], &s->hist_index[chans],
- s->subband_fir_noidea[chans], prCoeff,
- samples_out, s->raXin, scale);
- samples_out+= 32;
-
+ s->subband_fir_hist[chans],
+ &s->hist_index[chans],
+ s->subband_fir_noidea[chans], prCoeff,
+ samples_out, s->raXin, scale);
+ samples_out += 32;
}
}
@@ -949,45 +981,44 @@ static void lfe_interpolation_fir(DCAContext *s, int decimation_select,
}
/* Interpolation */
for (deciindex = 0; deciindex < num_deci_sample; deciindex++) {
- s->dcadsp.lfe_fir(samples_out, samples_in, prCoeff, decifactor,
- scale);
+ s->dcadsp.lfe_fir(samples_out, samples_in, prCoeff, decifactor, scale);
samples_in++;
samples_out += 2 * decifactor;
}
}
/* downmixing routines */
-#define MIX_REAR1(samples, si1, rs, coef) \
- samples[i] += samples[si1] * coef[rs][0]; \
- samples[i+256] += samples[si1] * coef[rs][1];
-
-#define MIX_REAR2(samples, si1, si2, rs, coef) \
- samples[i] += samples[si1] * coef[rs][0] + samples[si2] * coef[rs+1][0]; \
- samples[i+256] += samples[si1] * coef[rs][1] + samples[si2] * coef[rs+1][1];
-
-#define MIX_FRONT3(samples, coef) \
- t = samples[i+c]; \
- u = samples[i+l]; \
- v = samples[i+r]; \
+#define MIX_REAR1(samples, si1, rs, coef) \
+ samples[i] += samples[si1] * coef[rs][0]; \
+ samples[i+256] += samples[si1] * coef[rs][1];
+
+#define MIX_REAR2(samples, si1, si2, rs, coef) \
+ samples[i] += samples[si1] * coef[rs][0] + samples[si2] * coef[rs + 1][0]; \
+ samples[i+256] += samples[si1] * coef[rs][1] + samples[si2] * coef[rs + 1][1];
+
+#define MIX_FRONT3(samples, coef) \
+ t = samples[i + c]; \
+ u = samples[i + l]; \
+ v = samples[i + r]; \
samples[i] = t * coef[0][0] + u * coef[1][0] + v * coef[2][0]; \
samples[i+256] = t * coef[0][1] + u * coef[1][1] + v * coef[2][1];
-#define DOWNMIX_TO_STEREO(op1, op2) \
- for (i = 0; i < 256; i++){ \
- op1 \
- op2 \
+#define DOWNMIX_TO_STEREO(op1, op2) \
+ for (i = 0; i < 256; i++) { \
+ op1 \
+ op2 \
}
static void dca_downmix(float *samples, int srcfmt,
int downmix_coef[DCA_PRIM_CHANNELS_MAX][2],
const int8_t *channel_mapping)
{
- int c,l,r,sl,sr,s;
+ int c, l, r, sl, sr, s;
int i;
float t, u, v;
float coef[DCA_PRIM_CHANNELS_MAX][2];
- for (i=0; i<DCA_PRIM_CHANNELS_MAX; i++) {
+ for (i = 0; i < DCA_PRIM_CHANNELS_MAX; i++) {
coef[i][0] = dca_downmix_coeffs[downmix_coef[i][0]];
coef[i][1] = dca_downmix_coeffs[downmix_coef[i][1]];
}
@@ -1006,11 +1037,11 @@ static void dca_downmix(float *samples, int srcfmt,
c = channel_mapping[0] * 256;
l = channel_mapping[1] * 256;
r = channel_mapping[2] * 256;
- DOWNMIX_TO_STEREO(MIX_FRONT3(samples, coef),);
+ DOWNMIX_TO_STEREO(MIX_FRONT3(samples, coef), );
break;
case DCA_2F1R:
s = channel_mapping[2] * 256;
- DOWNMIX_TO_STEREO(MIX_REAR1(samples, i + s, 2, coef),);
+ DOWNMIX_TO_STEREO(MIX_REAR1(samples, i + s, 2, coef), );
break;
case DCA_3F1R:
c = channel_mapping[0] * 256;
@@ -1023,12 +1054,12 @@ static void dca_downmix(float *samples, int srcfmt,
case DCA_2F2R:
sl = channel_mapping[2] * 256;
sr = channel_mapping[3] * 256;
- DOWNMIX_TO_STEREO(MIX_REAR2(samples, i + sl, i + sr, 2, coef),);
+ DOWNMIX_TO_STEREO(MIX_REAR2(samples, i + sl, i + sr, 2, coef), );
break;
case DCA_3F2R:
- c = channel_mapping[0] * 256;
- l = channel_mapping[1] * 256;
- r = channel_mapping[2] * 256;
+ c = channel_mapping[0] * 256;
+ l = channel_mapping[1] * 256;
+ r = channel_mapping[2] * 256;
sl = channel_mapping[3] * 256;
sr = channel_mapping[4] * 256;
DOWNMIX_TO_STEREO(MIX_FRONT3(samples, coef),
@@ -1048,7 +1079,7 @@ static int decode_blockcode(int code, int levels, int *values)
for (i = 0; i < 4; i++) {
int div = FASTDIV(code, levels);
- values[i] = code - offset - div*levels;
+ values[i] = code - offset - div * levels;
code = div;
}
@@ -1062,8 +1093,8 @@ static int decode_blockcodes(int code1, int code2, int levels, int *values)
}
#endif
-static const uint8_t abits_sizes[7] = { 7, 10, 12, 13, 15, 17, 19 };
-static const uint8_t abits_levels[7] = { 3, 5, 7, 9, 13, 17, 25 };
+static const uint8_t abits_sizes[7] = { 7, 10, 12, 13, 15, 17, 19 };
+static const uint8_t abits_levels[7] = { 3, 5, 7, 9, 13, 17, 25 };
#ifndef int8x8_fmul_int32
static inline void int8x8_fmul_int32(float *dst, const int8_t *src, int scale)
@@ -1075,7 +1106,7 @@ static inline void int8x8_fmul_int32(float *dst, const int8_t *src, int scale)
}
#endif
-static int dca_subsubframe(DCAContext * s, int base_channel, int block_index)
+static int dca_subsubframe(DCAContext *s, int base_channel, int block_index)
{
int k, l;
int subsubframe = s->current_subsubframe;
@@ -1118,20 +1149,21 @@ static int dca_subsubframe(DCAContext * s, int base_channel, int block_index)
/*
* Extract bits from the bit stream
*/
- if (!abits){
+ if (!abits) {
memset(subband_samples[k][l], 0, 8 * sizeof(subband_samples[0][0][0]));
} else {
/* Deal with transients */
int sfi = s->transition_mode[k][l] && subsubframe >= s->transition_mode[k][l];
- float rscale = quant_step_size * s->scale_factor[k][l][sfi] * s->scalefactor_adj[k][sel];
+ float rscale = quant_step_size * s->scale_factor[k][l][sfi] *
+ s->scalefactor_adj[k][sel];
- if (abits >= 11 || !dca_smpl_bitalloc[abits].vlc[sel].table){
- if (abits <= 7){
+ if (abits >= 11 || !dca_smpl_bitalloc[abits].vlc[sel].table) {
+ if (abits <= 7) {
/* Block code */
int block_code1, block_code2, size, levels, err;
- size = abits_sizes[abits-1];
- levels = abits_levels[abits-1];
+ size = abits_sizes[abits - 1];
+ levels = abits_levels[abits - 1];
block_code1 = get_bits(&s->gb, size);
block_code2 = get_bits(&s->gb, size);
@@ -1142,19 +1174,20 @@ static int dca_subsubframe(DCAContext * s, int base_channel, int block_index)
"ERROR: block code look-up failed\n");
return AVERROR_INVALIDDATA;
}
- }else{
+ } else {
/* no coding */
for (m = 0; m < 8; m++)
block[m] = get_sbits(&s->gb, abits - 3);
}
- }else{
+ } else {
/* Huffman coded */
for (m = 0; m < 8; m++)
- block[m] = get_bitalloc(&s->gb, &dca_smpl_bitalloc[abits], sel);
+ block[m] = get_bitalloc(&s->gb,
+ &dca_smpl_bitalloc[abits], sel);
}
s->fmt_conv.int32_to_float_fmul_scalar(subband_samples[k][l],
- block, rscale, 8);
+ block, rscale, 8);
}
/*
@@ -1171,8 +1204,7 @@ static int dca_subsubframe(DCAContext * s, int base_channel, int block_index)
else if (s->predictor_history)
subband_samples[k][l][m] +=
(adpcm_vb[s->prediction_vq[k][l]][n - 1] *
- s->subband_samples_hist[k][l][m - n +
- 4] / 8192);
+ s->subband_samples_hist[k][l][m - n + 4] / 8192);
}
}
}
@@ -1186,7 +1218,8 @@ static int dca_subsubframe(DCAContext * s, int base_channel, int block_index)
int hfvq = s->high_freq_vq[k][l];
if (!s->debug_flag & 0x01) {
- av_log(s->avctx, AV_LOG_DEBUG, "Stream with high frequencies VQ coding\n");
+ av_log(s->avctx, AV_LOG_DEBUG,
+ "Stream with high frequencies VQ coding\n");
s->debug_flag |= 0x01;
}
@@ -1210,23 +1243,25 @@ static int dca_subsubframe(DCAContext * s, int base_channel, int block_index)
/* Backup predictor history for adpcm */
for (k = base_channel; k < s->prim_channels; k++)
for (l = 0; l < s->vq_start_subband[k]; l++)
- memcpy(s->subband_samples_hist[k][l], &subband_samples[k][l][4],
- 4 * sizeof(subband_samples[0][0][0]));
+ memcpy(s->subband_samples_hist[k][l],
+ &subband_samples[k][l][4],
+ 4 * sizeof(subband_samples[0][0][0]));
return 0;
}
-static int dca_filter_channels(DCAContext * s, int block_index)
+static int dca_filter_channels(DCAContext *s, int block_index)
{
float (*subband_samples)[DCA_SUBBANDS][8] = s->subband_samples[block_index];
int k;
/* 32 subbands QMF */
for (k = 0; k < s->prim_channels; k++) {
-/* static float pcm_to_double[8] =
- {32768.0, 32768.0, 524288.0, 524288.0, 0, 8388608.0, 8388608.0};*/
- qmf_32_subbands(s, k, subband_samples[k], &s->samples[256 * s->channel_order_tab[k]],
- M_SQRT1_2*s->scale_bias /*pcm_to_double[s->source_pcm_res] */ );
+/* static float pcm_to_double[8] = { 32768.0, 32768.0, 524288.0, 524288.0,
+ 0, 8388608.0, 8388608.0 };*/
+ qmf_32_subbands(s, k, subband_samples[k],
+ &s->samples[256 * s->channel_order_tab[k]],
+ M_SQRT1_2 * s->scale_bias /* pcm_to_double[s->source_pcm_res] */);
}
/* Down mixing */
@@ -1239,7 +1274,7 @@ static int dca_filter_channels(DCAContext * s, int block_index)
lfe_interpolation_fir(s, s->lfe, 2 * s->lfe,
s->lfe_data + 2 * s->lfe * (block_index + 4),
&s->samples[256 * dca_lfe_index[s->amode]],
- (1.0/256.0)*s->scale_bias);
+ (1.0 / 256.0) * s->scale_bias);
/* Outputs 20bits pcm samples */
}
@@ -1247,7 +1282,7 @@ static int dca_filter_channels(DCAContext * s, int block_index)
}
-static int dca_subframe_footer(DCAContext * s, int base_channel)
+static int dca_subframe_footer(DCAContext *s, int base_channel)
{
int aux_data_count = 0, i;
@@ -1279,7 +1314,7 @@ static int dca_subframe_footer(DCAContext * s, int base_channel)
* @param s pointer to the DCAContext
*/
-static int dca_decode_block(DCAContext * s, int base_channel, int block_index)
+static int dca_decode_block(DCAContext *s, int base_channel, int block_index)
{
int ret;
@@ -1327,8 +1362,8 @@ static int dca_decode_block(DCAContext * s, int base_channel, int block_index)
/**
* Convert bitstream to one representation based on sync marker
*/
-static int dca_convert_bitstream(const uint8_t * src, int src_size, uint8_t * dst,
- int max_size)
+static int dca_convert_bitstream(const uint8_t *src, int src_size, uint8_t *dst,
+ int max_size)
{
uint32_t mrk;
int i, tmp;
@@ -1336,7 +1371,7 @@ static int dca_convert_bitstream(const uint8_t * src, int src_size, uint8_t * ds
uint16_t *sdst = (uint16_t *) dst;
PutBitContext pb;
- if ((unsigned)src_size > (unsigned)max_size) {
+ if ((unsigned) src_size > (unsigned) max_size) {
// av_log(NULL, AV_LOG_ERROR, "Input frame size larger than DCA_MAX_FRAME_SIZE!\n");
// return -1;
src_size = max_size;
@@ -1371,18 +1406,16 @@ static int dca_convert_bitstream(const uint8_t * src, int src_size, uint8_t * ds
static int dca_exss_mask2count(int mask)
{
/* count bits that mean speaker pairs twice */
- return av_popcount(mask)
- + av_popcount(mask & (
- DCA_EXSS_CENTER_LEFT_RIGHT
- | DCA_EXSS_FRONT_LEFT_RIGHT
- | DCA_EXSS_FRONT_HIGH_LEFT_RIGHT
- | DCA_EXSS_WIDE_LEFT_RIGHT
- | DCA_EXSS_SIDE_LEFT_RIGHT
- | DCA_EXSS_SIDE_HIGH_LEFT_RIGHT
- | DCA_EXSS_SIDE_REAR_LEFT_RIGHT
- | DCA_EXSS_REAR_LEFT_RIGHT
- | DCA_EXSS_REAR_HIGH_LEFT_RIGHT
- ));
+ return av_popcount(mask) +
+ av_popcount(mask & (DCA_EXSS_CENTER_LEFT_RIGHT |
+ DCA_EXSS_FRONT_LEFT_RIGHT |
+ DCA_EXSS_FRONT_HIGH_LEFT_RIGHT |
+ DCA_EXSS_WIDE_LEFT_RIGHT |
+ DCA_EXSS_SIDE_LEFT_RIGHT |
+ DCA_EXSS_SIDE_HIGH_LEFT_RIGHT |
+ DCA_EXSS_SIDE_REAR_LEFT_RIGHT |
+ DCA_EXSS_REAR_LEFT_RIGHT |
+ DCA_EXSS_REAR_HIGH_LEFT_RIGHT));
}
/**
@@ -1408,7 +1441,7 @@ static int dca_exss_parse_asset_header(DCAContext *s)
int header_size;
int channels;
int embedded_stereo = 0;
- int embedded_6ch = 0;
+ int embedded_6ch = 0;
int drc_code_present;
int extensions_mask;
int i, j;
@@ -1543,7 +1576,8 @@ static int dca_exss_parse_asset_header(DCAContext *s)
if (!(extensions_mask & DCA_EXT_CORE))
av_log(s->avctx, AV_LOG_WARNING, "DTS core detection mismatch.\n");
if ((extensions_mask & DCA_CORE_EXTS) != s->core_ext_mask)
- av_log(s->avctx, AV_LOG_WARNING, "DTS extensions detection mismatch (%d, %d)\n",
+ av_log(s->avctx, AV_LOG_WARNING,
+ "DTS extensions detection mismatch (%d, %d)\n",
extensions_mask & DCA_CORE_EXTS, s->core_ext_mask);
return 0;
@@ -1568,7 +1602,7 @@ static void dca_exss_parse_header(DCAContext *s)
ss_index = get_bits(&s->gb, 2);
blownup = get_bits1(&s->gb);
- skip_bits(&s->gb, 8 + 4 * blownup); // header_size
+ skip_bits(&s->gb, 8 + 4 * blownup); // header_size
skip_bits(&s->gb, 16 + 4 * blownup); // hd_size
s->static_fields = get_bits1(&s->gb);
@@ -1609,18 +1643,18 @@ static void dca_exss_parse_header(DCAContext *s)
int mix_out_mask_size;
skip_bits(&s->gb, 2); // adjustment level
- mix_out_mask_size = (get_bits(&s->gb, 2) + 1) << 2;
- s->num_mix_configs = get_bits(&s->gb, 2) + 1;
+ mix_out_mask_size = (get_bits(&s->gb, 2) + 1) << 2;
+ s->num_mix_configs = get_bits(&s->gb, 2) + 1;
for (i = 0; i < s->num_mix_configs; i++) {
- int mix_out_mask = get_bits(&s->gb, mix_out_mask_size);
+ int mix_out_mask = get_bits(&s->gb, mix_out_mask_size);
s->mix_config_num_ch[i] = dca_exss_mask2count(mix_out_mask);
}
}
}
for (i = 0; i < num_assets; i++)
- skip_bits_long(&s->gb, 16 + 4 * blownup); // asset size
+ skip_bits_long(&s->gb, 16 + 4 * blownup); // asset size
for (i = 0; i < num_assets; i++) {
if (dca_exss_parse_asset_header(s))
@@ -1667,8 +1701,8 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data,
}
//set AVCodec values with parsed data
avctx->sample_rate = s->sample_rate;
- avctx->bit_rate = s->bit_rate;
- avctx->frame_size = s->sample_blocks * 32;
+ avctx->bit_rate = s->bit_rate;
+ avctx->frame_size = s->sample_blocks * 32;
s->profile = FF_PROFILE_DTS;
@@ -1700,72 +1734,71 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data,
/* extensions start at 32-bit boundaries into bitstream */
skip_bits_long(&s->gb, (-get_bits_count(&s->gb)) & 31);
- while(core_ss_end - get_bits_count(&s->gb) >= 32) {
- uint32_t bits = get_bits_long(&s->gb, 32);
+ while (core_ss_end - get_bits_count(&s->gb) >= 32) {
+ uint32_t bits = get_bits_long(&s->gb, 32);
- switch(bits) {
- case 0x5a5a5a5a: {
- int ext_amode, xch_fsize;
+ switch (bits) {
+ case 0x5a5a5a5a: {
+ int ext_amode, xch_fsize;
- s->xch_base_channel = s->prim_channels;
+ s->xch_base_channel = s->prim_channels;
- /* validate sync word using XCHFSIZE field */
- xch_fsize = show_bits(&s->gb, 10);
- if((s->frame_size != (get_bits_count(&s->gb) >> 3) - 4 + xch_fsize) &&
- (s->frame_size != (get_bits_count(&s->gb) >> 3) - 4 + xch_fsize + 1))
- continue;
-
- /* skip length-to-end-of-frame field for the moment */
- skip_bits(&s->gb, 10);
-
- s->core_ext_mask |= DCA_EXT_XCH;
+ /* validate sync word using XCHFSIZE field */
+ xch_fsize = show_bits(&s->gb, 10);
+ if ((s->frame_size != (get_bits_count(&s->gb) >> 3) - 4 + xch_fsize) &&
+ (s->frame_size != (get_bits_count(&s->gb) >> 3) - 4 + xch_fsize + 1))
+ continue;
- /* extension amode should == 1, number of channels in extension */
- /* AFAIK XCh is not used for more channels */
- if ((ext_amode = get_bits(&s->gb, 4)) != 1) {
- av_log(avctx, AV_LOG_ERROR, "XCh extension amode %d not"
- " supported!\n",ext_amode);
- continue;
- }
+ /* skip length-to-end-of-frame field for the moment */
+ skip_bits(&s->gb, 10);
- /* much like core primary audio coding header */
- dca_parse_audio_coding_header(s, s->xch_base_channel);
+ s->core_ext_mask |= DCA_EXT_XCH;
- for (i = 0; i < (s->sample_blocks / 8); i++) {
- if ((ret = dca_decode_block(s, s->xch_base_channel, i))) {
- av_log(avctx, AV_LOG_ERROR, "error decoding XCh extension\n");
+ /* extension amode(number of channels in extension) should be 1 */
+ /* AFAIK XCh is not used for more channels */
+ if ((ext_amode = get_bits(&s->gb, 4)) != 1) {
+ av_log(avctx, AV_LOG_ERROR, "XCh extension amode %d not"
+ " supported!\n", ext_amode);
continue;
}
+
+ /* much like core primary audio coding header */
+ dca_parse_audio_coding_header(s, s->xch_base_channel);
+
+ for (i = 0; i < (s->sample_blocks / 8); i++)
+ if ((ret = dca_decode_block(s, s->xch_base_channel, i))) {
+ av_log(avctx, AV_LOG_ERROR, "error decoding XCh extension\n");
+ continue;
+ }
+
+ s->xch_present = 1;
+ break;
}
+ case 0x47004a03:
+ /* XXCh: extended channels */
+ /* usually found either in core or HD part in DTS-HD HRA streams,
+ * but not in DTS-ES which contains XCh extensions instead */
+ s->core_ext_mask |= DCA_EXT_XXCH;
+ break;
+
+ case 0x1d95f262: {
+ int fsize96 = show_bits(&s->gb, 12) + 1;
+ if (s->frame_size != (get_bits_count(&s->gb) >> 3) - 4 + fsize96)
+ continue;
- s->xch_present = 1;
- break;
- }
- case 0x47004a03:
- /* XXCh: extended channels */
- /* usually found either in core or HD part in DTS-HD HRA streams,
- * but not in DTS-ES which contains XCh extensions instead */
- s->core_ext_mask |= DCA_EXT_XXCH;
- break;
-
- case 0x1d95f262: {
- int fsize96 = show_bits(&s->gb, 12) + 1;
- if (s->frame_size != (get_bits_count(&s->gb) >> 3) - 4 + fsize96)
- continue;
-
- av_log(avctx, AV_LOG_DEBUG, "X96 extension found at %d bits\n", get_bits_count(&s->gb));
- skip_bits(&s->gb, 12);
- av_log(avctx, AV_LOG_DEBUG, "FSIZE96 = %d bytes\n", fsize96);
- av_log(avctx, AV_LOG_DEBUG, "REVNO = %d\n", get_bits(&s->gb, 4));
-
- s->core_ext_mask |= DCA_EXT_X96;
- break;
- }
- }
+ av_log(avctx, AV_LOG_DEBUG, "X96 extension found at %d bits\n",
+ get_bits_count(&s->gb));
+ skip_bits(&s->gb, 12);
+ av_log(avctx, AV_LOG_DEBUG, "FSIZE96 = %d bytes\n", fsize96);
+ av_log(avctx, AV_LOG_DEBUG, "REVNO = %d\n", get_bits(&s->gb, 4));
- skip_bits_long(&s->gb, (-get_bits_count(&s->gb)) & 31);
- }
+ s->core_ext_mask |= DCA_EXT_X96;
+ break;
+ }
+ }
+ skip_bits_long(&s->gb, (-get_bits_count(&s->gb)) & 31);
+ }
} else {
/* no supported extensions, skip the rest of the core substream */
skip_bits_long(&s->gb, core_ss_end - get_bits_count(&s->gb));
@@ -1777,15 +1810,15 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data,
s->profile = FF_PROFILE_DTS_ES;
/* check for ExSS (HD part) */
- if (s->dca_buffer_size - s->frame_size > 32
- && get_bits_long(&s->gb, 32) == DCA_HD_MARKER)
+ if (s->dca_buffer_size - s->frame_size > 32 &&
+ get_bits_long(&s->gb, 32) == DCA_HD_MARKER)
dca_exss_parse_header(s);
avctx->profile = s->profile;
channels = s->prim_channels + !!s->lfe;
- if (s->amode<16) {
+ if (s->amode < 16) {
avctx->channel_layout = dca_core_channel_layout[s->amode];
if (s->xch_present && (!avctx->request_channels ||
@@ -1821,7 +1854,7 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data,
s->channel_order_tab = dca_channel_order_native;
}
} else {
- av_log(avctx, AV_LOG_ERROR, "Non standard configuration %d !\n",s->amode);
+ av_log(avctx, AV_LOG_ERROR, "Non standard configuration %d !\n", s->amode);
return AVERROR_INVALIDDATA;
}
@@ -1837,8 +1870,8 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data,
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
- samples_flt = (float *)s->frame.data[0];
- samples_s16 = (int16_t *)s->frame.data[0];
+ samples_flt = (float *) s->frame.data[0];
+ samples_s16 = (int16_t *) s->frame.data[0];
/* filter to get final output */
for (i = 0; i < (s->sample_blocks / 8); i++) {
@@ -1846,10 +1879,10 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data,
/* If this was marked as a DTS-ES stream we need to subtract back- */
/* channel from SL & SR to remove matrixed back-channel signal */
- if((s->source_pcm_res & 1) && s->xch_present) {
- float* back_chan = s->samples + s->channel_order_tab[s->xch_base_channel] * 256;
- float* lt_chan = s->samples + s->channel_order_tab[s->xch_base_channel - 2] * 256;
- float* rt_chan = s->samples + s->channel_order_tab[s->xch_base_channel - 1] * 256;
+ if ((s->source_pcm_res & 1) && s->xch_present) {
+ float *back_chan = s->samples + s->channel_order_tab[s->xch_base_channel] * 256;
+ float *lt_chan = s->samples + s->channel_order_tab[s->xch_base_channel - 2] * 256;
+ float *rt_chan = s->samples + s->channel_order_tab[s->xch_base_channel - 1] * 256;
s->dsp.vector_fmac_scalar(lt_chan, back_chan, -M_SQRT1_2, 256);
s->dsp.vector_fmac_scalar(rt_chan, back_chan, -M_SQRT1_2, 256);
}
@@ -1868,12 +1901,11 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data,
/* update lfe history */
lfe_samples = 2 * s->lfe * (s->sample_blocks / 8);
- for (i = 0; i < 2 * s->lfe * 4; i++) {
+ for (i = 0; i < 2 * s->lfe * 4; i++)
s->lfe_data[i] = s->lfe_data[i + lfe_samples];
- }
- *got_frame_ptr = 1;
- *(AVFrame *)data = s->frame;
+ *got_frame_ptr = 1;
+ *(AVFrame *) data = s->frame;
return buf_size;
}
@@ -1886,7 +1918,7 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data,
* @param avctx pointer to the AVCodecContext
*/
-static av_cold int dca_decode_init(AVCodecContext * avctx)
+static av_cold int dca_decode_init(AVCodecContext *avctx)
{
DCAContext *s = avctx->priv_data;
int i;
@@ -1900,15 +1932,15 @@ static av_cold int dca_decode_init(AVCodecContext * avctx)
ff_dcadsp_init(&s->dcadsp);
ff_fmt_convert_init(&s->fmt_conv, avctx);
- for (i = 0; i < DCA_PRIM_CHANNELS_MAX+1; i++)
+ for (i = 0; i < DCA_PRIM_CHANNELS_MAX + 1; i++)
s->samples_chanptr[i] = s->samples + i * 256;
if (avctx->request_sample_fmt == AV_SAMPLE_FMT_FLT) {
avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
- s->scale_bias = 1.0 / 32768.0;
+ s->scale_bias = 1.0 / 32768.0;
} else {
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
- s->scale_bias = 1.0;
+ s->scale_bias = 1.0;
}
/* allow downmixing to stereo */
@@ -1923,7 +1955,7 @@ static av_cold int dca_decode_init(AVCodecContext * avctx)
return 0;
}
-static av_cold int dca_decode_end(AVCodecContext * avctx)
+static av_cold int dca_decode_end(AVCodecContext *avctx)
{
DCAContext *s = avctx->priv_data;
ff_mdct_end(&s->imdct);
@@ -1940,17 +1972,17 @@ static const AVProfile profiles[] = {
};
AVCodec ff_dca_decoder = {
- .name = "dca",
- .type = AVMEDIA_TYPE_AUDIO,
- .id = CODEC_ID_DTS,
- .priv_data_size = sizeof(DCAContext),
- .init = dca_decode_init,
- .decode = dca_decode_frame,
- .close = dca_decode_end,
- .long_name = NULL_IF_CONFIG_SMALL("DCA (DTS Coherent Acoustics)"),
- .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
- .sample_fmts = (const enum AVSampleFormat[]) {
- AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
- },
- .profiles = NULL_IF_CONFIG_SMALL(profiles),
+ .name = "dca",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = CODEC_ID_DTS,
+ .priv_data_size = sizeof(DCAContext),
+ .init = dca_decode_init,
+ .decode = dca_decode_frame,
+ .close = dca_decode_end,
+ .long_name = NULL_IF_CONFIG_SMALL("DCA (DTS Coherent Acoustics)"),
+ .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
+ .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLT,
+ AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_NONE },
+ .profiles = NULL_IF_CONFIG_SMALL(profiles),
};
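The reworked dca_exss_mask2count() counts every set speaker bit once and the bits that stand for left/right pairs a second time. A reduced sketch of the same idea with made-up mask values (the real constants are the DCA_EXSS_* enums in dca.h):

    #include <stdio.h>

    /* Illustrative bit assignments only; the real values are the DCA_EXSS_* enums. */
    #define SPKR_CENTER    0x01  /* single speaker -> 1 channel  */
    #define SPKR_FRONT_LR  0x02  /* speaker pair   -> 2 channels */
    #define SPKR_SIDE_LR   0x04  /* speaker pair   -> 2 channels */
    #define SPKR_LFE       0x08  /* single speaker -> 1 channel  */
    #define SPKR_PAIRS     (SPKR_FRONT_LR | SPKR_SIDE_LR)

    static int popcount(unsigned v)
    {
        int n = 0;
        for (; v; v &= v - 1)
            n++;
        return n;
    }

    /* Count channels: every bit once, pair bits a second time. */
    static int mask2count(unsigned mask)
    {
        return popcount(mask) + popcount(mask & SPKR_PAIRS);
    }

    int main(void)
    {
        /* C + front pair + side pair + LFE = 1 + 2 + 2 + 1 = 6 channels (5.1) */
        printf("%d\n", mask2count(SPKR_CENTER | SPKR_FRONT_LR | SPKR_SIDE_LR | SPKR_LFE));
        return 0;
    }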
diff --git a/libavcodec/dct.c b/libavcodec/dct.c
index c30cff664e..4800e13b36 100644
--- a/libavcodec/dct.c
+++ b/libavcodec/dct.c
@@ -28,15 +28,16 @@
*/
#include <math.h>
+
#include "libavutil/mathematics.h"
#include "dct.h"
#include "dct32.h"
-/* sin((M_PI * x / (2*n)) */
-#define SIN(s,n,x) (s->costab[(n) - (x)])
+/* sin((M_PI * x / (2 * n)) */
+#define SIN(s, n, x) (s->costab[(n) - (x)])
-/* cos((M_PI * x / (2*n)) */
-#define COS(s,n,x) (s->costab[x])
+/* cos((M_PI * x / (2 * n)) */
+#define COS(s, n, x) (s->costab[x])
static void ff_dst_calc_I_c(DCTContext *ctx, FFTSample *data)
{
@@ -44,28 +45,28 @@ static void ff_dst_calc_I_c(DCTContext *ctx, FFTSample *data)
int i;
data[0] = 0;
- for(i = 1; i < n/2; i++) {
- float tmp1 = data[i ];
- float tmp2 = data[n - i];
- float s = SIN(ctx, n, 2*i);
-
- s *= tmp1 + tmp2;
- tmp1 = (tmp1 - tmp2) * 0.5f;
- data[i ] = s + tmp1;
- data[n - i] = s - tmp1;
+ for (i = 1; i < n / 2; i++) {
+ float tmp1 = data[i ];
+ float tmp2 = data[n - i];
+ float s = SIN(ctx, n, 2 * i);
+
+ s *= tmp1 + tmp2;
+ tmp1 = (tmp1 - tmp2) * 0.5f;
+ data[i] = s + tmp1;
+ data[n - i] = s - tmp1;
}
- data[n/2] *= 2;
+ data[n / 2] *= 2;
ctx->rdft.rdft_calc(&ctx->rdft, data);
data[0] *= 0.5f;
- for(i = 1; i < n-2; i += 2) {
- data[i + 1] += data[i - 1];
- data[i ] = -data[i + 2];
+ for (i = 1; i < n - 2; i += 2) {
+ data[i + 1] += data[i - 1];
+ data[i] = -data[i + 2];
}
- data[n-1] = 0;
+ data[n - 1] = 0;
}
static void ff_dct_calc_I_c(DCTContext *ctx, FFTSample *data)
@@ -74,19 +75,19 @@ static void ff_dct_calc_I_c(DCTContext *ctx, FFTSample *data)
int i;
float next = -0.5f * (data[0] - data[n]);
- for(i = 0; i < n/2; i++) {
- float tmp1 = data[i ];
+ for (i = 0; i < n / 2; i++) {
+ float tmp1 = data[i];
float tmp2 = data[n - i];
- float s = SIN(ctx, n, 2*i);
- float c = COS(ctx, n, 2*i);
+ float s = SIN(ctx, n, 2 * i);
+ float c = COS(ctx, n, 2 * i);
c *= tmp1 - tmp2;
s *= tmp1 - tmp2;
next += c;
- tmp1 = (tmp1 + tmp2) * 0.5f;
- data[i ] = tmp1 - s;
+ tmp1 = (tmp1 + tmp2) * 0.5f;
+ data[i] = tmp1 - s;
data[n - i] = tmp1 + s;
}
@@ -94,7 +95,7 @@ static void ff_dct_calc_I_c(DCTContext *ctx, FFTSample *data)
data[n] = data[1];
data[1] = next;
- for(i = 3; i <= n; i += 2)
+ for (i = 3; i <= n; i += 2)
data[i] = data[i - 2] - data[i];
}
@@ -103,16 +104,16 @@ static void ff_dct_calc_III_c(DCTContext *ctx, FFTSample *data)
int n = 1 << ctx->nbits;
int i;
- float next = data[n - 1];
+ float next = data[n - 1];
float inv_n = 1.0f / n;
for (i = n - 2; i >= 2; i -= 2) {
- float val1 = data[i ];
+ float val1 = data[i];
float val2 = data[i - 1] - data[i + 1];
- float c = COS(ctx, n, i);
- float s = SIN(ctx, n, i);
+ float c = COS(ctx, n, i);
+ float s = SIN(ctx, n, i);
- data[i ] = c * val1 + s * val2;
+ data[i] = c * val1 + s * val2;
data[i + 1] = s * val1 - c * val2;
}
@@ -121,13 +122,13 @@ static void ff_dct_calc_III_c(DCTContext *ctx, FFTSample *data)
ctx->rdft.rdft_calc(&ctx->rdft, data);
for (i = 0; i < n / 2; i++) {
- float tmp1 = data[i ] * inv_n;
+ float tmp1 = data[i] * inv_n;
float tmp2 = data[n - i - 1] * inv_n;
- float csc = ctx->csc2[i] * (tmp1 - tmp2);
+ float csc = ctx->csc2[i] * (tmp1 - tmp2);
- tmp1 += tmp2;
- data[i ] = tmp1 + csc;
- data[n - i - 1] = tmp1 - csc;
+ tmp1 += tmp2;
+ data[i] = tmp1 + csc;
+ data[n - i - 1] = tmp1 - csc;
}
}
@@ -137,34 +138,33 @@ static void ff_dct_calc_II_c(DCTContext *ctx, FFTSample *data)
int i;
float next;
- for (i=0; i < n/2; i++) {
- float tmp1 = data[i ];
+ for (i = 0; i < n / 2; i++) {
+ float tmp1 = data[i];
float tmp2 = data[n - i - 1];
- float s = SIN(ctx, n, 2*i + 1);
+ float s = SIN(ctx, n, 2 * i + 1);
- s *= tmp1 - tmp2;
- tmp1 = (tmp1 + tmp2) * 0.5f;
+ s *= tmp1 - tmp2;
+ tmp1 = (tmp1 + tmp2) * 0.5f;
- data[i ] = tmp1 + s;
+ data[i] = tmp1 + s;
data[n-i-1] = tmp1 - s;
}
ctx->rdft.rdft_calc(&ctx->rdft, data);
- next = data[1] * 0.5;
+ next = data[1] * 0.5;
data[1] *= -1;
for (i = n - 2; i >= 0; i -= 2) {
float inr = data[i ];
float ini = data[i + 1];
- float c = COS(ctx, n, i);
- float s = SIN(ctx, n, i);
+ float c = COS(ctx, n, i);
+ float s = SIN(ctx, n, i);
- data[i ] = c * inr + s * ini;
+ data[i] = c * inr + s * ini;
+ data[i + 1] = next;
- data[i+1] = next;
-
- next += s * inr - c * ini;
+ next += s * inr - c * ini;
}
}
@@ -180,36 +180,36 @@ av_cold int ff_dct_init(DCTContext *s, int nbits, enum DCTTransformType inverse)
memset(s, 0, sizeof(*s));
- s->nbits = nbits;
- s->inverse = inverse;
+ s->nbits = nbits;
+ s->inverse = inverse;
if (inverse == DCT_II && nbits == 5) {
s->dct_calc = dct32_func;
} else {
- ff_init_ff_cos_tabs(nbits+2);
-
- s->costab = ff_cos_tabs[nbits+2];
+ ff_init_ff_cos_tabs(nbits + 2);
- s->csc2 = av_malloc(n/2 * sizeof(FFTSample));
+ s->costab = ff_cos_tabs[nbits + 2];
+ s->csc2 = av_malloc(n / 2 * sizeof(FFTSample));
if (ff_rdft_init(&s->rdft, nbits, inverse == DCT_III) < 0) {
av_free(s->csc2);
return -1;
}
- for (i = 0; i < n/2; i++)
- s->csc2[i] = 0.5 / sin((M_PI / (2*n) * (2*i + 1)));
+ for (i = 0; i < n / 2; i++)
+ s->csc2[i] = 0.5 / sin((M_PI / (2 * n) * (2 * i + 1)));
- switch(inverse) {
- case DCT_I : s->dct_calc = ff_dct_calc_I_c; break;
- case DCT_II : s->dct_calc = ff_dct_calc_II_c ; break;
+ switch (inverse) {
+ case DCT_I : s->dct_calc = ff_dct_calc_I_c; break;
+ case DCT_II : s->dct_calc = ff_dct_calc_II_c; break;
case DCT_III: s->dct_calc = ff_dct_calc_III_c; break;
- case DST_I : s->dct_calc = ff_dst_calc_I_c; break;
+ case DST_I : s->dct_calc = ff_dst_calc_I_c; break;
}
}
s->dct32 = ff_dct32_float;
- if (HAVE_MMX) ff_dct_init_mmx(s);
+ if (HAVE_MMX)
+ ff_dct_init_mmx(s);
return 0;
}
diff --git a/libavcodec/pthread.c b/libavcodec/pthread.c
index 717b865bae..d545f140ac 100644
--- a/libavcodec/pthread.c
+++ b/libavcodec/pthread.c
@@ -35,7 +35,7 @@
#define _GNU_SOURCE
#include <sched.h>
#endif
-#if HAVE_GETSYSTEMINFO
+#if HAVE_GETPROCESSAFFINITYMASK
#include <windows.h>
#endif
#if HAVE_SYSCTL
@@ -172,10 +172,11 @@ static int get_logical_cpus(AVCodecContext *avctx)
if (!ret) {
nb_cpus = CPU_COUNT(&cpuset);
}
-#elif HAVE_GETSYSTEMINFO
- SYSTEM_INFO sysinfo;
- GetSystemInfo(&sysinfo);
- nb_cpus = sysinfo.dwNumberOfProcessors;
+#elif HAVE_GETPROCESSAFFINITYMASK
+ DWORD_PTR proc_aff, sys_aff;
+ ret = GetProcessAffinityMask(GetCurrentProcess(), &proc_aff, &sys_aff);
+ if (ret)
+ nb_cpus = av_popcount64(proc_aff);
#elif HAVE_SYSCTL && defined(HW_NCPU)
int mib[2] = { CTL_HW, HW_NCPU };
size_t len = sizeof(nb_cpus);
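The new Windows path counts the set bits of the process affinity mask instead of reading dwNumberOfProcessors, so a process pinned to a subset of CPUs does not spawn more threads than it can use. A portable sketch of the bit-counting half (av_popcount64 itself lives in libavutil; this is just the equivalent fold):

    #include <stdint.h>
    #include <stdio.h>

    /* Count set bits in a 64-bit mask, in the spirit of av_popcount64(). */
    static int popcount64(uint64_t x)
    {
        int n = 0;
        while (x) {
            x &= x - 1;   /* clear the lowest set bit */
            n++;
        }
        return n;
    }

    int main(void)
    {
        uint64_t affinity = 0x000000000000000FULL;  /* e.g. bound to CPUs 0-3 */
        printf("logical CPUs usable: %d\n", popcount64(affinity));  /* 4 */
        return 0;
    }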
diff --git a/libavcodec/snow.c b/libavcodec/snow.c
index 660162a1b3..a289ecfbce 100644
--- a/libavcodec/snow.c
+++ b/libavcodec/snow.c
@@ -516,9 +516,9 @@ static void halfpel_interpol(SnowContext *s, uint8_t *halfpel[4][4], AVFrame *fr
int ls= frame->linesize[p];
uint8_t *src= frame->data[p];
- halfpel[1][p] = av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls);
- halfpel[2][p] = av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls);
- halfpel[3][p] = av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls);
+ halfpel[1][p] = (uint8_t*) av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls);
+ halfpel[2][p] = (uint8_t*) av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls);
+ halfpel[3][p] = (uint8_t*) av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls);
halfpel[0][p]= src;
for(y=0; y<h; y++){
diff --git a/libavcodec/utvideo.c b/libavcodec/utvideo.c
index 6fb384a4e4..906a61f9f8 100644
--- a/libavcodec/utvideo.c
+++ b/libavcodec/utvideo.c
@@ -282,6 +282,77 @@ static void restore_median(uint8_t *src, int step, int stride,
}
}
+/* UtVideo interlaced mode treats every two lines as a single one,
+ * so restoring function should take care of possible padding between
+ * two parts of the same "line".
+ */
+static void restore_median_il(uint8_t *src, int step, int stride,
+ int width, int height, int slices, int rmode)
+{
+ int i, j, slice;
+ int A, B, C;
+ uint8_t *bsrc;
+ int slice_start, slice_height;
+ const int cmask = ~(rmode ? 3 : 1);
+ const int stride2 = stride << 1;
+
+ for (slice = 0; slice < slices; slice++) {
+ slice_start = ((slice * height) / slices) & cmask;
+ slice_height = ((((slice + 1) * height) / slices) & cmask) - slice_start;
+ slice_height >>= 1;
+
+ bsrc = src + slice_start * stride;
+
+ // first line - left neighbour prediction
+ bsrc[0] += 0x80;
+ A = bsrc[0];
+ for (i = step; i < width * step; i += step) {
+ bsrc[i] += A;
+ A = bsrc[i];
+ }
+ for (i = 0; i < width * step; i += step) {
+ bsrc[stride + i] += A;
+ A = bsrc[stride + i];
+ }
+ bsrc += stride2;
+ if (slice_height == 1)
+ continue;
+ // second line - first element has top prediction, the rest uses median
+ C = bsrc[-stride2];
+ bsrc[0] += C;
+ A = bsrc[0];
+ for (i = step; i < width * step; i += step) {
+ B = bsrc[i - stride2];
+ bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
+ C = B;
+ A = bsrc[i];
+ }
+ for (i = 0; i < width * step; i += step) {
+ B = bsrc[i - stride];
+ bsrc[stride + i] += mid_pred(A, B, (uint8_t)(A + B - C));
+ C = B;
+ A = bsrc[stride + i];
+ }
+ bsrc += stride2;
+ // the rest of lines use continuous median prediction
+ for (j = 2; j < slice_height; j++) {
+ for (i = 0; i < width * step; i += step) {
+ B = bsrc[i - stride2];
+ bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
+ C = B;
+ A = bsrc[i];
+ }
+ for (i = 0; i < width * step; i += step) {
+ B = bsrc[i - stride];
+ bsrc[i + stride] += mid_pred(A, B, (uint8_t)(A + B - C));
+ C = B;
+ A = bsrc[i + stride];
+ }
+ bsrc += stride2;
+ }
+ }
+}
+
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
@@ -381,10 +452,18 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
c->frame_pred == PRED_LEFT);
if (ret)
return ret;
- if (c->frame_pred == PRED_MEDIAN)
- restore_median(c->pic.data[i], 1, c->pic.linesize[i],
- avctx->width >> !!i, avctx->height >> !!i,
- c->slices, !i);
+ if (c->frame_pred == PRED_MEDIAN) {
+ if (!c->interlaced) {
+ restore_median(c->pic.data[i], 1, c->pic.linesize[i],
+ avctx->width >> !!i, avctx->height >> !!i,
+ c->slices, !i);
+ } else {
+ restore_median_il(c->pic.data[i], 1, c->pic.linesize[i],
+ avctx->width >> !!i,
+ avctx->height >> !!i,
+ c->slices, !i);
+ }
+ }
}
break;
case PIX_FMT_YUV422P:
@@ -395,9 +474,17 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
c->frame_pred == PRED_LEFT);
if (ret)
return ret;
- if (c->frame_pred == PRED_MEDIAN)
- restore_median(c->pic.data[i], 1, c->pic.linesize[i],
- avctx->width >> !!i, avctx->height, c->slices, 0);
+ if (c->frame_pred == PRED_MEDIAN) {
+ if (!c->interlaced) {
+ restore_median(c->pic.data[i], 1, c->pic.linesize[i],
+ avctx->width >> !!i, avctx->height,
+ c->slices, 0);
+ } else {
+ restore_median_il(c->pic.data[i], 1, c->pic.linesize[i],
+ avctx->width >> !!i, avctx->height,
+ c->slices, 0);
+ }
+ }
}
break;
}
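For reference, the median predictor used throughout restore_median_il() takes the middle value of the left neighbour A, the top neighbour B, and the gradient guess A + B - C, with C the top-left neighbour. A small self-contained sketch of that selection (mid_pred here reimplements the behaviour of the libavutil macro):

    #include <stdint.h>
    #include <stdio.h>

    /* Return the median of three values, as libavutil's mid_pred() does. */
    static int mid_pred(int a, int b, int c)
    {
        if (a > b) { int t = a; a = b; b = t; }   /* ensure a <= b          */
        if (b > c) b = c;                         /* b = min(max(a,b), c)   */
        return a > b ? a : b;                     /* median                 */
    }

    int main(void)
    {
        int A = 120, B = 130, C = 118;            /* left, top, top-left    */
        int pred = mid_pred(A, B, (uint8_t)(A + B - C));
        printf("predictor: %d\n", pred);          /* 130; gradient 132 is rejected */
        return 0;
    }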