author     Michael Niedermayer <michaelni@gmx.at>    2012-11-02 14:40:04 +0100
committer  Michael Niedermayer <michaelni@gmx.at>    2012-11-02 14:57:36 +0100
commit     8551c6bec0fd6cf719f94b24bca39b1c3318e213 (patch)
tree       9ba0df8011bead24b8a66dd0b0c2e6e544a7edf3
parent     6788350281c418f0f395a8279eee82f7abe7c63b (diff)
parent     e1c804d883f3cca1b492147a2ac5d0aea7460076 (diff)
Merge remote-tracking branch 'qatar/master'
* qatar/master:
  dv1394: Swap the min and max values of the 'standard' option
  rtpdec_vp8: Don't parse fields that aren't used
  lavc: add some AVPacket doxy.
  audiointerleave: deobfuscate a function call.
  rtpdec: factorize identical code used in several handlers
  a64: remove interleaved mode.
  doc: Point to the new location of the c99-to-c89 tool
  decode_audio3: initialize AVFrame
  ws-snd1: set channel layout
  wmavoice: set channel layout
  wmapro: use AVCodecContext.channels instead of keeping a private copy
  wma: do not keep private copies of some AVCodecContext fields

Conflicts:
	libavcodec/wmadec.c
	libavcodec/wmaenc.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
-rw-r--r--  libavcodec/avcodec.h               |  22
-rw-r--r--  libavcodec/utils.c                 |   2
-rw-r--r--  libavcodec/wma.c                   |  36
-rw-r--r--  libavcodec/wma.h                   |   4
-rw-r--r--  libavcodec/wmadec.c                |  34
-rw-r--r--  libavcodec/wmaenc.c                |  35
-rw-r--r--  libavcodec/wmaprodec.c             |  52
-rw-r--r--  libavcodec/wmavoice.c              |   3
-rw-r--r--  libavcodec/ws-snd1.c               |  11
-rw-r--r--  libavformat/a64.c                  | 116
-rw-r--r--  libavformat/audiointerleave.c      |   2
-rw-r--r--  libavformat/rtpdec.c               |  11
-rw-r--r--  libavformat/rtpdec.h               |   5
-rw-r--r--  libavformat/rtpdec_h263_rfc2190.c  |  12
-rw-r--r--  libavformat/rtpdec_jpeg.c          |  13
-rw-r--r--  libavformat/rtpdec_svq3.c          |   9
-rw-r--r--  libavformat/rtpdec_vp8.c           |  38
-rw-r--r--  libavformat/rtpdec_xiph.c          |  13
18 files changed, 150 insertions, 268 deletions
diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h
index 5344159939..a80612798e 100644
--- a/libavcodec/avcodec.h
+++ b/libavcodec/avcodec.h
@@ -962,6 +962,28 @@ enum AVPacketSideDataType {
AV_PKT_DATA_SUBTITLE_POSITION,
};
+/**
+ * This structure stores compressed data. It is typically exported by demuxers
+ * and then passed as input to decoders, or received as output from encoders and
+ * then passed to muxers.
+ *
+ * For video, it should typically contain one compressed frame. For audio it may
+ * contain several compressed frames.
+ *
+ * AVPacket is one of the few structs in FFmpeg whose size is a part of the
+ * public ABI. Thus it may be allocated on the stack and no new fields can be
+ * added to it without a libavcodec and libavformat major bump.
+ *
+ * The semantics of data ownership depends on the destruct field.
+ * If it is set, the packet data is dynamically allocated and is valid
+ * indefinitely until av_free_packet() is called (which in turn calls the
+ * destruct callback to free the data). If destruct is not set, the packet data
+ * is typically backed by some static buffer somewhere and is only valid for a
+ * limited time (e.g. until the next read call when demuxing).
+ *
+ * The side data is always allocated with av_malloc() and is freed in
+ * av_free_packet().
+ */
typedef struct AVPacket {
/**
* Presentation timestamp in AVStream->time_base units; the time at which
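
The ownership rules documented above can be illustrated with a small demuxing loop (a sketch added for this review, not part of the patch; it assumes the 2012-era API in which av_free_packet() and the destruct callback still exist):

    #include <libavformat/avformat.h>

    /* Illustrative only: read packets and release them according to the
     * destruct-based ownership described in the new AVPacket doxygen. */
    static int drain_packets(AVFormatContext *fmt)
    {
        AVPacket pkt;
        int ret;

        while ((ret = av_read_frame(fmt, &pkt)) >= 0) {
            /* If pkt.destruct is set, the data is dynamically allocated and
             * stays valid until av_free_packet(); otherwise it may only be
             * valid until the next read call. */
            /* ... hand pkt to a decoder or muxer here ... */
            av_free_packet(&pkt); /* runs destruct (if set) and frees side data */
        }
        return ret == AVERROR_EOF ? 0 : ret;
    }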
diff --git a/libavcodec/utils.c b/libavcodec/utils.c
index 2b7ab46f2a..931f3fd53e 100644
--- a/libavcodec/utils.c
+++ b/libavcodec/utils.c
@@ -1702,7 +1702,7 @@ int attribute_align_arg avcodec_decode_audio3(AVCodecContext *avctx, int16_t *sa
int *frame_size_ptr,
AVPacket *avpkt)
{
- AVFrame frame;
+ AVFrame frame = {0};
int ret, got_frame = 0;
if (avctx->get_buffer != avcodec_default_get_buffer) {
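
For context on the `AVFrame frame = {0};` change (not part of the patch): zero-initialization keeps the wrapper from reading indeterminate fields such as extended_data when the decoder leaves them untouched. An alternative with the same intent, assuming the helper of that era, would be:

    AVFrame frame;
    avcodec_get_frame_defaults(&frame); /* clears the frame and sets sane defaults */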
diff --git a/libavcodec/wma.c b/libavcodec/wma.c
index c65281711a..d0c0b34868 100644
--- a/libavcodec/wma.c
+++ b/libavcodec/wma.c
@@ -82,11 +82,6 @@ int ff_wma_init(AVCodecContext *avctx, int flags2)
|| avctx->bit_rate <= 0)
return -1;
- s->sample_rate = avctx->sample_rate;
- s->nb_channels = avctx->channels;
- s->bit_rate = avctx->bit_rate;
- s->block_align = avctx->block_align;
-
ff_dsputil_init(&s->dsp, avctx);
ff_fmt_convert_init(&s->fmt_conv, avctx);
avpriv_float_dsp_init(&s->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
@@ -98,7 +93,8 @@ int ff_wma_init(AVCodecContext *avctx, int flags2)
}
/* compute MDCT block size */
- s->frame_len_bits = ff_wma_get_frame_len_bits(s->sample_rate, s->version, 0);
+ s->frame_len_bits = ff_wma_get_frame_len_bits(avctx->sample_rate,
+ s->version, 0);
s->next_block_len_bits = s->frame_len_bits;
s->prev_block_len_bits = s->frame_len_bits;
s->block_len_bits = s->frame_len_bits;
@@ -107,7 +103,7 @@ int ff_wma_init(AVCodecContext *avctx, int flags2)
if (s->use_variable_block_len) {
int nb_max, nb;
nb = ((flags2 >> 3) & 3) + 1;
- if ((s->bit_rate / s->nb_channels) >= 32000)
+ if ((avctx->bit_rate / avctx->channels) >= 32000)
nb += 2;
nb_max = s->frame_len_bits - BLOCK_MIN_BITS;
if (nb > nb_max)
@@ -119,10 +115,10 @@ int ff_wma_init(AVCodecContext *avctx, int flags2)
/* init rate dependent parameters */
s->use_noise_coding = 1;
- high_freq = s->sample_rate * 0.5;
+ high_freq = avctx->sample_rate * 0.5;
/* if version 2, then the rates are normalized */
- sample_rate1 = s->sample_rate;
+ sample_rate1 = avctx->sample_rate;
if (s->version == 2) {
if (sample_rate1 >= 44100) {
sample_rate1 = 44100;
@@ -137,13 +133,13 @@ int ff_wma_init(AVCodecContext *avctx, int flags2)
}
}
- bps = (float)s->bit_rate / (float)(s->nb_channels * s->sample_rate);
+ bps = (float)avctx->bit_rate / (float)(avctx->channels * avctx->sample_rate);
s->byte_offset_bits = av_log2((int)(bps * s->frame_len / 8.0 + 0.5)) + 2;
/* compute high frequency value and choose if noise coding should
be activated */
bps1 = bps;
- if (s->nb_channels == 2)
+ if (avctx->channels == 2)
bps1 = bps * 1.6;
if (sample_rate1 == 44100) {
if (bps1 >= 0.61) {
@@ -186,8 +182,8 @@ int ff_wma_init(AVCodecContext *avctx, int flags2)
}
av_dlog(s->avctx, "flags2=0x%x\n", flags2);
av_dlog(s->avctx, "version=%d channels=%d sample_rate=%d bitrate=%d block_align=%d\n",
- s->version, s->nb_channels, s->sample_rate, s->bit_rate,
- s->block_align);
+ s->version, avctx->channels, avctx->sample_rate, avctx->bit_rate,
+ avctx->block_align);
av_dlog(s->avctx, "bps=%f bps1=%f high_freq=%f bitoffset=%d\n",
bps, bps1, high_freq, s->byte_offset_bits);
av_dlog(s->avctx, "use_noise_coding=%d use_exp_vlc=%d nb_block_sizes=%d\n",
@@ -210,7 +206,7 @@ int ff_wma_init(AVCodecContext *avctx, int flags2)
lpos = 0;
for (i = 0; i < 25; i++) {
a = ff_wma_critical_freqs[i];
- b = s->sample_rate;
+ b = avctx->sample_rate;
pos = ((block_len * 2 * a) + (b >> 1)) / b;
if (pos > block_len)
pos = block_len;
@@ -227,11 +223,11 @@ int ff_wma_init(AVCodecContext *avctx, int flags2)
table = NULL;
a = s->frame_len_bits - BLOCK_MIN_BITS - k;
if (a < 3) {
- if (s->sample_rate >= 44100) {
+ if (avctx->sample_rate >= 44100) {
table = exponent_band_44100[a];
- } else if (s->sample_rate >= 32000) {
+ } else if (avctx->sample_rate >= 32000) {
table = exponent_band_32000[a];
- } else if (s->sample_rate >= 22050) {
+ } else if (avctx->sample_rate >= 22050) {
table = exponent_band_22050[a];
}
}
@@ -245,7 +241,7 @@ int ff_wma_init(AVCodecContext *avctx, int flags2)
lpos = 0;
for (i = 0; i < 25; i++) {
a = ff_wma_critical_freqs[i];
- b = s->sample_rate;
+ b = avctx->sample_rate;
pos = ((block_len * 2 * a) + (b << 1)) / (4 * b);
pos <<= 2;
if (pos > block_len)
@@ -264,7 +260,7 @@ int ff_wma_init(AVCodecContext *avctx, int flags2)
s->coefs_end[k] = (s->frame_len - ((s->frame_len * 9) / 100)) >> k;
/* high freq computation */
s->high_band_start[k] = (int)((block_len * 2 * high_freq) /
- s->sample_rate + 0.5);
+ avctx->sample_rate + 0.5);
n = s->exponent_sizes[k];
j = 0;
pos = 0;
@@ -344,7 +340,7 @@ int ff_wma_init(AVCodecContext *avctx, int flags2)
/* choose the VLC tables for the coefficients */
coef_vlc_table = 2;
- if (s->sample_rate >= 32000) {
+ if (avctx->sample_rate >= 32000) {
if (bps1 < 0.72) {
coef_vlc_table = 0;
} else if (bps1 < 1.16) {
diff --git a/libavcodec/wma.h b/libavcodec/wma.h
index 1e7f0e5aba..4db4faa54a 100644
--- a/libavcodec/wma.h
+++ b/libavcodec/wma.h
@@ -69,11 +69,7 @@ typedef struct WMACodecContext {
AVFrame frame;
GetBitContext gb;
PutBitContext pb;
- int sample_rate;
- int nb_channels;
- int bit_rate;
int version; ///< 1 = 0x160 (WMAV1), 2 = 0x161 (WMAV2)
- int block_align;
int use_bit_reservoir;
int use_variable_block_len;
int use_exp_vlc; ///< exponent coding: 0 = lsp, 1 = vlc + delta
diff --git a/libavcodec/wmadec.c b/libavcodec/wmadec.c
index ca12d24031..f3ce474f41 100644
--- a/libavcodec/wmadec.c
+++ b/libavcodec/wmadec.c
@@ -487,11 +487,11 @@ static int wma_decode_block(WMACodecContext *s)
return -1;
}
- if (s->nb_channels == 2) {
+ if (s->avctx->channels == 2) {
s->ms_stereo = get_bits1(&s->gb);
}
v = 0;
- for(ch = 0; ch < s->nb_channels; ch++) {
+ for(ch = 0; ch < s->avctx->channels; ch++) {
a = get_bits1(&s->gb);
s->channel_coded[ch] = a;
v |= a;
@@ -518,13 +518,13 @@ static int wma_decode_block(WMACodecContext *s)
/* compute number of coefficients */
n = s->coefs_end[bsize] - s->coefs_start;
- for(ch = 0; ch < s->nb_channels; ch++)
+ for(ch = 0; ch < s->avctx->channels; ch++)
nb_coefs[ch] = n;
/* complex coding */
if (s->use_noise_coding) {
- for(ch = 0; ch < s->nb_channels; ch++) {
+ for(ch = 0; ch < s->avctx->channels; ch++) {
if (s->channel_coded[ch]) {
int i, n, a;
n = s->exponent_high_sizes[bsize];
@@ -537,7 +537,7 @@ static int wma_decode_block(WMACodecContext *s)
}
}
}
- for(ch = 0; ch < s->nb_channels; ch++) {
+ for(ch = 0; ch < s->avctx->channels; ch++) {
if (s->channel_coded[ch]) {
int i, n, val, code;
@@ -565,7 +565,7 @@ static int wma_decode_block(WMACodecContext *s)
/* exponents can be reused in short blocks. */
if ((s->block_len_bits == s->frame_len_bits) ||
get_bits1(&s->gb)) {
- for(ch = 0; ch < s->nb_channels; ch++) {
+ for(ch = 0; ch < s->avctx->channels; ch++) {
if (s->channel_coded[ch]) {
if (s->use_exp_vlc) {
if (decode_exp_vlc(s, ch) < 0)
@@ -579,7 +579,7 @@ static int wma_decode_block(WMACodecContext *s)
}
/* parse spectral coefficients : just RLE encoding */
- for(ch = 0; ch < s->nb_channels; ch++) {
+ for (ch = 0; ch < s->avctx->channels; ch++) {
if (s->channel_coded[ch]) {
int tindex;
WMACoef* ptr = &s->coefs1[ch][0];
@@ -593,7 +593,7 @@ static int wma_decode_block(WMACodecContext *s)
0, ptr, 0, nb_coefs[ch],
s->block_len, s->frame_len_bits, coef_nb_bits);
}
- if (s->version == 1 && s->nb_channels >= 2) {
+ if (s->version == 1 && s->avctx->channels >= 2) {
align_get_bits(&s->gb);
}
}
@@ -608,7 +608,7 @@ static int wma_decode_block(WMACodecContext *s)
}
/* finally compute the MDCT coefficients */
- for(ch = 0; ch < s->nb_channels; ch++) {
+ for (ch = 0; ch < s->avctx->channels; ch++) {
if (s->channel_coded[ch]) {
WMACoef *coefs1;
float *coefs, *exponents, mult, mult1, noise;
@@ -712,7 +712,7 @@ static int wma_decode_block(WMACodecContext *s)
}
#ifdef TRACE
- for(ch = 0; ch < s->nb_channels; ch++) {
+ for (ch = 0; ch < s->avctx->channels; ch++) {
if (s->channel_coded[ch]) {
dump_floats(s, "exponents", 3, s->exponents[ch], s->block_len);
dump_floats(s, "coefs", 1, s->coefs[ch], s->block_len);
@@ -736,7 +736,7 @@ static int wma_decode_block(WMACodecContext *s)
next:
mdct = &s->mdct_ctx[bsize];
- for(ch = 0; ch < s->nb_channels; ch++) {
+ for (ch = 0; ch < s->avctx->channels; ch++) {
int n4, index;
n4 = s->block_len / 2;
@@ -780,7 +780,7 @@ static int wma_decode_frame(WMACodecContext *s, float **samples,
break;
}
- for (ch = 0; ch < s->nb_channels; ch++) {
+ for (ch = 0; ch < s->avctx->channels; ch++) {
/* copy current block to output */
memcpy(samples[ch] + samples_offset, s->frame_out[ch],
s->frame_len * sizeof(*s->frame_out[ch]));
@@ -813,14 +813,14 @@ static int wma_decode_superframe(AVCodecContext *avctx, void *data,
s->last_superframe_len = 0;
return 0;
}
- if (buf_size < s->block_align) {
+ if (buf_size < avctx->block_align) {
av_log(avctx, AV_LOG_ERROR,
"Input packet size too small (%d < %d)\n",
- buf_size, s->block_align);
+ buf_size, avctx->block_align);
return AVERROR_INVALIDDATA;
}
- if(s->block_align)
- buf_size = s->block_align;
+ if(avctx->block_align)
+ buf_size = avctx->block_align;
init_get_bits(&s->gb, buf, buf_size*8);
@@ -915,7 +915,7 @@ static int wma_decode_superframe(AVCodecContext *avctx, void *data,
av_dlog(s->avctx, "%d %d %d %d outbytes:%td eaten:%d\n",
s->frame_len_bits, s->block_len_bits, s->frame_len, s->block_len,
- (int8_t *)samples - (int8_t *)data, s->block_align);
+ (int8_t *)samples - (int8_t *)data, avctx->block_align);
*got_frame_ptr = 1;
*(AVFrame *)data = s->frame;
diff --git a/libavcodec/wmaenc.c b/libavcodec/wmaenc.c
index 63f815c78a..bf5c2b1254 100644
--- a/libavcodec/wmaenc.c
+++ b/libavcodec/wmaenc.c
@@ -27,7 +27,7 @@
static int encode_init(AVCodecContext * avctx){
WMACodecContext *s = avctx->priv_data;
- int i, flags1, flags2;
+ int i, flags1, flags2, block_align;
uint8_t *extradata;
s->avctx = avctx;
@@ -78,10 +78,11 @@ static int encode_init(AVCodecContext * avctx){
for(i = 0; i < s->nb_block_sizes; i++)
ff_mdct_init(&s->mdct_ctx[i], s->frame_len_bits - i + 1, 0, 1.0);
- s->block_align = avctx->bit_rate * (int64_t)s->frame_len /
+ block_align = avctx->bit_rate * (int64_t)s->frame_len /
(avctx->sample_rate * 8);
- s->block_align = FFMIN(s->block_align, MAX_CODED_SUPERFRAME_SIZE);
- avctx->block_align = s->block_align;
+ block_align = FFMIN(block_align, MAX_CODED_SUPERFRAME_SIZE);
+ avctx->block_align = block_align;
+
avctx->frame_size = avctx->delay = s->frame_len;
#if FF_API_OLD_ENCODE_AUDIO
@@ -184,7 +185,7 @@ static int encode_block(WMACodecContext *s, float (*src_coefs)[BLOCK_MAX_SIZE],
//FIXME factor
v = s->coefs_end[bsize] - s->coefs_start;
- for(ch = 0; ch < s->nb_channels; ch++)
+ for (ch = 0; ch < s->avctx->channels; ch++)
nb_coefs[ch] = v;
{
int n4 = s->block_len / 2;
@@ -194,18 +195,18 @@ static int encode_block(WMACodecContext *s, float (*src_coefs)[BLOCK_MAX_SIZE],
}
}
- if (s->nb_channels == 2) {
+ if (s->avctx->channels == 2) {
put_bits(&s->pb, 1, !!s->ms_stereo);
}
- for(ch = 0; ch < s->nb_channels; ch++) {
+ for (ch = 0; ch < s->avctx->channels; ch++) {
s->channel_coded[ch] = 1; //FIXME only set channel_coded when needed, instead of always
if (s->channel_coded[ch]) {
init_exp(s, ch, fixed_exp);
}
}
- for(ch = 0; ch < s->nb_channels; ch++) {
+ for (ch = 0; ch < s->avctx->channels; ch++) {
if (s->channel_coded[ch]) {
WMACoef *coefs1;
float *coefs, *exponents, mult;
@@ -233,7 +234,7 @@ static int encode_block(WMACodecContext *s, float (*src_coefs)[BLOCK_MAX_SIZE],
}
v = 0;
- for(ch = 0; ch < s->nb_channels; ch++) {
+ for (ch = 0; ch < s->avctx->channels; ch++) {
int a = s->channel_coded[ch];
put_bits(&s->pb, 1, a);
v |= a;
@@ -249,7 +250,7 @@ static int encode_block(WMACodecContext *s, float (*src_coefs)[BLOCK_MAX_SIZE],
coef_nb_bits= ff_wma_total_gain_to_bits(total_gain);
if (s->use_noise_coding) {
- for(ch = 0; ch < s->nb_channels; ch++) {
+ for (ch = 0; ch < s->avctx->channels; ch++) {
if (s->channel_coded[ch]) {
int i, n;
n = s->exponent_high_sizes[bsize];
@@ -268,7 +269,7 @@ static int encode_block(WMACodecContext *s, float (*src_coefs)[BLOCK_MAX_SIZE],
}
if (parse_exponents) {
- for(ch = 0; ch < s->nb_channels; ch++) {
+ for (ch = 0; ch < s->avctx->channels; ch++) {
if (s->channel_coded[ch]) {
if (s->use_exp_vlc) {
encode_exp_vlc(s, ch, fixed_exp);
@@ -282,7 +283,7 @@ static int encode_block(WMACodecContext *s, float (*src_coefs)[BLOCK_MAX_SIZE],
av_assert0(0); //FIXME not implemented
}
- for(ch = 0; ch < s->nb_channels; ch++) {
+ for (ch = 0; ch < s->avctx->channels; ch++) {
if (s->channel_coded[ch]) {
int run, tindex;
WMACoef *ptr, *eptr;
@@ -320,7 +321,7 @@ static int encode_block(WMACodecContext *s, float (*src_coefs)[BLOCK_MAX_SIZE],
if(run)
put_bits(&s->pb, s->coef_vlcs[tindex]->huffbits[1], s->coef_vlcs[tindex]->huffcodes[1]);
}
- if (s->version == 1 && s->nb_channels >= 2) {
+ if (s->version == 1 && s->avctx->channels >= 2) {
avpriv_align_put_bits(&s->pb);
}
}
@@ -339,7 +340,7 @@ static int encode_frame(WMACodecContext *s, float (*src_coefs)[BLOCK_MAX_SIZE],
avpriv_align_put_bits(&s->pb);
- return put_bits_count(&s->pb)/8 - s->block_align;
+ return put_bits_count(&s->pb) / 8 - s->avctx->block_align;
}
static int encode_superframe(AVCodecContext *avctx, AVPacket *avpkt,
@@ -379,18 +380,18 @@ static int encode_superframe(AVCodecContext *avctx, AVPacket *avpkt,
while(total_gain <= 128 && error > 0)
error = encode_frame(s, s->coefs, avpkt->data, avpkt->size, total_gain++);
av_assert0((put_bits_count(&s->pb) & 7) == 0);
- i= s->block_align - (put_bits_count(&s->pb)+7)/8;
+ i= avctx->block_align - (put_bits_count(&s->pb)+7)/8;
av_assert0(i>=0);
while(i--)
put_bits(&s->pb, 8, 'N');
flush_put_bits(&s->pb);
- av_assert0(put_bits_ptr(&s->pb) - s->pb.buf == s->block_align);
+ av_assert0(put_bits_ptr(&s->pb) - s->pb.buf == avctx->block_align);
if (frame->pts != AV_NOPTS_VALUE)
avpkt->pts = frame->pts - ff_samples_to_time_base(avctx, avctx->delay);
- avpkt->size = s->block_align;
+ avpkt->size = avctx->block_align;
*got_packet_ptr = 1;
return 0;
}
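
For intuition on the block_align computation in the encode_init() hunk above (illustrative numbers, not taken from the patch): a 64 kbit/s stream at 44100 Hz with a frame length of 2048 samples gives

    block_align = 64000 * 2048 / (44100 * 8) = 371 bytes (integer division),

comfortably below MAX_CODED_SUPERFRAME_SIZE, so the FFMIN clamp only matters at unusually high bitrates or low sample rates.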
diff --git a/libavcodec/wmaprodec.c b/libavcodec/wmaprodec.c
index 4d15e45875..e071eb1f95 100644
--- a/libavcodec/wmaprodec.c
+++ b/libavcodec/wmaprodec.c
@@ -184,7 +184,6 @@ typedef struct WMAProDecodeCtx {
uint8_t bits_per_sample; ///< integer audio sample size for the unscaled IMDCT output (used to scale to [-1.0, 1.0])
uint16_t samples_per_frame; ///< number of samples to output
uint16_t log2_frame_size;
- int8_t num_channels; ///< number of channels in the stream (same as AVCodecContext.num_channels)
int8_t lfe_channel; ///< lfe channel index
uint8_t max_num_subframes;
uint8_t subframe_len_bits; ///< number of bits used for the subframe length
@@ -246,7 +245,7 @@ static av_cold void dump_context(WMAProDecodeCtx *s)
PRINT("log2 frame size", s->log2_frame_size);
PRINT("max num subframes", s->max_num_subframes);
PRINT("len prefix", s->len_prefix);
- PRINT("num channels", s->num_channels);
+ PRINT("num channels", s->avctx->channels);
}
/**
@@ -343,18 +342,17 @@ static av_cold int decode_init(AVCodecContext *avctx)
return AVERROR_INVALIDDATA;
}
- s->num_channels = avctx->channels;
-
- if (s->num_channels < 0) {
- av_log(avctx, AV_LOG_ERROR, "invalid number of channels %d\n", s->num_channels);
+ if (avctx->channels < 0) {
+ av_log(avctx, AV_LOG_ERROR, "invalid number of channels %d\n",
+ avctx->channels);
return AVERROR_INVALIDDATA;
- } else if (s->num_channels > WMAPRO_MAX_CHANNELS) {
+ } else if (avctx->channels > WMAPRO_MAX_CHANNELS) {
av_log_ask_for_sample(avctx, "unsupported number of channels\n");
return AVERROR_PATCHWELCOME;
}
/** init previous block len */
- for (i = 0; i < s->num_channels; i++)
+ for (i = 0; i < avctx->channels; i++)
s->channel[i].prev_block_len = s->samples_per_frame;
/** extract lfe channel position */
@@ -531,7 +529,7 @@ static int decode_tilehdr(WMAProDecodeCtx *s)
{
uint16_t num_samples[WMAPRO_MAX_CHANNELS] = { 0 };/**< sum of samples for all currently known subframes of a channel */
uint8_t contains_subframe[WMAPRO_MAX_CHANNELS]; /**< flag indicating if a channel contains the current subframe */
- int channels_for_cur_subframe = s->num_channels; /**< number of channels that contain the current subframe */
+ int channels_for_cur_subframe = s->avctx->channels; /**< number of channels that contain the current subframe */
int fixed_channel_layout = 0; /**< flag indicating that all channels use the same subframe offsets and sizes */
int min_channel_len = 0; /**< smallest sum of samples (channels with this length will be processed first) */
int c;
@@ -543,7 +541,7 @@ static int decode_tilehdr(WMAProDecodeCtx *s)
*/
/** reset tiling information */
- for (c = 0; c < s->num_channels; c++)
+ for (c = 0; c < s->avctx->channels; c++)
s->channel[c].num_subframes = 0;
if (s->max_num_subframes == 1 || get_bits1(&s->gb))
@@ -554,7 +552,7 @@ static int decode_tilehdr(WMAProDecodeCtx *s)
int subframe_len;
/** check which channels contain the subframe */
- for (c = 0; c < s->num_channels; c++) {
+ for (c = 0; c < s->avctx->channels; c++) {
if (num_samples[c] == min_channel_len) {
if (fixed_channel_layout || channels_for_cur_subframe == 1 ||
(min_channel_len == s->samples_per_frame - s->min_samples_per_subframe))
@@ -571,7 +569,7 @@ static int decode_tilehdr(WMAProDecodeCtx *s)
/** add subframes to the individual channels and find new min_channel_len */
min_channel_len += subframe_len;
- for (c = 0; c < s->num_channels; c++) {
+ for (c = 0; c < s->avctx->channels; c++) {
WMAProChannelCtx* chan = &s->channel[c];
if (contains_subframe[c]) {
@@ -598,7 +596,7 @@ static int decode_tilehdr(WMAProDecodeCtx *s)
}
} while (min_channel_len < s->samples_per_frame);
- for (c = 0; c < s->num_channels; c++) {
+ for (c = 0; c < s->avctx->channels; c++) {
int i;
int offset = 0;
for (i = 0; i < s->channel[c].num_subframes; i++) {
@@ -624,8 +622,8 @@ static void decode_decorrelation_matrix(WMAProDecodeCtx *s,
int i;
int offset = 0;
int8_t rotation_offset[WMAPRO_MAX_CHANNELS * WMAPRO_MAX_CHANNELS];
- memset(chgroup->decorrelation_matrix, 0, s->num_channels *
- s->num_channels * sizeof(*chgroup->decorrelation_matrix));
+ memset(chgroup->decorrelation_matrix, 0, s->avctx->channels *
+ s->avctx->channels * sizeof(*chgroup->decorrelation_matrix));
for (i = 0; i < chgroup->num_channels * (chgroup->num_channels - 1) >> 1; i++)
rotation_offset[i] = get_bits(&s->gb, 6);
@@ -678,7 +676,7 @@ static int decode_channel_transform(WMAProDecodeCtx* s)
/** in the one channel case channel transforms are pointless */
s->num_chgroups = 0;
- if (s->num_channels > 1) {
+ if (s->avctx->channels > 1) {
int remaining_channels = s->channels_for_cur_subframe;
if (get_bits1(&s->gb)) {
@@ -724,7 +722,7 @@ static int decode_channel_transform(WMAProDecodeCtx* s)
}
} else {
chgroup->transform = 1;
- if (s->num_channels == 2) {
+ if (s->avctx->channels == 2) {
chgroup->decorrelation_matrix[0] = 1.0;
chgroup->decorrelation_matrix[1] = -1.0;
chgroup->decorrelation_matrix[2] = 1.0;
@@ -1014,7 +1012,7 @@ static void inverse_channel_transform(WMAProDecodeCtx *s)
(*ch)[y] = sum;
}
}
- } else if (s->num_channels == 2) {
+ } else if (s->avctx->channels == 2) {
int len = FFMIN(sfb[1], s->subframe_len) - sfb[0];
s->dsp.vector_fmul_scalar(ch_data[0] + sfb[0],
ch_data[0] + sfb[0],
@@ -1067,7 +1065,7 @@ static int decode_subframe(WMAProDecodeCtx *s)
int offset = s->samples_per_frame;
int subframe_len = s->samples_per_frame;
int i;
- int total_samples = s->samples_per_frame * s->num_channels;
+ int total_samples = s->samples_per_frame * s->avctx->channels;
int transmit_coeffs = 0;
int cur_subwoofer_cutoff;
@@ -1077,7 +1075,7 @@ static int decode_subframe(WMAProDecodeCtx *s)
== the next block of the channel with the smallest number of
decoded samples
*/
- for (i = 0; i < s->num_channels; i++) {
+ for (i = 0; i < s->avctx->channels; i++) {
s->channel[i].grouped = 0;
if (offset > s->channel[i].decoded_samples) {
offset = s->channel[i].decoded_samples;
@@ -1091,7 +1089,7 @@ static int decode_subframe(WMAProDecodeCtx *s)
/** get a list of all channels that contain the estimated block */
s->channels_for_cur_subframe = 0;
- for (i = 0; i < s->num_channels; i++) {
+ for (i = 0; i < s->avctx->channels; i++) {
const int cur_subframe = s->channel[i].cur_subframe;
/** substract already processed samples */
total_samples -= s->channel[i].decoded_samples;
@@ -1321,9 +1319,9 @@ static int decode_frame(WMAProDecodeCtx *s, int *got_frame_ptr)
}
/** read postproc transform */
- if (s->num_channels > 1 && get_bits1(gb)) {
+ if (s->avctx->channels > 1 && get_bits1(gb)) {
if (get_bits1(gb)) {
- for (i = 0; i < s->num_channels * s->num_channels; i++)
+ for (i = 0; i < avctx->channels * avctx->channels; i++)
skip_bits(gb, 4);
}
}
@@ -1358,7 +1356,7 @@ static int decode_frame(WMAProDecodeCtx *s, int *got_frame_ptr)
/** reset subframe states */
s->parsed_all_subframes = 0;
- for (i = 0; i < s->num_channels; i++) {
+ for (i = 0; i < avctx->channels; i++) {
s->channel[i].decoded_samples = 0;
s->channel[i].cur_subframe = 0;
s->channel[i].reuse_sf = 0;
@@ -1381,11 +1379,11 @@ static int decode_frame(WMAProDecodeCtx *s, int *got_frame_ptr)
}
/** copy samples to the output buffer */
- for (i = 0; i < s->num_channels; i++)
+ for (i = 0; i < avctx->channels; i++)
memcpy(s->frame.extended_data[i], s->channel[i].out,
s->samples_per_frame * sizeof(*s->channel[i].out));
- for (i = 0; i < s->num_channels; i++) {
+ for (i = 0; i < avctx->channels; i++) {
/** reuse second half of the IMDCT output for the next frame */
memcpy(&s->channel[i].out[0],
&s->channel[i].out[s->samples_per_frame],
@@ -1615,7 +1613,7 @@ static void flush(AVCodecContext *avctx)
int i;
/** reset output buffer as a part of it is used during the windowing of a
new frame */
- for (i = 0; i < s->num_channels; i++)
+ for (i = 0; i < avctx->channels; i++)
memset(s->channel[i].out, 0, s->samples_per_frame *
sizeof(*s->channel[i].out));
s->packet_loss = 1;
diff --git a/libavcodec/wmavoice.c b/libavcodec/wmavoice.c
index 9504dab3f1..f4e156e92e 100644
--- a/libavcodec/wmavoice.c
+++ b/libavcodec/wmavoice.c
@@ -29,6 +29,7 @@
#include <math.h>
+#include "libavutil/audioconvert.h"
#include "libavutil/mem.h"
#include "dsputil.h"
#include "avcodec.h"
@@ -439,6 +440,8 @@ static av_cold int wmavoice_decode_init(AVCodecContext *ctx)
2 * (s->block_conv_table[1] - 2 * s->min_pitch_val);
s->block_pitch_nbits = av_ceil_log2(s->block_pitch_range);
+ ctx->channels = 1;
+ ctx->channel_layout = AV_CH_LAYOUT_MONO;
ctx->sample_fmt = AV_SAMPLE_FMT_FLT;
avcodec_get_frame_defaults(&s->frame);
diff --git a/libavcodec/ws-snd1.c b/libavcodec/ws-snd1.c
index dfa02b6954..3a83f94e41 100644
--- a/libavcodec/ws-snd1.c
+++ b/libavcodec/ws-snd1.c
@@ -20,6 +20,8 @@
*/
#include <stdint.h>
+
+#include "libavutil/audioconvert.h"
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
@@ -46,12 +48,9 @@ static av_cold int ws_snd_decode_init(AVCodecContext *avctx)
{
WSSndContext *s = avctx->priv_data;
- if (avctx->channels != 1) {
- av_log_ask_for_sample(avctx, "unsupported number of channels\n");
- return AVERROR(EINVAL);
- }
-
- avctx->sample_fmt = AV_SAMPLE_FMT_U8;
+ avctx->channels = 1;
+ avctx->channel_layout = AV_CH_LAYOUT_MONO;
+ avctx->sample_fmt = AV_SAMPLE_FMT_U8;
avcodec_get_frame_defaults(&s->frame);
avctx->coded_frame = &s->frame;
diff --git a/libavformat/a64.c b/libavformat/a64.c
index c1f6b67904..c672fb698c 100644
--- a/libavformat/a64.c
+++ b/libavformat/a64.c
@@ -23,17 +23,11 @@
#include "libavcodec/a64enc.h"
#include "libavcodec/bytestream.h"
#include "avformat.h"
-
-typedef struct A64MuxerContext {
- int interleaved;
- AVPacket prev_pkt;
- int prev_frame_count;
-} A64MuxerContext;
+#include "rawenc.h"
static int a64_write_header(struct AVFormatContext *s)
{
AVCodecContext *avctx = s->streams[0]->codec;
- A64MuxerContext *c = s->priv_data;
uint8_t header[5] = {
0x00, //load
0x40, //address
@@ -41,7 +35,6 @@ static int a64_write_header(struct AVFormatContext *s)
0x00, //charset_lifetime (multi only)
0x00 //fps in 50/fps;
};
- c->interleaved = 0;
switch (avctx->codec->id) {
case AV_CODEC_ID_A64_MULTI:
header[2] = 0x00;
@@ -57,109 +50,6 @@ static int a64_write_header(struct AVFormatContext *s)
return AVERROR(EINVAL);
}
avio_write(s->pb, header, 2);
- c->prev_pkt.size = 0;
- c->prev_frame_count = 0;
- return 0;
-}
-
-static int a64_write_packet(struct AVFormatContext *s, AVPacket *pkt)
-{
- AVCodecContext *avctx = s->streams[0]->codec;
- A64MuxerContext *c = s->priv_data;
- int i, j;
- int ch_chunksize;
- int lifetime;
- int frame_count;
- int charset_size;
- int frame_size;
- int num_frames;
-
- /* fetch values from extradata */
- switch (avctx->codec->id) {
- case AV_CODEC_ID_A64_MULTI:
- case AV_CODEC_ID_A64_MULTI5:
- if(c->interleaved) {
- /* Write interleaved, means we insert chunks of the future charset before each current frame.
- * Reason: if we load 1 charset + corresponding frames in one block on c64, we need to store
- * them first and then display frame by frame to keep in sync. Thus we would read and write
- * the data for colram from/to ram first and waste too much time. If we interleave and send the
- * charset beforehand, we assemble a new charset chunk by chunk, write current screen data to
- * screen-ram to be displayed and decode the colram directly to colram-location $d800 during
- * the overscan, while reading directly from source.
- * This is the only way so far, to achieve 25fps on c64 */
- if(avctx->extradata) {
- /* fetch values from extradata */
- lifetime = AV_RB32(avctx->extradata + 0);
- frame_count = AV_RB32(avctx->extradata + 4);
- charset_size = AV_RB32(avctx->extradata + 8);
- frame_size = AV_RB32(avctx->extradata + 12);
-
- /* TODO: sanity checks? */
- } else {
- av_log(avctx, AV_LOG_ERROR, "extradata not set\n");
- return AVERROR(EINVAL);
- }
-
- ch_chunksize=charset_size/lifetime;
- /* TODO: check if charset/size is % lifetime, but maybe check in codec */
-
- if(pkt->data) num_frames = lifetime;
- else num_frames = c->prev_frame_count;
-
- for(i = 0; i < num_frames; i++) {
- if(pkt->data) {
- /* if available, put newest charset chunk into buffer */
- avio_write(s->pb, pkt->data + ch_chunksize * i, ch_chunksize);
- } else {
- /* a bit ugly, but is there an alternative to put many zeros? */
- for(j = 0; j < ch_chunksize; j++) avio_w8(s->pb, 0);
- }
-
- if(c->prev_pkt.data) {
- /* put frame (screen + colram) from last packet into buffer */
- avio_write(s->pb, c->prev_pkt.data + charset_size + frame_size * i, frame_size);
- } else {
- /* a bit ugly, but is there an alternative to put many zeros? */
- for(j = 0; j < frame_size; j++) avio_w8(s->pb, 0);
- }
- }
-
- /* backup current packet for next turn */
- if(pkt->data) {
- /* no backup packet yet? create one! */
- if(!c->prev_pkt.data) av_new_packet(&c->prev_pkt, pkt->size);
- /* we have a packet and data is big enough, reuse it */
- if(c->prev_pkt.data && c->prev_pkt.size >= pkt->size) {
- memcpy(c->prev_pkt.data, pkt->data, pkt->size);
- c->prev_pkt.size = pkt->size;
- } else {
- av_log(avctx, AV_LOG_ERROR, "Too less memory for prev_pkt.\n");
- return AVERROR(ENOMEM);
- }
- }
-
- c->prev_frame_count = frame_count;
- break;
- }
- default:
- /* Write things as is. Nice for self-contained frames from non-multicolor modes or if played
- * directly from ram and not from a streaming device (rrnet/mmc) */
- if(pkt) avio_write(s->pb, pkt->data, pkt->size);
- break;
- }
-
- avio_flush(s->pb);
- return 0;
-}
-
-static int a64_write_trailer(struct AVFormatContext *s)
-{
- A64MuxerContext *c = s->priv_data;
- AVPacket pkt = {0};
- /* need to flush last packet? */
- if(c->interleaved) a64_write_packet(s, &pkt);
- /* discard backed up packet */
- if(c->prev_pkt.data) av_destruct_packet(&c->prev_pkt);
return 0;
}
@@ -167,9 +57,7 @@ AVOutputFormat ff_a64_muxer = {
.name = "a64",
.long_name = NULL_IF_CONFIG_SMALL("a64 - video for Commodore 64"),
.extensions = "a64, A64",
- .priv_data_size = sizeof (A64Context),
.video_codec = AV_CODEC_ID_A64_MULTI,
.write_header = a64_write_header,
- .write_packet = a64_write_packet,
- .write_trailer = a64_write_trailer,
+ .write_packet = ff_raw_write_packet,
};
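
For reference, the new write callback is the generic raw muxer helper; paraphrased from libavformat/rawenc.c (shown here for context, not part of this diff) it amounts to:

    int ff_raw_write_packet(AVFormatContext *s, AVPacket *pkt)
    {
        avio_write(s->pb, pkt->data, pkt->size); /* pass the encoded frame through untouched */
        return 0;
    }

which matches the old non-interleaved default branch of a64_write_packet().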
diff --git a/libavformat/audiointerleave.c b/libavformat/audiointerleave.c
index 20323a2c58..35dd8d5e62 100644
--- a/libavformat/audiointerleave.c
+++ b/libavformat/audiointerleave.c
@@ -144,5 +144,5 @@ int ff_audio_rechunk_interleave(AVFormatContext *s, AVPacket *out, AVPacket *pkt
}
}
- return get_packet(s, out, pkt, flush);
+ return get_packet(s, out, NULL, flush);
}
diff --git a/libavformat/rtpdec.c b/libavformat/rtpdec.c
index 18cc16fcfe..e92419d9d9 100644
--- a/libavformat/rtpdec.c
+++ b/libavformat/rtpdec.c
@@ -806,3 +806,14 @@ int ff_parse_fmtp(AVStream *stream, PayloadContext *data, const char *p,
av_free(value);
return 0;
}
+
+int ff_rtp_finalize_packet(AVPacket *pkt, AVIOContext **dyn_buf, int stream_idx)
+{
+ av_init_packet(pkt);
+
+ pkt->size = avio_close_dyn_buf(*dyn_buf, &pkt->data);
+ pkt->stream_index = stream_idx;
+ pkt->destruct = av_destruct_packet;
+ *dyn_buf = NULL;
+ return pkt->size;
+}
diff --git a/libavformat/rtpdec.h b/libavformat/rtpdec.h
index 3d7019024a..15d472a972 100644
--- a/libavformat/rtpdec.h
+++ b/libavformat/rtpdec.h
@@ -202,4 +202,9 @@ int ff_parse_fmtp(AVStream *stream, PayloadContext *data, const char *p,
void av_register_rtp_dynamic_payload_handlers(void);
+/**
+ * Close the dynamic buffer and make a packet from it.
+ */
+int ff_rtp_finalize_packet(AVPacket *pkt, AVIOContext **dyn_buf, int stream_idx);
+
#endif /* AVFORMAT_RTPDEC_H */
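
A rough usage sketch of the new helper inside a payload handler (names other than ff_rtp_finalize_packet are illustrative; the real call sites follow in the hunks below):

    /* Once the RTP marker bit signals the end of the frame: */
    if (end_of_frame) {
        int ret = ff_rtp_finalize_packet(pkt, &data->buf, st->index);
        if (ret < 0)
            return ret;        /* closing the dynamic buffer failed */
        /* pkt now owns the buffer (destruct set) and carries the stream index */
        return 0;
    }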
diff --git a/libavformat/rtpdec_h263_rfc2190.c b/libavformat/rtpdec_h263_rfc2190.c
index 163d4eaba7..4957b337c7 100644
--- a/libavformat/rtpdec_h263_rfc2190.c
+++ b/libavformat/rtpdec_h263_rfc2190.c
@@ -61,7 +61,7 @@ static int h263_handle_packet(AVFormatContext *ctx, PayloadContext *data,
{
/* Corresponding to header fields in the RFC */
int f, p, i, sbit, ebit, src, r;
- int header_size;
+ int header_size, ret;
if (data->newformat)
return ff_h263_handle_packet(ctx, data, st, pkt, timestamp, buf, len,
@@ -133,7 +133,7 @@ static int h263_handle_packet(AVFormatContext *ctx, PayloadContext *data,
/* Check the picture start code, only start buffering a new frame
* if this is correct */
if (len > 4 && AV_RB32(buf) >> 10 == 0x20) {
- int ret = avio_open_dyn_buf(&data->buf);
+ ret = avio_open_dyn_buf(&data->buf);
if (ret < 0)
return ret;
data->timestamp = *timestamp;
@@ -185,13 +185,11 @@ static int h263_handle_packet(AVFormatContext *ctx, PayloadContext *data,
avio_w8(data->buf, data->endbyte);
data->endbyte_bits = 0;
- av_init_packet(pkt);
- pkt->size = avio_close_dyn_buf(data->buf, &pkt->data);
- pkt->destruct = av_destruct_packet;
- pkt->stream_index = st->index;
+ ret = ff_rtp_finalize_packet(pkt, &data->buf, st->index);
+ if (ret < 0)
+ return ret;
if (!i)
pkt->flags |= AV_PKT_FLAG_KEY;
- data->buf = NULL;
return 0;
}
diff --git a/libavformat/rtpdec_jpeg.c b/libavformat/rtpdec_jpeg.c
index fb68de9702..447dd361bc 100644
--- a/libavformat/rtpdec_jpeg.c
+++ b/libavformat/rtpdec_jpeg.c
@@ -20,6 +20,7 @@
*/
#include "avformat.h"
+#include "rtpdec.h"
#include "rtpdec_formats.h"
#include "libavutil/intreadwrite.h"
#include "libavcodec/mjpeg.h"
@@ -367,19 +368,11 @@ static int jpeg_parse_packet(AVFormatContext *ctx, PayloadContext *jpeg,
avio_write(jpeg->frame, buf, sizeof(buf));
/* Prepare the JPEG packet. */
- av_init_packet(pkt);
- pkt->size = avio_close_dyn_buf(jpeg->frame, &pkt->data);
- if (pkt->size < 0) {
+ if ((ret = ff_rtp_finalize_packet(pkt, &jpeg->frame, st->index)) < 0) {
av_log(ctx, AV_LOG_ERROR,
"Error occured when getting frame buffer.\n");
- jpeg->frame = NULL;
- return pkt->size;
+ return ret;
}
- pkt->stream_index = st->index;
- pkt->destruct = av_destruct_packet;
-
- /* Re-init the frame buffer. */
- jpeg->frame = NULL;
return 0;
}
diff --git a/libavformat/rtpdec_svq3.c b/libavformat/rtpdec_svq3.c
index 99c4c52dee..779ad8a42b 100644
--- a/libavformat/rtpdec_svq3.c
+++ b/libavformat/rtpdec_svq3.c
@@ -97,12 +97,11 @@ static int svq3_parse_packet (AVFormatContext *s, PayloadContext *sv,
avio_write(sv->pktbuf, buf, len);
if (end_packet) {
- av_init_packet(pkt);
- pkt->stream_index = st->index;
+ int ret = ff_rtp_finalize_packet(pkt, &sv->pktbuf, st->index);
+ if (ret < 0)
+ return ret;
+
*timestamp = sv->timestamp;
- pkt->size = avio_close_dyn_buf(sv->pktbuf, &pkt->data);
- pkt->destruct = av_destruct_packet;
- sv->pktbuf = NULL;
return 0;
}
diff --git a/libavformat/rtpdec_vp8.c b/libavformat/rtpdec_vp8.c
index aef5f78132..1edc152a48 100644
--- a/libavformat/rtpdec_vp8.c
+++ b/libavformat/rtpdec_vp8.c
@@ -36,15 +36,6 @@ struct PayloadContext {
uint32_t timestamp;
};
-static void prepare_packet(AVPacket *pkt, PayloadContext *vp8, int stream)
-{
- av_init_packet(pkt);
- pkt->stream_index = stream;
- pkt->size = avio_close_dyn_buf(vp8->data, &pkt->data);
- pkt->destruct = av_destruct_packet;
- vp8->data = NULL;
-}
-
static int vp8_handle_packet(AVFormatContext *ctx,
PayloadContext *vp8,
AVStream *st,
@@ -54,16 +45,14 @@ static int vp8_handle_packet(AVFormatContext *ctx,
int len, int flags)
{
int start_partition, end_packet;
- int extended_bits, non_ref, part_id;
+ int extended_bits, part_id;
int pictureid_present = 0, tl0picidx_present = 0, tid_present = 0,
keyidx_present = 0;
- int pictureid = -1, keyidx = -1;
if (len < 1)
return AVERROR_INVALIDDATA;
extended_bits = buf[0] & 0x80;
- non_ref = buf[0] & 0x20;
start_partition = buf[0] & 0x10;
part_id = buf[0] & 0x0f;
end_packet = flags & RTP_FLAG_MARKER;
@@ -80,19 +69,12 @@ static int vp8_handle_packet(AVFormatContext *ctx,
len--;
}
if (pictureid_present) {
+ int size;
if (len < 1)
return AVERROR_INVALIDDATA;
- if (buf[0] & 0x80) {
- if (len < 2)
- return AVERROR_INVALIDDATA;
- pictureid = AV_RB16(buf) & 0x7fff;
- buf += 2;
- len -= 2;
- } else {
- pictureid = buf[0] & 0x7f;
- buf++;
- len--;
- }
+ size = buf[0] & 0x80 ? 2 : 1;
+ buf += size;
+ len -= size;
}
if (tl0picidx_present) {
// Ignoring temporal level zero index
@@ -100,11 +82,7 @@ static int vp8_handle_packet(AVFormatContext *ctx,
len--;
}
if (tid_present || keyidx_present) {
- // Ignoring temporal layer index and layer sync bit
- if (len < 1)
- return AVERROR_INVALIDDATA;
- if (keyidx_present)
- keyidx = buf[0] & 0x1f;
+ // Ignoring temporal layer index, layer sync bit and keyframe index
buf++;
len--;
}
@@ -133,7 +111,9 @@ static int vp8_handle_packet(AVFormatContext *ctx,
avio_write(vp8->data, buf, len);
if (end_packet) {
- prepare_packet(pkt, vp8, st->index);
+ int ret = ff_rtp_finalize_packet(pkt, &vp8->data, st->index);
+ if (ret < 0)
+ return ret;
return 0;
}
diff --git a/libavformat/rtpdec_xiph.c b/libavformat/rtpdec_xiph.c
index 773f4aa391..ad24c2671a 100644
--- a/libavformat/rtpdec_xiph.c
+++ b/libavformat/rtpdec_xiph.c
@@ -201,20 +201,13 @@ static int xiph_handle_packet(AVFormatContext * ctx,
if (fragmented == 3) {
// end of xiph data packet
- av_init_packet(pkt);
- pkt->size = avio_close_dyn_buf(data->fragment, &pkt->data);
-
- if (pkt->size < 0) {
+ int ret = ff_rtp_finalize_packet(pkt, &data->fragment, st->index);
+ if (ret < 0) {
av_log(ctx, AV_LOG_ERROR,
"Error occurred when getting fragment buffer.");
- return pkt->size;
+ return ret;
}
- pkt->stream_index = st->index;
- pkt->destruct = av_destruct_packet;
-
- data->fragment = NULL;
-
return 0;
}
}