author    wm4 <nfxjfg@googlemail.com>    2013-07-18 15:45:36 +0200
committer Stefano Sabatini <stefasab@gmail.com>    2013-07-19 12:14:46 +0200
commit    9f31c1608c7440e08d47294945752385632294b5 (patch)
tree      009eaec8a72a1db5ae1a996b3baca027b1a2fd61 /doc/examples
parent    06b269dacbe172b5d5c7283a0f70498ba065b77e (diff)
examples: demuxing: simplify audio output
There is no reason why this should copy the audio data in such a complicated way. Also, write strictly the first plane, instead of writing the whole buffer. This is more helpful in the context of the example: this way a user can clearly confirm that it works by playing the written data as raw audio.
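(For instance, if the decoder produced planar 16-bit audio at 44100 Hz, the single plane written by the example could be checked with something like "ffplay -f s16le -ac 1 -ar 44100 <output file>"; the sample format, channel count and rate here are illustrative and depend on the actual input file and decoder output.)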
Diffstat (limited to 'doc/examples')
-rw-r--r--  doc/examples/demuxing.c | 46
1 file changed, 10 insertions(+), 36 deletions(-)
diff --git a/doc/examples/demuxing.c b/doc/examples/demuxing.c
index 1c0f1ff40c..acd2d413fa 100644
--- a/doc/examples/demuxing.c
+++ b/doc/examples/demuxing.c
@@ -47,10 +47,6 @@ static uint8_t *video_dst_data[4] = {NULL};
static int video_dst_linesize[4];
static int video_dst_bufsize;
-static uint8_t **audio_dst_data = NULL;
-static int audio_dst_linesize;
-static int audio_dst_bufsize;
-
static int video_stream_idx = -1, audio_stream_idx = -1;
static AVFrame *frame = NULL;
static AVPacket pkt;
@@ -99,31 +95,21 @@ static int decode_packet(int *got_frame, int cached)
decoded = FFMIN(ret, pkt.size);
if (*got_frame) {
+ size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
printf("audio_frame%s n:%d nb_samples:%d pts:%s\n",
cached ? "(cached)" : "",
audio_frame_count++, frame->nb_samples,
av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));
- ret = av_samples_alloc(audio_dst_data, &audio_dst_linesize, av_frame_get_channels(frame),
- frame->nb_samples, frame->format, 1);
- if (ret < 0) {
- fprintf(stderr, "Could not allocate audio buffer\n");
- return AVERROR(ENOMEM);
- }
-
- /* TODO: extend return code of the av_samples_* functions so that this call is not needed */
- audio_dst_bufsize =
- av_samples_get_buffer_size(NULL, av_frame_get_channels(frame),
- frame->nb_samples, frame->format, 1);
-
- /* copy audio data to destination buffer:
- * this is required since rawaudio expects non aligned data */
- av_samples_copy(audio_dst_data, frame->data, 0, 0,
- frame->nb_samples, av_frame_get_channels(frame), frame->format);
-
- /* write to rawaudio file */
- fwrite(audio_dst_data[0], 1, audio_dst_bufsize, audio_dst_file);
- av_freep(&audio_dst_data[0]);
+ /* Write the raw audio data samples of the first plane. This works
+ * fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
+ * most audio decoders output planar audio, which uses a separate
+ * plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
+ * In other words, this code will write only the first audio channel
+ * in these cases.
+ * You should use libswresample or libavfilter to convert the frame
+ * to packed data. */
+ fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
}
}
@@ -250,8 +236,6 @@ int main (int argc, char **argv)
}
if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
- int nb_planes;
-
audio_stream = fmt_ctx->streams[audio_stream_idx];
audio_dec_ctx = audio_stream->codec;
audio_dst_file = fopen(audio_dst_filename, "wb");
@@ -260,15 +244,6 @@ int main (int argc, char **argv)
ret = 1;
goto end;
}
-
- nb_planes = av_sample_fmt_is_planar(audio_dec_ctx->sample_fmt) ?
- audio_dec_ctx->channels : 1;
- audio_dst_data = av_mallocz(sizeof(uint8_t *) * nb_planes);
- if (!audio_dst_data) {
- fprintf(stderr, "Could not allocate audio data buffers\n");
- ret = AVERROR(ENOMEM);
- goto end;
- }
}
/* dump input information to stderr */
@@ -349,7 +324,6 @@ end:
fclose(audio_dst_file);
av_free(frame);
av_free(video_dst_data[0]);
- av_free(audio_dst_data);
return ret < 0;
}
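
The comment added in the second hunk points at libswresample for turning planar decoder output into packed samples before writing. A minimal sketch of that conversion, using libswresample's swr_alloc_set_opts()/swr_convert(), might look as follows; the helper name write_packed_samples() and the reduced error handling are illustrative only and are not part of demuxing.c:

#include <stdio.h>
#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>
#include <libavutil/samplefmt.h>
#include <libswresample/swresample.h>

/* Hypothetical helper (not part of demuxing.c): convert one decoded,
 * possibly planar frame to its packed equivalent and write it to dst.
 * Error handling is reduced to a single failure path to keep the
 * sketch short. */
static int write_packed_samples(AVFrame *frame, FILE *dst)
{
    enum AVSampleFormat out_fmt = av_get_packed_sample_fmt(frame->format);
    int channels = av_frame_get_channels(frame);
    /* Fall back to a default layout when the decoder did not set one. */
    int64_t layout = frame->channel_layout ? frame->channel_layout :
                     av_get_default_channel_layout(channels);
    uint8_t *out_data[1] = { NULL };
    int out_linesize, nb_converted, ret = -1;

    struct SwrContext *swr = swr_alloc_set_opts(NULL,
            layout, out_fmt,       frame->sample_rate,
            layout, frame->format, frame->sample_rate,
            0, NULL);
    if (!swr || swr_init(swr) < 0)
        goto end;

    /* Packed output occupies a single plane, hence a single pointer. */
    if (av_samples_alloc(out_data, &out_linesize, channels,
                         frame->nb_samples, out_fmt, 0) < 0)
        goto end;

    nb_converted = swr_convert(swr, out_data, frame->nb_samples,
                               (const uint8_t **)frame->extended_data,
                               frame->nb_samples);
    if (nb_converted < 0)
        goto end;

    fwrite(out_data[0], 1,
           nb_converted * channels * av_get_bytes_per_sample(out_fmt), dst);
    ret = 0;

end:
    av_freep(&out_data[0]);
    swr_free(&swr);
    return ret;
}

Plugging such a helper into decode_packet() in place of the plain fwrite() would write all channels interleaved, at the cost of reintroducing the extra allocation and copy that this commit deliberately removes from the example.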