summaryrefslogtreecommitdiff
path: root/doc/examples
diff options
context:
space:
mode:
Diffstat (limited to 'doc/examples')
-rw-r--r--doc/examples/Makefile46
-rw-r--r--doc/examples/README23
-rw-r--r--doc/examples/avio_dir_cmd.c180
-rw-r--r--doc/examples/avio_reading.c134
-rw-r--r--doc/examples/decoding_encoding.c (renamed from doc/examples/avcodec.c)350
-rw-r--r--doc/examples/demuxing_decoding.c383
-rw-r--r--doc/examples/extract_mvs.c185
-rw-r--r--doc/examples/filter_audio.c8
-rw-r--r--doc/examples/filtering_audio.c295
-rw-r--r--doc/examples/filtering_video.c280
-rw-r--r--doc/examples/http_multiclient.c155
-rw-r--r--doc/examples/metadata.c4
-rw-r--r--doc/examples/muxing.c (renamed from doc/examples/output.c)466
-rw-r--r--doc/examples/remuxing.c165
-rw-r--r--doc/examples/resampling_audio.c214
-rw-r--r--doc/examples/scaling_video.c140
-rw-r--r--doc/examples/transcode_aac.c103
-rw-r--r--doc/examples/transcoding.c582
18 files changed, 3274 insertions, 439 deletions
diff --git a/doc/examples/Makefile b/doc/examples/Makefile
new file mode 100644
index 0000000000..af3815995a
--- /dev/null
+++ b/doc/examples/Makefile
@@ -0,0 +1,46 @@
+# use pkg-config for getting CFLAGS and LDLIBS
+FFMPEG_LIBS= libavdevice \
+ libavformat \
+ libavfilter \
+ libavcodec \
+ libswresample \
+ libswscale \
+ libavutil \
+
+CFLAGS += -Wall -g
+CFLAGS := $(shell pkg-config --cflags $(FFMPEG_LIBS)) $(CFLAGS)
+LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS)
+
+EXAMPLES= avio_dir_cmd \
+ avio_reading \
+ decoding_encoding \
+ demuxing_decoding \
+ extract_mvs \
+ filtering_video \
+ filtering_audio \
+ http_multiclient \
+ metadata \
+ muxing \
+ remuxing \
+ resampling_audio \
+ scaling_video \
+ transcode_aac \
+ transcoding \
+
+OBJS=$(addsuffix .o,$(EXAMPLES))
+
+# the following examples make explicit use of the math library
+avcodec: LDLIBS += -lm
+decoding_encoding: LDLIBS += -lm
+muxing: LDLIBS += -lm
+resampling_audio: LDLIBS += -lm
+
+.PHONY: all clean-test clean
+
+all: $(OBJS) $(EXAMPLES)
+
+clean-test:
+ $(RM) test*.pgm test.h264 test.mp2 test.sw test.pcm test.mpg
+
+clean: clean-test
+ $(RM) $(EXAMPLES) $(OBJS)
diff --git a/doc/examples/README b/doc/examples/README
new file mode 100644
index 0000000000..c1ce619d35
--- /dev/null
+++ b/doc/examples/README
@@ -0,0 +1,23 @@
+FFmpeg examples README
+----------------------
+
+Both of the following use cases rely on pkg-config and make, so make sure
+that you have them installed and working on your system.
+
+
+Method 1: build the installed examples in a generic read/write user directory
+
+Copy to a read/write user directory and just use "make", it will link
+to the libraries on your system, assuming the PKG_CONFIG_PATH is
+correctly configured.
+
+Method 2: build the examples in-tree
+
+Assuming you are in the source FFmpeg checkout directory, you need to build
+FFmpeg (no need to make install in any prefix). Then just run "make examples".
+This will build the examples using the FFmpeg build system. You can clean those
+examples using "make examplesclean".
+
+If you want to try the dedicated Makefile examples (to emulate the first
+method), go into doc/examples and run a command such as
+PKG_CONFIG_PATH=pc-uninstalled make.
diff --git a/doc/examples/avio_dir_cmd.c b/doc/examples/avio_dir_cmd.c
new file mode 100644
index 0000000000..50c435cf8f
--- /dev/null
+++ b/doc/examples/avio_dir_cmd.c
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2014 Lukasz Marek
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <libavcodec/avcodec.h>
+#include <libavformat/avformat.h>
+#include <libavformat/avio.h>
+
+static const char *type_string(int type)
+{
+ switch (type) {
+ case AVIO_ENTRY_DIRECTORY:
+ return "<DIR>";
+ case AVIO_ENTRY_FILE:
+ return "<FILE>";
+ case AVIO_ENTRY_BLOCK_DEVICE:
+ return "<BLOCK DEVICE>";
+ case AVIO_ENTRY_CHARACTER_DEVICE:
+ return "<CHARACTER DEVICE>";
+ case AVIO_ENTRY_NAMED_PIPE:
+ return "<PIPE>";
+ case AVIO_ENTRY_SYMBOLIC_LINK:
+ return "<LINK>";
+ case AVIO_ENTRY_SOCKET:
+ return "<SOCKET>";
+ case AVIO_ENTRY_SERVER:
+ return "<SERVER>";
+ case AVIO_ENTRY_SHARE:
+ return "<SHARE>";
+ case AVIO_ENTRY_WORKGROUP:
+ return "<WORKGROUP>";
+ case AVIO_ENTRY_UNKNOWN:
+ default:
+ break;
+ }
+ return "<UNKNOWN>";
+}
+
+static int list_op(const char *input_dir)
+{
+ AVIODirEntry *entry = NULL;
+ AVIODirContext *ctx = NULL;
+ int cnt, ret;
+ char filemode[4], uid_and_gid[20];
+
+ if ((ret = avio_open_dir(&ctx, input_dir, NULL)) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot open directory: %s.\n", av_err2str(ret));
+ goto fail;
+ }
+
+ cnt = 0;
+ for (;;) {
+ if ((ret = avio_read_dir(ctx, &entry)) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot list directory: %s.\n", av_err2str(ret));
+ goto fail;
+ }
+ if (!entry)
+ break;
+ if (entry->filemode == -1) {
+ snprintf(filemode, 4, "???");
+ } else {
+ snprintf(filemode, 4, "%3"PRIo64, entry->filemode);
+ }
+ snprintf(uid_and_gid, 20, "%"PRId64"(%"PRId64")", entry->user_id, entry->group_id);
+ if (cnt == 0)
+ av_log(NULL, AV_LOG_INFO, "%-9s %12s %30s %10s %s %16s %16s %16s\n",
+ "TYPE", "SIZE", "NAME", "UID(GID)", "UGO", "MODIFIED",
+ "ACCESSED", "STATUS_CHANGED");
+ av_log(NULL, AV_LOG_INFO, "%-9s %12"PRId64" %30s %10s %s %16"PRId64" %16"PRId64" %16"PRId64"\n",
+ type_string(entry->type),
+ entry->size,
+ entry->name,
+ uid_and_gid,
+ filemode,
+ entry->modification_timestamp,
+ entry->access_timestamp,
+ entry->status_change_timestamp);
+ avio_free_directory_entry(&entry);
+ cnt++;
+ }
+
+ fail:
+ avio_close_dir(&ctx);
+ return ret;
+}
+
+static int del_op(const char *url)
+{
+ int ret = avpriv_io_delete(url);
+ if (ret < 0)
+ av_log(NULL, AV_LOG_ERROR, "Cannot delete '%s': %s.\n", url, av_err2str(ret));
+ return ret;
+}
+
+static int move_op(const char *src, const char *dst)
+{
+ int ret = avpriv_io_move(src, dst);
+ if (ret < 0)
+ av_log(NULL, AV_LOG_ERROR, "Cannot move '%s' into '%s': %s.\n", src, dst, av_err2str(ret));
+ return ret;
+}
+
+
+static void usage(const char *program_name)
+{
+ fprintf(stderr, "usage: %s OPERATION entry1 [entry2]\n"
+ "API example program to show how to manipulate resources "
+ "accessed through AVIOContext.\n"
+ "OPERATIONS:\n"
+ "list list content of the directory\n"
+ "move rename content in directory\n"
+ "del delete content in directory\n",
+ program_name);
+}
+
+int main(int argc, char *argv[])
+{
+ const char *op = NULL;
+ int ret;
+
+ av_log_set_level(AV_LOG_DEBUG);
+
+ if (argc < 2) {
+ usage(argv[0]);
+ return 1;
+ }
+
+ /* register codecs and formats and other lavf/lavc components*/
+ av_register_all();
+ avformat_network_init();
+
+ op = argv[1];
+ if (strcmp(op, "list") == 0) {
+ if (argc < 3) {
+ av_log(NULL, AV_LOG_INFO, "Missing argument for list operation.\n");
+ ret = AVERROR(EINVAL);
+ } else {
+ ret = list_op(argv[2]);
+ }
+ } else if (strcmp(op, "del") == 0) {
+ if (argc < 3) {
+ av_log(NULL, AV_LOG_INFO, "Missing argument for del operation.\n");
+ ret = AVERROR(EINVAL);
+ } else {
+ ret = del_op(argv[2]);
+ }
+ } else if (strcmp(op, "move") == 0) {
+ if (argc < 4) {
+ av_log(NULL, AV_LOG_INFO, "Missing argument for move operation.\n");
+ ret = AVERROR(EINVAL);
+ } else {
+ ret = move_op(argv[2], argv[3]);
+ }
+ } else {
+ av_log(NULL, AV_LOG_INFO, "Invalid operation %s\n", op);
+ ret = AVERROR(EINVAL);
+ }
+
+ avformat_network_deinit();
+
+ return ret < 0 ? 1 : 0;
+}
diff --git a/doc/examples/avio_reading.c b/doc/examples/avio_reading.c
new file mode 100644
index 0000000000..02474e907a
--- /dev/null
+++ b/doc/examples/avio_reading.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2014 Stefano Sabatini
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * libavformat AVIOContext API example.
+ *
+ * Make libavformat demuxer access media content through a custom
+ * AVIOContext read callback.
+ * @example avio_reading.c
+ */
+
+#include <libavcodec/avcodec.h>
+#include <libavformat/avformat.h>
+#include <libavformat/avio.h>
+#include <libavutil/file.h>
+
+struct buffer_data {
+ uint8_t *ptr;
+ size_t size; ///< size left in the buffer
+};
+
+static int read_packet(void *opaque, uint8_t *buf, int buf_size)
+{
+ struct buffer_data *bd = (struct buffer_data *)opaque;
+ buf_size = FFMIN(buf_size, bd->size);
+
+ printf("ptr:%p size:%zu\n", bd->ptr, bd->size);
+
+ /* copy internal buffer data to buf */
+ memcpy(buf, bd->ptr, buf_size);
+ bd->ptr += buf_size;
+ bd->size -= buf_size;
+
+ return buf_size;
+}
+
+int main(int argc, char *argv[])
+{
+ AVFormatContext *fmt_ctx = NULL;
+ AVIOContext *avio_ctx = NULL;
+ uint8_t *buffer = NULL, *avio_ctx_buffer = NULL;
+ size_t buffer_size, avio_ctx_buffer_size = 4096;
+ char *input_filename = NULL;
+ int ret = 0;
+ struct buffer_data bd = { 0 };
+
+ if (argc != 2) {
+ fprintf(stderr, "usage: %s input_file\n"
+ "API example program to show how to read from a custom buffer "
+ "accessed through AVIOContext.\n", argv[0]);
+ return 1;
+ }
+ input_filename = argv[1];
+
+ /* register codecs and formats and other lavf/lavc components*/
+ av_register_all();
+
+ /* slurp file content into buffer */
+ ret = av_file_map(input_filename, &buffer, &buffer_size, 0, NULL);
+ if (ret < 0)
+ goto end;
+
+ /* fill opaque structure used by the AVIOContext read callback */
+ bd.ptr = buffer;
+ bd.size = buffer_size;
+
+ if (!(fmt_ctx = avformat_alloc_context())) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+
+ avio_ctx_buffer = av_malloc(avio_ctx_buffer_size);
+ if (!avio_ctx_buffer) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ avio_ctx = avio_alloc_context(avio_ctx_buffer, avio_ctx_buffer_size,
+ 0, &bd, &read_packet, NULL, NULL);
+ if (!avio_ctx) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ fmt_ctx->pb = avio_ctx;
+
+ ret = avformat_open_input(&fmt_ctx, NULL, NULL, NULL);
+ if (ret < 0) {
+ fprintf(stderr, "Could not open input\n");
+ goto end;
+ }
+
+ ret = avformat_find_stream_info(fmt_ctx, NULL);
+ if (ret < 0) {
+ fprintf(stderr, "Could not find stream information\n");
+ goto end;
+ }
+
+ av_dump_format(fmt_ctx, 0, input_filename, 0);
+
+end:
+ avformat_close_input(&fmt_ctx);
+ /* note: the internal buffer could have changed, and be != avio_ctx_buffer */
+ if (avio_ctx) {
+ av_freep(&avio_ctx->buffer);
+ av_freep(&avio_ctx);
+ }
+ av_file_unmap(buffer, buffer_size);
+
+ if (ret < 0) {
+ fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/doc/examples/avcodec.c b/doc/examples/decoding_encoding.c
index df0af4b1ea..06a98a630e 100644
--- a/doc/examples/avcodec.c
+++ b/doc/examples/decoding_encoding.c
@@ -1,47 +1,44 @@
/*
- * copyright (c) 2001 Fabrice Bellard
+ * Copyright (c) 2001 Fabrice Bellard
*
- * This file is part of Libav.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
*
- * Libav is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
- * Libav is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
*/
/**
* @file
* libavcodec API use example.
*
- * @example avcodec.c
- * Note that this library only handles codecs (mpeg, mpeg4, etc...),
- * not file formats (avi, vob, etc...). See library 'libavformat' for the
+ * @example decoding_encoding.c
+ * Note that libavcodec only handles codecs (mpeg, mpeg4, etc...),
+ * not file formats (avi, vob, mp4, mov, mkv, mxf, flv, mpegts, mpegps, etc...). See library 'libavformat' for the
* format handling
*/
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-
-#ifdef HAVE_AV_CONFIG_H
-#undef HAVE_AV_CONFIG_H
-#endif
+#include <math.h>
-#include "libavcodec/avcodec.h"
-#include "libavutil/channel_layout.h"
-#include "libavutil/common.h"
-#include "libavutil/imgutils.h"
-#include "libavutil/mathematics.h"
-#include "libavutil/samplefmt.h"
+#include <libavutil/opt.h>
+#include <libavcodec/avcodec.h>
+#include <libavutil/channel_layout.h>
+#include <libavutil/common.h>
+#include <libavutil/imgutils.h>
+#include <libavutil/mathematics.h>
+#include <libavutil/samplefmt.h>
#define INBUF_SIZE 4096
#define AUDIO_INBUF_SIZE 20480
@@ -115,16 +112,20 @@ static void audio_encode_example(const char *filename)
uint16_t *samples;
float t, tincr;
- printf("Audio encoding\n");
+ printf("Encode audio file %s\n", filename);
/* find the MP2 encoder */
codec = avcodec_find_encoder(AV_CODEC_ID_MP2);
if (!codec) {
- fprintf(stderr, "codec not found\n");
+ fprintf(stderr, "Codec not found\n");
exit(1);
}
c = avcodec_alloc_context3(codec);
+ if (!c) {
+ fprintf(stderr, "Could not allocate audio codec context\n");
+ exit(1);
+ }
/* put sample parameters */
c->bit_rate = 64000;
@@ -132,7 +133,7 @@ static void audio_encode_example(const char *filename)
/* check that the encoder supports s16 pcm input */
c->sample_fmt = AV_SAMPLE_FMT_S16;
if (!check_sample_fmt(codec, c->sample_fmt)) {
- fprintf(stderr, "encoder does not support %s",
+ fprintf(stderr, "Encoder does not support sample format %s",
av_get_sample_fmt_name(c->sample_fmt));
exit(1);
}
@@ -144,20 +145,20 @@ static void audio_encode_example(const char *filename)
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
- fprintf(stderr, "could not open codec\n");
+ fprintf(stderr, "Could not open codec\n");
exit(1);
}
f = fopen(filename, "wb");
if (!f) {
- fprintf(stderr, "could not open %s\n", filename);
+ fprintf(stderr, "Could not open %s\n", filename);
exit(1);
}
/* frame containing input raw audio */
frame = av_frame_alloc();
if (!frame) {
- fprintf(stderr, "could not allocate audio frame\n");
+ fprintf(stderr, "Could not allocate audio frame\n");
exit(1);
}
@@ -169,9 +170,13 @@ static void audio_encode_example(const char *filename)
* we calculate the size of the samples buffer in bytes */
buffer_size = av_samples_get_buffer_size(NULL, c->channels, c->frame_size,
c->sample_fmt, 0);
+ if (buffer_size < 0) {
+ fprintf(stderr, "Could not get sample buffer size\n");
+ exit(1);
+ }
samples = av_malloc(buffer_size);
if (!samples) {
- fprintf(stderr, "could not allocate %d bytes for samples buffer\n",
+ fprintf(stderr, "Could not allocate %d bytes for samples buffer\n",
buffer_size);
exit(1);
}
@@ -179,14 +184,14 @@ static void audio_encode_example(const char *filename)
ret = avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
(const uint8_t*)samples, buffer_size, 0);
if (ret < 0) {
- fprintf(stderr, "could not setup audio frame\n");
+ fprintf(stderr, "Could not setup audio frame\n");
exit(1);
}
/* encode a single tone sound */
t = 0;
tincr = 2 * M_PI * 440.0 / c->sample_rate;
- for(i=0;i<200;i++) {
+ for (i = 0; i < 200; i++) {
av_init_packet(&pkt);
pkt.data = NULL; // packet data will be allocated by the encoder
pkt.size = 0;
@@ -201,9 +206,23 @@ static void audio_encode_example(const char *filename)
/* encode the samples */
ret = avcodec_encode_audio2(c, &pkt, frame, &got_output);
if (ret < 0) {
- fprintf(stderr, "error encoding audio frame\n");
+ fprintf(stderr, "Error encoding audio frame\n");
+ exit(1);
+ }
+ if (got_output) {
+ fwrite(pkt.data, 1, pkt.size, f);
+ av_packet_unref(&pkt);
+ }
+ }
+
+ /* get the delayed frames */
+ for (got_output = 1; got_output; i++) {
+ ret = avcodec_encode_audio2(c, &pkt, NULL, &got_output);
+ if (ret < 0) {
+ fprintf(stderr, "Error encoding frame\n");
exit(1);
}
+
if (got_output) {
fwrite(pkt.data, 1, pkt.size, f);
av_packet_unref(&pkt);
@@ -232,26 +251,30 @@ static void audio_decode_example(const char *outfilename, const char *filename)
av_init_packet(&avpkt);
- printf("Audio decoding\n");
+ printf("Decode audio file %s to %s\n", filename, outfilename);
/* find the mpeg audio decoder */
codec = avcodec_find_decoder(AV_CODEC_ID_MP2);
if (!codec) {
- fprintf(stderr, "codec not found\n");
+ fprintf(stderr, "Codec not found\n");
exit(1);
}
c = avcodec_alloc_context3(codec);
+ if (!c) {
+ fprintf(stderr, "Could not allocate audio codec context\n");
+ exit(1);
+ }
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
- fprintf(stderr, "could not open codec\n");
+ fprintf(stderr, "Could not open codec\n");
exit(1);
}
f = fopen(filename, "rb");
if (!f) {
- fprintf(stderr, "could not open %s\n", filename);
+ fprintf(stderr, "Could not open %s\n", filename);
exit(1);
}
outfile = fopen(outfilename, "wb");
@@ -265,11 +288,12 @@ static void audio_decode_example(const char *outfilename, const char *filename)
avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f);
while (avpkt.size > 0) {
+ int i, ch;
int got_frame = 0;
if (!decoded_frame) {
if (!(decoded_frame = av_frame_alloc())) {
- fprintf(stderr, "out of memory\n");
+ fprintf(stderr, "Could not allocate audio frame\n");
exit(1);
}
}
@@ -281,13 +305,20 @@ static void audio_decode_example(const char *outfilename, const char *filename)
}
if (got_frame) {
/* if a frame has been decoded, output it */
- int data_size = av_samples_get_buffer_size(NULL, c->channels,
- decoded_frame->nb_samples,
- c->sample_fmt, 1);
- fwrite(decoded_frame->data[0], 1, data_size, outfile);
+ int data_size = av_get_bytes_per_sample(c->sample_fmt);
+ if (data_size < 0) {
+ /* This should not occur, checking just for paranoia */
+ fprintf(stderr, "Failed to calculate data size\n");
+ exit(1);
+ }
+ for (i=0; i<decoded_frame->nb_samples; i++)
+ for (ch=0; ch<c->channels; ch++)
+ fwrite(decoded_frame->data[ch] + data_size*i, 1, data_size, outfile);
}
avpkt.size -= len;
avpkt.data += len;
+ avpkt.dts =
+ avpkt.pts = AV_NOPTS_VALUE;
if (avpkt.size < AUDIO_REFILL_THRESH) {
/* Refill the input buffer, to avoid trying to decode
* incomplete frames. Instead of this, one could also use
@@ -313,27 +344,30 @@ static void audio_decode_example(const char *outfilename, const char *filename)
/*
* Video encoding example
*/
-static void video_encode_example(const char *filename)
+static void video_encode_example(const char *filename, int codec_id)
{
AVCodec *codec;
AVCodecContext *c= NULL;
int i, ret, x, y, got_output;
FILE *f;
- AVFrame *picture;
+ AVFrame *frame;
AVPacket pkt;
uint8_t endcode[] = { 0, 0, 1, 0xb7 };
- printf("Video encoding\n");
+ printf("Encode video file %s\n", filename);
/* find the mpeg1 video encoder */
- codec = avcodec_find_encoder(AV_CODEC_ID_MPEG1VIDEO);
+ codec = avcodec_find_encoder(codec_id);
if (!codec) {
- fprintf(stderr, "codec not found\n");
+ fprintf(stderr, "Codec not found\n");
exit(1);
}
c = avcodec_alloc_context3(codec);
- picture = av_frame_alloc();
+ if (!c) {
+ fprintf(stderr, "Could not allocate video codec context\n");
+ exit(1);
+ }
/* put sample parameters */
c->bit_rate = 400000;
@@ -341,35 +375,52 @@ static void video_encode_example(const char *filename)
c->width = 352;
c->height = 288;
/* frames per second */
- c->time_base= (AVRational){1,25};
- c->gop_size = 10; /* emit one intra frame every ten frames */
- c->max_b_frames=1;
+ c->time_base = (AVRational){1,25};
+ /* emit one intra frame every ten frames
+ * check frame pict_type before passing frame
+ * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
+ * then gop_size is ignored and the output of encoder
+ * will always be I frame irrespective to gop_size
+ */
+ c->gop_size = 10;
+ c->max_b_frames = 1;
c->pix_fmt = AV_PIX_FMT_YUV420P;
+ if (codec_id == AV_CODEC_ID_H264)
+ av_opt_set(c->priv_data, "preset", "slow", 0);
+
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
- fprintf(stderr, "could not open codec\n");
+ fprintf(stderr, "Could not open codec\n");
exit(1);
}
f = fopen(filename, "wb");
if (!f) {
- fprintf(stderr, "could not open %s\n", filename);
+ fprintf(stderr, "Could not open %s\n", filename);
+ exit(1);
+ }
+
+ frame = av_frame_alloc();
+ if (!frame) {
+ fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
+ frame->format = c->pix_fmt;
+ frame->width = c->width;
+ frame->height = c->height;
- ret = av_image_alloc(picture->data, picture->linesize, c->width, c->height,
+ /* the image can be allocated by any means and av_image_alloc() is
+ * just the most convenient way if av_malloc() is to be used */
+ ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
c->pix_fmt, 32);
if (ret < 0) {
- fprintf(stderr, "could not alloc raw picture buffer\n");
+ fprintf(stderr, "Could not allocate raw picture buffer\n");
exit(1);
}
- picture->format = c->pix_fmt;
- picture->width = c->width;
- picture->height = c->height;
/* encode 1 second of video */
- for(i=0;i<25;i++) {
+ for (i = 0; i < 25; i++) {
av_init_packet(&pkt);
pkt.data = NULL; // packet data will be allocated by the encoder
pkt.size = 0;
@@ -377,31 +428,31 @@ static void video_encode_example(const char *filename)
fflush(stdout);
/* prepare a dummy image */
/* Y */
- for(y=0;y<c->height;y++) {
- for(x=0;x<c->width;x++) {
- picture->data[0][y * picture->linesize[0] + x] = x + y + i * 3;
+ for (y = 0; y < c->height; y++) {
+ for (x = 0; x < c->width; x++) {
+ frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
}
}
/* Cb and Cr */
- for(y=0;y<c->height/2;y++) {
- for(x=0;x<c->width/2;x++) {
- picture->data[1][y * picture->linesize[1] + x] = 128 + y + i * 2;
- picture->data[2][y * picture->linesize[2] + x] = 64 + x + i * 5;
+ for (y = 0; y < c->height/2; y++) {
+ for (x = 0; x < c->width/2; x++) {
+ frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
+ frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
}
}
- picture->pts = i;
+ frame->pts = i;
/* encode the image */
- ret = avcodec_encode_video2(c, &pkt, picture, &got_output);
+ ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
if (ret < 0) {
- fprintf(stderr, "error encoding frame\n");
+ fprintf(stderr, "Error encoding frame\n");
exit(1);
}
if (got_output) {
- printf("encoding frame %3d (size=%5d)\n", i, pkt.size);
+ printf("Write frame %3d (size=%5d)\n", i, pkt.size);
fwrite(pkt.data, 1, pkt.size, f);
av_packet_unref(&pkt);
}
@@ -413,12 +464,12 @@ static void video_encode_example(const char *filename)
ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
if (ret < 0) {
- fprintf(stderr, "error encoding frame\n");
+ fprintf(stderr, "Error encoding frame\n");
exit(1);
}
if (got_output) {
- printf("encoding frame %3d (size=%5d)\n", i, pkt.size);
+ printf("Write frame %3d (size=%5d)\n", i, pkt.size);
fwrite(pkt.data, 1, pkt.size, f);
av_packet_unref(&pkt);
}
@@ -430,8 +481,8 @@ static void video_encode_example(const char *filename)
avcodec_close(c);
av_free(c);
- av_freep(&picture->data[0]);
- av_frame_free(&picture);
+ av_freep(&frame->data[0]);
+ av_frame_free(&frame);
printf("\n");
}
@@ -445,22 +496,49 @@ static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
FILE *f;
int i;
- f=fopen(filename,"w");
- fprintf(f,"P5\n%d %d\n%d\n",xsize,ysize,255);
- for(i=0;i<ysize;i++)
- fwrite(buf + i * wrap,1,xsize,f);
+ f = fopen(filename,"w");
+ fprintf(f, "P5\n%d %d\n%d\n", xsize, ysize, 255);
+ for (i = 0; i < ysize; i++)
+ fwrite(buf + i * wrap, 1, xsize, f);
fclose(f);
}
+static int decode_write_frame(const char *outfilename, AVCodecContext *avctx,
+ AVFrame *frame, int *frame_count, AVPacket *pkt, int last)
+{
+ int len, got_frame;
+ char buf[1024];
+
+ len = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
+ if (len < 0) {
+ fprintf(stderr, "Error while decoding frame %d\n", *frame_count);
+ return len;
+ }
+ if (got_frame) {
+ printf("Saving %sframe %3d\n", last ? "last " : "", *frame_count);
+ fflush(stdout);
+
+ /* the picture is allocated by the decoder, no need to free it */
+ snprintf(buf, sizeof(buf), outfilename, *frame_count);
+ pgm_save(frame->data[0], frame->linesize[0],
+ frame->width, frame->height, buf);
+ (*frame_count)++;
+ }
+ if (pkt->data) {
+ pkt->size -= len;
+ pkt->data += len;
+ }
+ return 0;
+}
+
static void video_decode_example(const char *outfilename, const char *filename)
{
AVCodec *codec;
AVCodecContext *c= NULL;
- int frame, got_picture, len;
+ int frame_count;
FILE *f;
- AVFrame *picture;
+ AVFrame *frame;
uint8_t inbuf[INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
- char buf[1024];
AVPacket avpkt;
av_init_packet(&avpkt);
@@ -468,17 +546,20 @@ static void video_decode_example(const char *outfilename, const char *filename)
/* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
memset(inbuf + INBUF_SIZE, 0, AV_INPUT_BUFFER_PADDING_SIZE);
- printf("Video decoding\n");
+ printf("Decode video file %s to %s\n", filename, outfilename);
/* find the mpeg1 video decoder */
codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO);
if (!codec) {
- fprintf(stderr, "codec not found\n");
+ fprintf(stderr, "Codec not found\n");
exit(1);
}
c = avcodec_alloc_context3(codec);
- picture = av_frame_alloc();
+ if (!c) {
+ fprintf(stderr, "Could not allocate video codec context\n");
+ exit(1);
+ }
if (codec->capabilities & AV_CODEC_CAP_TRUNCATED)
c->flags |= AV_CODEC_FLAG_TRUNCATED; // we do not send complete frames
@@ -489,20 +570,24 @@ static void video_decode_example(const char *outfilename, const char *filename)
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
- fprintf(stderr, "could not open codec\n");
+ fprintf(stderr, "Could not open codec\n");
exit(1);
}
- /* the codec gives us the frame size, in samples */
-
f = fopen(filename, "rb");
if (!f) {
- fprintf(stderr, "could not open %s\n", filename);
+ fprintf(stderr, "Could not open %s\n", filename);
exit(1);
}
- frame = 0;
- for(;;) {
+ frame = av_frame_alloc();
+ if (!frame) {
+ fprintf(stderr, "Could not allocate video frame\n");
+ exit(1);
+ }
+
+ frame_count = 0;
+ for (;;) {
avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
if (avpkt.size == 0)
break;
@@ -523,26 +608,9 @@ static void video_decode_example(const char *outfilename, const char *filename)
/* here, we use a stream based decoder (mpeg1video), so we
feed decoder and see if it could decode a frame */
avpkt.data = inbuf;
- while (avpkt.size > 0) {
- len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
- if (len < 0) {
- fprintf(stderr, "Error while decoding frame %d\n", frame);
+ while (avpkt.size > 0)
+ if (decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 0) < 0)
exit(1);
- }
- if (got_picture) {
- printf("saving frame %3d\n", frame);
- fflush(stdout);
-
- /* the picture is allocated by the decoder. no need to
- free it */
- snprintf(buf, sizeof(buf), outfilename, frame);
- pgm_save(picture->data[0], picture->linesize[0],
- c->width, c->height, buf);
- frame++;
- }
- avpkt.size -= len;
- avpkt.data += len;
- }
}
/* some codecs, such as MPEG, transmit the I and P frame with a
@@ -550,46 +618,48 @@ static void video_decode_example(const char *outfilename, const char *filename)
chance to get the last frame of the video */
avpkt.data = NULL;
avpkt.size = 0;
- len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
- if (got_picture) {
- printf("saving last frame %3d\n", frame);
- fflush(stdout);
-
- /* the picture is allocated by the decoder. no need to
- free it */
- snprintf(buf, sizeof(buf), outfilename, frame);
- pgm_save(picture->data[0], picture->linesize[0],
- c->width, c->height, buf);
- frame++;
- }
+ decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 1);
fclose(f);
avcodec_close(c);
av_free(c);
- av_frame_free(&picture);
+ av_frame_free(&frame);
printf("\n");
}
int main(int argc, char **argv)
{
- const char *filename;
+ const char *output_type;
/* register all the codecs */
avcodec_register_all();
- if (argc <= 1) {
- audio_encode_example("/tmp/test.mp2");
- audio_decode_example("/tmp/test.sw", "/tmp/test.mp2");
-
- video_encode_example("/tmp/test.mpg");
- filename = "/tmp/test.mpg";
+ if (argc < 2) {
+ printf("usage: %s output_type\n"
+ "API example program to decode/encode a media stream with libavcodec.\n"
+ "This program generates a synthetic stream and encodes it to a file\n"
+ "named test.h264, test.mp2 or test.mpg depending on output_type.\n"
+ "The encoded stream is then decoded and written to a raw data output.\n"
+ "output_type must be chosen between 'h264', 'mp2', 'mpg'.\n",
+ argv[0]);
+ return 1;
+ }
+ output_type = argv[1];
+
+ if (!strcmp(output_type, "h264")) {
+ video_encode_example("test.h264", AV_CODEC_ID_H264);
+ } else if (!strcmp(output_type, "mp2")) {
+ audio_encode_example("test.mp2");
+ audio_decode_example("test.pcm", "test.mp2");
+ } else if (!strcmp(output_type, "mpg")) {
+ video_encode_example("test.mpg", AV_CODEC_ID_MPEG1VIDEO);
+ video_decode_example("test%02d.pgm", "test.mpg");
} else {
- filename = argv[1];
+ fprintf(stderr, "Invalid output type '%s', choose between 'h264', 'mp2', or 'mpg'\n",
+ output_type);
+ return 1;
}
- // audio_decode_example("/tmp/test.sw", filename);
- video_decode_example("/tmp/test%d.pgm", filename);
-
return 0;
}
diff --git a/doc/examples/demuxing_decoding.c b/doc/examples/demuxing_decoding.c
new file mode 100644
index 0000000000..59e0ccc986
--- /dev/null
+++ b/doc/examples/demuxing_decoding.c
@@ -0,0 +1,383 @@
+/*
+ * Copyright (c) 2012 Stefano Sabatini
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * Demuxing and decoding example.
+ *
+ * Show how to use the libavformat and libavcodec API to demux and
+ * decode audio and video data.
+ * @example demuxing_decoding.c
+ */
+
+#include <libavutil/imgutils.h>
+#include <libavutil/samplefmt.h>
+#include <libavutil/timestamp.h>
+#include <libavformat/avformat.h>
+
+static AVFormatContext *fmt_ctx = NULL;
+static AVCodecContext *video_dec_ctx = NULL, *audio_dec_ctx;
+static int width, height;
+static enum AVPixelFormat pix_fmt;
+static AVStream *video_stream = NULL, *audio_stream = NULL;
+static const char *src_filename = NULL;
+static const char *video_dst_filename = NULL;
+static const char *audio_dst_filename = NULL;
+static FILE *video_dst_file = NULL;
+static FILE *audio_dst_file = NULL;
+
+static uint8_t *video_dst_data[4] = {NULL};
+static int video_dst_linesize[4];
+static int video_dst_bufsize;
+
+static int video_stream_idx = -1, audio_stream_idx = -1;
+static AVFrame *frame = NULL;
+static AVPacket pkt;
+static int video_frame_count = 0;
+static int audio_frame_count = 0;
+
+/* Enable or disable frame reference counting. You are not supposed to support
+ * both paths in your application but pick the one most appropriate to your
+ * needs. Look for the use of refcount in this example to see the
+ * differences in API usage between the two approaches. */
+static int refcount = 0;
+
+static int decode_packet(int *got_frame, int cached)
+{
+ int ret = 0;
+ int decoded = pkt.size;
+
+ *got_frame = 0;
+
+ if (pkt.stream_index == video_stream_idx) {
+ /* decode video frame */
+ ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
+ if (ret < 0) {
+ fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
+ return ret;
+ }
+
+ if (*got_frame) {
+
+ if (frame->width != width || frame->height != height ||
+ frame->format != pix_fmt) {
+ /* To handle this change, one could call av_image_alloc again and
+ * decode the following frames into another rawvideo file. */
+ fprintf(stderr, "Error: Width, height and pixel format have to be "
+ "constant in a rawvideo file, but the width, height or "
+ "pixel format of the input video changed:\n"
+ "old: width = %d, height = %d, format = %s\n"
+ "new: width = %d, height = %d, format = %s\n",
+ width, height, av_get_pix_fmt_name(pix_fmt),
+ frame->width, frame->height,
+ av_get_pix_fmt_name(frame->format));
+ return -1;
+ }
+
+ printf("video_frame%s n:%d coded_n:%d pts:%s\n",
+ cached ? "(cached)" : "",
+ video_frame_count++, frame->coded_picture_number,
+ av_ts2timestr(frame->pts, &video_dec_ctx->time_base));
+
+ /* copy decoded frame to destination buffer:
+ * this is required since rawvideo expects non-aligned data */
+ av_image_copy(video_dst_data, video_dst_linesize,
+ (const uint8_t **)(frame->data), frame->linesize,
+ pix_fmt, width, height);
+
+ /* write to rawvideo file */
+ fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
+ }
+ } else if (pkt.stream_index == audio_stream_idx) {
+ /* decode audio frame */
+ ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt);
+ if (ret < 0) {
+ fprintf(stderr, "Error decoding audio frame (%s)\n", av_err2str(ret));
+ return ret;
+ }
+ /* Some audio decoders decode only part of the packet, and have to be
+ * called again with the remainder of the packet data.
+ * Sample: fate-suite/lossless-audio/luckynight-partial.shn
+ * Also, some decoders might over-read the packet. */
+ decoded = FFMIN(ret, pkt.size);
+
+ if (*got_frame) {
+ size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
+ printf("audio_frame%s n:%d nb_samples:%d pts:%s\n",
+ cached ? "(cached)" : "",
+ audio_frame_count++, frame->nb_samples,
+ av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));
+
+ /* Write the raw audio data samples of the first plane. This works
+ * fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
+ * most audio decoders output planar audio, which uses a separate
+ * plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
+ * In other words, this code will write only the first audio channel
+ * in these cases.
+ * You should use libswresample or libavfilter to convert the frame
+ * to packed data. */
+ fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
+ }
+ }
+
+ /* If we use frame reference counting, we own the data and need
+ * to de-reference it when we don't use it anymore */
+ if (*got_frame && refcount)
+ av_frame_unref(frame);
+
+ return decoded;
+}
+
+static int open_codec_context(int *stream_idx,
+ AVFormatContext *fmt_ctx, enum AVMediaType type)
+{
+ int ret, stream_index;
+ AVStream *st;
+ AVCodecContext *dec_ctx = NULL;
+ AVCodec *dec = NULL;
+ AVDictionary *opts = NULL;
+
+ ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
+ if (ret < 0) {
+ fprintf(stderr, "Could not find %s stream in input file '%s'\n",
+ av_get_media_type_string(type), src_filename);
+ return ret;
+ } else {
+ stream_index = ret;
+ st = fmt_ctx->streams[stream_index];
+
+ /* find decoder for the stream */
+ dec_ctx = st->codec;
+ dec = avcodec_find_decoder(dec_ctx->codec_id);
+ if (!dec) {
+ fprintf(stderr, "Failed to find %s codec\n",
+ av_get_media_type_string(type));
+ return AVERROR(EINVAL);
+ }
+
+ /* Init the decoders, with or without reference counting */
+ av_dict_set(&opts, "refcounted_frames", refcount ? "1" : "0", 0);
+ if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
+ fprintf(stderr, "Failed to open %s codec\n",
+ av_get_media_type_string(type));
+ return ret;
+ }
+ *stream_idx = stream_index;
+ }
+
+ return 0;
+}
+
+static int get_format_from_sample_fmt(const char **fmt,
+ enum AVSampleFormat sample_fmt)
+{
+ int i;
+ struct sample_fmt_entry {
+ enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
+ } sample_fmt_entries[] = {
+ { AV_SAMPLE_FMT_U8, "u8", "u8" },
+ { AV_SAMPLE_FMT_S16, "s16be", "s16le" },
+ { AV_SAMPLE_FMT_S32, "s32be", "s32le" },
+ { AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
+ { AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
+ };
+ *fmt = NULL;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
+ struct sample_fmt_entry *entry = &sample_fmt_entries[i];
+ if (sample_fmt == entry->sample_fmt) {
+ *fmt = AV_NE(entry->fmt_be, entry->fmt_le);
+ return 0;
+ }
+ }
+
+ fprintf(stderr,
+ "sample format %s is not supported as output format\n",
+ av_get_sample_fmt_name(sample_fmt));
+ return -1;
+}
+
+int main (int argc, char **argv)
+{
+ int ret = 0, got_frame;
+
+ if (argc != 4 && argc != 5) {
+ fprintf(stderr, "usage: %s [-refcount] input_file video_output_file audio_output_file\n"
+ "API example program to show how to read frames from an input file.\n"
+ "This program reads frames from a file, decodes them, and writes decoded\n"
+ "video frames to a rawvideo file named video_output_file, and decoded\n"
+ "audio frames to a rawaudio file named audio_output_file.\n\n"
+ "If the -refcount option is specified, the program uses the\n"
+ "reference counting frame system which allows keeping a copy of\n"
+ "the data for longer than one decode call.\n"
+ "\n", argv[0]);
+ exit(1);
+ }
+ if (argc == 5 && !strcmp(argv[1], "-refcount")) {
+ refcount = 1;
+ argv++;
+ }
+ src_filename = argv[1];
+ video_dst_filename = argv[2];
+ audio_dst_filename = argv[3];
+
+ /* register all formats and codecs */
+ av_register_all();
+
+ /* open input file, and allocate format context */
+ if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
+ fprintf(stderr, "Could not open source file %s\n", src_filename);
+ exit(1);
+ }
+
+ /* retrieve stream information */
+ if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
+ fprintf(stderr, "Could not find stream information\n");
+ exit(1);
+ }
+
+ if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
+ video_stream = fmt_ctx->streams[video_stream_idx];
+ video_dec_ctx = video_stream->codec;
+
+ video_dst_file = fopen(video_dst_filename, "wb");
+ if (!video_dst_file) {
+ fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
+ ret = 1;
+ goto end;
+ }
+
+ /* allocate image where the decoded image will be put */
+ width = video_dec_ctx->width;
+ height = video_dec_ctx->height;
+ pix_fmt = video_dec_ctx->pix_fmt;
+ ret = av_image_alloc(video_dst_data, video_dst_linesize,
+ width, height, pix_fmt, 1);
+ if (ret < 0) {
+ fprintf(stderr, "Could not allocate raw video buffer\n");
+ goto end;
+ }
+ video_dst_bufsize = ret;
+ }
+
+ if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
+ audio_stream = fmt_ctx->streams[audio_stream_idx];
+ audio_dec_ctx = audio_stream->codec;
+ audio_dst_file = fopen(audio_dst_filename, "wb");
+ if (!audio_dst_file) {
+ fprintf(stderr, "Could not open destination file %s\n", audio_dst_filename);
+ ret = 1;
+ goto end;
+ }
+ }
+
+ /* dump input information to stderr */
+ av_dump_format(fmt_ctx, 0, src_filename, 0);
+
+ if (!audio_stream && !video_stream) {
+ fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
+ ret = 1;
+ goto end;
+ }
+
+ frame = av_frame_alloc();
+ if (!frame) {
+ fprintf(stderr, "Could not allocate frame\n");
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+
+ /* initialize packet, set data to NULL, let the demuxer fill it */
+ av_init_packet(&pkt);
+ pkt.data = NULL;
+ pkt.size = 0;
+
+ if (video_stream)
+ printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
+ if (audio_stream)
+ printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);
+
+ /* read frames from the file */
+ while (av_read_frame(fmt_ctx, &pkt) >= 0) {
+ AVPacket orig_pkt = pkt;
+ do {
+ ret = decode_packet(&got_frame, 0);
+ if (ret < 0)
+ break;
+ pkt.data += ret;
+ pkt.size -= ret;
+ } while (pkt.size > 0);
+ av_packet_unref(&orig_pkt);
+ }
+
+ /* flush cached frames */
+ pkt.data = NULL;
+ pkt.size = 0;
+ do {
+ decode_packet(&got_frame, 1);
+ } while (got_frame);
+
+ printf("Demuxing succeeded.\n");
+
+ if (video_stream) {
+ printf("Play the output video file with the command:\n"
+ "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
+ av_get_pix_fmt_name(pix_fmt), width, height,
+ video_dst_filename);
+ }
+
+ if (audio_stream) {
+ enum AVSampleFormat sfmt = audio_dec_ctx->sample_fmt;
+ int n_channels = audio_dec_ctx->channels;
+ const char *fmt;
+
+ if (av_sample_fmt_is_planar(sfmt)) {
+ const char *packed = av_get_sample_fmt_name(sfmt);
+ printf("Warning: the sample format the decoder produced is planar "
+ "(%s). This example will output the first channel only.\n",
+ packed ? packed : "?");
+ sfmt = av_get_packed_sample_fmt(sfmt);
+ n_channels = 1;
+ }
+
+ if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0)
+ goto end;
+
+ printf("Play the output audio file with the command:\n"
+ "ffplay -f %s -ac %d -ar %d %s\n",
+ fmt, n_channels, audio_dec_ctx->sample_rate,
+ audio_dst_filename);
+ }
+
+end:
+ avcodec_close(video_dec_ctx);
+ avcodec_close(audio_dec_ctx);
+ avformat_close_input(&fmt_ctx);
+ if (video_dst_file)
+ fclose(video_dst_file);
+ if (audio_dst_file)
+ fclose(audio_dst_file);
+ av_frame_free(&frame);
+ av_free(video_dst_data[0]);
+
+ return ret < 0;
+}
diff --git a/doc/examples/extract_mvs.c b/doc/examples/extract_mvs.c
new file mode 100644
index 0000000000..975189c77d
--- /dev/null
+++ b/doc/examples/extract_mvs.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2012 Stefano Sabatini
+ * Copyright (c) 2014 Clément Bœsch
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <libavutil/motion_vector.h>
+#include <libavformat/avformat.h>
+
+static AVFormatContext *fmt_ctx = NULL;
+static AVCodecContext *video_dec_ctx = NULL;
+static AVStream *video_stream = NULL;
+static const char *src_filename = NULL;
+
+static int video_stream_idx = -1;
+static AVFrame *frame = NULL;
+static AVPacket pkt;
+static int video_frame_count = 0;
+
+static int decode_packet(int *got_frame, int cached)
+{
+ int decoded = pkt.size;
+
+ *got_frame = 0;
+
+ if (pkt.stream_index == video_stream_idx) {
+ int ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
+ if (ret < 0) {
+ fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
+ return ret;
+ }
+
+ if (*got_frame) {
+ int i;
+ AVFrameSideData *sd;
+
+ video_frame_count++;
+ sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MOTION_VECTORS);
+ if (sd) {
+ const AVMotionVector *mvs = (const AVMotionVector *)sd->data;
+ for (i = 0; i < sd->size / sizeof(*mvs); i++) {
+ const AVMotionVector *mv = &mvs[i];
+ printf("%d,%2d,%2d,%2d,%4d,%4d,%4d,%4d,0x%"PRIx64"\n",
+ video_frame_count, mv->source,
+ mv->w, mv->h, mv->src_x, mv->src_y,
+ mv->dst_x, mv->dst_y, mv->flags);
+ }
+ }
+ }
+ }
+
+ return decoded;
+}
+
+static int open_codec_context(int *stream_idx,
+ AVFormatContext *fmt_ctx, enum AVMediaType type)
+{
+ int ret;
+ AVStream *st;
+ AVCodecContext *dec_ctx = NULL;
+ AVCodec *dec = NULL;
+ AVDictionary *opts = NULL;
+
+ ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
+ if (ret < 0) {
+ fprintf(stderr, "Could not find %s stream in input file '%s'\n",
+ av_get_media_type_string(type), src_filename);
+ return ret;
+ } else {
+ *stream_idx = ret;
+ st = fmt_ctx->streams[*stream_idx];
+
+ /* find decoder for the stream */
+ dec_ctx = st->codec;
+ dec = avcodec_find_decoder(dec_ctx->codec_id);
+ if (!dec) {
+ fprintf(stderr, "Failed to find %s codec\n",
+ av_get_media_type_string(type));
+ return AVERROR(EINVAL);
+ }
+
+ /* Init the video decoder */
+ av_dict_set(&opts, "flags2", "+export_mvs", 0);
+ if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
+ fprintf(stderr, "Failed to open %s codec\n",
+ av_get_media_type_string(type));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ int ret = 0, got_frame;
+
+ if (argc != 2) {
+ fprintf(stderr, "Usage: %s <video>\n", argv[0]);
+ exit(1);
+ }
+ src_filename = argv[1];
+
+ av_register_all();
+
+ if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
+ fprintf(stderr, "Could not open source file %s\n", src_filename);
+ exit(1);
+ }
+
+ if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
+ fprintf(stderr, "Could not find stream information\n");
+ exit(1);
+ }
+
+ if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
+ video_stream = fmt_ctx->streams[video_stream_idx];
+ video_dec_ctx = video_stream->codec;
+ }
+
+ av_dump_format(fmt_ctx, 0, src_filename, 0);
+
+ if (!video_stream) {
+ fprintf(stderr, "Could not find video stream in the input, aborting\n");
+ ret = 1;
+ goto end;
+ }
+
+ frame = av_frame_alloc();
+ if (!frame) {
+ fprintf(stderr, "Could not allocate frame\n");
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+
+ printf("framenum,source,blockw,blockh,srcx,srcy,dstx,dsty,flags\n");
+
+ /* initialize packet, set data to NULL, let the demuxer fill it */
+ av_init_packet(&pkt);
+ pkt.data = NULL;
+ pkt.size = 0;
+
+ /* read frames from the file */
+ while (av_read_frame(fmt_ctx, &pkt) >= 0) {
+ AVPacket orig_pkt = pkt;
+ do {
+ ret = decode_packet(&got_frame, 0);
+ if (ret < 0)
+ break;
+ pkt.data += ret;
+ pkt.size -= ret;
+ } while (pkt.size > 0);
+ av_packet_unref(&orig_pkt);
+ }
+
+ /* flush cached frames */
+ pkt.data = NULL;
+ pkt.size = 0;
+ do {
+ decode_packet(&got_frame, 1);
+ } while (got_frame);
+
+end:
+ avcodec_close(video_dec_ctx);
+ avformat_close_input(&fmt_ctx);
+ av_frame_free(&frame);
+ return ret < 0;
+}
diff --git a/doc/examples/filter_audio.c b/doc/examples/filter_audio.c
index 60fe107dda..01761dcee4 100644
--- a/doc/examples/filter_audio.c
+++ b/doc/examples/filter_audio.c
@@ -1,20 +1,20 @@
/*
* copyright (c) 2013 Andrew Kelley
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/doc/examples/filtering_audio.c b/doc/examples/filtering_audio.c
new file mode 100644
index 0000000000..6bb24a431d
--- /dev/null
+++ b/doc/examples/filtering_audio.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2010 Nicolas George
+ * Copyright (c) 2011 Stefano Sabatini
+ * Copyright (c) 2012 Clément Bœsch
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * API example for audio decoding and filtering
+ * @example filtering_audio.c
+ */
+
+#include <unistd.h>
+
+#include <libavcodec/avcodec.h>
+#include <libavformat/avformat.h>
+#include <libavfilter/avfiltergraph.h>
+#include <libavfilter/buffersink.h>
+#include <libavfilter/buffersrc.h>
+#include <libavutil/opt.h>
+
+static const char *filter_descr = "aresample=8000,aformat=sample_fmts=s16:channel_layouts=mono";
+static const char *player = "ffplay -f s16le -ar 8000 -ac 1 -";
+
+static AVFormatContext *fmt_ctx;
+static AVCodecContext *dec_ctx;
+AVFilterContext *buffersink_ctx;
+AVFilterContext *buffersrc_ctx;
+AVFilterGraph *filter_graph;
+static int audio_stream_index = -1;
+
+static int open_input_file(const char *filename)
+{
+ int ret;
+ AVCodec *dec;
+
+ if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
+ return ret;
+ }
+
+ if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
+ return ret;
+ }
+
+ /* select the audio stream */
+ ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &dec, 0);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot find an audio stream in the input file\n");
+ return ret;
+ }
+ audio_stream_index = ret;
+ dec_ctx = fmt_ctx->streams[audio_stream_index]->codec;
+ av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
+
+ /* init the audio decoder */
+ if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot open audio decoder\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int init_filters(const char *filters_descr)
+{
+ char args[512];
+ int ret = 0;
+ AVFilter *abuffersrc = avfilter_get_by_name("abuffer");
+ AVFilter *abuffersink = avfilter_get_by_name("abuffersink");
+ AVFilterInOut *outputs = avfilter_inout_alloc();
+ AVFilterInOut *inputs = avfilter_inout_alloc();
+ static const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 };
+ static const int64_t out_channel_layouts[] = { AV_CH_LAYOUT_MONO, -1 };
+ static const int out_sample_rates[] = { 8000, -1 };
+ const AVFilterLink *outlink;
+ AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base;
+
+ filter_graph = avfilter_graph_alloc();
+ if (!outputs || !inputs || !filter_graph) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+
+ /* buffer audio source: the decoded frames from the decoder will be inserted here. */
+ if (!dec_ctx->channel_layout)
+ dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels);
+ snprintf(args, sizeof(args),
+ "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
+ time_base.num, time_base.den, dec_ctx->sample_rate,
+ av_get_sample_fmt_name(dec_ctx->sample_fmt), dec_ctx->channel_layout);
+ ret = avfilter_graph_create_filter(&buffersrc_ctx, abuffersrc, "in",
+ args, NULL, filter_graph);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
+ goto end;
+ }
+
+ /* buffer audio sink: to terminate the filter chain. */
+ ret = avfilter_graph_create_filter(&buffersink_ctx, abuffersink, "out",
+ NULL, NULL, filter_graph);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
+ goto end;
+ }
+
+ ret = av_opt_set_int_list(buffersink_ctx, "sample_fmts", out_sample_fmts, -1,
+ AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
+ goto end;
+ }
+
+ ret = av_opt_set_int_list(buffersink_ctx, "channel_layouts", out_channel_layouts, -1,
+ AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
+ goto end;
+ }
+
+ ret = av_opt_set_int_list(buffersink_ctx, "sample_rates", out_sample_rates, -1,
+ AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
+ goto end;
+ }
+
+ /*
+ * Set the endpoints for the filter graph. The filter_graph will
+ * be linked to the graph described by filters_descr.
+ */
+
+ /*
+ * The buffer source output must be connected to the input pad of
+ * the first filter described by filters_descr; since the first
+ * filter input label is not specified, it is set to "in" by
+ * default.
+ */
+ outputs->name = av_strdup("in");
+ outputs->filter_ctx = buffersrc_ctx;
+ outputs->pad_idx = 0;
+ outputs->next = NULL;
+
+ /*
+ * The buffer sink input must be connected to the output pad of
+ * the last filter described by filters_descr; since the last
+ * filter output label is not specified, it is set to "out" by
+ * default.
+ */
+ inputs->name = av_strdup("out");
+ inputs->filter_ctx = buffersink_ctx;
+ inputs->pad_idx = 0;
+ inputs->next = NULL;
+
+ if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
+ &inputs, &outputs, NULL)) < 0)
+ goto end;
+
+ if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
+ goto end;
+
+ /* Print summary of the sink buffer
+ * Note: args buffer is reused to store channel layout string */
+ outlink = buffersink_ctx->inputs[0];
+ av_get_channel_layout_string(args, sizeof(args), -1, outlink->channel_layout);
+ av_log(NULL, AV_LOG_INFO, "Output: srate:%dHz fmt:%s chlayout:%s\n",
+ (int)outlink->sample_rate,
+ (char *)av_x_if_null(av_get_sample_fmt_name(outlink->format), "?"),
+ args);
+
+end:
+ avfilter_inout_free(&inputs);
+ avfilter_inout_free(&outputs);
+
+ return ret;
+}
+
+static void print_frame(const AVFrame *frame)
+{
+ const int n = frame->nb_samples * av_get_channel_layout_nb_channels(av_frame_get_channel_layout(frame));
+ const uint16_t *p = (uint16_t*)frame->data[0];
+ const uint16_t *p_end = p + n;
+
+ while (p < p_end) {
+ fputc(*p & 0xff, stdout);
+ fputc(*p>>8 & 0xff, stdout);
+ p++;
+ }
+ fflush(stdout);
+}
+
+int main(int argc, char **argv)
+{
+ int ret;
+ AVPacket packet0, packet;
+ AVFrame *frame = av_frame_alloc();
+ AVFrame *filt_frame = av_frame_alloc();
+ int got_frame;
+
+ if (!frame || !filt_frame) {
+ perror("Could not allocate frame");
+ exit(1);
+ }
+ if (argc != 2) {
+ fprintf(stderr, "Usage: %s file | %s\n", argv[0], player);
+ exit(1);
+ }
+
+ av_register_all();
+ avfilter_register_all();
+
+ if ((ret = open_input_file(argv[1])) < 0)
+ goto end;
+ if ((ret = init_filters(filter_descr)) < 0)
+ goto end;
+
+ /* read all packets */
+ packet0.data = NULL;
+ packet.data = NULL;
+ while (1) {
+ if (!packet0.data) {
+ if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
+ break;
+ packet0 = packet;
+ }
+
+ if (packet.stream_index == audio_stream_index) {
+ got_frame = 0;
+ ret = avcodec_decode_audio4(dec_ctx, frame, &got_frame, &packet);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Error decoding audio\n");
+ continue;
+ }
+ packet.size -= ret;
+ packet.data += ret;
+
+ if (got_frame) {
+ /* push the audio data from decoded frame into the filtergraph */
+ if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, 0) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n");
+ break;
+ }
+
+ /* pull filtered audio from the filtergraph */
+ while (1) {
+ ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
+ if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
+ break;
+ if (ret < 0)
+ goto end;
+ print_frame(filt_frame);
+ av_frame_unref(filt_frame);
+ }
+ }
+
+ if (packet.size <= 0)
+ av_packet_unref(&packet0);
+ } else {
+ /* discard non-wanted packets */
+ av_packet_unref(&packet0);
+ }
+ }
+end:
+ avfilter_graph_free(&filter_graph);
+ avcodec_close(dec_ctx);
+ avformat_close_input(&fmt_ctx);
+ av_frame_free(&frame);
+ av_frame_free(&filt_frame);
+
+ if (ret < 0 && ret != AVERROR_EOF) {
+ fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
+ exit(1);
+ }
+
+ exit(0);
+}
diff --git a/doc/examples/filtering_video.c b/doc/examples/filtering_video.c
new file mode 100644
index 0000000000..3dabf13b10
--- /dev/null
+++ b/doc/examples/filtering_video.c
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 2010 Nicolas George
+ * Copyright (c) 2011 Stefano Sabatini
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * API example for decoding and filtering
+ * @example filtering_video.c
+ */
+
+#define _XOPEN_SOURCE 600 /* for usleep */
+#include <unistd.h>
+
+#include <libavcodec/avcodec.h>
+#include <libavformat/avformat.h>
+#include <libavfilter/avfiltergraph.h>
+#include <libavfilter/buffersink.h>
+#include <libavfilter/buffersrc.h>
+#include <libavutil/opt.h>
+
+const char *filter_descr = "scale=78:24,transpose=cclock";
+/* alternative syntax:
+   scale=78:24 [scl]; [scl] transpose=cclock // assumes "[in]" and "[out]" to be the input and output pads, respectively
+ */
+
+static AVFormatContext *fmt_ctx;
+static AVCodecContext *dec_ctx;
+AVFilterContext *buffersink_ctx;
+AVFilterContext *buffersrc_ctx;
+AVFilterGraph *filter_graph;
+static int video_stream_index = -1;
+static int64_t last_pts = AV_NOPTS_VALUE;
+
+static int open_input_file(const char *filename)
+{
+ int ret;
+ AVCodec *dec;
+
+ if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
+ return ret;
+ }
+
+ if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
+ return ret;
+ }
+
+ /* select the video stream */
+ ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream in the input file\n");
+ return ret;
+ }
+ video_stream_index = ret;
+ dec_ctx = fmt_ctx->streams[video_stream_index]->codec;
+ av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
+
+ /* init the video decoder */
+ if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int init_filters(const char *filters_descr)
+{
+ char args[512];
+ int ret = 0;
+ AVFilter *buffersrc = avfilter_get_by_name("buffer");
+ AVFilter *buffersink = avfilter_get_by_name("buffersink");
+ AVFilterInOut *outputs = avfilter_inout_alloc();
+ AVFilterInOut *inputs = avfilter_inout_alloc();
+ AVRational time_base = fmt_ctx->streams[video_stream_index]->time_base;
+ enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
+
+ filter_graph = avfilter_graph_alloc();
+ if (!outputs || !inputs || !filter_graph) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+
+ /* buffer video source: the decoded frames from the decoder will be inserted here. */
+ snprintf(args, sizeof(args),
+ "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
+ dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
+ time_base.num, time_base.den,
+ dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
+
+ ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
+ args, NULL, filter_graph);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
+ goto end;
+ }
+
+ /* buffer video sink: to terminate the filter chain. */
+ ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
+ NULL, NULL, filter_graph);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
+ goto end;
+ }
+
+ ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
+ AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
+ goto end;
+ }
+
+ /*
+ * Set the endpoints for the filter graph. The filter_graph will
+ * be linked to the graph described by filters_descr.
+ */
+
+ /*
+ * The buffer source output must be connected to the input pad of
+ * the first filter described by filters_descr; since the first
+ * filter input label is not specified, it is set to "in" by
+ * default.
+ */
+ outputs->name = av_strdup("in");
+ outputs->filter_ctx = buffersrc_ctx;
+ outputs->pad_idx = 0;
+ outputs->next = NULL;
+
+ /*
+ * The buffer sink input must be connected to the output pad of
+ * the last filter described by filters_descr; since the last
+ * filter output label is not specified, it is set to "out" by
+ * default.
+ */
+ inputs->name = av_strdup("out");
+ inputs->filter_ctx = buffersink_ctx;
+ inputs->pad_idx = 0;
+ inputs->next = NULL;
+
+ if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
+ &inputs, &outputs, NULL)) < 0)
+ goto end;
+
+ if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
+ goto end;
+
+end:
+ avfilter_inout_free(&inputs);
+ avfilter_inout_free(&outputs);
+
+ return ret;
+}
+
+static void display_frame(const AVFrame *frame, AVRational time_base)
+{
+ int x, y;
+ uint8_t *p0, *p;
+ int64_t delay;
+
+ if (frame->pts != AV_NOPTS_VALUE) {
+ if (last_pts != AV_NOPTS_VALUE) {
+ /* sleep roughly the right amount of time;
+ * usleep is in microseconds, just like AV_TIME_BASE. */
+ delay = av_rescale_q(frame->pts - last_pts,
+ time_base, AV_TIME_BASE_Q);
+ if (delay > 0 && delay < 1000000)
+ usleep(delay);
+ }
+ last_pts = frame->pts;
+ }
+
+ /* Trivial ASCII grayscale display. */
+ p0 = frame->data[0];
+ puts("\033c");
+ for (y = 0; y < frame->height; y++) {
+ p = p0;
+ for (x = 0; x < frame->width; x++)
+ putchar(" .-+#"[*(p++) / 52]);
+ putchar('\n');
+ p0 += frame->linesize[0];
+ }
+ fflush(stdout);
+}
+
+int main(int argc, char **argv)
+{
+ int ret;
+ AVPacket packet;
+ AVFrame *frame = av_frame_alloc();
+ AVFrame *filt_frame = av_frame_alloc();
+ int got_frame;
+
+ if (!frame || !filt_frame) {
+ perror("Could not allocate frame");
+ exit(1);
+ }
+ if (argc != 2) {
+ fprintf(stderr, "Usage: %s file\n", argv[0]);
+ exit(1);
+ }
+
+ av_register_all();
+ avfilter_register_all();
+
+ if ((ret = open_input_file(argv[1])) < 0)
+ goto end;
+ if ((ret = init_filters(filter_descr)) < 0)
+ goto end;
+
+ /* read all packets */
+ while (1) {
+ if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
+ break;
+
+ if (packet.stream_index == video_stream_index) {
+ got_frame = 0;
+ ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, &packet);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Error decoding video\n");
+ break;
+ }
+
+ if (got_frame) {
+ frame->pts = av_frame_get_best_effort_timestamp(frame);
+
+ /* push the decoded frame into the filtergraph */
+ if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
+ break;
+ }
+
+ /* pull filtered frames from the filtergraph */
+ while (1) {
+ ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
+ if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
+ break;
+ if (ret < 0)
+ goto end;
+ display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base);
+ av_frame_unref(filt_frame);
+ }
+ av_frame_unref(frame);
+ }
+ }
+ av_packet_unref(&packet);
+ }
+end:
+ avfilter_graph_free(&filter_graph);
+ avcodec_close(dec_ctx);
+ avformat_close_input(&fmt_ctx);
+ av_frame_free(&frame);
+ av_frame_free(&filt_frame);
+
+ if (ret < 0 && ret != AVERROR_EOF) {
+ fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
+ exit(1);
+ }
+
+ exit(0);
+}
diff --git a/doc/examples/http_multiclient.c b/doc/examples/http_multiclient.c
new file mode 100644
index 0000000000..b9a306d835
--- /dev/null
+++ b/doc/examples/http_multiclient.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2015 Stephan Holljes
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * libavformat multi-client network API usage example.
+ *
+ * @example http_multiclient.c
+ * This example will serve a file without decoding or demuxing it over http.
+ * Multiple clients can connect and will receive the same file.
+ */
+
+#include <libavformat/avformat.h>
+#include <libavutil/opt.h>
+#include <unistd.h>
+
+void process_client(AVIOContext *client, const char *in_uri)
+{
+ AVIOContext *input = NULL;
+ uint8_t buf[1024];
+ int ret, n, reply_code;
+ char *resource = NULL;
+ while ((ret = avio_handshake(client)) > 0) {
+ av_opt_get(client, "resource", AV_OPT_SEARCH_CHILDREN, &resource);
+        // the check for strlen(resource) is necessary because av_opt_get()
+        // may return an empty string.
+ if (resource && strlen(resource))
+ break;
+ }
+ if (ret < 0)
+ goto end;
+ av_log(client, AV_LOG_TRACE, "resource=%p\n", resource);
+ if (resource && resource[0] == '/' && !strcmp((resource + 1), in_uri)) {
+ reply_code = 200;
+ } else {
+ reply_code = AVERROR_HTTP_NOT_FOUND;
+ }
+ if ((ret = av_opt_set_int(client, "reply_code", reply_code, AV_OPT_SEARCH_CHILDREN)) < 0) {
+ av_log(client, AV_LOG_ERROR, "Failed to set reply_code: %s.\n", av_err2str(ret));
+ goto end;
+ }
+ av_log(client, AV_LOG_TRACE, "Set reply code to %d\n", reply_code);
+
+ while ((ret = avio_handshake(client)) > 0);
+
+ if (ret < 0)
+ goto end;
+
+ fprintf(stderr, "Handshake performed.\n");
+ if (reply_code != 200)
+ goto end;
+ fprintf(stderr, "Opening input file.\n");
+ if ((ret = avio_open2(&input, in_uri, AVIO_FLAG_READ, NULL, NULL)) < 0) {
+ av_log(input, AV_LOG_ERROR, "Failed to open input: %s: %s.\n", in_uri,
+ av_err2str(ret));
+ goto end;
+ }
+ for(;;) {
+ n = avio_read(input, buf, sizeof(buf));
+ if (n < 0) {
+ if (n == AVERROR_EOF)
+ break;
+ av_log(input, AV_LOG_ERROR, "Error reading from input: %s.\n",
+ av_err2str(n));
+ break;
+ }
+ avio_write(client, buf, n);
+ avio_flush(client);
+ }
+end:
+ fprintf(stderr, "Flushing client\n");
+ avio_flush(client);
+ fprintf(stderr, "Closing client\n");
+ avio_close(client);
+ fprintf(stderr, "Closing input\n");
+ avio_close(input);
+}
+
+int main(int argc, char **argv)
+{
+ av_log_set_level(AV_LOG_TRACE);
+ AVDictionary *options = NULL;
+ AVIOContext *client = NULL, *server = NULL;
+ const char *in_uri, *out_uri;
+ int ret, pid;
+ if (argc < 3) {
+ printf("usage: %s input http://hostname[:port]\n"
+ "API example program to serve http to multiple clients.\n"
+ "\n", argv[0]);
+ return 1;
+ }
+
+ in_uri = argv[1];
+ out_uri = argv[2];
+
+ av_register_all();
+ avformat_network_init();
+
+ if ((ret = av_dict_set(&options, "listen", "2", 0)) < 0) {
+ fprintf(stderr, "Failed to set listen mode for server: %s\n", av_err2str(ret));
+ return ret;
+ }
+ if ((ret = avio_open2(&server, out_uri, AVIO_FLAG_WRITE, NULL, &options)) < 0) {
+ fprintf(stderr, "Failed to open server: %s\n", av_err2str(ret));
+ return ret;
+ }
+ fprintf(stderr, "Entering main loop.\n");
+ for(;;) {
+ if ((ret = avio_accept(server, &client)) < 0)
+ goto end;
+ fprintf(stderr, "Accepted client, forking process.\n");
+ // XXX: Since we don't reap our children and don't ignore signals
+ // this produces zombie processes.
+ pid = fork();
+ if (pid < 0) {
+ perror("Fork failed");
+ ret = AVERROR(errno);
+ goto end;
+ }
+ if (pid == 0) {
+ fprintf(stderr, "In child.\n");
+ process_client(client, in_uri);
+ avio_close(server);
+ exit(0);
+ }
+ if (pid > 0)
+ avio_close(client);
+ }
+end:
+ avio_close(server);
+ if (ret < 0 && ret != AVERROR_EOF) {
+ fprintf(stderr, "Some errors occurred: %s\n", av_err2str(ret));
+ return 1;
+ }
+ return 0;
+}
diff --git a/doc/examples/metadata.c b/doc/examples/metadata.c
index f4c6eee9c3..f73c267369 100644
--- a/doc/examples/metadata.c
+++ b/doc/examples/metadata.c
@@ -22,8 +22,8 @@
/**
* @file
- * @example metadata.c
* Shows how the metadata API can be used in application programs.
+ * @example metadata.c
*/
#include <stdio.h>
@@ -51,6 +51,6 @@ int main (int argc, char **argv)
while ((tag = av_dict_get(fmt_ctx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX)))
printf("%s=%s\n", tag->key, tag->value);
- avformat_free_context(fmt_ctx);
+ avformat_close_input(&fmt_ctx);
return 0;
}
diff --git a/doc/examples/output.c b/doc/examples/muxing.c
index cc2cbb1bae..2fbc89bb02 100644
--- a/doc/examples/output.c
+++ b/doc/examples/muxing.c
@@ -24,9 +24,9 @@
* @file
* libavformat API example.
*
- * @example output.c
* Output a media file in any supported libavformat format. The default
* codecs are used.
+ * @example muxing.c
*/
#include <stdlib.h>
@@ -34,17 +34,17 @@
#include <string.h>
#include <math.h>
-#include "libavutil/channel_layout.h"
-#include "libavutil/mathematics.h"
-#include "libavutil/opt.h"
-#include "libavformat/avformat.h"
-#include "libavresample/avresample.h"
-#include "libswscale/swscale.h"
+#include <libavutil/avassert.h>
+#include <libavutil/channel_layout.h>
+#include <libavutil/opt.h>
+#include <libavutil/mathematics.h>
+#include <libavutil/timestamp.h>
+#include <libavformat/avformat.h>
+#include <libswscale/swscale.h>
+#include <libswresample/swresample.h>
-/* 5 seconds stream duration */
-#define STREAM_DURATION 5.0
+#define STREAM_DURATION 10.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
-#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
#define SCALE_FLAGS SWS_BICUBIC
@@ -56,6 +56,7 @@ typedef struct OutputStream {
/* pts of the next frame that will be generated */
int64_t next_pts;
+ int samples_count;
AVFrame *frame;
AVFrame *tmp_frame;
@@ -63,80 +64,126 @@ typedef struct OutputStream {
float t, tincr, tincr2;
struct SwsContext *sws_ctx;
- AVAudioResampleContext *avr;
+ struct SwrContext *swr_ctx;
} OutputStream;
-/**************************************************************/
-/* audio output */
+static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
+{
+ AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
-/*
- * add an audio output stream
- */
-static void add_audio_stream(OutputStream *ost, AVFormatContext *oc,
- enum AVCodecID codec_id)
+ printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
+ av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
+ av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
+ av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
+ pkt->stream_index);
+}
+
+static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
+{
+ /* rescale output packet timestamp values from codec to stream timebase */
+ av_packet_rescale_ts(pkt, *time_base, st->time_base);
+ pkt->stream_index = st->index;
+
+ /* Write the compressed frame to the media file. */
+ log_packet(fmt_ctx, pkt);
+ return av_interleaved_write_frame(fmt_ctx, pkt);
+}
+
+/* Add an output stream. */
+static void add_stream(OutputStream *ost, AVFormatContext *oc,
+ AVCodec **codec,
+ enum AVCodecID codec_id)
{
AVCodecContext *c;
- AVCodec *codec;
- int ret;
+ int i;
- /* find the audio encoder */
- codec = avcodec_find_encoder(codec_id);
- if (!codec) {
- fprintf(stderr, "codec not found\n");
+ /* find the encoder */
+ *codec = avcodec_find_encoder(codec_id);
+ if (!(*codec)) {
+ fprintf(stderr, "Could not find encoder for '%s'\n",
+ avcodec_get_name(codec_id));
exit(1);
}
ost->st = avformat_new_stream(oc, NULL);
if (!ost->st) {
- fprintf(stderr, "Could not alloc stream\n");
+ fprintf(stderr, "Could not allocate stream\n");
exit(1);
}
-
- c = avcodec_alloc_context3(codec);
+ ost->st->id = oc->nb_streams-1;
+ c = avcodec_alloc_context3(*codec);
if (!c) {
fprintf(stderr, "Could not alloc an encoding context\n");
exit(1);
}
ost->enc = c;
- /* put sample parameters */
- c->sample_fmt = codec->sample_fmts ? codec->sample_fmts[0] : AV_SAMPLE_FMT_S16;
- c->sample_rate = codec->supported_samplerates ? codec->supported_samplerates[0] : 44100;
- c->channel_layout = codec->channel_layouts ? codec->channel_layouts[0] : AV_CH_LAYOUT_STEREO;
- c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
- c->bit_rate = 64000;
+ switch ((*codec)->type) {
+ case AVMEDIA_TYPE_AUDIO:
+ c->sample_fmt = (*codec)->sample_fmts ?
+ (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
+ c->bit_rate = 64000;
+ c->sample_rate = 44100;
+ if ((*codec)->supported_samplerates) {
+ c->sample_rate = (*codec)->supported_samplerates[0];
+ for (i = 0; (*codec)->supported_samplerates[i]; i++) {
+ if ((*codec)->supported_samplerates[i] == 44100)
+ c->sample_rate = 44100;
+ }
+ }
+ c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
+ c->channel_layout = AV_CH_LAYOUT_STEREO;
+ if ((*codec)->channel_layouts) {
+ c->channel_layout = (*codec)->channel_layouts[0];
+ for (i = 0; (*codec)->channel_layouts[i]; i++) {
+ if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
+ c->channel_layout = AV_CH_LAYOUT_STEREO;
+ }
+ }
+ c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
+ ost->st->time_base = (AVRational){ 1, c->sample_rate };
+ break;
+
+ case AVMEDIA_TYPE_VIDEO:
+ c->codec_id = codec_id;
+
+ c->bit_rate = 400000;
+ /* Resolution must be a multiple of two. */
+ c->width = 352;
+ c->height = 288;
+ /* timebase: This is the fundamental unit of time (in seconds) in terms
+ * of which frame timestamps are represented. For fixed-fps content,
+ * timebase should be 1/framerate and timestamp increments should be
+ * identical to 1. */
+ ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
+ c->time_base = ost->st->time_base;
+
+ c->gop_size = 12; /* emit one intra frame every twelve frames at most */
+ c->pix_fmt = STREAM_PIX_FMT;
+ if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
+ /* just for testing, we also add B frames */
+ c->max_b_frames = 2;
+ }
+ if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
+ /* Needed to avoid using macroblocks in which some coeffs overflow.
+ * This does not happen with normal video, it just happens here as
+ * the motion of the chroma plane does not match the luma plane. */
+ c->mb_decision = 2;
+ }
+ break;
- ost->st->time_base = (AVRational){ 1, c->sample_rate };
+ default:
+ break;
+ }
- // some formats want stream headers to be separate
+ /* Some formats want stream headers to be separate. */
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
-
- /* initialize sample format conversion;
- * to simplify the code, we always pass the data through lavr, even
- * if the encoder supports the generated format directly -- the price is
- * some extra data copying;
- */
- ost->avr = avresample_alloc_context();
- if (!ost->avr) {
- fprintf(stderr, "Error allocating the resampling context\n");
- exit(1);
- }
-
- av_opt_set_int(ost->avr, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
- av_opt_set_int(ost->avr, "in_sample_rate", 44100, 0);
- av_opt_set_int(ost->avr, "in_channel_layout", AV_CH_LAYOUT_STEREO, 0);
- av_opt_set_int(ost->avr, "out_sample_fmt", c->sample_fmt, 0);
- av_opt_set_int(ost->avr, "out_sample_rate", c->sample_rate, 0);
- av_opt_set_int(ost->avr, "out_channel_layout", c->channel_layout, 0);
-
- ret = avresample_open(ost->avr);
- if (ret < 0) {
- fprintf(stderr, "Error opening the resampling context\n");
- exit(1);
- }
}
+/**************************************************************/
+/* audio output */
+
static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
uint64_t channel_layout,
int sample_rate, int nb_samples)
@@ -165,16 +212,21 @@ static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
return frame;
}
-static void open_audio(AVFormatContext *oc, OutputStream *ost)
+static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
AVCodecContext *c;
- int nb_samples, ret;
+ int nb_samples;
+ int ret;
+ AVDictionary *opt = NULL;
c = ost->enc;
/* open it */
- if (avcodec_open2(c, NULL, NULL) < 0) {
- fprintf(stderr, "could not open codec\n");
+ av_dict_copy(&opt, opt_arg, 0);
+ ret = avcodec_open2(c, codec, &opt);
+ av_dict_free(&opt);
+ if (ret < 0) {
+ fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
exit(1);
}
@@ -191,8 +243,8 @@ static void open_audio(AVFormatContext *oc, OutputStream *ost)
ost->frame = alloc_audio_frame(c->sample_fmt, c->channel_layout,
c->sample_rate, nb_samples);
- ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, AV_CH_LAYOUT_STEREO,
- 44100, nb_samples);
+ ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
+ c->sample_rate, nb_samples);
/* copy the stream parameters to the muxer */
ret = avcodec_parameters_from_context(ost->st->codecpar, c);
@@ -200,6 +252,27 @@ static void open_audio(AVFormatContext *oc, OutputStream *ost)
fprintf(stderr, "Could not copy the stream parameters\n");
exit(1);
}
+
+ /* create resampler context */
+ ost->swr_ctx = swr_alloc();
+ if (!ost->swr_ctx) {
+ fprintf(stderr, "Could not allocate resampler context\n");
+ exit(1);
+ }
+
+ /* set options */
+ av_opt_set_int (ost->swr_ctx, "in_channel_count", c->channels, 0);
+ av_opt_set_int (ost->swr_ctx, "in_sample_rate", c->sample_rate, 0);
+ av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
+ av_opt_set_int (ost->swr_ctx, "out_channel_count", c->channels, 0);
+ av_opt_set_int (ost->swr_ctx, "out_sample_rate", c->sample_rate, 0);
+ av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
+
+ /* initialize the resampling context */
+ if ((ret = swr_init(ost->swr_ctx)) < 0) {
+ fprintf(stderr, "Failed to initialize the resampling context\n");
+ exit(1);
+ }
}
/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
@@ -215,8 +288,7 @@ static AVFrame *get_audio_frame(OutputStream *ost)
STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
return NULL;
-
- for (j = 0; j < frame->nb_samples; j++) {
+ for (j = 0; j <frame->nb_samples; j++) {
v = (int)(sin(ost->t) * 10000);
for (i = 0; i < ost->enc->channels; i++)
*q++ = v;
@@ -224,62 +296,37 @@ static AVFrame *get_audio_frame(OutputStream *ost)
ost->tincr += ost->tincr2;
}
- return frame;
-}
+ frame->pts = ost->next_pts;
+ ost->next_pts += frame->nb_samples;
-/* if a frame is provided, send it to the encoder, otherwise flush the encoder;
- * return 1 when encoding is finished, 0 otherwise
- */
-static int encode_audio_frame(AVFormatContext *oc, OutputStream *ost,
- AVFrame *frame)
-{
- AVPacket pkt = { 0 }; // data and size must be 0;
- int got_packet;
-
- av_init_packet(&pkt);
- avcodec_encode_audio2(ost->enc, &pkt, frame, &got_packet);
-
- if (got_packet) {
- pkt.stream_index = ost->st->index;
-
- av_packet_rescale_ts(&pkt, ost->enc->time_base, ost->st->time_base);
-
- /* Write the compressed frame to the media file. */
- if (av_interleaved_write_frame(oc, &pkt) != 0) {
- fprintf(stderr, "Error while writing audio frame\n");
- exit(1);
- }
- }
-
- return (frame || got_packet) ? 0 : 1;
+ return frame;
}
/*
* encode one audio frame and send it to the muxer
* return 1 when encoding is finished, 0 otherwise
*/
-static int process_audio_stream(AVFormatContext *oc, OutputStream *ost)
+static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
{
+ AVCodecContext *c;
+ AVPacket pkt = { 0 }; // data and size must be 0;
AVFrame *frame;
- int got_output = 0;
int ret;
+ int got_packet;
+ int dst_nb_samples;
+
+ av_init_packet(&pkt);
+ c = ost->enc;
frame = get_audio_frame(ost);
- got_output |= !!frame;
- /* feed the data to lavr */
if (frame) {
- ret = avresample_convert(ost->avr, NULL, 0, 0,
- frame->extended_data, frame->linesize[0],
- frame->nb_samples);
- if (ret < 0) {
- fprintf(stderr, "Error feeding audio data to the resampler\n");
- exit(1);
- }
- }
+ /* convert samples from native format to destination codec format, using the resampler */
+ /* compute destination number of samples */
+ dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
+ c->sample_rate, c->sample_rate, AV_ROUND_UP);
+ av_assert0(dst_nb_samples == frame->nb_samples);
- while ((frame && avresample_available(ost->avr) >= ost->frame->nb_samples) ||
- (!frame && avresample_get_out_samples(ost->avr, 0))) {
/* when we pass a frame to the encoder, it may keep a reference to it
* internally;
* make sure we do not overwrite it here
@@ -288,97 +335,41 @@ static int process_audio_stream(AVFormatContext *oc, OutputStream *ost)
if (ret < 0)
exit(1);
- /* the difference between the two avresample calls here is that the
- * first one just reads the already converted data that is buffered in
- * the lavr output buffer, while the second one also flushes the
- * resampler */
- if (frame) {
- ret = avresample_read(ost->avr, ost->frame->extended_data,
- ost->frame->nb_samples);
- } else {
- ret = avresample_convert(ost->avr, ost->frame->extended_data,
- ost->frame->linesize[0], ost->frame->nb_samples,
- NULL, 0, 0);
- }
+ /* convert to destination format */
+ ret = swr_convert(ost->swr_ctx,
+ ost->frame->data, dst_nb_samples,
+ (const uint8_t **)frame->data, frame->nb_samples);
+ if (ret < 0) {
+ fprintf(stderr, "Error while converting\n");
+ exit(1);
+ }
+ frame = ost->frame;
+
+ frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
+ ost->samples_count += dst_nb_samples;
+ }
+
+ ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
+ if (ret < 0) {
+ fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
+ exit(1);
+ }
+ if (got_packet) {
+ ret = write_frame(oc, &c->time_base, ost->st, &pkt);
if (ret < 0) {
- fprintf(stderr, "Error while resampling\n");
- exit(1);
- } else if (frame && ret != ost->frame->nb_samples) {
- fprintf(stderr, "Too few samples returned from lavr\n");
+ fprintf(stderr, "Error while writing audio frame: %s\n",
+ av_err2str(ret));
exit(1);
}
-
- ost->frame->nb_samples = ret;
-
- ost->frame->pts = ost->next_pts;
- ost->next_pts += ost->frame->nb_samples;
-
- got_output |= encode_audio_frame(oc, ost, ret ? ost->frame : NULL);
}
- return !got_output;
+ return (frame || got_packet) ? 0 : 1;
}
/**************************************************************/
/* video output */
-/* Add a video output stream. */
-static void add_video_stream(OutputStream *ost, AVFormatContext *oc,
- enum AVCodecID codec_id)
-{
- AVCodecContext *c;
- AVCodec *codec;
-
- /* find the video encoder */
- codec = avcodec_find_encoder(codec_id);
- if (!codec) {
- fprintf(stderr, "codec not found\n");
- exit(1);
- }
-
- ost->st = avformat_new_stream(oc, NULL);
- if (!ost->st) {
- fprintf(stderr, "Could not alloc stream\n");
- exit(1);
- }
-
- c = avcodec_alloc_context3(codec);
- if (!c) {
- fprintf(stderr, "Could not alloc an encoding context\n");
- exit(1);
- }
- ost->enc = c;
-
- /* Put sample parameters. */
- c->bit_rate = 400000;
- /* Resolution must be a multiple of two. */
- c->width = 352;
- c->height = 288;
- /* timebase: This is the fundamental unit of time (in seconds) in terms
- * of which frame timestamps are represented. For fixed-fps content,
- * timebase should be 1/framerate and timestamp increments should be
- * identical to 1. */
- ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
- c->time_base = ost->st->time_base;
-
- c->gop_size = 12; /* emit one intra frame every twelve frames at most */
- c->pix_fmt = STREAM_PIX_FMT;
- if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
- /* just for testing, we also add B frames */
- c->max_b_frames = 2;
- }
- if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
- /* Needed to avoid using macroblocks in which some coeffs overflow.
- * This does not happen with normal video, it just happens here as
- * the motion of the chroma plane does not match the luma plane. */
- c->mb_decision = 2;
- }
- /* Some formats want stream headers to be separate. */
- if (oc->oformat->flags & AVFMT_GLOBALHEADER)
- c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
-}
-
static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
AVFrame *picture;
@@ -402,23 +393,26 @@ static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
return picture;
}
-static void open_video(AVFormatContext *oc, OutputStream *ost)
+static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
- AVCodecContext *c;
int ret;
+ AVCodecContext *c = ost->enc;
+ AVDictionary *opt = NULL;
- c = ost->enc;
+ av_dict_copy(&opt, opt_arg, 0);
/* open the codec */
- if (avcodec_open2(c, NULL, NULL) < 0) {
- fprintf(stderr, "could not open codec\n");
+ ret = avcodec_open2(c, codec, &opt);
+ av_dict_free(&opt);
+ if (ret < 0) {
+ fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
exit(1);
}
- /* Allocate the encoded raw picture. */
+ /* allocate and init a re-usable frame */
ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
if (!ost->frame) {
- fprintf(stderr, "Could not allocate picture\n");
+ fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
@@ -492,12 +486,13 @@ static AVFrame *get_video_frame(OutputStream *ost)
SCALE_FLAGS, NULL, NULL, NULL);
if (!ost->sws_ctx) {
fprintf(stderr,
- "Cannot initialize the conversion context\n");
+ "Could not initialize the conversion context\n");
exit(1);
}
}
fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
- sws_scale(ost->sws_ctx, ost->tmp_frame->data, ost->tmp_frame->linesize,
+ sws_scale(ost->sws_ctx,
+ (const uint8_t * const *)ost->tmp_frame->data, ost->tmp_frame->linesize,
0, c->height, ost->frame->data, ost->frame->linesize);
} else {
fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
@@ -517,8 +512,8 @@ static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
int ret;
AVCodecContext *c;
AVFrame *frame;
- AVPacket pkt = { 0 };
int got_packet = 0;
+ AVPacket pkt = { 0 };
c = ost->enc;
@@ -529,20 +524,18 @@ static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
/* encode the image */
ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
if (ret < 0) {
- fprintf(stderr, "Error encoding a video frame\n");
+ fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
exit(1);
}
if (got_packet) {
- av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);
- pkt.stream_index = ost->st->index;
-
- /* Write the compressed frame to the media file. */
- ret = av_interleaved_write_frame(oc, &pkt);
+ ret = write_frame(oc, &c->time_base, ost->st, &pkt);
+ } else {
+ ret = 0;
}
- if (ret != 0) {
- fprintf(stderr, "Error while writing video frame\n");
+ if (ret < 0) {
+ fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
exit(1);
}
@@ -555,7 +548,7 @@ static void close_stream(AVFormatContext *oc, OutputStream *ost)
av_frame_free(&ost->frame);
av_frame_free(&ost->tmp_frame);
sws_freeContext(ost->sws_ctx);
- avresample_free(&ost->avr);
+ swr_free(&ost->swr_ctx);
}
/**************************************************************/
@@ -567,52 +560,51 @@ int main(int argc, char **argv)
const char *filename;
AVOutputFormat *fmt;
AVFormatContext *oc;
+ AVCodec *audio_codec, *video_codec;
+ int ret;
int have_video = 0, have_audio = 0;
int encode_video = 0, encode_audio = 0;
+ AVDictionary *opt = NULL;
/* Initialize libavcodec, and register all codecs and formats. */
av_register_all();
- if (argc != 2) {
+ if (argc < 2) {
printf("usage: %s output_file\n"
"API example program to output a media file with libavformat.\n"
+ "This program generates a synthetic audio and video stream, encodes and\n"
+ "muxes them into a file named output_file.\n"
"The output format is automatically guessed according to the file extension.\n"
- "Raw images can also be output by using '%%d' in the filename\n"
+ "Raw images can also be output by using '%%d' in the filename.\n"
"\n", argv[0]);
return 1;
}
filename = argv[1];
+ if (argc > 3 && !strcmp(argv[2], "-flags")) {
+ av_dict_set(&opt, argv[2]+1, argv[3], 0);
+ }
- /* Autodetect the output format from the name. default is MPEG. */
- fmt = av_guess_format(NULL, filename, NULL);
- if (!fmt) {
+ /* allocate the output media context */
+ avformat_alloc_output_context2(&oc, NULL, NULL, filename);
+ if (!oc) {
printf("Could not deduce output format from file extension: using MPEG.\n");
- fmt = av_guess_format("mpeg", NULL, NULL);
+ avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
}
- if (!fmt) {
- fprintf(stderr, "Could not find suitable output format\n");
+ if (!oc)
return 1;
- }
- /* Allocate the output media context. */
- oc = avformat_alloc_context();
- if (!oc) {
- fprintf(stderr, "Memory error\n");
- return 1;
- }
- oc->oformat = fmt;
- snprintf(oc->filename, sizeof(oc->filename), "%s", filename);
+ fmt = oc->oformat;
/* Add the audio and video streams using the default format codecs
* and initialize the codecs. */
if (fmt->video_codec != AV_CODEC_ID_NONE) {
- add_video_stream(&video_st, oc, fmt->video_codec);
+ add_stream(&video_st, oc, &video_codec, fmt->video_codec);
have_video = 1;
encode_video = 1;
}
if (fmt->audio_codec != AV_CODEC_ID_NONE) {
- add_audio_stream(&audio_st, oc, fmt->audio_codec);
+ add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
have_audio = 1;
encode_audio = 1;
}
@@ -620,22 +612,30 @@ int main(int argc, char **argv)
/* Now that all the parameters are set, we can open the audio and
* video codecs and allocate the necessary encode buffers. */
if (have_video)
- open_video(oc, &video_st);
+ open_video(oc, video_codec, &video_st, opt);
+
if (have_audio)
- open_audio(oc, &audio_st);
+ open_audio(oc, audio_codec, &audio_st, opt);
av_dump_format(oc, 0, filename, 1);
/* open the output file, if needed */
if (!(fmt->flags & AVFMT_NOFILE)) {
- if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
- fprintf(stderr, "Could not open '%s'\n", filename);
+ ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
+ if (ret < 0) {
+ fprintf(stderr, "Could not open '%s': %s\n", filename,
+ av_err2str(ret));
return 1;
}
}
/* Write the stream header, if any. */
- avformat_write_header(oc, NULL);
+ ret = avformat_write_header(oc, &opt);
+ if (ret < 0) {
+ fprintf(stderr, "Error occurred when opening output file: %s\n",
+ av_err2str(ret));
+ return 1;
+ }
while (encode_video || encode_audio) {
/* select the stream to encode */
@@ -644,7 +644,7 @@ int main(int argc, char **argv)
audio_st.next_pts, audio_st.enc->time_base) <= 0)) {
encode_video = !write_video_frame(oc, &video_st);
} else {
- encode_audio = !process_audio_stream(oc, &audio_st);
+ encode_audio = !write_audio_frame(oc, &audio_st);
}
}
@@ -662,7 +662,7 @@ int main(int argc, char **argv)
if (!(fmt->flags & AVFMT_NOFILE))
/* Close the output file. */
- avio_close(oc->pb);
+ avio_closep(&oc->pb);
/* free the stream */
avformat_free_context(oc);
diff --git a/doc/examples/remuxing.c b/doc/examples/remuxing.c
new file mode 100644
index 0000000000..65437d9abd
--- /dev/null
+++ b/doc/examples/remuxing.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2013 Stefano Sabatini
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * libavformat/libavcodec demuxing and muxing API example.
+ *
+ * Remux streams from one container format to another.
+ * @example remuxing.c
+ */
+
+#include <libavutil/timestamp.h>
+#include <libavformat/avformat.h>
+
+/* Print a packet's timing fields (pts/dts/duration) both as raw
+ * timestamp values and converted to seconds in the owning stream's
+ * time base, prefixed with tag ("in"/"out") to tell the two sides of
+ * the remux apart. */
+static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
+{
+ AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
+
+ printf("%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
+ tag,
+ av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
+ av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
+ av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
+ pkt->stream_index);
+}
+
+/*
+ * Remux argv[1] into argv[2]: every stream is copied packet-by-packet
+ * (no decoding/encoding), with timestamps rescaled from the input
+ * stream time base to the output stream time base. The output
+ * container format is guessed from the file extension.
+ */
+int main(int argc, char **argv)
+{
+ AVOutputFormat *ofmt = NULL;
+ AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
+ AVPacket pkt;
+ const char *in_filename, *out_filename;
+ int ret, i;
+
+ if (argc < 3) {
+ printf("usage: %s input output\n"
+ "API example program to remux a media file with libavformat and libavcodec.\n"
+ "The output format is guessed according to the file extension.\n"
+ "\n", argv[0]);
+ return 1;
+ }
+
+ in_filename = argv[1];
+ out_filename = argv[2];
+
+ av_register_all();
+
+ /* open the input file and read its stream layout */
+ if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
+ fprintf(stderr, "Could not open input file '%s'", in_filename);
+ goto end;
+ }
+
+ if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
+ fprintf(stderr, "Failed to retrieve input stream information");
+ goto end;
+ }
+
+ av_dump_format(ifmt_ctx, 0, in_filename, 0);
+
+ /* guess the output container from out_filename's extension */
+ avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
+ if (!ofmt_ctx) {
+ fprintf(stderr, "Could not create output context\n");
+ ret = AVERROR_UNKNOWN;
+ goto end;
+ }
+
+ ofmt = ofmt_ctx->oformat;
+
+ /* create one output stream per input stream, copying codec settings.
+ * NOTE(review): this uses the pre-codecpar AVStream.codec API
+ * (avcodec_copy_context); deprecated in later FFmpeg -- confirm the
+ * targeted library version. */
+ for (i = 0; i < ifmt_ctx->nb_streams; i++) {
+ AVStream *in_stream = ifmt_ctx->streams[i];
+ AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
+ if (!out_stream) {
+ fprintf(stderr, "Failed allocating output stream\n");
+ ret = AVERROR_UNKNOWN;
+ goto end;
+ }
+
+ ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
+ goto end;
+ }
+ /* cleared so the output muxer can pick a tag valid for the new
+ * container instead of inheriting the input's */
+ out_stream->codec->codec_tag = 0;
+ if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
+ out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
+ }
+ av_dump_format(ofmt_ctx, 0, out_filename, 1);
+
+ /* open the output file unless the format needs no file (AVFMT_NOFILE) */
+ if (!(ofmt->flags & AVFMT_NOFILE)) {
+ ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
+ if (ret < 0) {
+ fprintf(stderr, "Could not open output file '%s'", out_filename);
+ goto end;
+ }
+ }
+
+ ret = avformat_write_header(ofmt_ctx, NULL);
+ if (ret < 0) {
+ fprintf(stderr, "Error occurred when opening output file\n");
+ goto end;
+ }
+
+ /* main remux loop: read a packet, rescale its timing, write it out;
+ * av_read_frame returning < 0 (e.g. AVERROR_EOF) ends the loop */
+ while (1) {
+ AVStream *in_stream, *out_stream;
+
+ ret = av_read_frame(ifmt_ctx, &pkt);
+ if (ret < 0)
+ break;
+
+ in_stream = ifmt_ctx->streams[pkt.stream_index];
+ out_stream = ofmt_ctx->streams[pkt.stream_index];
+
+ log_packet(ifmt_ctx, &pkt, "in");
+
+ /* copy packet */
+ pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
+ pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
+ pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
+ /* byte position is meaningless in the new container */
+ pkt.pos = -1;
+ log_packet(ofmt_ctx, &pkt, "out");
+
+ ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
+ if (ret < 0) {
+ fprintf(stderr, "Error muxing packet\n");
+ /* NOTE(review): pkt is not unreffed on this error path --
+ * possible leak, confirm whether intentional */
+ break;
+ }
+ av_packet_unref(&pkt);
+ }
+
+ av_write_trailer(ofmt_ctx);
+end:
+
+ avformat_close_input(&ifmt_ctx);
+
+ /* close output */
+ if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
+ avio_closep(&ofmt_ctx->pb);
+ avformat_free_context(ofmt_ctx);
+
+ if (ret < 0 && ret != AVERROR_EOF) {
+ fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/doc/examples/resampling_audio.c b/doc/examples/resampling_audio.c
new file mode 100644
index 0000000000..f35e7e1779
--- /dev/null
+++ b/doc/examples/resampling_audio.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2012 Stefano Sabatini
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/**
+ * @example resampling_audio.c
+ * libswresample API use example.
+ */
+
+#include <libavutil/opt.h>
+#include <libavutil/channel_layout.h>
+#include <libavutil/samplefmt.h>
+#include <libswresample/swresample.h>
+
+/**
+ * Map an AVSampleFormat to the matching raw-audio format name
+ * (e.g. "s16le") usable as an ffplay/ffmpeg "-f" argument, picking the
+ * big- or little-endian variant for the host via AV_NE.
+ *
+ * On success stores the name in *fmt and returns 0; for unsupported
+ * formats sets *fmt to NULL, prints an error and returns
+ * AVERROR(EINVAL).
+ */
+static int get_format_from_sample_fmt(const char **fmt,
+ enum AVSampleFormat sample_fmt)
+{
+ int i;
+ /* lookup table: sample format -> big-endian / little-endian names */
+ struct sample_fmt_entry {
+ enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
+ } sample_fmt_entries[] = {
+ { AV_SAMPLE_FMT_U8, "u8", "u8" },
+ { AV_SAMPLE_FMT_S16, "s16be", "s16le" },
+ { AV_SAMPLE_FMT_S32, "s32be", "s32le" },
+ { AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
+ { AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
+ };
+ *fmt = NULL;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
+ struct sample_fmt_entry *entry = &sample_fmt_entries[i];
+ if (sample_fmt == entry->sample_fmt) {
+ *fmt = AV_NE(entry->fmt_be, entry->fmt_le);
+ return 0;
+ }
+ }
+
+ fprintf(stderr,
+ "Sample format %s not supported as output format\n",
+ av_get_sample_fmt_name(sample_fmt));
+ return AVERROR(EINVAL);
+}
+
+/**
+ * Fill dst with nb_samples of a 440 Hz sine tone, generated starting
+ * from time *t. The same value is duplicated across all nb_channels
+ * interleaved channels, and *t is advanced by 1/sample_rate per
+ * sample so successive calls continue the waveform seamlessly.
+ */
+static void fill_samples(double *dst, int nb_samples, int nb_channels, int sample_rate, double *t)
+{
+ int i, j;
+ double tincr = 1.0 / sample_rate, *dstp = dst;
+ const double c = 2 * M_PI * 440.0;
+
+ /* generate sin tone with 440Hz frequency and duplicated channels */
+ for (i = 0; i < nb_samples; i++) {
+ *dstp = sin(c * *t);
+ for (j = 1; j < nb_channels; j++)
+ dstp[j] = dstp[0];
+ dstp += nb_channels;
+ *t += tincr;
+ }
+}
+
+/*
+ * Demonstrate libswresample: synthesize a stereo double-precision
+ * 48 kHz sine tone, convert it to S16 at 44.1 kHz with the
+ * AV_CH_LAYOUT_SURROUND channel layout, and write the raw interleaved
+ * samples to argv[1] (playable with the ffplay command printed at
+ * exit).
+ */
+int main(int argc, char **argv)
+{
+ int64_t src_ch_layout = AV_CH_LAYOUT_STEREO, dst_ch_layout = AV_CH_LAYOUT_SURROUND;
+ int src_rate = 48000, dst_rate = 44100;
+ uint8_t **src_data = NULL, **dst_data = NULL;
+ int src_nb_channels = 0, dst_nb_channels = 0;
+ int src_linesize, dst_linesize;
+ int src_nb_samples = 1024, dst_nb_samples, max_dst_nb_samples;
+ enum AVSampleFormat src_sample_fmt = AV_SAMPLE_FMT_DBL, dst_sample_fmt = AV_SAMPLE_FMT_S16;
+ const char *dst_filename = NULL;
+ FILE *dst_file;
+ int dst_bufsize;
+ const char *fmt;
+ struct SwrContext *swr_ctx;
+ double t;
+ int ret;
+
+ if (argc != 2) {
+ fprintf(stderr, "Usage: %s output_file\n"
+ "API example program to show how to resample an audio stream with libswresample.\n"
+ "This program generates a series of audio frames, resamples them to a specified "
+ "output format and rate and saves them to an output file named output_file.\n",
+ argv[0]);
+ exit(1);
+ }
+ dst_filename = argv[1];
+
+ dst_file = fopen(dst_filename, "wb");
+ if (!dst_file) {
+ fprintf(stderr, "Could not open destination file %s\n", dst_filename);
+ exit(1);
+ }
+
+ /* create resampler context */
+ swr_ctx = swr_alloc();
+ if (!swr_ctx) {
+ fprintf(stderr, "Could not allocate resampler context\n");
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+
+ /* set options */
+ av_opt_set_int(swr_ctx, "in_channel_layout", src_ch_layout, 0);
+ av_opt_set_int(swr_ctx, "in_sample_rate", src_rate, 0);
+ av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", src_sample_fmt, 0);
+
+ av_opt_set_int(swr_ctx, "out_channel_layout", dst_ch_layout, 0);
+ av_opt_set_int(swr_ctx, "out_sample_rate", dst_rate, 0);
+ av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", dst_sample_fmt, 0);
+
+ /* initialize the resampling context */
+ if ((ret = swr_init(swr_ctx)) < 0) {
+ fprintf(stderr, "Failed to initialize the resampling context\n");
+ goto end;
+ }
+
+ /* allocate source and destination samples buffers */
+
+ src_nb_channels = av_get_channel_layout_nb_channels(src_ch_layout);
+ ret = av_samples_alloc_array_and_samples(&src_data, &src_linesize, src_nb_channels,
+ src_nb_samples, src_sample_fmt, 0);
+ if (ret < 0) {
+ fprintf(stderr, "Could not allocate source samples\n");
+ goto end;
+ }
+
+ /* compute the number of converted samples: buffering is avoided
+ * ensuring that the output buffer will contain at least all the
+ * converted input samples */
+ max_dst_nb_samples = dst_nb_samples =
+ av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
+
+ /* buffer is going to be directly written to a rawaudio file, no alignment */
+ dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout);
+ ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize, dst_nb_channels,
+ dst_nb_samples, dst_sample_fmt, 0);
+ if (ret < 0) {
+ fprintf(stderr, "Could not allocate destination samples\n");
+ goto end;
+ }
+
+ t = 0;
+ do {
+ /* generate synthetic audio */
+ fill_samples((double *)src_data[0], src_nb_samples, src_nb_channels, src_rate, &t);
+
+ /* compute destination number of samples, accounting for samples
+ * the resampler may still be buffering (swr_get_delay) */
+ dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, src_rate) +
+ src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
+ /* grow the destination buffer if this iteration needs more room */
+ if (dst_nb_samples > max_dst_nb_samples) {
+ av_freep(&dst_data[0]);
+ ret = av_samples_alloc(dst_data, &dst_linesize, dst_nb_channels,
+ dst_nb_samples, dst_sample_fmt, 1);
+ if (ret < 0)
+ break;
+ max_dst_nb_samples = dst_nb_samples;
+ }
+
+ /* convert to destination format; on success ret is the number of
+ * samples actually produced per channel */
+ ret = swr_convert(swr_ctx, dst_data, dst_nb_samples, (const uint8_t **)src_data, src_nb_samples);
+ if (ret < 0) {
+ fprintf(stderr, "Error while converting\n");
+ goto end;
+ }
+ dst_bufsize = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels,
+ ret, dst_sample_fmt, 1);
+ if (dst_bufsize < 0) {
+ fprintf(stderr, "Could not get sample buffer size\n");
+ goto end;
+ }
+ printf("t:%f in:%d out:%d\n", t, src_nb_samples, ret);
+ fwrite(dst_data[0], 1, dst_bufsize, dst_file);
+ } while (t < 10); /* produce 10 seconds of audio */
+
+ if ((ret = get_format_from_sample_fmt(&fmt, dst_sample_fmt)) < 0)
+ goto end;
+ fprintf(stderr, "Resampling succeeded. Play the output file with the command:\n"
+ "ffplay -f %s -channel_layout %"PRId64" -channels %d -ar %d %s\n",
+ fmt, dst_ch_layout, dst_nb_channels, dst_rate, dst_filename);
+
+end:
+ fclose(dst_file);
+
+ /* free the sample buffers (data plane first, then the pointer array) */
+ if (src_data)
+ av_freep(&src_data[0]);
+ av_freep(&src_data);
+
+ if (dst_data)
+ av_freep(&dst_data[0]);
+ av_freep(&dst_data);
+
+ swr_free(&swr_ctx);
+ /* exit status 1 on any error, 0 on success */
+ return ret < 0;
+}
diff --git a/doc/examples/scaling_video.c b/doc/examples/scaling_video.c
new file mode 100644
index 0000000000..587f3abe4f
--- /dev/null
+++ b/doc/examples/scaling_video.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2012 Stefano Sabatini
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * libswscale API use example.
+ * @example scaling_video.c
+ */
+
+#include <libavutil/imgutils.h>
+#include <libavutil/parseutils.h>
+#include <libswscale/swscale.h>
+
+/* Fill a YUV420P picture with a synthetic gradient; frame_index shifts
+ * the pattern so successive frames animate. The Cb/Cr planes are
+ * quarter size (half width, half height), hence the /2 loop bounds. */
+static void fill_yuv_image(uint8_t *data[4], int linesize[4],
+ int width, int height, int frame_index)
+{
+ int x, y;
+
+ /* Y */
+ for (y = 0; y < height; y++)
+ for (x = 0; x < width; x++)
+ data[0][y * linesize[0] + x] = x + y + frame_index * 3;
+
+ /* Cb and Cr */
+ for (y = 0; y < height / 2; y++) {
+ for (x = 0; x < width / 2; x++) {
+ data[1][y * linesize[1] + x] = 128 + y + frame_index * 2;
+ data[2][y * linesize[2] + x] = 64 + x + frame_index * 5;
+ }
+ }
+}
+
+/*
+ * Demonstrate libswscale: generate 100 synthetic 320x240 YUV420P
+ * frames, rescale/convert each one to RGB24 at the size given in
+ * argv[2] (WxH or a size abbreviation), and append the raw frames to
+ * the file named in argv[1].
+ */
+int main(int argc, char **argv)
+{
+ uint8_t *src_data[4], *dst_data[4];
+ int src_linesize[4], dst_linesize[4];
+ int src_w = 320, src_h = 240, dst_w, dst_h;
+ enum AVPixelFormat src_pix_fmt = AV_PIX_FMT_YUV420P, dst_pix_fmt = AV_PIX_FMT_RGB24;
+ const char *dst_size = NULL;
+ const char *dst_filename = NULL;
+ FILE *dst_file;
+ int dst_bufsize;
+ struct SwsContext *sws_ctx;
+ int i, ret;
+
+ if (argc != 3) {
+ fprintf(stderr, "Usage: %s output_file output_size\n"
+ "API example program to show how to scale an image with libswscale.\n"
+ "This program generates a series of pictures, rescales them to the given "
+ "output_size and saves them to an output file named output_file\n."
+ "\n", argv[0]);
+ exit(1);
+ }
+ dst_filename = argv[1];
+ dst_size = argv[2];
+
+ /* parse "WxH" or a size abbreviation into dst_w/dst_h */
+ if (av_parse_video_size(&dst_w, &dst_h, dst_size) < 0) {
+ fprintf(stderr,
+ "Invalid size '%s', must be in the form WxH or a valid size abbreviation\n",
+ dst_size);
+ exit(1);
+ }
+
+ dst_file = fopen(dst_filename, "wb");
+ if (!dst_file) {
+ fprintf(stderr, "Could not open destination file %s\n", dst_filename);
+ exit(1);
+ }
+
+ /* create scaling context */
+ sws_ctx = sws_getContext(src_w, src_h, src_pix_fmt,
+ dst_w, dst_h, dst_pix_fmt,
+ SWS_BILINEAR, NULL, NULL, NULL);
+ if (!sws_ctx) {
+ fprintf(stderr,
+ "Impossible to create scale context for the conversion "
+ "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
+ av_get_pix_fmt_name(src_pix_fmt), src_w, src_h,
+ av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h);
+ ret = AVERROR(EINVAL);
+ goto end;
+ }
+
+ /* allocate source and destination image buffers */
+ if ((ret = av_image_alloc(src_data, src_linesize,
+ src_w, src_h, src_pix_fmt, 16)) < 0) {
+ fprintf(stderr, "Could not allocate source image\n");
+ goto end;
+ }
+
+ /* buffer is going to be written to rawvideo file, no alignment */
+ if ((ret = av_image_alloc(dst_data, dst_linesize,
+ dst_w, dst_h, dst_pix_fmt, 1)) < 0) {
+ fprintf(stderr, "Could not allocate destination image\n");
+ goto end;
+ }
+ /* av_image_alloc returns the buffer size on success */
+ dst_bufsize = ret;
+
+ for (i = 0; i < 100; i++) {
+ /* generate synthetic video */
+ fill_yuv_image(src_data, src_linesize, src_w, src_h, i);
+
+ /* convert to destination format */
+ sws_scale(sws_ctx, (const uint8_t * const*)src_data,
+ src_linesize, 0, src_h, dst_data, dst_linesize);
+
+ /* write scaled image to file */
+ fwrite(dst_data[0], 1, dst_bufsize, dst_file);
+ }
+
+ fprintf(stderr, "Scaling succeeded. Play the output file with the command:\n"
+ "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
+ av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h, dst_filename);
+
+end:
+ fclose(dst_file);
+ /* NOTE(review): if sws_getContext fails, src_data/dst_data are still
+ * uninitialized when freed below (undefined behavior) -- confirm */
+ av_freep(&src_data[0]);
+ av_freep(&dst_data[0]);
+ sws_freeContext(sws_ctx);
+ return ret < 0;
+}
diff --git a/doc/examples/transcode_aac.c b/doc/examples/transcode_aac.c
index 3eebfb9d02..486e54c281 100644
--- a/doc/examples/transcode_aac.c
+++ b/doc/examples/transcode_aac.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -21,7 +21,7 @@
* simple audio converter
*
* @example transcode_aac.c
- * Convert an input audio file to AAC in an MP4 container using Libav.
+ * Convert an input audio file to AAC in an MP4 container using FFmpeg.
* @author Andreas Unterweger (dustsigns@gmail.com)
*/
@@ -33,11 +33,12 @@
#include "libavcodec/avcodec.h"
#include "libavutil/audio_fifo.h"
+#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/frame.h"
#include "libavutil/opt.h"
-#include "libavresample/avresample.h"
+#include "libswresample/swresample.h"
/** The output bit rate in kbit/s */
#define OUTPUT_BIT_RATE 96000
@@ -49,7 +50,7 @@
* @param error Error code to be converted
* @return Corresponding error text (not thread-safe)
*/
-static char *const get_error_text(const int error)
+static const char *get_error_text(const int error)
{
static char error_buffer[255];
av_strerror(error, error_buffer, sizeof(error_buffer));
@@ -203,7 +204,7 @@ static int open_output_file(const char *filename,
return 0;
cleanup:
- avio_close((*output_format_context)->pb);
+ avio_closep(&(*output_format_context)->pb);
avformat_free_context(*output_format_context);
*output_format_context = NULL;
return error < 0 ? error : AVERROR_EXIT;
@@ -231,52 +232,46 @@ static int init_input_frame(AVFrame **frame)
/**
* Initialize the audio resampler based on the input and output codec settings.
* If the input and output sample formats differ, a conversion is required
- * libavresample takes care of this, but requires initialization.
+ * libswresample takes care of this, but requires initialization.
*/
static int init_resampler(AVCodecContext *input_codec_context,
AVCodecContext *output_codec_context,
- AVAudioResampleContext **resample_context)
+ SwrContext **resample_context)
{
- /**
- * Only initialize the resampler if it is necessary, i.e.,
- * if and only if the sample formats differ.
- */
- if (input_codec_context->sample_fmt != output_codec_context->sample_fmt ||
- input_codec_context->channels != output_codec_context->channels) {
int error;
- /** Create a resampler context for the conversion. */
- if (!(*resample_context = avresample_alloc_context())) {
- fprintf(stderr, "Could not allocate resample context\n");
- return AVERROR(ENOMEM);
- }
-
/**
+ * Create a resampler context for the conversion.
* Set the conversion parameters.
* Default channel layouts based on the number of channels
* are assumed for simplicity (they are sometimes not detected
* properly by the demuxer and/or decoder).
*/
- av_opt_set_int(*resample_context, "in_channel_layout",
- av_get_default_channel_layout(input_codec_context->channels), 0);
- av_opt_set_int(*resample_context, "out_channel_layout",
- av_get_default_channel_layout(output_codec_context->channels), 0);
- av_opt_set_int(*resample_context, "in_sample_rate",
- input_codec_context->sample_rate, 0);
- av_opt_set_int(*resample_context, "out_sample_rate",
- output_codec_context->sample_rate, 0);
- av_opt_set_int(*resample_context, "in_sample_fmt",
- input_codec_context->sample_fmt, 0);
- av_opt_set_int(*resample_context, "out_sample_fmt",
- output_codec_context->sample_fmt, 0);
+ *resample_context = swr_alloc_set_opts(NULL,
+ av_get_default_channel_layout(output_codec_context->channels),
+ output_codec_context->sample_fmt,
+ output_codec_context->sample_rate,
+ av_get_default_channel_layout(input_codec_context->channels),
+ input_codec_context->sample_fmt,
+ input_codec_context->sample_rate,
+ 0, NULL);
+ if (!*resample_context) {
+ fprintf(stderr, "Could not allocate resample context\n");
+ return AVERROR(ENOMEM);
+ }
+ /**
+ * Perform a sanity check so that the number of converted samples is
+ * not greater than the number of samples to be converted.
+ * If the sample rates differ, this case has to be handled differently
+ */
+ av_assert0(output_codec_context->sample_rate == input_codec_context->sample_rate);
/** Open the resampler with the specified parameters. */
- if ((error = avresample_open(*resample_context)) < 0) {
+ if ((error = swr_init(*resample_context)) < 0) {
fprintf(stderr, "Could not open resample context\n");
- avresample_free(resample_context);
+ swr_free(resample_context);
return error;
}
- }
return 0;
}
@@ -317,7 +312,7 @@ static int decode_audio_frame(AVFrame *frame,
/** Read one audio frame from the input file into a temporary packet. */
if ((error = av_read_frame(input_format_context, &input_packet)) < 0) {
- /** If we are the the end of the file, flush the decoder below. */
+ /** If we are at the end of the file, flush the decoder below. */
if (error == AVERROR_EOF)
*finished = 1;
else {
@@ -396,30 +391,21 @@ static int init_converted_samples(uint8_t ***converted_input_samples,
* The conversion happens on a per-frame basis, the size of which is specified
* by frame_size.
*/
-static int convert_samples(uint8_t **input_data,
+static int convert_samples(const uint8_t **input_data,
uint8_t **converted_data, const int frame_size,
- AVAudioResampleContext *resample_context)
+ SwrContext *resample_context)
{
int error;
/** Convert the samples using the resampler. */
- if ((error = avresample_convert(resample_context, converted_data, 0,
- frame_size, input_data, 0, frame_size)) < 0) {
+ if ((error = swr_convert(resample_context,
+ converted_data, frame_size,
+ input_data , frame_size)) < 0) {
fprintf(stderr, "Could not convert input samples (error '%s')\n",
get_error_text(error));
return error;
}
- /**
- * Perform a sanity check so that the number of converted samples is
- * not greater than the number of samples to be converted.
- * If the sample rates differ, this case has to be handled differently
- */
- if (avresample_available(resample_context)) {
- fprintf(stderr, "Converted samples left over\n");
- return AVERROR_EXIT;
- }
-
return 0;
}
@@ -456,7 +442,7 @@ static int read_decode_convert_and_store(AVAudioFifo *fifo,
AVFormatContext *input_format_context,
AVCodecContext *input_codec_context,
AVCodecContext *output_codec_context,
- AVAudioResampleContext *resampler_context,
+ SwrContext *resampler_context,
int *finished)
{
/** Temporary storage of the input samples of the frame read from the file. */
@@ -493,7 +479,7 @@ static int read_decode_convert_and_store(AVAudioFifo *fifo,
* Convert the input samples to the desired output sample format.
* This requires a temporary storage provided by converted_input_samples.
*/
- if (convert_samples(input_frame->extended_data, converted_input_samples,
+ if (convert_samples((const uint8_t**)input_frame->extended_data, converted_input_samples,
input_frame->nb_samples, resampler_context))
goto cleanup;
@@ -664,7 +650,7 @@ int main(int argc, char **argv)
{
AVFormatContext *input_format_context = NULL, *output_format_context = NULL;
AVCodecContext *input_codec_context = NULL, *output_codec_context = NULL;
- AVAudioResampleContext *resample_context = NULL;
+ SwrContext *resample_context = NULL;
AVAudioFifo *fifo = NULL;
int ret = AVERROR_EXIT;
@@ -768,14 +754,11 @@ int main(int argc, char **argv)
cleanup:
if (fifo)
av_audio_fifo_free(fifo);
- if (resample_context) {
- avresample_close(resample_context);
- avresample_free(&resample_context);
- }
+ swr_free(&resample_context);
if (output_codec_context)
avcodec_close(output_codec_context);
if (output_format_context) {
- avio_close(output_format_context->pb);
+ avio_closep(&output_format_context->pb);
avformat_free_context(output_format_context);
}
if (input_codec_context)
diff --git a/doc/examples/transcoding.c b/doc/examples/transcoding.c
new file mode 100644
index 0000000000..d5d410b168
--- /dev/null
+++ b/doc/examples/transcoding.c
@@ -0,0 +1,582 @@
+/*
+ * Copyright (c) 2010 Nicolas George
+ * Copyright (c) 2011 Stefano Sabatini
+ * Copyright (c) 2014 Andrey Utkin
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * API example for demuxing, decoding, filtering, encoding and muxing
+ * @example transcoding.c
+ */
+
+#include <libavcodec/avcodec.h>
+#include <libavformat/avformat.h>
+#include <libavfilter/avfiltergraph.h>
+#include <libavfilter/buffersink.h>
+#include <libavfilter/buffersrc.h>
+#include <libavutil/opt.h>
+#include <libavutil/pixdesc.h>
+
/* Demuxer context for the input file; opened in open_input_file(). */
static AVFormatContext *ifmt_ctx;
/* Muxer context for the output file; created in open_output_file(). */
static AVFormatContext *ofmt_ctx;
/* Per-stream filter graph: decoded frames are pushed into buffersrc_ctx
 * and filtered frames are pulled from buffersink_ctx. */
typedef struct FilteringContext {
    AVFilterContext *buffersink_ctx;
    AVFilterContext *buffersrc_ctx;
    AVFilterGraph *filter_graph;
} FilteringContext;
/* Array with one entry per input stream, allocated in init_filters().
 * Entries for streams that are not audio/video keep a NULL filter_graph,
 * which the main loop uses to decide between transcoding and remuxing. */
static FilteringContext *filter_ctx;
+
+static int open_input_file(const char *filename)
+{
+ int ret;
+ unsigned int i;
+
+ ifmt_ctx = NULL;
+ if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
+ return ret;
+ }
+
+ if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
+ return ret;
+ }
+
+ for (i = 0; i < ifmt_ctx->nb_streams; i++) {
+ AVStream *stream;
+ AVCodecContext *codec_ctx;
+ stream = ifmt_ctx->streams[i];
+ codec_ctx = stream->codec;
+ /* Reencode video & audio and remux subtitles etc. */
+ if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
+ || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
+ /* Open decoder */
+ ret = avcodec_open2(codec_ctx,
+ avcodec_find_decoder(codec_ctx->codec_id), NULL);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
+ return ret;
+ }
+ }
+ }
+
+ av_dump_format(ifmt_ctx, 0, filename, 0);
+ return 0;
+}
+
+static int open_output_file(const char *filename)
+{
+ AVStream *out_stream;
+ AVStream *in_stream;
+ AVCodecContext *dec_ctx, *enc_ctx;
+ AVCodec *encoder;
+ int ret;
+ unsigned int i;
+
+ ofmt_ctx = NULL;
+ avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
+ if (!ofmt_ctx) {
+ av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
+ return AVERROR_UNKNOWN;
+ }
+
+
+ for (i = 0; i < ifmt_ctx->nb_streams; i++) {
+ out_stream = avformat_new_stream(ofmt_ctx, NULL);
+ if (!out_stream) {
+ av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
+ return AVERROR_UNKNOWN;
+ }
+
+ in_stream = ifmt_ctx->streams[i];
+ dec_ctx = in_stream->codec;
+ enc_ctx = out_stream->codec;
+
+ if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
+ || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
+ /* in this example, we choose transcoding to same codec */
+ encoder = avcodec_find_encoder(dec_ctx->codec_id);
+ if (!encoder) {
+ av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* In this example, we transcode to same properties (picture size,
+ * sample rate etc.). These properties can be changed for output
+ * streams easily using filters */
+ if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
+ enc_ctx->height = dec_ctx->height;
+ enc_ctx->width = dec_ctx->width;
+ enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
+ /* take first format from list of supported formats */
+ enc_ctx->pix_fmt = encoder->pix_fmts[0];
+ /* video time_base can be set to whatever is handy and supported by encoder */
+ enc_ctx->time_base = dec_ctx->time_base;
+ } else {
+ enc_ctx->sample_rate = dec_ctx->sample_rate;
+ enc_ctx->channel_layout = dec_ctx->channel_layout;
+ enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
+ /* take first format from list of supported formats */
+ enc_ctx->sample_fmt = encoder->sample_fmts[0];
+ enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
+ }
+
+ /* Third parameter can be used to pass settings to encoder */
+ ret = avcodec_open2(enc_ctx, encoder, NULL);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
+ return ret;
+ }
+ } else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
+ av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
+ return AVERROR_INVALIDDATA;
+ } else {
+ /* if this stream must be remuxed */
+ ret = avcodec_copy_context(ofmt_ctx->streams[i]->codec,
+ ifmt_ctx->streams[i]->codec);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Copying stream context failed\n");
+ return ret;
+ }
+ }
+
+ if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
+ enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
+
+ }
+ av_dump_format(ofmt_ctx, 0, filename, 1);
+
+ if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
+ ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
+ return ret;
+ }
+ }
+
+ /* init muxer, write output file header */
+ ret = avformat_write_header(ofmt_ctx, NULL);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
+ return ret;
+ }
+
+ return 0;
+}
+
/**
 * Build a single-input/single-output filter graph for one stream:
 * "buffer -> <filter_spec> -> buffersink" for video,
 * "abuffer -> <filter_spec> -> abuffersink" for audio.
 *
 * The source is configured from the decoder's properties so it accepts the
 * decoded frames; the sink is constrained to the encoder's pixel/sample
 * format (and, for audio, channel layout and sample rate) so filtered
 * frames can be fed straight to the encoder.
 *
 * On success the created contexts are stored in *fctx.
 * Returns 0 on success, a negative AVERROR code on failure.
 *
 * NOTE(review): on failure the partially built filter_graph is not freed
 * here and fctx is not filled in, so main()'s cleanup cannot free it — the
 * graph leaks on the error paths.  Consider avfilter_graph_free() before
 * returning an error.
 */
static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
        AVCodecContext *enc_ctx, const char *filter_spec)
{
    char args[512];
    int ret = 0;
    AVFilter *buffersrc = NULL;
    AVFilter *buffersink = NULL;
    AVFilterContext *buffersrc_ctx = NULL;
    AVFilterContext *buffersink_ctx = NULL;
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs = avfilter_inout_alloc();
    AVFilterGraph *filter_graph = avfilter_graph_alloc();

    if (!outputs || !inputs || !filter_graph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        buffersrc = avfilter_get_by_name("buffer");
        buffersink = avfilter_get_by_name("buffersink");
        if (!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        /* Describe the frames the decoder will produce so the buffer
         * source accepts them. */
        snprintf(args, sizeof(args),
                "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
                dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
                dec_ctx->time_base.num, dec_ctx->time_base.den,
                dec_ctx->sample_aspect_ratio.num,
                dec_ctx->sample_aspect_ratio.den);

        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                args, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
            goto end;
        }

        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                NULL, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
            goto end;
        }

        /* Force the sink to output exactly the encoder's pixel format. */
        ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
                (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
                AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
            goto end;
        }
    } else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        buffersrc = avfilter_get_by_name("abuffer");
        buffersink = avfilter_get_by_name("abuffersink");
        if (!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        /* Pick a default layout when the decoder did not report one, so the
         * abuffer args below are always complete. */
        if (!dec_ctx->channel_layout)
            dec_ctx->channel_layout =
                av_get_default_channel_layout(dec_ctx->channels);
        snprintf(args, sizeof(args),
                "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
                dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
                av_get_sample_fmt_name(dec_ctx->sample_fmt),
                dec_ctx->channel_layout);
        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                args, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
            goto end;
        }

        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                NULL, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
            goto end;
        }

        /* Constrain the sink to the encoder's sample format, channel layout
         * and sample rate so no extra conversion step is needed. */
        ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
                (uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
                AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
                (uint8_t*)&enc_ctx->channel_layout,
                sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
                (uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
                AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
            goto end;
        }
    } else {
        /* Callers only pass audio/video streams; anything else is a bug. */
        ret = AVERROR_UNKNOWN;
        goto end;
    }

    /* Endpoints for the filter graph. */
    outputs->name       = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx    = 0;
    outputs->next       = NULL;

    inputs->name       = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx    = 0;
    inputs->next       = NULL;

    if (!outputs->name || !inputs->name) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* Parse filter_spec between the "in" and "out" endpoints, then
     * negotiate formats and finalize the graph. */
    if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
                    &inputs, &outputs, NULL)) < 0)
        goto end;

    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
        goto end;

    /* Fill FilteringContext */
    fctx->buffersrc_ctx = buffersrc_ctx;
    fctx->buffersink_ctx = buffersink_ctx;
    fctx->filter_graph = filter_graph;

end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);

    return ret;
}
+
+static int init_filters(void)
+{
+ const char *filter_spec;
+ unsigned int i;
+ int ret;
+ filter_ctx = av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
+ if (!filter_ctx)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < ifmt_ctx->nb_streams; i++) {
+ filter_ctx[i].buffersrc_ctx = NULL;
+ filter_ctx[i].buffersink_ctx = NULL;
+ filter_ctx[i].filter_graph = NULL;
+ if (!(ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO
+ || ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO))
+ continue;
+
+
+ if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
+ filter_spec = "null"; /* passthrough (dummy) filter for video */
+ else
+ filter_spec = "anull"; /* passthrough (dummy) filter for audio */
+ ret = init_filter(&filter_ctx[i], ifmt_ctx->streams[i]->codec,
+ ofmt_ctx->streams[i]->codec, filter_spec);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
/**
 * Encode one filtered frame for the given stream and mux the resulting
 * packet into the output file.
 *
 * Ownership: this function ALWAYS frees filt_frame, even on error.
 * Passing filt_frame == NULL flushes the encoder's delayed frames.
 * got_frame may be NULL if the caller does not care whether a packet was
 * produced (a local is substituted in that case).
 *
 * Returns 0 on success (including "no packet produced yet"), a negative
 * AVERROR code on failure.
 */
static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
    int ret;
    int got_frame_local;
    AVPacket enc_pkt;
    /* Dispatch on the stream's media type; both encode functions share the
     * same signature. */
    int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
        (ifmt_ctx->streams[stream_index]->codec->codec_type ==
         AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;

    if (!got_frame)
        got_frame = &got_frame_local;

    av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
    /* encode filtered frame */
    enc_pkt.data = NULL;
    enc_pkt.size = 0;
    av_init_packet(&enc_pkt);
    ret = enc_func(ofmt_ctx->streams[stream_index]->codec, &enc_pkt,
            filt_frame, got_frame);
    /* The frame is consumed here regardless of the encode result. */
    av_frame_free(&filt_frame);
    if (ret < 0)
        return ret;
    if (!(*got_frame))
        return 0;

    /* prepare packet for muxing: rescale timestamps from the encoder's
     * time base to the output stream's time base */
    enc_pkt.stream_index = stream_index;
    av_packet_rescale_ts(&enc_pkt,
                         ofmt_ctx->streams[stream_index]->codec->time_base,
                         ofmt_ctx->streams[stream_index]->time_base);

    av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
    /* mux encoded frame; av_interleaved_write_frame takes ownership of
     * the packet's payload */
    ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
    return ret;
}
+
/**
 * Push one decoded frame into the stream's filter graph, then drain every
 * filtered frame that becomes available and hand each one to
 * encode_write_frame().
 *
 * Passing frame == NULL flushes the filter graph (used at EOF).
 * Returns 0 on success, a negative AVERROR code on failure.
 */
static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
{
    int ret;
    AVFrame *filt_frame;

    av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
    /* push the decoded frame into the filtergraph */
    ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
            frame, 0);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
        return ret;
    }

    /* pull filtered frames from the filtergraph; one push may yield zero,
     * one or several output frames */
    while (1) {
        filt_frame = av_frame_alloc();
        if (!filt_frame) {
            ret = AVERROR(ENOMEM);
            break;
        }
        av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
        ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
                filt_frame);
        if (ret < 0) {
            /* if no more frames for output - returns AVERROR(EAGAIN)
             * if flushed and no more frames for output - returns AVERROR_EOF
             * rewrite retcode to 0 to show it as normal procedure completion
             */
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                ret = 0;
            av_frame_free(&filt_frame);
            break;
        }

        /* Let the encoder choose the picture type itself. */
        filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
        /* encode_write_frame() frees filt_frame in every case. */
        ret = encode_write_frame(filt_frame, stream_index, NULL);
        if (ret < 0)
            break;
    }

    return ret;
}
+
+static int flush_encoder(unsigned int stream_index)
+{
+ int ret;
+ int got_frame;
+
+ if (!(ofmt_ctx->streams[stream_index]->codec->codec->capabilities &
+ AV_CODEC_CAP_DELAY))
+ return 0;
+
+ while (1) {
+ av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
+ ret = encode_write_frame(NULL, stream_index, &got_frame);
+ if (ret < 0)
+ break;
+ if (!got_frame)
+ return 0;
+ }
+ return ret;
+}
+
/**
 * Entry point: open input and output, build the filter graphs, then for
 * every demuxed packet either decode -> filter -> encode -> mux (streams
 * with a filter graph) or remux the packet as-is.  At EOF, flush filters
 * and encoders, write the trailer and release all resources.
 */
int main(int argc, char **argv)
{
    int ret;
    AVPacket packet = { .data = NULL, .size = 0 };
    AVFrame *frame = NULL;
    enum AVMediaType type;
    unsigned int stream_index;
    unsigned int i;
    int got_frame;
    /* Decode dispatcher selected per packet by media type below. */
    int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);

    if (argc != 3) {
        av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
        return 1;
    }

    av_register_all();
    avfilter_register_all();

    if ((ret = open_input_file(argv[1])) < 0)
        goto end;
    if ((ret = open_output_file(argv[2])) < 0)
        goto end;
    if ((ret = init_filters()) < 0)
        goto end;

    /* read all packets */
    while (1) {
        if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
            break;
        stream_index = packet.stream_index;
        type = ifmt_ctx->streams[packet.stream_index]->codec->codec_type;
        av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
                stream_index);

        /* A non-NULL filter_graph marks the stream for transcoding;
         * see init_filters(). */
        if (filter_ctx[stream_index].filter_graph) {
            av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
            frame = av_frame_alloc();
            if (!frame) {
                ret = AVERROR(ENOMEM);
                break;
            }
            /* Rescale packet timestamps from the stream's time base to the
             * decoder's time base before decoding. */
            av_packet_rescale_ts(&packet,
                                 ifmt_ctx->streams[stream_index]->time_base,
                                 ifmt_ctx->streams[stream_index]->codec->time_base);
            dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
                avcodec_decode_audio4;
            ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame,
                    &got_frame, &packet);
            if (ret < 0) {
                av_frame_free(&frame);
                av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                break;
            }

            if (got_frame) {
                frame->pts = av_frame_get_best_effort_timestamp(frame);
                ret = filter_encode_write_frame(frame, stream_index);
                av_frame_free(&frame);
                if (ret < 0)
                    goto end;
            } else {
                av_frame_free(&frame);
            }
        } else {
            /* remux this frame without reencoding; only timestamps need
             * converting between the two streams' time bases */
            av_packet_rescale_ts(&packet,
                                 ifmt_ctx->streams[stream_index]->time_base,
                                 ofmt_ctx->streams[stream_index]->time_base);

            ret = av_interleaved_write_frame(ofmt_ctx, &packet);
            if (ret < 0)
                goto end;
        }
        av_packet_unref(&packet);
    }

    /* flush filters and encoders */
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        /* flush filter */
        if (!filter_ctx[i].filter_graph)
            continue;
        ret = filter_encode_write_frame(NULL, i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
            goto end;
        }

        /* flush encoder */
        ret = flush_encoder(i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
            goto end;
        }
    }

    av_write_trailer(ofmt_ctx);
end:
    /* Unconditional cleanup; unref/free on NULL or already-unreffed
     * objects is harmless for these APIs. */
    av_packet_unref(&packet);
    av_frame_free(&frame);
    /* NOTE(review): if open_input_file() failed, ifmt_ctx is presumably
     * NULL here (avformat_open_input frees it on error), and this loop
     * would dereference it — verify and guard if needed. */
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        avcodec_close(ifmt_ctx->streams[i]->codec);
        if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec)
            avcodec_close(ofmt_ctx->streams[i]->codec);
        if (filter_ctx && filter_ctx[i].filter_graph)
            avfilter_graph_free(&filter_ctx[i].filter_graph);
    }
    av_free(filter_ctx);
    avformat_close_input(&ifmt_ctx);
    if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);

    if (ret < 0)
        av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));

    return ret ? 1 : 0;
}