path: root/libavformat/mux.c
Diffstat (limited to 'libavformat/mux.c')
-rw-r--r--   libavformat/mux.c   346
1 file changed, 281 insertions(+), 65 deletions(-)
diff --git a/libavformat/mux.c b/libavformat/mux.c
index 505ed2e6f8..eff7caab25 100644
--- a/libavformat/mux.c
+++ b/libavformat/mux.c
@@ -1,21 +1,21 @@
/*
- * muxing functions for use within Libav
+ * muxing functions for use within FFmpeg
* Copyright (c) 2000, 2001, 2002 Fabrice Bellard
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -27,6 +27,7 @@
#include "libavutil/opt.h"
#include "libavutil/dict.h"
#include "libavutil/pixdesc.h"
+#include "libavutil/timestamp.h"
#include "metadata.h"
#include "id3v2.h"
#include "libavutil/avassert.h"
@@ -48,7 +49,7 @@
/**
* @file
- * muxing functions for use within Libav
+ * muxing functions for use within libavformat
*/
/* fraction handling */
@@ -101,6 +102,88 @@ static void frac_add(AVFrac *f, int64_t incr)
f->num = num;
}
+AVRational ff_choose_timebase(AVFormatContext *s, AVStream *st, int min_precission)
+{
+ AVRational q;
+ int j;
+
+ if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
+ q = (AVRational){1, st->codec->sample_rate};
+ } else {
+ q = st->codec->time_base;
+ }
+ for (j=2; j<14; j+= 1+(j>2))
+ while (q.den / q.num < min_precission && q.num % j == 0)
+ q.num /= j;
+ while (q.den / q.num < min_precission && q.den < (1<<24))
+ q.den <<= 1;
+
+ return q;
+}
+
+int avformat_alloc_output_context2(AVFormatContext **avctx, AVOutputFormat *oformat,
+ const char *format, const char *filename)
+{
+ AVFormatContext *s = avformat_alloc_context();
+ int ret = 0;
+
+ *avctx = NULL;
+ if (!s)
+ goto nomem;
+
+ if (!oformat) {
+ if (format) {
+ oformat = av_guess_format(format, NULL, NULL);
+ if (!oformat) {
+ av_log(s, AV_LOG_ERROR, "Requested output format '%s' is not a suitable output format\n", format);
+ ret = AVERROR(EINVAL);
+ goto error;
+ }
+ } else {
+ oformat = av_guess_format(NULL, filename, NULL);
+ if (!oformat) {
+ ret = AVERROR(EINVAL);
+ av_log(s, AV_LOG_ERROR, "Unable to find a suitable output format for '%s'\n",
+ filename);
+ goto error;
+ }
+ }
+ }
+
+ s->oformat = oformat;
+ if (s->oformat->priv_data_size > 0) {
+ s->priv_data = av_mallocz(s->oformat->priv_data_size);
+ if (!s->priv_data)
+ goto nomem;
+ if (s->oformat->priv_class) {
+ *(const AVClass**)s->priv_data= s->oformat->priv_class;
+ av_opt_set_defaults(s->priv_data);
+ }
+ } else
+ s->priv_data = NULL;
+
+ if (filename)
+ av_strlcpy(s->filename, filename, sizeof(s->filename));
+ *avctx = s;
+ return 0;
+nomem:
+ av_log(s, AV_LOG_ERROR, "Out of memory\n");
+ ret = AVERROR(ENOMEM);
+error:
+ avformat_free_context(s);
+ return ret;
+}
+
+#if FF_API_ALLOC_OUTPUT_CONTEXT
+AVFormatContext *avformat_alloc_output_context(const char *format,
+ AVOutputFormat *oformat, const char *filename)
+{
+ AVFormatContext *avctx;
+ int ret = avformat_alloc_output_context2(&avctx, oformat, format, filename);
+ return ret < 0 ? NULL : avctx;
+}
+#endif
+
static int validate_codec_tag(AVFormatContext *s, AVStream *st)
{
const AVCodecTag *avctag;
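
For reference, a minimal caller-side sketch of the avformat_alloc_output_context2() API added above; the helper function, output file name and error comments are illustrative assumptions, not part of the patch:

    #include <libavformat/avformat.h>

    /* Hypothetical caller: let the muxer be guessed from the file name.
     * Pass an explicit short name (e.g. "matroska") as the third argument
     * to override the guess. */
    static int open_output(AVFormatContext **oc)
    {
        int ret = avformat_alloc_output_context2(oc, NULL, NULL, "out.mkv");
        if (ret < 0)   /* AVERROR(EINVAL): no matching muxer, AVERROR(ENOMEM): allocation failed */
            return ret;
        return 0;
    }
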
@@ -148,6 +231,9 @@ static int init_muxer(AVFormatContext *s, AVDictionary **options)
if ((ret = av_opt_set_dict(s, &tmp)) < 0)
goto fail;
+ if (s->priv_data && s->oformat->priv_class && *(const AVClass**)s->priv_data==s->oformat->priv_class &&
+ (ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
+ goto fail;
// some sanity checks
if (s->nb_streams == 0 && !(of->flags & AVFMT_NOSTREAMS)) {
@@ -185,18 +271,18 @@ static int init_muxer(AVFormatContext *s, AVDictionary **options)
ret = AVERROR(EINVAL);
goto fail;
}
-
- if (av_cmp_q(st->sample_aspect_ratio,
- codec->sample_aspect_ratio)) {
+ if (av_cmp_q(st->sample_aspect_ratio, codec->sample_aspect_ratio)
+ && FFABS(av_q2d(st->sample_aspect_ratio) - av_q2d(codec->sample_aspect_ratio)) > 0.004*av_q2d(st->sample_aspect_ratio)
+ ) {
if (st->sample_aspect_ratio.num != 0 &&
st->sample_aspect_ratio.den != 0 &&
codec->sample_aspect_ratio.den != 0 &&
codec->sample_aspect_ratio.den != 0) {
av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between muxer "
- "(%d/%d) and encoder layer (%d/%d)\n",
- st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
- codec->sample_aspect_ratio.num,
- codec->sample_aspect_ratio.den);
+ "(%d/%d) and encoder layer (%d/%d)\n",
+ st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
+ codec->sample_aspect_ratio.num,
+ codec->sample_aspect_ratio.den);
ret = AVERROR(EINVAL);
goto fail;
}
@@ -205,21 +291,23 @@ static int init_muxer(AVFormatContext *s, AVDictionary **options)
}
if (of->codec_tag) {
- if (codec->codec_tag &&
- codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
- !av_codec_get_tag(of->codec_tag, codec->codec_id) &&
- !validate_codec_tag(s, st)) {
+ if ( codec->codec_tag
+ && codec->codec_id == AV_CODEC_ID_RAWVIDEO
+ && ( av_codec_get_tag(of->codec_tag, codec->codec_id) == 0
+ || av_codec_get_tag(of->codec_tag, codec->codec_id) == MKTAG('r', 'a', 'w', ' '))
+ && !validate_codec_tag(s, st)) {
// the current rawvideo encoding system ends up setting
- // the wrong codec_tag for avi, we override it here
+ // the wrong codec_tag for avi/mov, we override it here
codec->codec_tag = 0;
}
if (codec->codec_tag) {
if (!validate_codec_tag(s, st)) {
- char tagbuf[32];
+ char tagbuf[32], tagbuf2[32];
av_get_codec_tag_string(tagbuf, sizeof(tagbuf), codec->codec_tag);
+ av_get_codec_tag_string(tagbuf2, sizeof(tagbuf2), av_codec_get_tag(s->oformat->codec_tag, codec->codec_id));
av_log(s, AV_LOG_ERROR,
- "Tag %s/0x%08x incompatible with output codec id '%d'\n",
- tagbuf, codec->codec_tag, codec->codec_id);
+ "Tag %s/0x%08x incompatible with output codec id '%d' (%s)\n",
+ tagbuf, codec->codec_tag, codec->codec_id, tagbuf2);
ret = AVERROR_INVALIDDATA;
goto fail;
}
@@ -305,6 +393,8 @@ int avformat_write_header(AVFormatContext *s, AVDictionary **options)
if (s->oformat->write_header) {
ret = s->oformat->write_header(s);
+ if (ret >= 0 && s->pb && s->pb->error < 0)
+ ret = s->pb->error;
if (ret < 0)
return ret;
}
@@ -312,20 +402,24 @@ int avformat_write_header(AVFormatContext *s, AVDictionary **options)
if ((ret = init_pts(s)) < 0)
return ret;
+ if (s->avoid_negative_ts < 0) {
+ if (s->oformat->flags & (AVFMT_TS_NEGATIVE | AVFMT_NOTIMESTAMPS)) {
+ s->avoid_negative_ts = 0;
+ } else
+ s->avoid_negative_ts = 1;
+ }
+
return 0;
}
//FIXME merge with compute_pkt_fields
static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt)
{
- int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
+ int delay = FFMAX(st->codec->has_b_frames, st->codec->max_b_frames > 0);
int num, den, frame_size, i;
- av_dlog(s, "compute_pkt_fields2: pts:%" PRId64 " dts:%" PRId64 " cur_dts:%" PRId64 " b:%d size:%d st:%d\n",
- pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
-
-/* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
- * return AVERROR(EINVAL);*/
+ av_dlog(s, "compute_pkt_fields2: pts:%s dts:%s cur_dts:%s b:%d size:%d st:%d\n",
+ av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), delay, pkt->size, pkt->stream_index);
/* duration field */
if (pkt->duration == 0) {
@@ -340,6 +434,11 @@ static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt)
//XXX/FIXME this is a temporary hack until all encoders output pts
if ((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay) {
+ static int warned;
+ if (!warned) {
+ av_log(s, AV_LOG_WARNING, "Encoder did not produce proper pts, making some up.\n");
+ warned = 1;
+ }
pkt->dts =
// pkt->pts= st->cur_dts;
pkt->pts = st->pts.val;
@@ -360,17 +459,18 @@ static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt)
((!(s->oformat->flags & AVFMT_TS_NONSTRICT) &&
st->cur_dts >= pkt->dts) || st->cur_dts > pkt->dts)) {
av_log(s, AV_LOG_ERROR,
- "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %" PRId64 " >= %" PRId64 "\n",
- st->index, st->cur_dts, pkt->dts);
+ "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %s >= %s\n",
+ st->index, av_ts2str(st->cur_dts), av_ts2str(pkt->dts));
return AVERROR(EINVAL);
}
if (pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts) {
- av_log(s, AV_LOG_ERROR, "pts < dts in stream %d\n", st->index);
+ av_log(s, AV_LOG_ERROR, "pts (%s) < dts (%s) in stream %d\n",
+ av_ts2str(pkt->pts), av_ts2str(pkt->dts), st->index);
return AVERROR(EINVAL);
}
- av_dlog(s, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n",
- pkt->pts, pkt->dts);
+ av_dlog(s, "av_write_frame: pts2:%s dts2:%s\n",
+ av_ts2str(pkt->pts), av_ts2str(pkt->dts));
st->cur_dts = pkt->dts;
st->pts.val = pkt->dts;
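
The av_ts2str()/av_ts2timestr() helpers used in the reworked log lines come from libavutil/timestamp.h, which this patch now includes; a small standalone sketch (the helper function and time base are assumed for illustration):

    #include <libavutil/log.h>
    #include <libavutil/rational.h>
    #include <libavutil/timestamp.h>

    /* Hypothetical helper: dump a pts/dts pair the way the patched log calls do.
     * av_ts2str() prints "NOPTS" for AV_NOPTS_VALUE instead of a raw integer;
     * av_ts2timestr() additionally converts the value to seconds via the time base. */
    static void log_times(void *log_ctx, int64_t pts, int64_t dts, AVRational tb)
    {
        av_log(log_ctx, AV_LOG_DEBUG, "pts:%s pts_time:%s dts:%s\n",
               av_ts2str(pts), av_ts2timestr(pts, &tb), av_ts2str(dts));
    }
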
@@ -395,37 +495,53 @@ static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt)
return 0;
}
-/*
+/**
+ * Make timestamps non-negative, move side data from the payload to the internal struct,
+ * call the muxer, and restore the side data.
+ *
* FIXME: this function should NEVER get undefined pts/dts beside when the
* AVFMT_NOTIMESTAMPS is set.
* Those additional safety checks should be dropped once the correct checks
* are set in the callers.
*/
-
static int write_packet(AVFormatContext *s, AVPacket *pkt)
{
- int ret;
- if (!(s->oformat->flags & (AVFMT_TS_NEGATIVE | AVFMT_NOTIMESTAMPS))) {
- AVRational time_base = s->streams[pkt->stream_index]->time_base;
- int64_t offset = 0;
+ int ret, did_split;
- if (!s->offset && pkt->dts != AV_NOPTS_VALUE && pkt->dts < 0) {
+ if (s->avoid_negative_ts > 0) {
+ AVStream *st = s->streams[pkt->stream_index];
+ int64_t offset = st->mux_ts_offset;
+
+ if (pkt->dts < 0 && pkt->dts != AV_NOPTS_VALUE && !s->offset) {
s->offset = -pkt->dts;
- s->offset_timebase = time_base;
+ s->offset_timebase = st->time_base;
+ }
+
+ if (s->offset && !offset) {
+ offset = st->mux_ts_offset =
+ av_rescale_q_rnd(s->offset,
+ s->offset_timebase,
+ st->time_base,
+ AV_ROUND_UP);
}
- if (s->offset)
- offset = av_rescale_q(s->offset, s->offset_timebase, time_base);
if (pkt->dts != AV_NOPTS_VALUE)
pkt->dts += offset;
if (pkt->pts != AV_NOPTS_VALUE)
pkt->pts += offset;
+
+ av_assert2(pkt->dts == AV_NOPTS_VALUE || pkt->dts >= 0);
}
+
+ did_split = av_packet_split_side_data(pkt);
ret = s->oformat->write_packet(s, pkt);
- if (s->pb && ret >= 0 && s->flags & AVFMT_FLAG_FLUSH_PACKETS)
+ if (s->flush_packets && s->pb && ret >= 0 && s->flags & AVFMT_FLAG_FLUSH_PACKETS)
avio_flush(s->pb);
+ if (did_split)
+ av_packet_merge_side_data(pkt);
+
return ret;
}
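
The per-stream offset cached in st->mux_ts_offset above is derived with av_rescale_q_rnd(); a standalone sketch of that computation with assumed numbers (a first dts of -1024 in a 1/44100 time base, shifted into a 1/90000 time base):

    #include <stdio.h>
    #include <inttypes.h>
    #include <libavutil/mathematics.h>
    #include <libavutil/rational.h>

    int main(void)
    {
        AVRational src_tb = {1, 44100};  /* time base in which the negative dts was first seen */
        AVRational dst_tb = {1, 90000};  /* time base of the stream currently being shifted    */
        int64_t first_dts = -1024;       /* assumed value, e.g. audio encoder priming samples  */

        /* Same rounding as write_packet(): AV_ROUND_UP guarantees the shifted
         * dts can never end up below zero once the offset is added. */
        int64_t offset = av_rescale_q_rnd(-first_dts, src_tb, dst_tb, AV_ROUND_UP);
        printf("mux_ts_offset = %"PRId64"\n", offset);  /* 1024/44100 s = 2089.8 ticks, rounded up to 2090 */
        return 0;
    }
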
@@ -434,8 +550,14 @@ int av_write_frame(AVFormatContext *s, AVPacket *pkt)
int ret;
if (!pkt) {
- if (s->oformat->flags & AVFMT_ALLOW_FLUSH)
- return s->oformat->write_packet(s, pkt);
+ if (s->oformat->flags & AVFMT_ALLOW_FLUSH) {
+ ret = s->oformat->write_packet(s, NULL);
+ if (s->flush_packets && s->pb && s->pb->error >= 0)
+ avio_flush(s->pb);
+ if (ret >= 0 && s->pb && s->pb->error < 0)
+ ret = s->pb->error;
+ return ret;
+ }
return 1;
}
@@ -445,18 +567,26 @@ int av_write_frame(AVFormatContext *s, AVPacket *pkt)
return ret;
ret = write_packet(s, pkt);
+ if (ret >= 0 && s->pb && s->pb->error < 0)
+ ret = s->pb->error;
if (ret >= 0)
s->streams[pkt->stream_index]->nb_frames++;
return ret;
}
-void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
+#define CHUNK_START 0x1000
+
+int ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
{
AVPacketList **next_point, *this_pktl;
+ AVStream *st = s->streams[pkt->stream_index];
+ int chunked = s->max_chunk_size || s->max_chunk_duration;
this_pktl = av_mallocz(sizeof(AVPacketList));
+ if (!this_pktl)
+ return AVERROR(ENOMEM);
this_pktl->pkt = *pkt;
#if FF_API_DESTRUCT_PACKET
FF_DISABLE_DEPRECATION_WARNINGS
@@ -464,23 +594,48 @@ FF_DISABLE_DEPRECATION_WARNINGS
FF_ENABLE_DEPRECATION_WARNINGS
#endif
pkt->buf = NULL;
- av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-alloced memory
+ av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-allocated memory
+ av_copy_packet_side_data(&this_pktl->pkt, &this_pktl->pkt); // copy side data
if (s->streams[pkt->stream_index]->last_in_packet_buffer) {
- next_point = &(s->streams[pkt->stream_index]->last_in_packet_buffer->next);
- } else
+ next_point = &(st->last_in_packet_buffer->next);
+ } else {
next_point = &s->packet_buffer;
+ }
+ if (chunked) {
+ uint64_t max= av_rescale_q_rnd(s->max_chunk_duration, AV_TIME_BASE_Q, st->time_base, AV_ROUND_UP);
+ st->interleaver_chunk_size += pkt->size;
+ st->interleaver_chunk_duration += pkt->duration;
+ if ( (s->max_chunk_size && st->interleaver_chunk_size > s->max_chunk_size)
+ || (max && st->interleaver_chunk_duration > max)) {
+ st->interleaver_chunk_size = 0;
+ this_pktl->pkt.flags |= CHUNK_START;
+ if (max && st->interleaver_chunk_duration > max) {
+ int64_t syncoffset = (st->codec->codec_type == AVMEDIA_TYPE_VIDEO)*max/2;
+ int64_t syncto = av_rescale(pkt->dts + syncoffset, 1, max)*max - syncoffset;
+
+ st->interleaver_chunk_duration += (pkt->dts - syncto)/8 - max;
+ } else
+ st->interleaver_chunk_duration = 0;
+ }
+ }
if (*next_point) {
+ if (chunked && !(this_pktl->pkt.flags & CHUNK_START))
+ goto next_non_null;
+
if (compare(s, &s->packet_buffer_end->pkt, pkt)) {
- while (!compare(s, &(*next_point)->pkt, pkt))
+ while ( *next_point
+ && ((chunked && !((*next_point)->pkt.flags&CHUNK_START))
+ || !compare(s, &(*next_point)->pkt, pkt)))
next_point = &(*next_point)->next;
- goto next_non_null;
+ if (*next_point)
+ goto next_non_null;
} else {
next_point = &(s->packet_buffer_end->next);
}
}
- assert(!*next_point);
+ av_assert1(!*next_point);
s->packet_buffer_end = this_pktl;
next_non_null:
@@ -489,6 +644,7 @@ next_non_null:
s->streams[pkt->stream_index]->last_in_packet_buffer =
*next_point = this_pktl;
+ return 0;
}
static int interleave_compare_dts(AVFormatContext *s, AVPacket *next,
@@ -498,6 +654,16 @@ static int interleave_compare_dts(AVFormatContext *s, AVPacket *next,
AVStream *st2 = s->streams[next->stream_index];
int comp = av_compare_ts(next->dts, st2->time_base, pkt->dts,
st->time_base);
+ if (s->audio_preload && ((st->codec->codec_type == AVMEDIA_TYPE_AUDIO) != (st2->codec->codec_type == AVMEDIA_TYPE_AUDIO))) {
+ int64_t ts = av_rescale_q(pkt ->dts, st ->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO);
+ int64_t ts2= av_rescale_q(next->dts, st2->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO);
+ if (ts == ts2) {
+ ts= ( pkt ->dts* st->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st ->codec->codec_type == AVMEDIA_TYPE_AUDIO)* st->time_base.den)*st2->time_base.den
+ -( next->dts*st2->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st2->codec->codec_type == AVMEDIA_TYPE_AUDIO)*st2->time_base.den)* st->time_base.den;
+ ts2=0;
+ }
+ comp= (ts>ts2) - (ts<ts2);
+ }
if (comp == 0)
return pkt->stream_index < next->stream_index;
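
interleave_compare_dts() builds on av_compare_ts(), which orders two timestamps given in different time bases without first converting them to a common unit; a small sketch with assumed stream time bases:

    #include <stdio.h>
    #include <libavutil/mathematics.h>
    #include <libavutil/rational.h>

    int main(void)
    {
        AVRational audio_tb = {1, 48000};     /* assumed audio stream time base    */
        AVRational video_tb = {1001, 30000};  /* assumed 29.97 fps video time base */

        /* 48000 audio ticks = 1.000 s, 30 video ticks = 30 * 1001/30000 = 1.001 s,
         * so the audio timestamp sorts first and av_compare_ts() returns -1. */
        printf("%d\n", av_compare_ts(48000, audio_tb, 30, video_tb));
        return 0;
    }
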
@@ -508,27 +674,59 @@ int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
AVPacket *pkt, int flush)
{
AVPacketList *pktl;
- int stream_count = 0;
- int i;
+ int stream_count = 0, noninterleaved_count = 0;
+ int64_t delta_dts_max = 0;
+ int i, ret;
if (pkt) {
- ff_interleave_add_packet(s, pkt, interleave_compare_dts);
+ ret = ff_interleave_add_packet(s, pkt, interleave_compare_dts);
+ if (ret < 0)
+ return ret;
}
- for (i = 0; i < s->nb_streams; i++)
- stream_count += !!s->streams[i]->last_in_packet_buffer;
+ for (i = 0; i < s->nb_streams; i++) {
+ if (s->streams[i]->last_in_packet_buffer) {
+ ++stream_count;
+ } else if (s->streams[i]->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
+ ++noninterleaved_count;
+ }
+ }
- if (stream_count && (s->nb_streams == stream_count || flush)) {
+ if (s->nb_streams == stream_count) {
+ flush = 1;
+ } else if (!flush) {
+ for (i=0; i < s->nb_streams; i++) {
+ if (s->streams[i]->last_in_packet_buffer) {
+ int64_t delta_dts =
+ av_rescale_q(s->streams[i]->last_in_packet_buffer->pkt.dts,
+ s->streams[i]->time_base,
+ AV_TIME_BASE_Q) -
+ av_rescale_q(s->packet_buffer->pkt.dts,
+ s->streams[s->packet_buffer->pkt.stream_index]->time_base,
+ AV_TIME_BASE_Q);
+ delta_dts_max= FFMAX(delta_dts_max, delta_dts);
+ }
+ }
+ if (s->nb_streams == stream_count+noninterleaved_count &&
+ delta_dts_max > 20*AV_TIME_BASE) {
+ av_log(s, AV_LOG_DEBUG, "flushing with %d noninterleaved\n", noninterleaved_count);
+ flush = 1;
+ }
+ }
+ if (stream_count && flush) {
+ AVStream *st;
pktl = s->packet_buffer;
*out = pktl->pkt;
+ st = s->streams[out->stream_index];
s->packet_buffer = pktl->next;
if (!s->packet_buffer)
s->packet_buffer_end = NULL;
- if (s->streams[out->stream_index]->last_in_packet_buffer == pktl)
- s->streams[out->stream_index]->last_in_packet_buffer = NULL;
+ if (st->last_in_packet_buffer == pktl)
+ st->last_in_packet_buffer = NULL;
av_freep(&pktl);
+
return 1;
} else {
av_init_packet(out);
@@ -567,8 +765,8 @@ int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size == 0)
return 0;
- av_dlog(s, "av_interleaved_write_frame size:%d dts:%" PRId64 " pts:%" PRId64 "\n",
- pkt->size, pkt->dts, pkt->pts);
+ av_dlog(s, "av_interleaved_write_frame size:%d dts:%s pts:%s\n",
+ pkt->size, av_ts2str(pkt->dts), av_ts2str(pkt->pts));
if ((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
return ret;
@@ -594,6 +792,8 @@ int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
if (ret < 0)
return ret;
+ if(s->pb && s->pb->error)
+ return s->pb->error;
}
}
@@ -617,15 +817,18 @@ int av_write_trailer(AVFormatContext *s)
if (ret < 0)
goto fail;
+ if(s->pb && s->pb->error)
+ goto fail;
}
if (s->oformat->write_trailer)
ret = s->oformat->write_trailer(s);
- if (!(s->oformat->flags & AVFMT_NOFILE))
- avio_flush(s->pb);
-
fail:
+ if (s->pb)
+ avio_flush(s->pb);
+ if (ret == 0)
+ ret = s->pb ? s->pb->error : 0;
for (i = 0; i < s->nb_streams; i++) {
av_freep(&s->streams[i]->priv_data);
av_freep(&s->streams[i]->index_entries);
@@ -636,6 +839,15 @@ fail:
return ret;
}
+int av_get_output_timestamp(struct AVFormatContext *s, int stream,
+ int64_t *dts, int64_t *wall)
+{
+ if (!s->oformat || !s->oformat->get_output_timestamp)
+ return AVERROR(ENOSYS);
+ s->oformat->get_output_timestamp(s, stream, dts, wall);
+ return 0;
+}
+
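
A caller-side sketch of the new av_get_output_timestamp() API added just above; it only works for muxers that implement get_output_timestamp and returns AVERROR(ENOSYS) otherwise (the helper name and log message are illustrative):

    #include <inttypes.h>
    #include <libavformat/avformat.h>

    /* Hypothetical helper: report how far the muxer has actually written,
     * e.g. to drive a progress display. */
    static int report_progress(AVFormatContext *oc, int stream_index)
    {
        int64_t dts, wall;
        int ret = av_get_output_timestamp(oc, stream_index, &dts, &wall);
        if (ret < 0)
            return ret;  /* AVERROR(ENOSYS) if the muxer has no get_output_timestamp */
        av_log(oc, AV_LOG_INFO, "last output dts %"PRId64", wall clock %"PRId64"\n", dts, wall);
        return 0;
    }
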
int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
AVFormatContext *src)
{
@@ -651,5 +863,9 @@ int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
local_pkt.dts = av_rescale_q(pkt->dts,
src->streams[pkt->stream_index]->time_base,
dst->streams[dst_stream]->time_base);
+ if (pkt->duration)
+ local_pkt.duration = av_rescale_q(pkt->duration,
+ src->streams[pkt->stream_index]->time_base,
+ dst->streams[dst_stream]->time_base);
return av_write_frame(dst, &local_pkt);
}
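
The added duration handling mirrors what ff_write_chained() already does for pts and dts; a standalone sketch of the same av_rescale_q() conversion (the helper and time bases are assumptions for illustration):

    #include <libavcodec/avcodec.h>
    #include <libavutil/mathematics.h>

    /* Hypothetical helper: move a packet's timing from src_tb to dst_tb,
     * the way ff_write_chained() prepares local_pkt for the destination muxer. */
    static void rescale_pkt_times(AVPacket *pkt, AVRational src_tb, AVRational dst_tb)
    {
        if (pkt->pts != AV_NOPTS_VALUE)
            pkt->pts = av_rescale_q(pkt->pts, src_tb, dst_tb);
        if (pkt->dts != AV_NOPTS_VALUE)
            pkt->dts = av_rescale_q(pkt->dts, src_tb, dst_tb);
        if (pkt->duration)
            pkt->duration = av_rescale_q(pkt->duration, src_tb, dst_tb);
    }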