-rwxr-xr-x  configure                   2
-rw-r--r--  doc/examples/muxing.c     192
-rw-r--r--  libavcodec/h264.c          10
-rw-r--r--  libavcodec/h264.h           1
-rw-r--r--  libavcodec/lzw.c            2
-rw-r--r--  libavcodec/truemotion2.c  137
-rw-r--r--  libavformat/mov.c          19
7 files changed, 223 insertions, 140 deletions
diff --git a/configure b/configure
index 6a435fd37e..1eb8e923c1 100755
--- a/configure
+++ b/configure
@@ -3219,7 +3219,7 @@ if enabled libdc1394; then
fi
SDL_CONFIG="${cross_prefix}sdl-config"
-if check_pkg_config sdl SDL_version.h SDL_Linked_Version; then
+if check_pkg_config sdl SDL_events.h SDL_PollEvent; then
check_cpp_condition SDL.h "(SDL_MAJOR_VERSION<<16 | SDL_MINOR_VERSION<<8 | SDL_PATCHLEVEL) >= 0x010201" $sdl_cflags &&
enable sdl &&
check_struct SDL.h SDL_VideoInfo current_w $sdl_cflags && enable sdl_video_size
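For context, the probe that replaces SDL_Linked_Version only asks pkg-config whether SDL_PollEvent() can be linked; the actual version gate is the check_cpp_condition line above, which packs SDL's (major, minor, patch) triple into one integer so that 1.2.1 becomes 0x010201. A minimal standalone sketch of that compile-time test (assuming the SDL 1.2 headers are installed):

    /* Fail the build unless the installed SDL is at least 1.2.1.
     * The version triple is packed as major << 16 | minor << 8 | patch,
     * mirroring the check_cpp_condition expression in the hunk above. */
    #include <SDL.h>

    #if (SDL_MAJOR_VERSION << 16 | SDL_MINOR_VERSION << 8 | SDL_PATCHLEVEL) < 0x010201
    #error "SDL 1.2.1 or newer is required"
    #endif

    int main(void)
    {
        return 0;
    }
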
diff --git a/doc/examples/muxing.c b/doc/examples/muxing.c
index 5e2bf9f9b0..9d338dee67 100644
--- a/doc/examples/muxing.c
+++ b/doc/examples/muxing.c
@@ -43,7 +43,7 @@
#define STREAM_DURATION 200.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
-#define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */
+#define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */
static int sws_flags = SWS_BICUBIC;
@@ -80,10 +80,10 @@ static AVStream *add_audio_stream(AVFormatContext *oc, enum CodecID codec_id)
c = st->codec;
/* put sample parameters */
- c->sample_fmt = AV_SAMPLE_FMT_S16;
- c->bit_rate = 64000;
+ c->sample_fmt = AV_SAMPLE_FMT_S16;
+ c->bit_rate = 64000;
c->sample_rate = 44100;
- c->channels = 2;
+ c->channels = 2;
// some formats want stream headers to be separate
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
@@ -105,7 +105,7 @@ static void open_audio(AVFormatContext *oc, AVStream *st)
}
/* init signal generator */
- t = 0;
+ t = 0;
tincr = 2 * M_PI * 110.0 / c->sample_rate;
/* increment frequency by 110 Hz per second */
tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
@@ -114,12 +114,13 @@ static void open_audio(AVFormatContext *oc, AVStream *st)
audio_input_frame_size = 10000;
else
audio_input_frame_size = c->frame_size;
- samples = av_malloc(audio_input_frame_size * av_get_bytes_per_sample(c->sample_fmt)
- * c->channels);
+ samples = av_malloc(audio_input_frame_size *
+ av_get_bytes_per_sample(c->sample_fmt) *
+ c->channels);
}
-/* prepare a 16 bit dummy audio frame of 'frame_size' samples and
- 'nb_channels' channels */
+/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
+ * 'nb_channels' channels. */
static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
{
int j, i, v;
@@ -128,9 +129,9 @@ static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
q = samples;
for (j = 0; j < frame_size; j++) {
v = (int)(sin(t) * 10000);
- for(i = 0; i < nb_channels; i++)
+ for (i = 0; i < nb_channels; i++)
*q++ = v;
- t += tincr;
+ t += tincr;
tincr += tincr2;
}
}
@@ -138,7 +139,7 @@ static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
static void write_audio_frame(AVFormatContext *oc, AVStream *st)
{
AVCodecContext *c;
- AVPacket pkt;
+ AVPacket pkt = { 0 }; // data and size must be 0
AVFrame *frame = avcodec_alloc_frame();
int got_packet;
@@ -147,17 +148,19 @@ static void write_audio_frame(AVFormatContext *oc, AVStream *st)
get_audio_frame(samples, audio_input_frame_size, c->channels);
frame->nb_samples = audio_input_frame_size;
- avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt, (uint8_t *)samples,
- audio_input_frame_size * av_get_bytes_per_sample(c->sample_fmt)
- * c->channels, 1);
+ avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
+ (uint8_t *)samples,
+ audio_input_frame_size *
+ av_get_bytes_per_sample(c->sample_fmt) *
+ c->channels, 1);
avcodec_encode_audio2(c, &pkt, frame, &got_packet);
if (!got_packet)
return;
- pkt.stream_index= st->index;
+ pkt.stream_index = st->index;
- /* write the compressed frame in the media file */
+ /* Write the compressed frame to the media file. */
if (av_interleaved_write_frame(oc, &pkt) != 0) {
fprintf(stderr, "Error while writing audio frame\n");
exit(1);
@@ -178,7 +181,7 @@ static AVFrame *picture, *tmp_picture;
static uint8_t *video_outbuf;
static int frame_count, video_outbuf_size;
-/* add a video output stream */
+/* Add a video output stream. */
static AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id)
{
AVCodecContext *c;
@@ -210,30 +213,30 @@ static AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id)
c->codec_id = codec_id;
- /* put sample parameters */
+ /* Put sample parameters. */
c->bit_rate = 400000;
- /* resolution must be a multiple of two */
- c->width = 352;
- c->height = 288;
- /* time base: this is the fundamental unit of time (in seconds) in terms
- of which frame timestamps are represented. for fixed-fps content,
- timebase should be 1/framerate and timestamp increments should be
- identically 1. */
+ /* Resolution must be a multiple of two. */
+ c->width = 352;
+ c->height = 288;
+ /* timebase: This is the fundamental unit of time (in seconds) in terms
+ * of which frame timestamps are represented. For fixed-fps content,
+ * timebase should be 1/framerate and timestamp increments should be
+ * identical to 1. */
c->time_base.den = STREAM_FRAME_RATE;
c->time_base.num = 1;
- c->gop_size = 12; /* emit one intra frame every twelve frames at most */
- c->pix_fmt = STREAM_PIX_FMT;
+ c->gop_size = 12; /* emit one intra frame every twelve frames at most */
+ c->pix_fmt = STREAM_PIX_FMT;
if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
/* just for testing, we also add B frames */
c->max_b_frames = 2;
}
- if (c->codec_id == CODEC_ID_MPEG1VIDEO){
+ if (c->codec_id == CODEC_ID_MPEG1VIDEO) {
/* Needed to avoid using macroblocks in which some coeffs overflow.
- This does not happen with normal video, it just happens here as
- the motion of the chroma plane does not match the luma plane. */
- c->mb_decision=2;
+ * This does not happen with normal video, it just happens here as
+ * the motion of the chroma plane does not match the luma plane. */
+ c->mb_decision = 2;
}
- // some formats want stream headers to be separate
+ /* Some formats want stream headers to be separate. */
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
@@ -249,7 +252,7 @@ static AVFrame *alloc_picture(enum PixelFormat pix_fmt, int width, int height)
picture = avcodec_alloc_frame();
if (!picture)
return NULL;
- size = avpicture_get_size(pix_fmt, width, height);
+ size = avpicture_get_size(pix_fmt, width, height);
picture_buf = av_malloc(size);
if (!picture_buf) {
av_free(picture);
@@ -274,26 +277,26 @@ static void open_video(AVFormatContext *oc, AVStream *st)
video_outbuf = NULL;
if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
- /* allocate output buffer */
- /* XXX: API change will be done */
- /* buffers passed into lav* can be allocated any way you prefer,
- as long as they're aligned enough for the architecture, and
- they're freed appropriately (such as using av_free for buffers
- allocated with av_malloc) */
+ /* Allocate output buffer. */
+ /* XXX: API change will be done. */
+ /* Buffers passed into lav* can be allocated any way you prefer,
+ * as long as they're aligned enough for the architecture, and
+ * they're freed appropriately (such as using av_free for buffers
+ * allocated with av_malloc). */
video_outbuf_size = 200000;
- video_outbuf = av_malloc(video_outbuf_size);
+ video_outbuf = av_malloc(video_outbuf_size);
}
- /* allocate the encoded raw picture */
+ /* Allocate the encoded raw picture. */
picture = alloc_picture(c->pix_fmt, c->width, c->height);
if (!picture) {
fprintf(stderr, "Could not allocate picture\n");
exit(1);
}
- /* if the output format is not YUV420P, then a temporary YUV420P
- picture is needed too. It is then converted to the required
- output format */
+ /* If the output format is not YUV420P, then a temporary YUV420P
+ * picture is needed too. It is then converted to the required
+ * output format. */
tmp_picture = NULL;
if (c->pix_fmt != PIX_FMT_YUV420P) {
tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
@@ -304,23 +307,22 @@ static void open_video(AVFormatContext *oc, AVStream *st)
}
}
-/* prepare a dummy image */
-static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
+/* Prepare a dummy image. */
+static void fill_yuv_image(AVFrame *pict, int frame_index,
+ int width, int height)
{
int x, y, i;
i = frame_index;
/* Y */
- for (y = 0; y < height; y++) {
- for (x = 0; x < width; x++) {
+ for (y = 0; y < height; y++)
+ for (x = 0; x < width; x++)
pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
- }
- }
/* Cb and Cr */
- for (y = 0; y < height/2; y++) {
- for (x = 0; x < width/2; x++) {
+ for (y = 0; y < height / 2; y++) {
+ for (x = 0; x < width / 2; x++) {
pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
}
@@ -336,13 +338,13 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
c = st->codec;
if (frame_count >= STREAM_NB_FRAMES) {
- /* no more frame to compress. The codec has a latency of a few
- frames if using B frames, so we get the last frames by
- passing the same picture again */
+ /* No more frames to compress. The codec has a latency of a few
+ * frames if using B-frames, so we get the last frames by
+ * passing the same picture again. */
} else {
if (c->pix_fmt != PIX_FMT_YUV420P) {
/* as we only generate a YUV420P picture, we must convert it
- to the codec pixel format if needed */
+ * to the codec pixel format if needed */
if (img_convert_ctx == NULL) {
img_convert_ctx = sws_getContext(c->width, c->height,
PIX_FMT_YUV420P,
@@ -350,7 +352,8 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
c->pix_fmt,
sws_flags, NULL, NULL, NULL);
if (img_convert_ctx == NULL) {
- fprintf(stderr, "Cannot initialize the conversion context\n");
+ fprintf(stderr,
+ "Cannot initialize the conversion context\n");
exit(1);
}
}
@@ -362,36 +365,38 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
}
}
-
if (oc->oformat->flags & AVFMT_RAWPICTURE) {
- /* raw video case. The API will change slightly in the near
- future for that. */
+ /* Raw video case - the API will change slightly in the near
+ * future for that. */
AVPacket pkt;
av_init_packet(&pkt);
- pkt.flags |= AV_PKT_FLAG_KEY;
- pkt.stream_index = st->index;
- pkt.data = (uint8_t *)picture;
- pkt.size = sizeof(AVPicture);
+ pkt.flags |= AV_PKT_FLAG_KEY;
+ pkt.stream_index = st->index;
+ pkt.data = (uint8_t *)picture;
+ pkt.size = sizeof(AVPicture);
ret = av_interleaved_write_frame(oc, &pkt);
} else {
/* encode the image */
- out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
- /* if zero size, it means the image was buffered */
+ out_size = avcodec_encode_video(c, video_outbuf,
+ video_outbuf_size, picture);
+ /* If size is zero, it means the image was buffered. */
if (out_size > 0) {
AVPacket pkt;
av_init_packet(&pkt);
if (c->coded_frame->pts != AV_NOPTS_VALUE)
- pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
- if(c->coded_frame->key_frame)
+ pkt.pts = av_rescale_q(c->coded_frame->pts,
+ c->time_base, st->time_base);
+ if (c->coded_frame->key_frame)
pkt.flags |= AV_PKT_FLAG_KEY;
+
pkt.stream_index = st->index;
- pkt.data = video_outbuf;
- pkt.size = out_size;
+ pkt.data = video_outbuf;
+ pkt.size = out_size;
- /* write the compressed frame in the media file */
+ /* Write the compressed frame to the media file. */
ret = av_interleaved_write_frame(oc, &pkt);
} else {
ret = 0;
@@ -428,7 +433,7 @@ int main(int argc, char **argv)
double audio_pts, video_pts;
int i;
- /* initialize libavcodec, and register all codecs and formats */
+ /* Initialize libavcodec, and register all codecs and formats. */
av_register_all();
if (argc != 2) {
@@ -453,8 +458,8 @@ int main(int argc, char **argv)
}
fmt = oc->oformat;
- /* add the audio and video streams using the default format codecs
- and initialize the codecs */
+ /* Add the audio and video streams using the default format codecs
+ * and initialize the codecs. */
video_st = NULL;
audio_st = NULL;
if (fmt->video_codec != CODEC_ID_NONE) {
@@ -464,15 +469,15 @@ int main(int argc, char **argv)
audio_st = add_audio_stream(oc, fmt->audio_codec);
}
- av_dump_format(oc, 0, filename, 1);
-
- /* now that all the parameters are set, we can open the audio and
- video codecs and allocate the necessary encode buffers */
+ /* Now that all the parameters are set, we can open the audio and
+ * video codecs and allocate the necessary encode buffers. */
if (video_st)
open_video(oc, video_st);
if (audio_st)
open_audio(oc, audio_st);
+ av_dump_format(oc, 0, filename, 1);
+
/* open the output file, if needed */
if (!(fmt->flags & AVFMT_NOFILE)) {
if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
@@ -481,18 +486,20 @@ int main(int argc, char **argv)
}
}
- /* write the stream header, if any */
+ /* Write the stream header, if any. */
avformat_write_header(oc, NULL);
+
picture->pts = 0;
- for(;;) {
- /* compute current audio and video time */
+ for (;;) {
+ /* Compute current audio and video time. */
if (audio_st)
audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
else
audio_pts = 0.0;
if (video_st)
- video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
+ video_pts = (double)video_st->pts.val * video_st->time_base.num /
+ video_st->time_base.den;
else
video_pts = 0.0;
@@ -509,28 +516,27 @@ int main(int argc, char **argv)
}
}
- /* write the trailer, if any. the trailer must be written
- * before you close the CodecContexts open when you wrote the
- * header; otherwise write_trailer may try to use memory that
- * was freed on av_codec_close() */
+ /* Write the trailer, if any. The trailer must be written before you
+ * close the CodecContexts open when you wrote the header; otherwise
+ * av_write_trailer() may try to use memory that was freed on
+ * av_codec_close(). */
av_write_trailer(oc);
- /* close each codec */
+ /* Close each codec. */
if (video_st)
close_video(oc, video_st);
if (audio_st)
close_audio(oc, audio_st);
- /* free the streams */
- for(i = 0; i < oc->nb_streams; i++) {
+ /* Free the streams. */
+ for (i = 0; i < oc->nb_streams; i++) {
av_freep(&oc->streams[i]->codec);
av_freep(&oc->streams[i]);
}
- if (!(fmt->flags & AVFMT_NOFILE)) {
- /* close the output file */
+ if (!(fmt->flags & AVFMT_NOFILE))
+ /* Close the output file. */
avio_close(oc->pb);
- }
/* free the stream */
av_free(oc);
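Beyond the reindentation, the functional changes in muxing.c are the zero-initialized AVPacket in write_audio_frame() and the move of av_dump_format() to after the codecs are opened, so the dump shows the final stream parameters. A minimal sketch of why the zero-init matters, at the same libav* API level as the example (the helper name is illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <libavformat/avformat.h>

    /* With pkt.data == NULL and pkt.size == 0 (guaranteed by = { 0 }),
     * avcodec_encode_audio2() allocates the output buffer itself. An
     * uninitialized stack AVPacket may carry garbage in those fields,
     * which the encoder would treat as a caller-supplied buffer. */
    static void encode_and_write_audio(AVFormatContext *oc, AVStream *st,
                                       AVCodecContext *c, AVFrame *frame)
    {
        AVPacket pkt = { 0 }; /* data and size must be 0 */
        int got_packet = 0;

        if (avcodec_encode_audio2(c, &pkt, frame, &got_packet) < 0) {
            fprintf(stderr, "Error encoding audio frame\n");
            exit(1);
        }
        if (!got_packet)
            return;

        pkt.stream_index = st->index;
        if (av_interleaved_write_frame(oc, &pkt) != 0) {
            fprintf(stderr, "Error while writing audio frame\n");
            exit(1);
        }
    }
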
diff --git a/libavcodec/h264.c b/libavcodec/h264.c
index b504dcf1cb..c6dc24391c 100644
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -271,7 +271,7 @@ nsc:
* Identify the exact end of the bitstream
* @return the length of the trailing, or 0 if damaged
*/
-static int ff_h264_decode_rbsp_trailing(H264Context *h, const uint8_t *src)
+static int decode_rbsp_trailing(H264Context *h, const uint8_t *src)
{
int v = *src;
int r;
@@ -4260,7 +4260,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
dst_length--;
bit_length = !dst_length ? 0
: (8 * dst_length -
- ff_h264_decode_rbsp_trailing(h, ptr + dst_length - 1));
+ decode_rbsp_trailing(h, ptr + dst_length - 1));
if (s->avctx->debug & FF_DEBUG_STARTCODE)
av_log(h->s.avctx, AV_LOG_DEBUG, "NAL %d/%d at %d/%d length %d pass %d\n", hx->nal_unit_type, hx->nal_ref_idc, buf_index, buf_size, dst_length, pass);
@@ -4606,7 +4606,7 @@ av_cold void ff_h264_free_context(H264Context *h)
av_freep(h->pps_buffers + i);
}
-av_cold int ff_h264_decode_end(AVCodecContext *avctx)
+static av_cold int h264_decode_end(AVCodecContext *avctx)
{
H264Context *h = avctx->priv_data;
MpegEncContext *s = &h->s;
@@ -4664,7 +4664,7 @@ AVCodec ff_h264_decoder = {
.id = CODEC_ID_H264,
.priv_data_size = sizeof(H264Context),
.init = ff_h264_decode_init,
- .close = ff_h264_decode_end,
+ .close = h264_decode_end,
.decode = decode_frame,
.capabilities = /*CODEC_CAP_DRAW_HORIZ_BAND |*/ CODEC_CAP_DR1 |
CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS |
@@ -4684,7 +4684,7 @@ AVCodec ff_h264_vdpau_decoder = {
.id = CODEC_ID_H264,
.priv_data_size = sizeof(H264Context),
.init = ff_h264_decode_init,
- .close = ff_h264_decode_end,
+ .close = h264_decode_end,
.decode = decode_frame,
.capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
.flush = flush_dpb,
diff --git a/libavcodec/h264.h b/libavcodec/h264.h
index 7739754948..153d83c695 100644
--- a/libavcodec/h264.h
+++ b/libavcodec/h264.h
@@ -678,7 +678,6 @@ void ff_h264_hl_decode_mb(H264Context *h);
int ff_h264_frame_start(H264Context *h);
int ff_h264_decode_extradata(H264Context *h, const uint8_t *buf, int size);
av_cold int ff_h264_decode_init(AVCodecContext *avctx);
-av_cold int ff_h264_decode_end(AVCodecContext *avctx);
av_cold void ff_h264_decode_init_vlc(void);
/**
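The h264 change is a linkage cleanup: in FFmpeg the ff_ prefix marks internal symbols shared across files of a library, so once decode_rbsp_trailing() and the decoder's close callback are referenced only from h264.c, the header prototype goes away and the functions become static. A schematic before/after, with hypothetical names:

    /* Before: callable from any file in the library. */
    int ff_helper(int x);              /* prototype lives in the header */
    int ff_helper(int x) { return x; } /* definition in one .c file     */

    /* After: private to its translation unit; the compiler can warn
     * when it becomes unused and may inline it more aggressively. */
    static int helper(int x) { return x; }
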
diff --git a/libavcodec/lzw.c b/libavcodec/lzw.c
index aed1a43fac..348d2de06b 100644
--- a/libavcodec/lzw.c
+++ b/libavcodec/lzw.c
@@ -102,7 +102,7 @@ void ff_lzw_decode_tail(LZWState *p)
if(s->mode == FF_LZW_GIF) {
while (s->bs > 0) {
- if (s->pbuf + s->bs >= s->ebuf) {
+ if (s->bs >= s->ebuf - s->pbuf) {
s->pbuf = s->ebuf;
break;
} else {
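The lzw.c change is an overflow-safe rewrite of a bounds check: the old form computes s->pbuf + s->bs, a pointer that can land past the end of the buffer (undefined behavior, and it can wrap near the top of the address space); the new form compares the needed count against the space actually left. A sketch with illustrative names (cur/end/need are not the lzw.c fields):

    #include <stdbool.h>
    #include <stdint.h>

    /* Old shape: may construct an out-of-range pointer before comparing. */
    static bool overruns_old(const uint8_t *cur, const uint8_t *end, int need)
    {
        return cur + need >= end;
    }

    /* New shape: all pointer arithmetic stays inside the buffer. */
    static bool overruns_new(const uint8_t *cur, const uint8_t *end, int need)
    {
        return need >= end - cur;
    }
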
diff --git a/libavcodec/truemotion2.c b/libavcodec/truemotion2.c
index 38c01c100d..c4f66e8f6d 100644
--- a/libavcodec/truemotion2.c
+++ b/libavcodec/truemotion2.c
@@ -60,7 +60,9 @@ typedef struct TM2Context{
int *clast;
/* data for current and previous frame */
+ int *Y1_base, *U1_base, *V1_base, *Y2_base, *U2_base, *V2_base;
int *Y1, *U1, *V1, *Y2, *U2, *V2;
+ int y_stride, uv_stride;
int cur;
} TM2Context;
@@ -131,7 +133,7 @@ static int tm2_build_huff_table(TM2Context *ctx, TM2Codes *code)
/* check for correct codes parameters */
if((huff.val_bits < 1) || (huff.val_bits > 32) ||
- (huff.max_bits < 0) || (huff.max_bits > 32)) {
+ (huff.max_bits < 0) || (huff.max_bits > 25)) {
av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect tree parameters - literal length: %i, max code length: %i\n",
huff.val_bits, huff.max_bits);
return -1;
@@ -328,10 +330,21 @@ static int tm2_read_stream(TM2Context *ctx, const uint8_t *buf, int stream_id, i
return -1;
}
ctx->tokens[stream_id][i] = tm2_get_token(&ctx->gb, &codes);
+ if (stream_id <= TM2_MOT && ctx->tokens[stream_id][i] >= TM2_DELTAS) {
+ av_log(ctx->avctx, AV_LOG_ERROR, "Invalid delta token index %d for type %d, n=%d\n",
+ ctx->tokens[stream_id][i], stream_id, i);
+ return AVERROR_INVALIDDATA;
+ }
}
} else {
- for(i = 0; i < toks; i++)
+ for(i = 0; i < toks; i++) {
ctx->tokens[stream_id][i] = codes.recode[0];
+ if (stream_id <= TM2_MOT && ctx->tokens[stream_id][i] >= TM2_DELTAS) {
+ av_log(ctx->avctx, AV_LOG_ERROR, "Invalid delta token index %d for type %d, n=%d\n",
+ ctx->tokens[stream_id][i], stream_id, i);
+ return AVERROR_INVALIDDATA;
+ }
+ }
}
tm2_free_codes(&codes);
@@ -361,9 +374,9 @@ static inline int GET_TOK(TM2Context *ctx,int type) {
int *Y, *U, *V;\
int Ystride, Ustride, Vstride;\
\
- Ystride = ctx->avctx->width;\
- Vstride = (ctx->avctx->width + 1) >> 1;\
- Ustride = (ctx->avctx->width + 1) >> 1;\
+ Ystride = ctx->y_stride;\
+ Vstride = ctx->uv_stride;\
+ Ustride = ctx->uv_stride;\
Y = (ctx->cur?ctx->Y2:ctx->Y1) + by * 4 * Ystride + bx * 4;\
V = (ctx->cur?ctx->V2:ctx->V1) + by * 2 * Vstride + bx * 2;\
U = (ctx->cur?ctx->U2:ctx->U1) + by * 2 * Ustride + bx * 2;\
@@ -651,6 +664,8 @@ static inline void tm2_motion_block(TM2Context *ctx, AVFrame *pic, int bx, int b
mx = GET_TOK(ctx, TM2_MOT);
my = GET_TOK(ctx, TM2_MOT);
+ mx = av_clip(mx, -(bx * 4 + 4), ctx->avctx->width - bx * 4);
+ my = av_clip(my, -(by * 4 + 4), ctx->avctx->height - by * 4);
if (4*bx+mx<0 || 4*by+my<0 || 4*bx+mx+4 > ctx->avctx->width || 4*by+my+4 > ctx->avctx->height) {
av_log(0,0, "MV out of picture\n");
@@ -696,15 +711,12 @@ static inline void tm2_motion_block(TM2Context *ctx, AVFrame *pic, int bx, int b
static int tm2_decode_blocks(TM2Context *ctx, AVFrame *p)
{
int i, j;
- int bw, bh;
+ int w = ctx->avctx->width, h = ctx->avctx->height, bw = w >> 2, bh = h >> 2, cw = w >> 1;
int type;
int keyframe = 1;
int *Y, *U, *V;
uint8_t *dst;
- bw = ctx->avctx->width >> 2;
- bh = ctx->avctx->height >> 2;
-
for(i = 0; i < TM2_NUM_STREAMS; i++)
ctx->tok_ptrs[i] = 0;
@@ -757,17 +769,54 @@ static int tm2_decode_blocks(TM2Context *ctx, AVFrame *p)
U = (ctx->cur?ctx->U2:ctx->U1);
V = (ctx->cur?ctx->V2:ctx->V1);
dst = p->data[0];
- for(j = 0; j < ctx->avctx->height; j++){
- for(i = 0; i < ctx->avctx->width; i++){
+ for(j = 0; j < h; j++){
+ for(i = 0; i < w; i++){
int y = Y[i], u = U[i >> 1], v = V[i >> 1];
dst[3*i+0] = av_clip_uint8(y + v);
dst[3*i+1] = av_clip_uint8(y);
dst[3*i+2] = av_clip_uint8(y + u);
}
- Y += ctx->avctx->width;
+
+ /* horizontal edge extension */
+ Y[-4] = Y[-3] = Y[-2] = Y[-1] = Y[0];
+ Y[w + 3] = Y[w + 2] = Y[w + 1] = Y[w] = Y[w - 1];
+
+ /* vertical edge extension */
+ if (j == 0) {
+ memcpy(Y - 4 - 1 * ctx->y_stride, Y - 4, ctx->y_stride);
+ memcpy(Y - 4 - 2 * ctx->y_stride, Y - 4, ctx->y_stride);
+ memcpy(Y - 4 - 3 * ctx->y_stride, Y - 4, ctx->y_stride);
+ memcpy(Y - 4 - 4 * ctx->y_stride, Y - 4, ctx->y_stride);
+ } else if (j == h - 1) {
+ memcpy(Y - 4 + 1 * ctx->y_stride, Y - 4, ctx->y_stride);
+ memcpy(Y - 4 + 2 * ctx->y_stride, Y - 4, ctx->y_stride);
+ memcpy(Y - 4 + 3 * ctx->y_stride, Y - 4, ctx->y_stride);
+ memcpy(Y - 4 + 4 * ctx->y_stride, Y - 4, ctx->y_stride);
+ }
+
+ Y += ctx->y_stride;
if (j & 1) {
- U += ctx->avctx->width >> 1;
- V += ctx->avctx->width >> 1;
+ /* horizontal edge extension */
+ U[-2] = U[-1] = U[0];
+ V[-2] = V[-1] = V[0];
+ U[cw + 1] = U[cw] = U[cw - 1];
+ V[cw + 1] = V[cw] = V[cw - 1];
+
+ /* vertical edge extension */
+ if (j == 1) {
+ memcpy(U - 2 - 1 * ctx->uv_stride, U - 2, ctx->uv_stride);
+ memcpy(V - 2 - 1 * ctx->uv_stride, V - 2, ctx->uv_stride);
+ memcpy(U - 2 - 2 * ctx->uv_stride, U - 2, ctx->uv_stride);
+ memcpy(V - 2 - 2 * ctx->uv_stride, V - 2, ctx->uv_stride);
+ } else if (j == h - 1) {
+ memcpy(U - 2 + 1 * ctx->uv_stride, U - 2, ctx->uv_stride);
+ memcpy(V - 2 + 1 * ctx->uv_stride, V - 2, ctx->uv_stride);
+ memcpy(U - 2 + 2 * ctx->uv_stride, U - 2, ctx->uv_stride);
+ memcpy(V - 2 + 2 * ctx->uv_stride, V - 2, ctx->uv_stride);
+ }
+
+ U += ctx->uv_stride;
+ V += ctx->uv_stride;
}
dst += p->linesize[0];
}
@@ -813,9 +862,10 @@ static int decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "no space for tm2_read_stream\n");
return AVERROR_INVALIDDATA;
}
+
t = tm2_read_stream(l, l->buffer + skip, tm2_stream_order[i], buf_size - skip);
- if(t == -1){
- return -1;
+ if(t < 0){
+ return t;
}
skip += t;
}
@@ -834,7 +884,7 @@ static int decode_frame(AVCodecContext *avctx,
static av_cold int decode_init(AVCodecContext *avctx){
TM2Context * const l = avctx->priv_data;
- int i;
+ int i, w = avctx->width, h = avctx->height;
if((avctx->width & 3) || (avctx->height & 3)){
av_log(avctx, AV_LOG_ERROR, "Width and height must be multiple of 4\n");
@@ -848,21 +898,46 @@ static av_cold int decode_init(AVCodecContext *avctx){
ff_dsputil_init(&l->dsp, avctx);
- l->last = av_malloc(4 * sizeof(int) * (avctx->width >> 2));
- l->clast = av_malloc(4 * sizeof(int) * (avctx->width >> 2));
+ l->last = av_malloc(4 * sizeof(*l->last) * (w >> 2));
+ l->clast = av_malloc(4 * sizeof(*l->clast) * (w >> 2));
for(i = 0; i < TM2_NUM_STREAMS; i++) {
l->tokens[i] = NULL;
l->tok_lens[i] = 0;
}
- l->Y1 = av_malloc(sizeof(int) * avctx->width * avctx->height);
- l->U1 = av_malloc(sizeof(int) * ((avctx->width + 1) >> 1) * ((avctx->height + 1) >> 1));
- l->V1 = av_malloc(sizeof(int) * ((avctx->width + 1) >> 1) * ((avctx->height + 1) >> 1));
- l->Y2 = av_malloc(sizeof(int) * avctx->width * avctx->height);
- l->U2 = av_malloc(sizeof(int) * ((avctx->width + 1) >> 1) * ((avctx->height + 1) >> 1));
- l->V2 = av_malloc(sizeof(int) * ((avctx->width + 1) >> 1) * ((avctx->height + 1) >> 1));
+ w += 8;
+ h += 8;
+ l->Y1_base = av_malloc(sizeof(*l->Y1_base) * w * h);
+ l->Y2_base = av_malloc(sizeof(*l->Y2_base) * w * h);
+ l->y_stride = w;
+ w = (w + 1) >> 1;
+ h = (h + 1) >> 1;
+ l->U1_base = av_malloc(sizeof(*l->U1_base) * w * h);
+ l->V1_base = av_malloc(sizeof(*l->V1_base) * w * h);
+ l->U2_base = av_malloc(sizeof(*l->U2_base) * w * h);
+ l->V2_base = av_malloc(sizeof(*l->V2_base) * w * h);
+ l->uv_stride = w;
l->cur = 0;
+ if (!l->Y1_base || !l->Y2_base || !l->U1_base ||
+ !l->V1_base || !l->U2_base || !l->V2_base ||
+ !l->last || !l->clast) {
+ av_freep(&l->Y1_base);
+ av_freep(&l->Y2_base);
+ av_freep(&l->U1_base);
+ av_freep(&l->U2_base);
+ av_freep(&l->V1_base);
+ av_freep(&l->V2_base);
+ av_freep(&l->last);
+ av_freep(&l->clast);
+ return AVERROR(ENOMEM);
+ }
+ l->Y1 = l->Y1_base + l->y_stride * 4 + 4;
+ l->Y2 = l->Y2_base + l->y_stride * 4 + 4;
+ l->U1 = l->U1_base + l->uv_stride * 2 + 2;
+ l->U2 = l->U2_base + l->uv_stride * 2 + 2;
+ l->V1 = l->V1_base + l->uv_stride * 2 + 2;
+ l->V2 = l->V2_base + l->uv_stride * 2 + 2;
return 0;
}
@@ -877,12 +952,12 @@ static av_cold int decode_end(AVCodecContext *avctx){
for(i = 0; i < TM2_NUM_STREAMS; i++)
av_free(l->tokens[i]);
if(l->Y1){
- av_free(l->Y1);
- av_free(l->U1);
- av_free(l->V1);
- av_free(l->Y2);
- av_free(l->U2);
- av_free(l->V2);
+ av_free(l->Y1_base);
+ av_free(l->U1_base);
+ av_free(l->V1_base);
+ av_free(l->Y2_base);
+ av_free(l->U2_base);
+ av_free(l->V2_base);
}
av_freep(&l->buffer);
l->buffer_size = 0;
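The truemotion2 changes revolve around padded planes: each Y/U/V buffer is now allocated with a border (4 pixels for luma, 2 for chroma), the *_base pointers own the allocations, and Y1/U1/... point into the interior, so the new edge extension and the clamped motion vectors can touch pixels just outside the visible area without leaving the allocation. A minimal sketch of that layout under illustrative names (PaddedPlane is not a decoder type):

    #include <stdlib.h>

    typedef struct PaddedPlane {
        int *base;   /* owns the allocation, freed on close         */
        int *plane;  /* interior pointer handed to the decode loops */
        int  stride; /* padded row width                            */
    } PaddedPlane;

    /* Allocate (w + 2*border) x (h + 2*border) samples and point
     * 'plane' border rows down and border columns in, so callers may
     * safely address plane[-border] .. plane[w + border - 1] per row. */
    static int padded_plane_alloc(PaddedPlane *p, int w, int h, int border)
    {
        p->stride = w + 2 * border;
        p->base   = malloc(sizeof(*p->base) * p->stride * (h + 2 * border));
        if (!p->base)
            return -1;
        p->plane = p->base + border * p->stride + border;
        return 0;
    }
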
diff --git a/libavformat/mov.c b/libavformat/mov.c
index 0e9566fe3a..248620be68 100644
--- a/libavformat/mov.c
+++ b/libavformat/mov.c
@@ -1803,6 +1803,7 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
unsigned int stps_index = 0;
unsigned int i, j;
uint64_t stream_size = 0;
+ AVIndexEntry *mem;
/* adjust first dts according to edit list */
if ((sc->empty_duration || sc->start_time) && mov->time_scale > 0) {
@@ -1832,12 +1833,13 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
if (!sc->sample_count || st->nb_index_entries)
return;
- if (sc->sample_count >= UINT_MAX / sizeof(*st->index_entries))
+ if (sc->sample_count >= UINT_MAX / sizeof(*st->index_entries) - st->nb_index_entries)
return;
- st->index_entries = av_malloc(sc->sample_count*sizeof(*st->index_entries));
- if (!st->index_entries)
+ mem = av_realloc(st->index_entries, (st->nb_index_entries + sc->sample_count) * sizeof(*st->index_entries));
+ if (!mem)
return;
- st->index_entries_allocated_size = sc->sample_count*sizeof(*st->index_entries);
+ st->index_entries = mem;
+ st->index_entries_allocated_size = (st->nb_index_entries + sc->sample_count) * sizeof(*st->index_entries);
for (i = 0; i < sc->chunk_count; i++) {
current_offset = sc->chunk_offsets[i];
@@ -1921,12 +1923,13 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
}
av_dlog(mov->fc, "chunk count %d\n", total);
- if (total >= UINT_MAX / sizeof(*st->index_entries))
+ if (total >= UINT_MAX / sizeof(*st->index_entries) - st->nb_index_entries)
return;
- st->index_entries = av_malloc(total*sizeof(*st->index_entries));
- if (!st->index_entries)
+ mem = av_realloc(st->index_entries, (st->nb_index_entries + total) * sizeof(*st->index_entries));
+ if (!mem)
return;
- st->index_entries_allocated_size = total*sizeof(*st->index_entries);
+ st->index_entries = mem;
+ st->index_entries_allocated_size = (st->nb_index_entries + total) * sizeof(*st->index_entries);
// populate index
for (i = 0; i < sc->chunk_count; i++) {
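The mov.c change turns a replace-the-array allocation into an append: av_realloc() preserves index entries added by an earlier pass, and the UINT_MAX guard now subtracts the entries already present so (nb_index_entries + new) * sizeof(entry) cannot wrap. A generic sketch of the pattern (Entry and index_grow are illustrative, not the mov.c names):

    #include <limits.h>
    #include <stdint.h>
    #include <stdlib.h>

    typedef struct Entry { int64_t pos; int size; } Entry;

    /* Grow a dynamic array by 'add' slots without integer overflow.
     * On failure the old array is left untouched, matching the early
     * returns in mov_build_index(). */
    static int index_grow(Entry **entries, unsigned *capacity, unsigned add)
    {
        Entry *mem;

        if (add >= UINT_MAX / sizeof(**entries) - *capacity)
            return -1;  /* (capacity + add) * sizeof(Entry) would wrap */
        mem = realloc(*entries, (*capacity + add) * sizeof(**entries));
        if (!mem)
            return -1;
        *entries   = mem;
        *capacity += add;
        return 0;
    }
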