Diffstat (limited to 'libavutil/frame.c')
-rw-r--r--  libavutil/frame.c  |  190
1 file changed, 161 insertions(+), 29 deletions(-)
diff --git a/libavutil/frame.c b/libavutil/frame.c
index 48df311dd2..4ee06306ce 100644
--- a/libavutil/frame.c
+++ b/libavutil/frame.c
@@ -1,23 +1,24 @@
/*
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "channel_layout.h"
+#include "avassert.h"
#include "buffer.h"
#include "common.h"
#include "dict.h"
@@ -26,6 +27,65 @@
#include "mem.h"
#include "samplefmt.h"
+MAKE_ACCESSORS(AVFrame, frame, int64_t, best_effort_timestamp)
+MAKE_ACCESSORS(AVFrame, frame, int64_t, pkt_duration)
+MAKE_ACCESSORS(AVFrame, frame, int64_t, pkt_pos)
+MAKE_ACCESSORS(AVFrame, frame, int64_t, channel_layout)
+MAKE_ACCESSORS(AVFrame, frame, int, channels)
+MAKE_ACCESSORS(AVFrame, frame, int, sample_rate)
+MAKE_ACCESSORS(AVFrame, frame, AVDictionary *, metadata)
+MAKE_ACCESSORS(AVFrame, frame, int, decode_error_flags)
+MAKE_ACCESSORS(AVFrame, frame, int, pkt_size)
+MAKE_ACCESSORS(AVFrame, frame, enum AVColorSpace, colorspace)
+MAKE_ACCESSORS(AVFrame, frame, enum AVColorRange, color_range)
+
+#define CHECK_CHANNELS_CONSISTENCY(frame) \
+ av_assert2(!(frame)->channel_layout || \
+ (frame)->channels == \
+ av_get_channel_layout_nb_channels((frame)->channel_layout))
+
+AVDictionary **avpriv_frame_get_metadatap(AVFrame *frame) {return &frame->metadata;};
+
+int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
+{
+ av_buffer_unref(&f->qp_table_buf);
+
+ f->qp_table_buf = buf;
+
+ f->qscale_table = buf->data;
+ f->qstride = stride;
+ f->qscale_type = qp_type;
+
+ return 0;
+}
+
+int8_t *av_frame_get_qp_table(AVFrame *f, int *stride, int *type)
+{
+ *stride = f->qstride;
+ *type = f->qscale_type;
+
+ if (!f->qp_table_buf)
+ return NULL;
+
+ return f->qp_table_buf->data;
+}
+
+const char *av_get_colorspace_name(enum AVColorSpace val)
+{
+ static const char * const name[] = {
+ [AVCOL_SPC_RGB] = "GBR",
+ [AVCOL_SPC_BT709] = "bt709",
+ [AVCOL_SPC_FCC] = "fcc",
+ [AVCOL_SPC_BT470BG] = "bt470bg",
+ [AVCOL_SPC_SMPTE170M] = "smpte170m",
+ [AVCOL_SPC_SMPTE240M] = "smpte240m",
+ [AVCOL_SPC_YCOCG] = "YCgCo",
+ };
+ if ((unsigned)val >= FF_ARRAY_ELEMS(name))
+ return NULL;
+ return name[val];
+}
+
static void get_frame_defaults(AVFrame *frame)
{
if (frame->extended_data != frame->data)
@@ -33,7 +93,13 @@ static void get_frame_defaults(AVFrame *frame)
memset(frame, 0, sizeof(*frame));
- frame->pts = AV_NOPTS_VALUE;
+ frame->pts =
+ frame->pkt_dts =
+ frame->pkt_pts = AV_NOPTS_VALUE;
+ av_frame_set_best_effort_timestamp(frame, AV_NOPTS_VALUE);
+ av_frame_set_pkt_duration (frame, 0);
+ av_frame_set_pkt_pos (frame, -1);
+ av_frame_set_pkt_size (frame, -1);
frame->key_frame = 1;
frame->sample_aspect_ratio = (AVRational){ 0, 1 };
frame->format = -1; /* unknown */
@@ -61,6 +127,7 @@ AVFrame *av_frame_alloc(void)
if (!frame)
return NULL;
+ frame->extended_data = NULL;
get_frame_defaults(frame);
return frame;
@@ -87,21 +154,25 @@ static int get_video_buffer(AVFrame *frame, int align)
return ret;
if (!frame->linesize[0]) {
- ret = av_image_fill_linesizes(frame->linesize, frame->format,
- frame->width);
- if (ret < 0)
- return ret;
+ for(i=1; i<=align; i+=i) {
+ ret = av_image_fill_linesizes(frame->linesize, frame->format,
+ FFALIGN(frame->width, i));
+ if (ret < 0)
+ return ret;
+ if (!(frame->linesize[0] & (align-1)))
+ break;
+ }
for (i = 0; i < 4 && frame->linesize[i]; i++)
frame->linesize[i] = FFALIGN(frame->linesize[i], align);
}
for (i = 0; i < 4 && frame->linesize[i]; i++) {
- int h = frame->height;
+ int h = FFALIGN(frame->height, 32);
if (i == 1 || i == 2)
- h = -((-h) >> desc->log2_chroma_h);
+ h = FF_CEIL_RSHIFT(h, desc->log2_chroma_h);
- frame->buf[i] = av_buffer_alloc(frame->linesize[i] * h);
+ frame->buf[i] = av_buffer_alloc(frame->linesize[i] * h + 16 + 16/*STRIDE_ALIGN*/ - 1);
if (!frame->buf[i])
goto fail;
@@ -125,11 +196,18 @@ fail:
static int get_audio_buffer(AVFrame *frame, int align)
{
- int channels = av_get_channel_layout_nb_channels(frame->channel_layout);
+ int channels;
int planar = av_sample_fmt_is_planar(frame->format);
- int planes = planar ? channels : 1;
+ int planes;
int ret, i;
+ if (!frame->channels)
+ frame->channels = av_get_channel_layout_nb_channels(frame->channel_layout);
+
+ channels = frame->channels;
+ planes = planar ? channels : 1;
+
+ CHECK_CHANNELS_CONSISTENCY(frame);
if (!frame->linesize[0]) {
ret = av_samples_get_buffer_size(&frame->linesize[0], channels,
frame->nb_samples, frame->format,
@@ -139,9 +217,9 @@ static int get_audio_buffer(AVFrame *frame, int align)
}
if (planes > AV_NUM_DATA_POINTERS) {
- frame->extended_data = av_mallocz(planes *
+ frame->extended_data = av_mallocz_array(planes,
sizeof(*frame->extended_data));
- frame->extended_buf = av_mallocz((planes - AV_NUM_DATA_POINTERS) *
+ frame->extended_buf = av_mallocz_array((planes - AV_NUM_DATA_POINTERS),
sizeof(*frame->extended_buf));
if (!frame->extended_data || !frame->extended_buf) {
av_freep(&frame->extended_data);
@@ -179,7 +257,7 @@ int av_frame_get_buffer(AVFrame *frame, int align)
if (frame->width > 0 && frame->height > 0)
return get_video_buffer(frame, align);
- else if (frame->nb_samples > 0 && frame->channel_layout)
+ else if (frame->nb_samples > 0 && (frame->channel_layout || frame->channels > 0))
return get_audio_buffer(frame, align);
return AVERROR(EINVAL);
@@ -192,6 +270,7 @@ int av_frame_ref(AVFrame *dst, const AVFrame *src)
dst->format = src->format;
dst->width = src->width;
dst->height = src->height;
+ dst->channels = src->channels;
dst->channel_layout = src->channel_layout;
dst->nb_samples = src->nb_samples;
@@ -213,7 +292,9 @@ int av_frame_ref(AVFrame *dst, const AVFrame *src)
}
/* ref the buffers */
- for (i = 0; i < FF_ARRAY_ELEMS(src->buf) && src->buf[i]; i++) {
+ for (i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
+ if (!src->buf[i])
+ continue;
dst->buf[i] = av_buffer_ref(src->buf[i]);
if (!dst->buf[i]) {
ret = AVERROR(ENOMEM);
@@ -222,7 +303,7 @@ int av_frame_ref(AVFrame *dst, const AVFrame *src)
}
if (src->extended_buf) {
- dst->extended_buf = av_mallocz(sizeof(*dst->extended_buf) *
+ dst->extended_buf = av_mallocz_array(sizeof(*dst->extended_buf),
src->nb_extended_buf);
if (!dst->extended_buf) {
ret = AVERROR(ENOMEM);
@@ -241,14 +322,15 @@ int av_frame_ref(AVFrame *dst, const AVFrame *src)
/* duplicate extended data */
if (src->extended_data != src->data) {
- int ch = av_get_channel_layout_nb_channels(src->channel_layout);
+ int ch = src->channels;
if (!ch) {
ret = AVERROR(EINVAL);
goto fail;
}
+ CHECK_CHANNELS_CONSISTENCY(src);
- dst->extended_data = av_malloc(sizeof(*dst->extended_data) * ch);
+ dst->extended_data = av_malloc_array(sizeof(*dst->extended_data), ch);
if (!dst->extended_data) {
ret = AVERROR(ENOMEM);
goto fail;
@@ -294,6 +376,9 @@ void av_frame_unref(AVFrame *frame)
for (i = 0; i < frame->nb_extended_buf; i++)
av_buffer_unref(&frame->extended_buf[i]);
av_freep(&frame->extended_buf);
+ av_dict_free(&frame->metadata);
+ av_buffer_unref(&frame->qp_table_buf);
+
get_frame_defaults(frame);
}
@@ -314,8 +399,9 @@ int av_frame_is_writable(AVFrame *frame)
if (!frame->buf[0])
return 0;
- for (i = 0; i < FF_ARRAY_ELEMS(frame->buf) && frame->buf[i]; i++)
- ret &= !!av_buffer_is_writable(frame->buf[i]);
+ for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
+ if (frame->buf[i])
+ ret &= !!av_buffer_is_writable(frame->buf[i]);
for (i = 0; i < frame->nb_extended_buf; i++)
ret &= !!av_buffer_is_writable(frame->extended_buf[i]);
@@ -337,6 +423,7 @@ int av_frame_make_writable(AVFrame *frame)
tmp.format = frame->format;
tmp.width = frame->width;
tmp.height = frame->height;
+ tmp.channels = frame->channels;
tmp.channel_layout = frame->channel_layout;
tmp.nb_samples = frame->nb_samples;
ret = av_frame_get_buffer(&tmp, 32);
@@ -378,24 +465,38 @@ int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
dst->palette_has_changed = src->palette_has_changed;
dst->sample_rate = src->sample_rate;
dst->opaque = src->opaque;
+#if FF_API_AVFRAME_LAVC
+ dst->type = src->type;
+#endif
dst->pkt_pts = src->pkt_pts;
dst->pkt_dts = src->pkt_dts;
+ dst->pkt_pos = src->pkt_pos;
+ dst->pkt_size = src->pkt_size;
+ dst->pkt_duration = src->pkt_duration;
dst->reordered_opaque = src->reordered_opaque;
dst->quality = src->quality;
+ dst->best_effort_timestamp = src->best_effort_timestamp;
dst->coded_picture_number = src->coded_picture_number;
dst->display_picture_number = src->display_picture_number;
dst->flags = src->flags;
+ dst->decode_error_flags = src->decode_error_flags;
dst->color_primaries = src->color_primaries;
dst->color_trc = src->color_trc;
dst->colorspace = src->colorspace;
dst->color_range = src->color_range;
dst->chroma_location = src->chroma_location;
+ av_dict_copy(&dst->metadata, src->metadata, 0);
+
memcpy(dst->error, src->error, sizeof(dst->error));
for (i = 0; i < src->nb_side_data; i++) {
const AVFrameSideData *sd_src = src->side_data[i];
- AVFrameSideData *sd_dst = av_frame_new_side_data(dst, sd_src->type,
+ AVFrameSideData *sd_dst;
+ if ( sd_src->type == AV_FRAME_DATA_PANSCAN
+ && (src->width != dst->width || src->height != dst->height))
+ continue;
+ sd_dst = av_frame_new_side_data(dst, sd_src->type,
sd_src->size);
if (!sd_dst) {
for (i = 0; i < dst->nb_side_data; i++) {
@@ -408,6 +509,18 @@ int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
av_dict_copy(&sd_dst->metadata, sd_src->metadata, 0);
}
+ dst->qscale_table = NULL;
+ dst->qstride = 0;
+ dst->qscale_type = 0;
+ if (src->qp_table_buf) {
+ dst->qp_table_buf = av_buffer_ref(src->qp_table_buf);
+ if (dst->qp_table_buf) {
+ dst->qscale_table = dst->qp_table_buf->data;
+ dst->qstride = src->qstride;
+ dst->qscale_type = src->qscale_type;
+ }
+ }
+
return 0;
}
@@ -417,9 +530,10 @@ AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane)
int planes, i;
if (frame->nb_samples) {
- int channels = av_get_channel_layout_nb_channels(frame->channel_layout);
+ int channels = frame->channels;
if (!channels)
return NULL;
+ CHECK_CHANNELS_CONSISTENCY(frame);
planes = av_sample_fmt_is_planar(frame->format) ? channels : 1;
} else
planes = 4;
@@ -491,8 +605,8 @@ static int frame_copy_video(AVFrame *dst, const AVFrame *src)
const uint8_t *src_data[4];
int i, planes;
- if (dst->width != src->width ||
- dst->height != src->height)
+ if (dst->width < src->width ||
+ dst->height < src->height)
return AVERROR(EINVAL);
planes = av_pix_fmt_count_planes(dst->format);
@@ -503,7 +617,7 @@ static int frame_copy_video(AVFrame *dst, const AVFrame *src)
memcpy(src_data, src->data, sizeof(src_data));
av_image_copy(dst->data, dst->linesize,
src_data, src->linesize,
- dst->format, dst->width, dst->height);
+ dst->format, src->width, src->height);
return 0;
}
@@ -511,14 +625,17 @@ static int frame_copy_video(AVFrame *dst, const AVFrame *src)
static int frame_copy_audio(AVFrame *dst, const AVFrame *src)
{
int planar = av_sample_fmt_is_planar(dst->format);
- int channels = av_get_channel_layout_nb_channels(dst->channel_layout);
+ int channels = dst->channels;
int planes = planar ? channels : 1;
int i;
if (dst->nb_samples != src->nb_samples ||
+ dst->channels != src->channels ||
dst->channel_layout != src->channel_layout)
return AVERROR(EINVAL);
+ CHECK_CHANNELS_CONSISTENCY(src);
+
for (i = 0; i < planes; i++)
if (!dst->extended_data[i] || !src->extended_data[i])
return AVERROR(EINVAL);
@@ -555,3 +672,18 @@ void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
}
}
}
+
+const char *av_frame_side_data_name(enum AVFrameSideDataType type)
+{
+ switch(type) {
+ case AV_FRAME_DATA_PANSCAN: return "AVPanScan";
+ case AV_FRAME_DATA_A53_CC: return "ATSC A53 Part 4 Closed Captions";
+ case AV_FRAME_DATA_STEREO3D: return "Stereoscopic 3d metadata";
+ case AV_FRAME_DATA_MATRIXENCODING: return "AVMatrixEncoding";
+ case AV_FRAME_DATA_DOWNMIX_INFO: return "Metadata relevant to a downmix procedure";
+ case AV_FRAME_DATA_REPLAYGAIN: return "AVReplayGain";
+ case AV_FRAME_DATA_DISPLAYMATRIX: return "3x3 displaymatrix";
+ case AV_FRAME_DATA_MOTION_VECTORS: return "Motion vectors";
+ }
+ return NULL;
+}
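
For context (not part of the commit), below is a minimal sketch of how the patched av_frame_get_buffer() path can be exercised from application code: after this change an audio frame only needs a channel count, not a channel_layout, to get a buffer. It assumes the program is built against this libavutil and linked with it; av_frame_set_channels()/av_frame_get_channels() are the accessors generated by the MAKE_ACCESSORS lines added above.

#include <stdio.h>
#include <libavutil/frame.h>
#include <libavutil/samplefmt.h>

int main(void)
{
    AVFrame *frame = av_frame_alloc();
    if (!frame)
        return 1;

    /* describe the audio buffer we want */
    frame->format     = AV_SAMPLE_FMT_FLTP;
    frame->nb_samples = 1024;
    av_frame_set_channels(frame, 2);    /* accessor generated by MAKE_ACCESSORS */
    /* no channel_layout set: the patched condition
     * (frame->channel_layout || frame->channels > 0) still allows allocation */

    if (av_frame_get_buffer(frame, 32) < 0) {
        av_frame_free(&frame);
        return 1;
    }

    printf("channels=%d linesize[0]=%d colorspace name example: %s\n",
           av_frame_get_channels(frame), frame->linesize[0],
           av_get_colorspace_name(AVCOL_SPC_BT709));

    av_frame_free(&frame);
    return 0;
}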