author     Michael Niedermayer <michaelni@gmx.at>   2012-01-15 01:07:00 +0100
committer  Michael Niedermayer <michaelni@gmx.at>   2012-01-15 01:16:54 +0100
commit     4640da7e58509996ff03b1a0b018ca8f337391c7 (patch)
tree       732195f8bc4987e4974df716789044c7e3db0836 /libavcodec
parent     a91f2066651416e0f9315e7fb0132587352c75dc (diff)
parent     4cd0bdae9a62d1f0366e60603222762af31e5289 (diff)
Merge remote-tracking branch 'qatar/master'

* qatar/master:
  sgidec: Use bytestream2 functions to prevent buffer overreads.
  cosmetics: Move static and inline attributes to more standard places.
  configure: provide libavfilter/version.h header to get_version()
  swscale: change yuv2yuvX code to use cpuflag().
  libx264: Don't leave max_b_frames as -1 if the user didn't set it
  FATE: convert output to rgba for the targa tests which currently output pal8
  fate: add missing reference files for targa tests in 9c2f9b0e2
  FATE: enable the 2 remaining targa conformance suite tests
  targa: add support for rgb555 palette
  FATE: fix targa tests on big-endian systems

Conflicts:
  libavcodec/sgidec.c
  libavcodec/targa.c
  libswscale/x86/output.asm
  tests/fate/image.mak

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Diffstat (limited to 'libavcodec')
-rw-r--r--   libavcodec/bytestream.h        36
-rw-r--r--   libavcodec/g722.c               2
-rw-r--r--   libavcodec/h264_loopfilter.c   51
-rw-r--r--   libavcodec/libx264.c            3
-rw-r--r--   libavcodec/sgidec.c           123
-rw-r--r--   libavcodec/targa.c             33
6 files changed, 165 insertions, 83 deletions
diff --git a/libavcodec/bytestream.h b/libavcodec/bytestream.h
index 4e17e9d9f7..71c70aac84 100644
--- a/libavcodec/bytestream.h
+++ b/libavcodec/bytestream.h
@@ -75,6 +75,42 @@ DEF (byte, 1, AV_RB8 , AV_WB8 )
#undef DEF64
#undef DEF_T
+#if HAVE_BIGENDIAN
+# define bytestream2_get_ne16 bytestream2_get_be16
+# define bytestream2_get_ne24 bytestream2_get_be24
+# define bytestream2_get_ne32 bytestream2_get_be32
+# define bytestream2_get_ne64 bytestream2_get_be64
+# define bytestream2_get_ne16u bytestream2_get_be16u
+# define bytestream2_get_ne24u bytestream2_get_be24u
+# define bytestream2_get_ne32u bytestream2_get_be32u
+# define bytestream2_get_ne64u bytestream2_get_be64u
+# define bytestream2_put_ne16 bytestream2_put_be16
+# define bytestream2_put_ne24 bytestream2_put_be24
+# define bytestream2_put_ne32 bytestream2_put_be32
+# define bytestream2_put_ne64 bytestream2_put_be64
+# define bytestream2_peek_ne16 bytestream2_peek_be16
+# define bytestream2_peek_ne24 bytestream2_peek_be24
+# define bytestream2_peek_ne32 bytestream2_peek_be32
+# define bytestream2_peek_ne64 bytestream2_peek_be64
+#else
+# define bytestream2_get_ne16 bytestream2_get_le16
+# define bytestream2_get_ne24 bytestream2_get_le24
+# define bytestream2_get_ne32 bytestream2_get_le32
+# define bytestream2_get_ne64 bytestream2_get_le64
+# define bytestream2_get_ne16u bytestream2_get_le16u
+# define bytestream2_get_ne24u bytestream2_get_le24u
+# define bytestream2_get_ne32u bytestream2_get_le32u
+# define bytestream2_get_ne64u bytestream2_get_le64u
+# define bytestream2_put_ne16 bytestream2_put_le16
+# define bytestream2_put_ne24 bytestream2_put_le24
+# define bytestream2_put_ne32 bytestream2_put_le32
+# define bytestream2_put_ne64 bytestream2_put_le64
+# define bytestream2_peek_ne16 bytestream2_peek_le16
+# define bytestream2_peek_ne24 bytestream2_peek_le24
+# define bytestream2_peek_ne32 bytestream2_peek_le32
+# define bytestream2_peek_ne64 bytestream2_peek_le64
+#endif
+
static av_always_inline void bytestream2_init(GetByteContext *g,
const uint8_t *buf, int buf_size)
{
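The block above maps the new native-endian bytestream2 accessors onto their big- or little-endian counterparts at compile time, so callers can read host-order values without testing HAVE_BIGENDIAN themselves. A minimal sketch of how a decoder might use them, assuming it is built inside the FFmpeg tree so that bytestream.h and its GetByteContext API are available (the helper name and buffer layout are illustrative only):

/* Copy 'count' 16-bit samples from a packet into a host-order array.
 * bytestream2_get_ne16u() expands to the _be16u or _le16u variant
 * depending on HAVE_BIGENDIAN, so no runtime byte-order test is needed.
 * Illustrative helper, not part of FFmpeg. */
#include "bytestream.h"

static int copy_ne16_samples(const uint8_t *buf, int buf_size,
                             uint16_t *dst, int count)
{
    GetByteContext gb;
    int i;

    bytestream2_init(&gb, buf, buf_size);
    if (bytestream2_get_bytes_left(&gb) < 2 * count)
        return AVERROR_INVALIDDATA;

    for (i = 0; i < count; i++)
        dst[i] = bytestream2_get_ne16u(&gb); /* unchecked read: size verified above */

    return 0;
}

The unchecked *u accessors skip the per-read bounds test; they are safe here only because bytestream2_get_bytes_left() has already confirmed the input is long enough, which is the same pattern the sgidec changes below follow.
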
diff --git a/libavcodec/g722.c b/libavcodec/g722.c
index e8e74242b8..2c04c40b56 100644
--- a/libavcodec/g722.c
+++ b/libavcodec/g722.c
@@ -129,7 +129,7 @@ static void do_adaptive_prediction(struct G722Band *band, const int cur_diff)
band->prev_qtzd_reconst = cur_qtzd_reconst;
}
-static int inline linear_scale_factor(const int log_factor)
+static inline int linear_scale_factor(const int log_factor)
{
const int wd1 = inv_log2_table[(log_factor >> 6) & 31];
const int shift = log_factor >> 11;
diff --git a/libavcodec/h264_loopfilter.c b/libavcodec/h264_loopfilter.c
index 67399395a1..f3a5ff6783 100644
--- a/libavcodec/h264_loopfilter.c
+++ b/libavcodec/h264_loopfilter.c
@@ -101,7 +101,11 @@ static const uint8_t tc0_table[52*3][4] = {
};
/* intra: 0 if this loopfilter call is guaranteed to be inter (bS < 4), 1 if it might be intra (bS == 4) */
-static void av_always_inline filter_mb_edgev( uint8_t *pix, int stride, const int16_t bS[4], unsigned int qp, int a, int b, H264Context *h, int intra ) {
+static av_always_inline void filter_mb_edgev(uint8_t *pix, int stride,
+ const int16_t bS[4],
+ unsigned int qp, int a, int b,
+ H264Context *h, int intra)
+{
const unsigned int index_a = qp + a;
const int alpha = alpha_table[index_a];
const int beta = beta_table[qp + b];
@@ -118,7 +122,12 @@ static void av_always_inline filter_mb_edgev( uint8_t *pix, int stride, const in
h->h264dsp.h264_h_loop_filter_luma_intra(pix, stride, alpha, beta);
}
}
-static void av_always_inline filter_mb_edgecv( uint8_t *pix, int stride, const int16_t bS[4], unsigned int qp, int a, int b, H264Context *h, int intra ) {
+
+static av_always_inline void filter_mb_edgecv(uint8_t *pix, int stride,
+ const int16_t bS[4],
+ unsigned int qp, int a, int b,
+ H264Context *h, int intra)
+{
const unsigned int index_a = qp + a;
const int alpha = alpha_table[index_a];
const int beta = beta_table[qp + b];
@@ -136,7 +145,12 @@ static void av_always_inline filter_mb_edgecv( uint8_t *pix, int stride, const i
}
}
-static void av_always_inline filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, const int16_t bS[7], int bsi, int qp, int a, int b, int intra ) {
+static av_always_inline void filter_mb_mbaff_edgev(H264Context *h, uint8_t *pix,
+ int stride,
+ const int16_t bS[7], int bsi,
+ int qp, int a, int b,
+ int intra)
+{
const unsigned int index_a = qp + a;
const int alpha = alpha_table[index_a];
const int beta = beta_table[qp + b];
@@ -153,7 +167,13 @@ static void av_always_inline filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix
h->h264dsp.h264_h_loop_filter_luma_mbaff_intra(pix, stride, alpha, beta);
}
}
-static void av_always_inline filter_mb_mbaff_edgecv( H264Context *h, uint8_t *pix, int stride, const int16_t bS[7], int bsi, int qp, int a, int b, int intra ) {
+
+static av_always_inline void filter_mb_mbaff_edgecv(H264Context *h,
+ uint8_t *pix, int stride,
+ const int16_t bS[7],
+ int bsi, int qp, int a,
+ int b, int intra)
+{
const unsigned int index_a = qp + a;
const int alpha = alpha_table[index_a];
const int beta = beta_table[qp + b];
@@ -171,7 +191,11 @@ static void av_always_inline filter_mb_mbaff_edgecv( H264Context *h, uint8_t *pi
}
}
-static void av_always_inline filter_mb_edgeh( uint8_t *pix, int stride, const int16_t bS[4], unsigned int qp, int a, int b, H264Context *h, int intra ) {
+static av_always_inline void filter_mb_edgeh(uint8_t *pix, int stride,
+ const int16_t bS[4],
+ unsigned int qp, int a, int b,
+ H264Context *h, int intra)
+{
const unsigned int index_a = qp + a;
const int alpha = alpha_table[index_a];
const int beta = beta_table[qp + b];
@@ -189,7 +213,11 @@ static void av_always_inline filter_mb_edgeh( uint8_t *pix, int stride, const in
}
}
-static void av_always_inline filter_mb_edgech( uint8_t *pix, int stride, const int16_t bS[4], unsigned int qp, int a, int b, H264Context *h, int intra ) {
+static av_always_inline void filter_mb_edgech(uint8_t *pix, int stride,
+ const int16_t bS[4],
+ unsigned int qp, int a, int b,
+ H264Context *h, int intra)
+{
const unsigned int index_a = qp + a;
const int alpha = alpha_table[index_a];
const int beta = beta_table[qp + b];
@@ -207,8 +235,15 @@ static void av_always_inline filter_mb_edgech( uint8_t *pix, int stride, const i
}
}
-static void av_always_inline h264_filter_mb_fast_internal( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr,
- unsigned int linesize, unsigned int uvlinesize, int pixel_shift) {
+static av_always_inline void h264_filter_mb_fast_internal(H264Context *h,
+ int mb_x, int mb_y,
+ uint8_t *img_y,
+ uint8_t *img_cb,
+ uint8_t *img_cr,
+ unsigned int linesize,
+ unsigned int uvlinesize,
+ int pixel_shift)
+{
MpegEncContext * const s = &h->s;
int chroma = !(CONFIG_GRAY && (s->flags&CODEC_FLAG_GRAY));
int chroma444 = CHROMA444;
diff --git a/libavcodec/libx264.c b/libavcodec/libx264.c
index f23b3fa9ae..59257f0199 100644
--- a/libavcodec/libx264.c
+++ b/libavcodec/libx264.c
@@ -532,6 +532,9 @@ static av_cold int X264_init(AVCodecContext *avctx)
// update AVCodecContext with x264 parameters
avctx->has_b_frames = x4->params.i_bframe ?
x4->params.i_bframe_pyramid ? 2 : 1 : 0;
+ if (avctx->max_b_frames < 0)
+ avctx->max_b_frames = 0;
+
avctx->bit_rate = x4->params.rc.i_bitrate*1000;
#if FF_API_X264_GLOBAL_OPTS
avctx->crf = x4->params.rc.f_rf_constant;
diff --git a/libavcodec/sgidec.c b/libavcodec/sgidec.c
index b84949daff..6a98718131 100644
--- a/libavcodec/sgidec.c
+++ b/libavcodec/sgidec.c
@@ -32,26 +32,27 @@ typedef struct SgiState {
unsigned int depth;
unsigned int bytes_per_channel;
int linesize;
+ GetByteContext g;
} SgiState;
/**
* Expand an RLE row into a channel.
- * @param in_buf input buffer
- * @param in_end end of input buffer
+ * @param s the current image state
* @param out_buf Points to one line after the output buffer.
* @param out_end end of line in output buffer
* @param pixelstride pixel stride of input buffer
* @return size of output in bytes, -1 if buffer overflows
*/
-static int expand_rle_row(const uint8_t *in_buf, const uint8_t* in_end,
- unsigned char *out_buf, uint8_t* out_end, int pixelstride)
+static int expand_rle_row(SgiState *s, uint8_t *out_buf,
+ uint8_t *out_end, int pixelstride)
{
unsigned char pixel, count;
unsigned char *orig = out_buf;
while (1) {
- if(in_buf + 1 > in_end) return -1;
- pixel = bytestream_get_byte(&in_buf);
+ if (bytestream2_get_bytes_left(&s->g) < 1)
+ return AVERROR_INVALIDDATA;
+ pixel = bytestream2_get_byteu(&s->g);
if (!(count = (pixel & 0x7f))) {
return (out_buf - orig) / pixelstride;
}
@@ -61,11 +62,11 @@ static int expand_rle_row(const uint8_t *in_buf, const uint8_t* in_end,
if (pixel & 0x80) {
while (count--) {
- *out_buf = bytestream_get_byte(&in_buf);
+ *out_buf = bytestream2_get_byte(&s->g);
out_buf += pixelstride;
}
} else {
- pixel = bytestream_get_byte(&in_buf);
+ pixel = bytestream2_get_byte(&s->g);
while (count--) {
*out_buf = pixel;
@@ -78,85 +79,73 @@ static int expand_rle_row(const uint8_t *in_buf, const uint8_t* in_end,
/**
* Read a run length encoded SGI image.
* @param out_buf output buffer
- * @param in_buf input buffer
- * @param in_end end of input buffer
* @param s the current image state
* @return 0 if no error, else return error number.
*/
-static int read_rle_sgi(unsigned char* out_buf, const uint8_t *in_buf,
- const uint8_t *in_end, SgiState* s)
+static int read_rle_sgi(uint8_t *out_buf, SgiState *s)
{
uint8_t *dest_row;
unsigned int len = s->height * s->depth * 4;
- const uint8_t *start_table = in_buf;
+ GetByteContext g_table = s->g;
unsigned int y, z;
unsigned int start_offset;
/* size of RLE offset and length tables */
- if(len * 2 > in_end - in_buf) {
+ if (len * 2 > bytestream2_get_bytes_left(&s->g)) {
return AVERROR_INVALIDDATA;
}
- in_buf -= SGI_HEADER_SIZE;
for (z = 0; z < s->depth; z++) {
dest_row = out_buf;
for (y = 0; y < s->height; y++) {
dest_row -= s->linesize;
- start_offset = bytestream_get_be32(&start_table);
- if(start_offset > in_end - in_buf) {
+ start_offset = bytestream2_get_be32(&g_table);
+ bytestream2_seek(&s->g, start_offset, SEEK_SET);
+ if (expand_rle_row(s, dest_row + z, dest_row + FFABS(s->linesize),
+ s->depth) != s->width) {
return AVERROR_INVALIDDATA;
}
- if (expand_rle_row(in_buf + start_offset, in_end, dest_row + z,
- dest_row + FFABS(s->linesize), s->depth) != s->width)
- return AVERROR_INVALIDDATA;
}
}
return 0;
}
-static av_always_inline void copy_loop(uint8_t *out_buf, const uint8_t *in_buf,
- unsigned offset, unsigned bytes_per_channel,
- SgiState *s)
-{
- int x, y, z;
- for (y = s->height - 1; y >= 0; y--) {
- uint8_t *line = out_buf + (y * s->linesize);
- for (x = s->width; x > 0; x--) {
- const uint8_t *ptr = in_buf;
- in_buf += bytes_per_channel;
- for(z = 0; z < s->depth; z ++) {
- memcpy(line, ptr, bytes_per_channel);
- line += bytes_per_channel;
- ptr += offset;
- }
- }
- }
-}
-
/**
* Read an uncompressed SGI image.
* @param out_buf output buffer
* @param out_end end of output buffer
- * @param in_buf input buffer
- * @param in_end end of input buffer
* @param s the current image state
* @return 0 if read success, otherwise return -1.
*/
static int read_uncompressed_sgi(unsigned char* out_buf, uint8_t* out_end,
- const uint8_t *in_buf, const uint8_t *in_end, SgiState* s)
+ SgiState *s)
{
+ int x, y, z;
unsigned int offset = s->height * s->width * s->bytes_per_channel;
+ GetByteContext gp[4];
/* Test buffer size. */
- if (offset * s->depth > in_end - in_buf) {
- return -1;
+ if (offset * s->depth > bytestream2_get_bytes_left(&s->g))
+ return AVERROR_INVALIDDATA;
+
+ /* Create a reader for each plane */
+ for (z = 0; z < s->depth; z++) {
+ gp[z] = s->g;
+ bytestream2_skip(&gp[z], z * offset);
}
- if (s->bytes_per_channel == 2) {
- copy_loop(out_buf, in_buf, offset, 2, s);
- } else {
- av_assert1(s->bytes_per_channel == 1);
- copy_loop(out_buf, in_buf, offset, 1, s);
+ for (y = s->height - 1; y >= 0; y--) {
+ out_end = out_buf + (y * s->linesize);
+ if (s->bytes_per_channel == 1) {
+ for (x = s->width; x > 0; x--)
+ for (z = 0; z < s->depth; z++)
+ *out_end++ = bytestream2_get_byteu(&gp[z]);
+ } else {
+ uint16_t *out16 = (uint16_t *)out_end;
+ for (x = s->width; x > 0; x--)
+ for (z = 0; z < s->depth; z++)
+ *out16++ = bytestream2_get_ne16u(&gp[z]);
+ }
}
return 0;
}
@@ -165,33 +154,31 @@ static int decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
AVPacket *avpkt)
{
- const uint8_t *in_buf = avpkt->data;
- int buf_size = avpkt->size;
SgiState *s = avctx->priv_data;
AVFrame *picture = data;
AVFrame *p = &s->picture;
- const uint8_t *in_end = in_buf + buf_size;
unsigned int dimension, rle;
int ret = 0;
uint8_t *out_buf, *out_end;
- if (buf_size < SGI_HEADER_SIZE){
- av_log(avctx, AV_LOG_ERROR, "buf_size too small (%d)\n", buf_size);
- return -1;
+ bytestream2_init(&s->g, avpkt->data, avpkt->size);
+ if (bytestream2_get_bytes_left(&s->g) < SGI_HEADER_SIZE) {
+ av_log(avctx, AV_LOG_ERROR, "buf_size too small (%d)\n", avpkt->size);
+ return AVERROR_INVALIDDATA;
}
/* Test for SGI magic. */
- if (bytestream_get_be16(&in_buf) != SGI_MAGIC) {
+ if (bytestream2_get_be16(&s->g) != SGI_MAGIC) {
av_log(avctx, AV_LOG_ERROR, "bad magic number\n");
- return -1;
+ return AVERROR_INVALIDDATA;
}
- rle = bytestream_get_byte(&in_buf);
- s->bytes_per_channel = bytestream_get_byte(&in_buf);
- dimension = bytestream_get_be16(&in_buf);
- s->width = bytestream_get_be16(&in_buf);
- s->height = bytestream_get_be16(&in_buf);
- s->depth = bytestream_get_be16(&in_buf);
+ rle = bytestream2_get_byte(&s->g);
+ s->bytes_per_channel = bytestream2_get_byte(&s->g);
+ dimension = bytestream2_get_be16(&s->g);
+ s->width = bytestream2_get_be16(&s->g);
+ s->height = bytestream2_get_be16(&s->g);
+ s->depth = bytestream2_get_be16(&s->g);
if (s->bytes_per_channel != 1 && (s->bytes_per_channel != 2 || rle)) {
av_log(avctx, AV_LOG_ERROR, "wrong channel number\n");
@@ -237,19 +224,19 @@ static int decode_frame(AVCodecContext *avctx,
s->linesize = p->linesize[0];
/* Skip header. */
- in_buf += SGI_HEADER_SIZE - 12;
+ bytestream2_seek(&s->g, SGI_HEADER_SIZE, SEEK_SET);
if (rle) {
- ret = read_rle_sgi(out_end, in_buf, in_end, s);
+ ret = read_rle_sgi(out_end, s);
} else {
- ret = read_uncompressed_sgi(out_buf, out_end, in_buf, in_end, s);
+ ret = read_uncompressed_sgi(out_buf, out_end, s);
}
if (ret == 0) {
*picture = s->picture;
*data_size = sizeof(AVPicture);
- return buf_size;
+ return avpkt->size;
} else {
- return -1;
+ return ret;
}
}
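
The sgidec rewrite above is the substance of the "prevent buffer overreads" change: the raw in_buf/in_end pointer arithmetic is replaced by a GetByteContext kept in SgiState, so every header field and RLE byte goes through a bounds-checked accessor and a short packet returns AVERROR_INVALIDDATA instead of reading past the end of the buffer. A minimal sketch of that pattern, assuming the FFmpeg bytestream.h API; the struct and function name are hypothetical, but the field order follows the header reads visible in the diff:

#include "bytestream.h"

typedef struct DemoSgiHeader {
    GetByteContext g;
    unsigned int width, height, depth;
} DemoSgiHeader;

static int demo_parse_header(DemoSgiHeader *s, const uint8_t *buf, int buf_size)
{
    bytestream2_init(&s->g, buf, buf_size);

    /* Reject packets that cannot even hold the fields read below. */
    if (bytestream2_get_bytes_left(&s->g) < 12)
        return AVERROR_INVALIDDATA;

    if (bytestream2_get_be16(&s->g) != 474)  /* SGI_MAGIC */
        return AVERROR_INVALIDDATA;

    bytestream2_skip(&s->g, 4);              /* rle flag, bytes/channel, dimension */
    s->width  = bytestream2_get_be16(&s->g);
    s->height = bytestream2_get_be16(&s->g);
    s->depth  = bytestream2_get_be16(&s->g);

    /* Subsequent reads either use the checked bytestream2_get_byte(),
     * which stops at the end of the buffer, or an explicit
     * bytestream2_get_bytes_left() test followed by a *u variant,
     * as expand_rle_row() does above. */
    return 0;
}
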
diff --git a/libavcodec/targa.c b/libavcodec/targa.c
index 4ab560d960..57a4fee22b 100644
--- a/libavcodec/targa.c
+++ b/libavcodec/targa.c
@@ -178,24 +178,45 @@ static int decode_frame(AVCodecContext *avctx,
}
if(colors){
- size_t pal_size;
+ int pal_size, pal_sample_size;
if((colors + first_clr) > 256){
av_log(avctx, AV_LOG_ERROR, "Incorrect palette: %i colors with offset %i\n", colors, first_clr);
return -1;
}
- if(csize != 24){
+ switch (csize) {
+ case 24: pal_sample_size = 3; break;
+ case 16:
+ case 15: pal_sample_size = 2; break;
+ default:
av_log(avctx, AV_LOG_ERROR, "Palette entry size %i bits is not supported\n", csize);
return -1;
}
- pal_size = colors * ((csize + 1) >> 3);
+ pal_size = colors * pal_sample_size;
CHECK_BUFFER_SIZE(buf, buf_end, pal_size, "color table");
if(avctx->pix_fmt != PIX_FMT_PAL8)//should not occur but skip palette anyway
buf += pal_size;
else{
int t;
- int32_t *pal = ((int32_t*)p->data[1]) + first_clr;
- for(t = 0; t < colors; t++){
- *pal++ = (0xff<<24) | bytestream_get_le24(&buf);
+ uint32_t *pal = ((uint32_t *)p->data[1]) + first_clr;
+
+ switch (pal_sample_size) {
+ case 3:
+ /* RGB24 */
+ for (t = 0; t < colors; t++)
+ *pal++ = (0xffU<<24) | bytestream_get_le24(&buf);
+ break;
+ case 2:
+ /* RGB555 */
+ for (t = 0; t < colors; t++) {
+ uint32_t v = bytestream_get_le16(&buf);
+ v = ((v & 0x7C00) << 9) |
+ ((v & 0x03E0) << 6) |
+ ((v & 0x001F) << 3);
+ /* left bit replication */
+ v |= (v & 0xE0E0E0U) >> 5;
+ *pal++ = (0xffU<<24) | v;
+ }
+ break;
}
p->palette_has_changed = 1;
}
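
The new 15/16-bit palette path in targa.c converts each RGB555 entry into an opaque 0xAARRGGBB value: every 5-bit component is shifted into the top bits of its 8-bit slot and the missing low 3 bits are filled by replicating the component's high bits, so 0x0000 stays black and 0x7FFF expands to full white (0xFFFFFF) rather than 0xF8F8F8. A standalone sketch of that conversion, using the same bit math as the diff (the helper name is hypothetical):

#include <stdint.h>

static uint32_t rgb555_to_argb(uint16_t v)
{
    uint32_t c = ((v & 0x7C00) << 9) |  /* R: bits 14..10 -> 23..19 */
                 ((v & 0x03E0) << 6) |  /* G: bits  9..5  -> 15..11 */
                 ((v & 0x001F) << 3);   /* B: bits  4..0  ->  7..3  */
    c |= (c & 0xE0E0E0U) >> 5;          /* replicate each top 3 bits into the low 3 */
    return 0xFF000000U | c;             /* force opaque alpha */
}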