From 652d81b2c2201922f0fef2bbd171299ce3798d12 Mon Sep 17 00:00:00 2001 From: Shitiz Garg Date: Thu, 15 Dec 2011 22:20:21 +0000 Subject: cljr: Check if width and height are positive integers Width and height might get passed as 0 and would cause floating point exceptions in decode_frame. Fixes bugzilla #149 Signed-off-by: Janne Grunau --- libavcodec/cljr.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/libavcodec/cljr.c b/libavcodec/cljr.c index a5ee73864c..f40d4d17ce 100644 --- a/libavcodec/cljr.c +++ b/libavcodec/cljr.c @@ -57,6 +57,11 @@ static int decode_frame(AVCodecContext *avctx, if (p->data[0]) avctx->release_buffer(avctx, p); + if (avctx->height <= 0 || avctx->width <= 0) { + av_log(avctx, AV_LOG_ERROR, "Invalid width or height\n"); + return AVERROR_INVALIDDATA; + } + if (buf_size / avctx->height < avctx->width) { av_log(avctx, AV_LOG_ERROR, "Resolution larger than buffer size. Invalid header?\n"); -- cgit v1.2.3 From 2e7905eee8d0f8813e703cacdd7b3ffdc4960656 Mon Sep 17 00:00:00 2001 From: "Paul B. Mahol" Date: Sat, 17 Dec 2011 17:58:06 +0000 Subject: cljr: fix buf_size sanity check Signed-off-by: Janne Grunau --- libavcodec/cljr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libavcodec/cljr.c b/libavcodec/cljr.c index f40d4d17ce..cf307bb06a 100644 --- a/libavcodec/cljr.c +++ b/libavcodec/cljr.c @@ -62,7 +62,7 @@ static int decode_frame(AVCodecContext *avctx, return AVERROR_INVALIDDATA; } - if (buf_size / avctx->height < avctx->width) { + if (buf_size < avctx->height * avctx->width) { av_log(avctx, AV_LOG_ERROR, "Resolution larger than buffer size. Invalid header?\n"); return AVERROR_INVALIDDATA; -- cgit v1.2.3 From 87eebb3454ff0cd6af6ebf9e1d31bdfd1c3b601b Mon Sep 17 00:00:00 2001 From: Janne Grunau Date: Sun, 18 Dec 2011 22:42:36 +0100 Subject: h264: skip start code search if the size of the nal unit is known Start code emulation prevention is only required in Annex B bytestream packed NAL units. For other coding formats the size is already known. Looking for a start code prefix can result in false positives like in http://streams.videolan.org/streams/mp4/Mr_MrsSmith-h264_aac.mp4 which has a false positive in the SPS. 
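To make the intent concrete, here is a minimal standalone sketch (not the actual ff_h264_decode_nal code) of how the end of a NAL unit is found in the two packagings: with a length prefix (avcC/MP4 style) the size is already known and the whole buffer is the NAL unit, while an Annex B bytestream has to be scanned for the next 00 00 01 start code prefix, a scan that is only reliable because encoders insert 00 00 03 emulation prevention bytes into escaped bytestreams. A buffer that is not escaped that way (as in the MP4 sample above) can contain byte patterns that look like a start code, hence the false positive.

    #include <stddef.h>
    #include <stdint.h>

    /* Illustration only: number of bytes belonging to the current NAL unit. */
    static size_t nal_payload_size(const uint8_t *buf, size_t len, int size_known)
    {
        size_t i;

        if (size_known)      /* length-prefixed: the whole buffer is the NAL unit */
            return len;

        /* Annex B: stop at the next 00 00 01 start code prefix, if any. */
        for (i = 0; i + 2 < len; i++)
            if (buf[i] == 0 && buf[i + 1] == 0 && buf[i + 2] == 1)
                return i;

        return len;          /* no further start code: NAL unit runs to the end */
    }

Passing the known size down (the nalsize_known parameter in the patch below) lets the decoder skip the scan entirely for length-prefixed input.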
--- libavcodec/h264.c | 11 +++++++++-- libavcodec/h264.h | 5 ++++- libavcodec/h264_parser.c | 2 +- 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/libavcodec/h264.c b/libavcodec/h264.c index 77acd7168f..5d4ce90cae 100644 --- a/libavcodec/h264.c +++ b/libavcodec/h264.c @@ -137,7 +137,10 @@ int ff_h264_check_intra_pred_mode(H264Context *h, int mode){ return mode; } -const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src, int *dst_length, int *consumed, int length){ +const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src, + int *dst_length, int *consumed, int length, + int nalsize_known) +{ int i, si, di; uint8_t *dst; int bufidx; @@ -148,6 +151,9 @@ const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src, int *dst_l src++; length--; + if (nalsize_known) { + i = length; + } else #if HAVE_FAST_UNALIGNED # if HAVE_FAST_64BIT # define RS 7 @@ -3789,7 +3795,8 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){ hx = h->thread_context[context_count]; - ptr= ff_h264_decode_nal(hx, buf + buf_index, &dst_length, &consumed, next_avc - buf_index); + ptr= ff_h264_decode_nal(hx, buf + buf_index, &dst_length, &consumed, + next_avc - buf_index, !!nalsize); if (ptr==NULL || dst_length < 0){ return -1; } diff --git a/libavcodec/h264.h b/libavcodec/h264.h index 50255389fa..24da4f5eac 100644 --- a/libavcodec/h264.h +++ b/libavcodec/h264.h @@ -610,9 +610,12 @@ int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length); * @param consumed is the number of bytes used as input * @param length is the length of the array * @param dst_length is the number of decoded bytes FIXME here or a decode rbsp tailing? + * @param nalsize_known skip start code search if the size of the nalu is known * @return decoded bytes, might be src+1 if no escapes */ -const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src, int *dst_length, int *consumed, int length); +const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src, + int *dst_length, int *consumed, int length, + int nalsize_known); /** * Free any data that may have been allocated in the H264 context like SPS, PPS etc. diff --git a/libavcodec/h264_parser.c b/libavcodec/h264_parser.c index 826c17a0f1..1967eface3 100644 --- a/libavcodec/h264_parser.c +++ b/libavcodec/h264_parser.c @@ -144,7 +144,7 @@ static inline int parse_nal_units(AVCodecParserContext *s, src_length = 20; break; } - ptr= ff_h264_decode_nal(h, buf, &dst_length, &consumed, src_length); + ptr= ff_h264_decode_nal(h, buf, &dst_length, &consumed, src_length, 0); if (ptr==NULL || dst_length < 0) break; -- cgit v1.2.3 From 7b3894bee9c9b970ff0b9eec9645c466e20e38a1 Mon Sep 17 00:00:00 2001 From: Nathan Adil Maxson Date: Sun, 18 Dec 2011 08:11:27 -0800 Subject: swscale: fix formatting and indentation of unscaled conversion routines. Signed-off-by: Ronald S. 
Bultje --- libswscale/swscale_unscaled.c | 710 +++++++++++++++++++++++------------------- 1 file changed, 383 insertions(+), 327 deletions(-) diff --git a/libswscale/swscale_unscaled.c b/libswscale/swscale_unscaled.c index 3dc2929a5a..34b0f246f1 100644 --- a/libswscale/swscale_unscaled.c +++ b/libswscale/swscale_unscaled.c @@ -77,21 +77,22 @@ DECLARE_ALIGNED(8, const uint8_t, dither_8x8_256)[8][8] = { }; #define RGB2YUV_SHIFT 15 -#define BY ( (int)(0.114*219/255*(1<srcW, dstParam[0], dstStride[0]); if (c->dstFormat == PIX_FMT_NV12) - interleaveBytes(src[1], src[2], dst, c->srcW/2, srcSliceH/2, srcStride[1], srcStride[2], dstStride[0]); + interleaveBytes(src[1], src[2], dst, c->srcW / 2, srcSliceH / 2, + srcStride[1], srcStride[2], dstStride[0]); else - interleaveBytes(src[2], src[1], dst, c->srcW/2, srcSliceH/2, srcStride[2], srcStride[1], dstStride[0]); + interleaveBytes(src[2], src[1], dst, c->srcW / 2, srcSliceH / 2, + srcStride[2], srcStride[1], dstStride[0]); return srcSliceH; } -static int planarToYuy2Wrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY, - int srcSliceH, uint8_t* dstParam[], int dstStride[]) +static int planarToYuy2Wrapper(SwsContext *c, const uint8_t *src[], + int srcStride[], int srcSliceY, int srcSliceH, + uint8_t *dstParam[], int dstStride[]) { - uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY; + uint8_t *dst = dstParam[0] + dstStride[0] * srcSliceY; - yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]); + yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], + srcStride[1], dstStride[0]); return srcSliceH; } -static int planarToUyvyWrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY, - int srcSliceH, uint8_t* dstParam[], int dstStride[]) +static int planarToUyvyWrapper(SwsContext *c, const uint8_t *src[], + int srcStride[], int srcSliceY, int srcSliceH, + uint8_t *dstParam[], int dstStride[]) { - uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY; + uint8_t *dst = dstParam[0] + dstStride[0] * srcSliceY; - yv12touyvy(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]); + yv12touyvy(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], + srcStride[1], dstStride[0]); return srcSliceH; } -static int yuv422pToYuy2Wrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY, - int srcSliceH, uint8_t* dstParam[], int dstStride[]) +static int yuv422pToYuy2Wrapper(SwsContext *c, const uint8_t *src[], + int srcStride[], int srcSliceY, int srcSliceH, + uint8_t *dstParam[], int dstStride[]) { - uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY; + uint8_t *dst = dstParam[0] + dstStride[0] * srcSliceY; - yuv422ptoyuy2(src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0]); + yuv422ptoyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], + srcStride[1], dstStride[0]); return srcSliceH; } -static int yuv422pToUyvyWrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY, - int srcSliceH, uint8_t* dstParam[], int dstStride[]) +static int yuv422pToUyvyWrapper(SwsContext *c, const uint8_t *src[], + int srcStride[], int srcSliceY, int srcSliceH, + uint8_t *dstParam[], int dstStride[]) { - uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY; + uint8_t *dst = dstParam[0] + dstStride[0] * srcSliceY; - yuv422ptouyvy(src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0]); + yuv422ptouyvy(src[0], src[1], src[2], dst, c->srcW, 
srcSliceH, srcStride[0], + srcStride[1], dstStride[0]); return srcSliceH; } -static int yuyvToYuv420Wrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY, - int srcSliceH, uint8_t* dstParam[], int dstStride[]) +static int yuyvToYuv420Wrapper(SwsContext *c, const uint8_t *src[], + int srcStride[], int srcSliceY, int srcSliceH, + uint8_t *dstParam[], int dstStride[]) { - uint8_t *ydst=dstParam[0] + dstStride[0]*srcSliceY; - uint8_t *udst=dstParam[1] + dstStride[1]*srcSliceY/2; - uint8_t *vdst=dstParam[2] + dstStride[2]*srcSliceY/2; + uint8_t *ydst = dstParam[0] + dstStride[0] * srcSliceY; + uint8_t *udst = dstParam[1] + dstStride[1] * srcSliceY / 2; + uint8_t *vdst = dstParam[2] + dstStride[2] * srcSliceY / 2; - yuyvtoyuv420(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0], dstStride[1], srcStride[0]); + yuyvtoyuv420(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0], + dstStride[1], srcStride[0]); if (dstParam[3]) fillPlane(dstParam[3], dstStride[3], c->srcW, srcSliceH, srcSliceY, 255); @@ -185,26 +200,30 @@ static int yuyvToYuv420Wrapper(SwsContext *c, const uint8_t* src[], int srcStrid return srcSliceH; } -static int yuyvToYuv422Wrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY, - int srcSliceH, uint8_t* dstParam[], int dstStride[]) +static int yuyvToYuv422Wrapper(SwsContext *c, const uint8_t *src[], + int srcStride[], int srcSliceY, int srcSliceH, + uint8_t *dstParam[], int dstStride[]) { - uint8_t *ydst=dstParam[0] + dstStride[0]*srcSliceY; - uint8_t *udst=dstParam[1] + dstStride[1]*srcSliceY; - uint8_t *vdst=dstParam[2] + dstStride[2]*srcSliceY; + uint8_t *ydst = dstParam[0] + dstStride[0] * srcSliceY; + uint8_t *udst = dstParam[1] + dstStride[1] * srcSliceY; + uint8_t *vdst = dstParam[2] + dstStride[2] * srcSliceY; - yuyvtoyuv422(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0], dstStride[1], srcStride[0]); + yuyvtoyuv422(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0], + dstStride[1], srcStride[0]); return srcSliceH; } -static int uyvyToYuv420Wrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY, - int srcSliceH, uint8_t* dstParam[], int dstStride[]) +static int uyvyToYuv420Wrapper(SwsContext *c, const uint8_t *src[], + int srcStride[], int srcSliceY, int srcSliceH, + uint8_t *dstParam[], int dstStride[]) { - uint8_t *ydst=dstParam[0] + dstStride[0]*srcSliceY; - uint8_t *udst=dstParam[1] + dstStride[1]*srcSliceY/2; - uint8_t *vdst=dstParam[2] + dstStride[2]*srcSliceY/2; + uint8_t *ydst = dstParam[0] + dstStride[0] * srcSliceY; + uint8_t *udst = dstParam[1] + dstStride[1] * srcSliceY / 2; + uint8_t *vdst = dstParam[2] + dstStride[2] * srcSliceY / 2; - uyvytoyuv420(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0], dstStride[1], srcStride[0]); + uyvytoyuv420(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0], + dstStride[1], srcStride[0]); if (dstParam[3]) fillPlane(dstParam[3], dstStride[3], c->srcW, srcSliceH, srcSliceY, 255); @@ -212,55 +231,60 @@ static int uyvyToYuv420Wrapper(SwsContext *c, const uint8_t* src[], int srcStrid return srcSliceH; } -static int uyvyToYuv422Wrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY, - int srcSliceH, uint8_t* dstParam[], int dstStride[]) +static int uyvyToYuv422Wrapper(SwsContext *c, const uint8_t *src[], + int srcStride[], int srcSliceY, int srcSliceH, + uint8_t *dstParam[], int dstStride[]) { - uint8_t *ydst=dstParam[0] + dstStride[0]*srcSliceY; - uint8_t *udst=dstParam[1] + 
dstStride[1]*srcSliceY; - uint8_t *vdst=dstParam[2] + dstStride[2]*srcSliceY; + uint8_t *ydst = dstParam[0] + dstStride[0] * srcSliceY; + uint8_t *udst = dstParam[1] + dstStride[1] * srcSliceY; + uint8_t *vdst = dstParam[2] + dstStride[2] * srcSliceY; - uyvytoyuv422(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0], dstStride[1], srcStride[0]); + uyvytoyuv422(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0], + dstStride[1], srcStride[0]); return srcSliceH; } -static void gray8aToPacked32(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette) +static void gray8aToPacked32(const uint8_t *src, uint8_t *dst, int num_pixels, + const uint8_t *palette) { int i; - for (i=0; i> 1; int dststr = dstStride[0] >> 1; - uint16_t *dstPtr = (uint16_t *)dst[0]; - const uint16_t *srcPtr = (const uint16_t *)src[0]; + uint16_t *dstPtr = (uint16_t *) dst[0]; + const uint16_t *srcPtr = (const uint16_t *) src[0]; for (i = 0; i < srcSliceH; i++) { for (j = 0; j < srcstr; j++) { @@ -273,16 +297,17 @@ static int packed_16bpc_bswap(SwsContext *c, const uint8_t* src[], return srcSliceH; } -static int palToRgbWrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY, - int srcSliceH, uint8_t* dst[], int dstStride[]) +static int palToRgbWrapper(SwsContext *c, const uint8_t *src[], int srcStride[], + int srcSliceY, int srcSliceH, uint8_t *dst[], + int dstStride[]) { - const enum PixelFormat srcFormat= c->srcFormat; - const enum PixelFormat dstFormat= c->dstFormat; + const enum PixelFormat srcFormat = c->srcFormat; + const enum PixelFormat dstFormat = c->dstFormat; void (*conv)(const uint8_t *src, uint8_t *dst, int num_pixels, - const uint8_t *palette)=NULL; + const uint8_t *palette) = NULL; int i; - uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY; - const uint8_t *srcPtr= src[0]; + uint8_t *dstPtr = dst[0] + dstStride[0] * srcSliceY; + const uint8_t *srcPtr = src[0]; if (srcFormat == PIX_FMT_Y400A) { switch (dstFormat) { @@ -308,10 +333,10 @@ static int palToRgbWrapper(SwsContext *c, const uint8_t* src[], int srcStride[], av_log(c, AV_LOG_ERROR, "internal error %s -> %s converter\n", sws_format_name(srcFormat), sws_format_name(dstFormat)); else { - for (i=0; isrcW, (uint8_t *) c->pal_rgb); - srcPtr+= srcStride[0]; - dstPtr+= dstStride[0]; + srcPtr += srcStride[0]; + dstPtr += dstStride[0]; } } @@ -326,16 +351,17 @@ static int palToRgbWrapper(SwsContext *c, const uint8_t* src[], int srcStride[], ) /* {RGB,BGR}{15,16,24,32,32_1} -> {RGB,BGR}{15,16,24,32} */ -static int rgbToRgbWrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY, - int srcSliceH, uint8_t* dst[], int dstStride[]) +static int rgbToRgbWrapper(SwsContext *c, const uint8_t *src[], int srcStride[], + int srcSliceY, int srcSliceH, uint8_t *dst[], + int dstStride[]) { - const enum PixelFormat srcFormat= c->srcFormat; - const enum PixelFormat dstFormat= c->dstFormat; - const int srcBpp= (c->srcFormatBpp + 7) >> 3; - const int dstBpp= (c->dstFormatBpp + 7) >> 3; - const int srcId= c->srcFormatBpp >> 2; /* 1:0, 4:1, 8:2, 15:3, 16:4, 24:6, 32:8 */ - const int dstId= c->dstFormatBpp >> 2; - void (*conv)(const uint8_t *src, uint8_t *dst, int src_size)=NULL; + const enum PixelFormat srcFormat = c->srcFormat; + const enum PixelFormat dstFormat = c->dstFormat; + const int srcBpp = (c->srcFormatBpp + 7) >> 3; + const int dstBpp = (c->dstFormatBpp + 7) >> 3; + const int srcId = c->srcFormatBpp >> 2; /* 1:0, 4:1, 8:2, 15:3, 16:4, 24:6, 32:8 */ + const int dstId = c->dstFormatBpp >> 2; + void 
(*conv)(const uint8_t *src, uint8_t *dst, int src_size) = NULL; #define CONV_IS(src, dst) (srcFormat == PIX_FMT_##src && dstFormat == PIX_FMT_##dst) @@ -354,40 +380,40 @@ static int rgbToRgbWrapper(SwsContext *c, const uint8_t* src[], int srcStride[], || CONV_IS(RGBA, ARGB)) conv = shuffle_bytes_3012; } else /* BGR -> BGR */ - if ( (isBGRinInt(srcFormat) && isBGRinInt(dstFormat)) - || (isRGBinInt(srcFormat) && isRGBinInt(dstFormat))) { - switch(srcId | (dstId<<4)) { - case 0x34: conv= rgb16to15; break; - case 0x36: conv= rgb24to15; break; - case 0x38: conv= rgb32to15; break; - case 0x43: conv= rgb15to16; break; - case 0x46: conv= rgb24to16; break; - case 0x48: conv= rgb32to16; break; - case 0x63: conv= rgb15to24; break; - case 0x64: conv= rgb16to24; break; - case 0x68: conv= rgb32to24; break; - case 0x83: conv= rgb15to32; break; - case 0x84: conv= rgb16to32; break; - case 0x86: conv= rgb24to32; break; + if ((isBGRinInt(srcFormat) && isBGRinInt(dstFormat)) || + (isRGBinInt(srcFormat) && isRGBinInt(dstFormat))) { + switch (srcId | (dstId << 4)) { + case 0x34: conv = rgb16to15; break; + case 0x36: conv = rgb24to15; break; + case 0x38: conv = rgb32to15; break; + case 0x43: conv = rgb15to16; break; + case 0x46: conv = rgb24to16; break; + case 0x48: conv = rgb32to16; break; + case 0x63: conv = rgb15to24; break; + case 0x64: conv = rgb16to24; break; + case 0x68: conv = rgb32to24; break; + case 0x83: conv = rgb15to32; break; + case 0x84: conv = rgb16to32; break; + case 0x86: conv = rgb24to32; break; } - } else if ( (isBGRinInt(srcFormat) && isRGBinInt(dstFormat)) - || (isRGBinInt(srcFormat) && isBGRinInt(dstFormat))) { - switch(srcId | (dstId<<4)) { - case 0x33: conv= rgb15tobgr15; break; - case 0x34: conv= rgb16tobgr15; break; - case 0x36: conv= rgb24tobgr15; break; - case 0x38: conv= rgb32tobgr15; break; - case 0x43: conv= rgb15tobgr16; break; - case 0x44: conv= rgb16tobgr16; break; - case 0x46: conv= rgb24tobgr16; break; - case 0x48: conv= rgb32tobgr16; break; - case 0x63: conv= rgb15tobgr24; break; - case 0x64: conv= rgb16tobgr24; break; - case 0x66: conv= rgb24tobgr24; break; - case 0x68: conv= rgb32tobgr24; break; - case 0x83: conv= rgb15tobgr32; break; - case 0x84: conv= rgb16tobgr32; break; - case 0x86: conv= rgb24tobgr32; break; + } else if ((isBGRinInt(srcFormat) && isRGBinInt(dstFormat)) || + (isRGBinInt(srcFormat) && isBGRinInt(dstFormat))) { + switch (srcId | (dstId << 4)) { + case 0x33: conv = rgb15tobgr15; break; + case 0x34: conv = rgb16tobgr15; break; + case 0x36: conv = rgb24tobgr15; break; + case 0x38: conv = rgb32tobgr15; break; + case 0x43: conv = rgb15tobgr16; break; + case 0x44: conv = rgb16tobgr16; break; + case 0x46: conv = rgb24tobgr16; break; + case 0x48: conv = rgb32tobgr16; break; + case 0x63: conv = rgb15tobgr24; break; + case 0x64: conv = rgb16tobgr24; break; + case 0x66: conv = rgb24tobgr24; break; + case 0x68: conv = rgb32tobgr24; break; + case 0x83: conv = rgb15tobgr32; break; + case 0x84: conv = rgb16tobgr32; break; + case 0x86: conv = rgb24tobgr32; break; } } @@ -395,38 +421,43 @@ static int rgbToRgbWrapper(SwsContext *c, const uint8_t* src[], int srcStride[], av_log(c, AV_LOG_ERROR, "internal error %s -> %s converter\n", sws_format_name(srcFormat), sws_format_name(dstFormat)); } else { - const uint8_t *srcPtr= src[0]; - uint8_t *dstPtr= dst[0]; - if ((srcFormat == PIX_FMT_RGB32_1 || srcFormat == PIX_FMT_BGR32_1) && !isRGBA32(dstFormat)) + const uint8_t *srcPtr = src[0]; + uint8_t *dstPtr = dst[0]; + if ((srcFormat == PIX_FMT_RGB32_1 || srcFormat == 
PIX_FMT_BGR32_1) && + !isRGBA32(dstFormat)) srcPtr += ALT32_CORR; - if ((dstFormat == PIX_FMT_RGB32_1 || dstFormat == PIX_FMT_BGR32_1) && !isRGBA32(srcFormat)) + if ((dstFormat == PIX_FMT_RGB32_1 || dstFormat == PIX_FMT_BGR32_1) && + !isRGBA32(srcFormat)) dstPtr += ALT32_CORR; - if (dstStride[0]*srcBpp == srcStride[0]*dstBpp && srcStride[0] > 0 && !(srcStride[0] % srcBpp)) - conv(srcPtr, dstPtr + dstStride[0]*srcSliceY, srcSliceH*srcStride[0]); + if (dstStride[0] * srcBpp == srcStride[0] * dstBpp && srcStride[0] > 0 && + !(srcStride[0] % srcBpp)) + conv(srcPtr, dstPtr + dstStride[0] * srcSliceY, + srcSliceH * srcStride[0]); else { int i; - dstPtr += dstStride[0]*srcSliceY; + dstPtr += dstStride[0] * srcSliceY; - for (i=0; isrcW*srcBpp); - srcPtr+= srcStride[0]; - dstPtr+= dstStride[0]; + for (i = 0; i < srcSliceH; i++) { + conv(srcPtr, dstPtr, c->srcW * srcBpp); + srcPtr += srcStride[0]; + dstPtr += dstStride[0]; } } } return srcSliceH; } -static int bgr24ToYv12Wrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY, - int srcSliceH, uint8_t* dst[], int dstStride[]) +static int bgr24ToYv12Wrapper(SwsContext *c, const uint8_t *src[], + int srcStride[], int srcSliceY, int srcSliceH, + uint8_t *dst[], int dstStride[]) { rgb24toyv12( src[0], - dst[0]+ srcSliceY *dstStride[0], - dst[1]+(srcSliceY>>1)*dstStride[1], - dst[2]+(srcSliceY>>1)*dstStride[2], + dst[0] + srcSliceY * dstStride[0], + dst[1] + (srcSliceY >> 1) * dstStride[1], + dst[2] + (srcSliceY >> 1) * dstStride[2], c->srcW, srcSliceH, dstStride[0], dstStride[1], srcStride[0]); if (dst[3]) @@ -434,15 +465,16 @@ static int bgr24ToYv12Wrapper(SwsContext *c, const uint8_t* src[], int srcStride return srcSliceH; } -static int yvu9ToYv12Wrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY, - int srcSliceH, uint8_t* dst[], int dstStride[]) +static int yvu9ToYv12Wrapper(SwsContext *c, const uint8_t *src[], + int srcStride[], int srcSliceY, int srcSliceH, + uint8_t *dst[], int dstStride[]) { copyPlane(src[0], srcStride[0], srcSliceY, srcSliceH, c->srcW, dst[0], dstStride[0]); - planar2x(src[1], dst[1] + dstStride[1]*(srcSliceY >> 1), c->chrSrcW, + planar2x(src[1], dst[1] + dstStride[1] * (srcSliceY >> 1), c->chrSrcW, srcSliceH >> 2, srcStride[1], dstStride[1]); - planar2x(src[2], dst[2] + dstStride[2]*(srcSliceY >> 1), c->chrSrcW, + planar2x(src[2], dst[2] + dstStride[2] * (srcSliceY >> 1), c->chrSrcW, srcSliceH >> 2, srcStride[2], dstStride[2]); if (dst[3]) fillPlane(dst[3], dstStride[3], c->srcW, srcSliceH, srcSliceY, 255); @@ -450,26 +482,28 @@ static int yvu9ToYv12Wrapper(SwsContext *c, const uint8_t* src[], int srcStride[ } /* unscaled copy like stuff (assumes nearly identical formats) */ -static int packedCopyWrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY, - int srcSliceH, uint8_t* dst[], int dstStride[]) +static int packedCopyWrapper(SwsContext *c, const uint8_t *src[], + int srcStride[], int srcSliceY, int srcSliceH, + uint8_t *dst[], int dstStride[]) { - if (dstStride[0]==srcStride[0] && srcStride[0] > 0) - memcpy(dst[0] + dstStride[0]*srcSliceY, src[0], srcSliceH*dstStride[0]); + if (dstStride[0] == srcStride[0] && srcStride[0] > 0) + memcpy(dst[0] + dstStride[0] * srcSliceY, src[0], srcSliceH * dstStride[0]); else { int i; - const uint8_t *srcPtr= src[0]; - uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY; - int length=0; + const uint8_t *srcPtr = src[0]; + uint8_t *dstPtr = dst[0] + dstStride[0] * srcSliceY; + int length = 0; /* universal length finder */ - 
while(length+c->srcW <= FFABS(dstStride[0]) - && length+c->srcW <= FFABS(srcStride[0])) length+= c->srcW; - assert(length!=0); + while (length + c->srcW <= FFABS(dstStride[0]) && + length + c->srcW <= FFABS(srcStride[0])) + length += c->srcW; + assert(length != 0); - for (i=0; isrcW : -((-c->srcW )>>c->chrDstHSubSample); - int y= (plane==0 || plane==3) ? srcSliceY: -((-srcSliceY)>>c->chrDstVSubSample); - int height= (plane==0 || plane==3) ? srcSliceH: -((-srcSliceH)>>c->chrDstVSubSample); - const uint8_t *srcPtr= src[plane]; - uint8_t *dstPtr= dst[plane] + dstStride[plane]*y; - - if (!dst[plane]) continue; + for (plane = 0; plane < 4; plane++) { + int length = (plane == 0 || plane == 3) ? c->srcW : -((-c->srcW ) >> c->chrDstHSubSample); + int y = (plane == 0 || plane == 3) ? srcSliceY: -((-srcSliceY) >> c->chrDstVSubSample); + int height = (plane == 0 || plane == 3) ? srcSliceH: -((-srcSliceH) >> c->chrDstVSubSample); + const uint8_t *srcPtr = src[plane]; + uint8_t *dstPtr = dst[plane] + dstStride[plane] * y; + + if (!dst[plane]) + continue; // ignore palette for GRAY8 if (plane == 1 && !dst[2]) continue; if (!src[plane] || (plane == 1 && !src[2])) { - if(is16BPS(c->dstFormat)) - length*=2; - fillPlane(dst[plane], dstStride[plane], length, height, y, (plane==3) ? 255 : 128); + if (is16BPS(c->dstFormat)) + length *= 2; + fillPlane(dst[plane], dstStride[plane], length, height, y, + (plane == 3) ? 255 : 128); } else { - if(is9_OR_10BPS(c->srcFormat)) { - const int src_depth = av_pix_fmt_descriptors[c->srcFormat].comp[plane].depth_minus1+1; - const int dst_depth = av_pix_fmt_descriptors[c->dstFormat].comp[plane].depth_minus1+1; - const uint16_t *srcPtr2 = (const uint16_t*)srcPtr; + if (is9_OR_10BPS(c->srcFormat)) { + const int src_depth = av_pix_fmt_descriptors[c->srcFormat].comp[plane].depth_minus1 + 1; + const int dst_depth = av_pix_fmt_descriptors[c->dstFormat].comp[plane].depth_minus1 + 1; + const uint16_t *srcPtr2 = (const uint16_t *) srcPtr; if (is16BPS(c->dstFormat)) { - uint16_t *dstPtr2 = (uint16_t*)dstPtr; + uint16_t *dstPtr2 = (uint16_t *) dstPtr; #define COPY9_OR_10TO16(rfunc, wfunc) \ for (i = 0; i < height; i++) { \ for (j = 0; j < length; j++) { \ int srcpx = rfunc(&srcPtr2[j]); \ - wfunc(&dstPtr2[j], (srcpx<<(16-src_depth)) | (srcpx>>(2*src_depth-16))); \ + wfunc(&dstPtr2[j], (srcpx << (16 - src_depth)) | (srcpx >> (2 * src_depth - 16))); \ } \ - dstPtr2 += dstStride[plane]/2; \ - srcPtr2 += srcStride[plane]/2; \ + dstPtr2 += dstStride[plane] / 2; \ + srcPtr2 += srcStride[plane] / 2; \ } if (isBE(c->dstFormat)) { if (isBE(c->srcFormat)) { @@ -545,22 +582,22 @@ static int planarCopyWrapper(SwsContext *c, const uint8_t* src[], int srcStride[ } } } else if (is9_OR_10BPS(c->dstFormat)) { - uint16_t *dstPtr2 = (uint16_t*)dstPtr; + uint16_t *dstPtr2 = (uint16_t *) dstPtr; #define COPY9_OR_10TO9_OR_10(loop) \ for (i = 0; i < height; i++) { \ for (j = 0; j < length; j++) { \ loop; \ } \ - dstPtr2 += dstStride[plane]/2; \ - srcPtr2 += srcStride[plane]/2; \ + dstPtr2 += dstStride[plane] / 2; \ + srcPtr2 += srcStride[plane] / 2; \ } #define COPY9_OR_10TO9_OR_10_2(rfunc, wfunc) \ if (dst_depth > src_depth) { \ COPY9_OR_10TO9_OR_10(int srcpx = rfunc(&srcPtr2[j]); \ wfunc(&dstPtr2[j], (srcpx << 1) | (srcpx >> 9))); \ } else if (dst_depth < src_depth) { \ - DITHER_COPY(dstPtr2, dstStride[plane]/2, wfunc, \ - srcPtr2, srcStride[plane]/2, rfunc, \ + DITHER_COPY(dstPtr2, dstStride[plane] / 2, wfunc, \ + srcPtr2, srcStride[plane] / 2, rfunc, \ dither_8x8_1, 1, clip9); \ } else { \ 
COPY9_OR_10TO9_OR_10(wfunc(&dstPtr2[j], rfunc(&srcPtr2[j]))); \ @@ -583,11 +620,11 @@ static int planarCopyWrapper(SwsContext *c, const uint8_t* src[], int srcStride[ #define COPY9_OR_10TO8(rfunc) \ if (src_depth == 9) { \ DITHER_COPY(dstPtr, dstStride[plane], W8, \ - srcPtr2, srcStride[plane]/2, rfunc, \ + srcPtr2, srcStride[plane] / 2, rfunc, \ dither_8x8_1, 1, av_clip_uint8); \ } else { \ DITHER_COPY(dstPtr, dstStride[plane], W8, \ - srcPtr2, srcStride[plane]/2, rfunc, \ + srcPtr2, srcStride[plane] / 2, rfunc, \ dither_8x8_3, 2, av_clip_uint8); \ } if (isBE(c->srcFormat)) { @@ -596,20 +633,20 @@ static int planarCopyWrapper(SwsContext *c, const uint8_t* src[], int srcStride[ COPY9_OR_10TO8(AV_RL16); } } - } else if(is9_OR_10BPS(c->dstFormat)) { - const int dst_depth = av_pix_fmt_descriptors[c->dstFormat].comp[plane].depth_minus1+1; - uint16_t *dstPtr2 = (uint16_t*)dstPtr; + } else if (is9_OR_10BPS(c->dstFormat)) { + const int dst_depth = av_pix_fmt_descriptors[c->dstFormat].comp[plane].depth_minus1 + 1; + uint16_t *dstPtr2 = (uint16_t *) dstPtr; if (is16BPS(c->srcFormat)) { - const uint16_t *srcPtr2 = (const uint16_t*)srcPtr; + const uint16_t *srcPtr2 = (const uint16_t *) srcPtr; #define COPY16TO9_OR_10(rfunc, wfunc) \ if (dst_depth == 9) { \ - DITHER_COPY(dstPtr2, dstStride[plane]/2, wfunc, \ - srcPtr2, srcStride[plane]/2, rfunc, \ + DITHER_COPY(dstPtr2, dstStride[plane] / 2, wfunc, \ + srcPtr2, srcStride[plane] / 2, rfunc, \ dither_8x8_128, 7, clip9); \ } else { \ - DITHER_COPY(dstPtr2, dstStride[plane]/2, wfunc, \ - srcPtr2, srcStride[plane]/2, rfunc, \ + DITHER_COPY(dstPtr2, dstStride[plane] / 2, wfunc, \ + srcPtr2, srcStride[plane] / 2, rfunc, \ dither_8x8_64, 6, clip10); \ } if (isBE(c->dstFormat)) { @@ -630,9 +667,9 @@ static int planarCopyWrapper(SwsContext *c, const uint8_t* src[], int srcStride[ for (i = 0; i < height; i++) { \ for (j = 0; j < length; j++) { \ const int srcpx = srcPtr[j]; \ - wfunc(&dstPtr2[j], (srcpx<<(dst_depth-8)) | (srcpx >> (16-dst_depth))); \ + wfunc(&dstPtr2[j], (srcpx << (dst_depth - 8)) | (srcpx >> (16 - dst_depth))); \ } \ - dstPtr2 += dstStride[plane]/2; \ + dstPtr2 += dstStride[plane] / 2; \ srcPtr += srcStride[plane]; \ } if (isBE(c->dstFormat)) { @@ -641,46 +678,46 @@ static int planarCopyWrapper(SwsContext *c, const uint8_t* src[], int srcStride[ COPY8TO9_OR_10(AV_WL16); } } - } else if(is16BPS(c->srcFormat) && !is16BPS(c->dstFormat)) { - const uint16_t *srcPtr2 = (const uint16_t*)srcPtr; + } else if (is16BPS(c->srcFormat) && !is16BPS(c->dstFormat)) { + const uint16_t *srcPtr2 = (const uint16_t *) srcPtr; #define COPY16TO8(rfunc) \ DITHER_COPY(dstPtr, dstStride[plane], W8, \ - srcPtr2, srcStride[plane]/2, rfunc, \ + srcPtr2, srcStride[plane] / 2, rfunc, \ dither_8x8_256, 8, av_clip_uint8); if (isBE(c->srcFormat)) { COPY16TO8(AV_RB16); } else { COPY16TO8(AV_RL16); } - } else if(!is16BPS(c->srcFormat) && is16BPS(c->dstFormat)) { - for (i=0; isrcFormat) && is16BPS(c->dstFormat)) { + for (i = 0; i < height; i++) { + for (j = 0; j < length; j++) { + dstPtr[ j << 1 ] = srcPtr[j]; + dstPtr[(j << 1) + 1] = srcPtr[j]; } - srcPtr+= srcStride[plane]; - dstPtr+= dstStride[plane]; + srcPtr += srcStride[plane]; + dstPtr += dstStride[plane]; } - } else if(is16BPS(c->srcFormat) && is16BPS(c->dstFormat) - && isBE(c->srcFormat) != isBE(c->dstFormat)) { - - for (i=0; isrcFormat) && is16BPS(c->dstFormat) && + isBE(c->srcFormat) != isBE(c->dstFormat)) { + + for (i = 0; i < height; i++) { + for (j = 0; j < length; j++) + ((uint16_t *) dstPtr)[j] = av_bswap16(((const 
uint16_t *) srcPtr)[j]); + srcPtr += srcStride[plane]; + dstPtr += dstStride[plane]; } } else if (dstStride[plane] == srcStride[plane] && srcStride[plane] > 0 && srcStride[plane] == length) { - memcpy(dst[plane] + dstStride[plane]*y, src[plane], - height*dstStride[plane]); + memcpy(dst[plane] + dstStride[plane] * y, src[plane], + height * dstStride[plane]); } else { - if(is16BPS(c->srcFormat) && is16BPS(c->dstFormat)) - length*=2; - for (i=0; isrcFormat) && is16BPS(c->dstFormat)) + length *= 2; + for (i = 0; i < height; i++) { memcpy(dstPtr, srcPtr, length); - srcPtr+= srcStride[plane]; - dstPtr+= dstStride[plane]; + srcPtr += srcStride[plane]; + dstPtr += dstStride[plane]; } } } @@ -702,27 +739,33 @@ void ff_get_unscaled_swscale(SwsContext *c) const int dstH = c->dstH; int needsDither; - needsDither= isAnyRGB(dstFormat) - && c->dstFormatBpp < 24 - && (c->dstFormatBpp < c->srcFormatBpp || (!isAnyRGB(srcFormat))); + needsDither = isAnyRGB(dstFormat) && + c->dstFormatBpp < 24 && + (c->dstFormatBpp < c->srcFormatBpp || (!isAnyRGB(srcFormat))); /* yv12_to_nv12 */ - if ((srcFormat == PIX_FMT_YUV420P || srcFormat == PIX_FMT_YUVA420P) && (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21)) { - c->swScale= planarToNv12Wrapper; + if ((srcFormat == PIX_FMT_YUV420P || srcFormat == PIX_FMT_YUVA420P) && + (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21)) { + c->swScale = planarToNv12Wrapper; } /* yuv2bgr */ - if ((srcFormat==PIX_FMT_YUV420P || srcFormat==PIX_FMT_YUV422P || srcFormat==PIX_FMT_YUVA420P) && isAnyRGB(dstFormat) - && !(flags & SWS_ACCURATE_RND) && !(dstH&1)) { - c->swScale= ff_yuv2rgb_get_func_ptr(c); + if ((srcFormat == PIX_FMT_YUV420P || srcFormat == PIX_FMT_YUV422P || + srcFormat == PIX_FMT_YUVA420P) && isAnyRGB(dstFormat) && + !(flags & SWS_ACCURATE_RND) && !(dstH & 1)) { + c->swScale = ff_yuv2rgb_get_func_ptr(c); } - if (srcFormat==PIX_FMT_YUV410P && (dstFormat==PIX_FMT_YUV420P || dstFormat==PIX_FMT_YUVA420P) && !(flags & SWS_BITEXACT)) { - c->swScale= yvu9ToYv12Wrapper; + if (srcFormat == PIX_FMT_YUV410P && + (dstFormat == PIX_FMT_YUV420P || dstFormat == PIX_FMT_YUVA420P) && + !(flags & SWS_BITEXACT)) { + c->swScale = yvu9ToYv12Wrapper; } /* bgr24toYV12 */ - if (srcFormat==PIX_FMT_BGR24 && (dstFormat==PIX_FMT_YUV420P || dstFormat==PIX_FMT_YUVA420P) && !(flags & SWS_ACCURATE_RND)) - c->swScale= bgr24ToYv12Wrapper; + if (srcFormat == PIX_FMT_BGR24 && + (dstFormat == PIX_FMT_YUV420P || dstFormat == PIX_FMT_YUVA420P) && + !(flags & SWS_ACCURATE_RND)) + c->swScale = bgr24ToYv12Wrapper; /* RGB/BGR -> RGB/BGR (no dither needed forms) */ if ( isAnyRGB(srcFormat) @@ -761,13 +804,13 @@ void ff_get_unscaled_swscale(SwsContext *c) dstFormat == PIX_FMT_BGR32 || dstFormat == PIX_FMT_BGR32_1 || dstFormat == PIX_FMT_BGR24))) - c->swScale= palToRgbWrapper; + c->swScale = palToRgbWrapper; if (srcFormat == PIX_FMT_YUV422P) { if (dstFormat == PIX_FMT_YUYV422) - c->swScale= yuv422pToYuy2Wrapper; + c->swScale = yuv422pToYuy2Wrapper; else if (dstFormat == PIX_FMT_UYVY422) - c->swScale= yuv422pToUyvyWrapper; + c->swScale = yuv422pToUyvyWrapper; } /* LQ converters if -sws 0 or -sws 4*/ @@ -775,37 +818,39 @@ void ff_get_unscaled_swscale(SwsContext *c) /* yv12_to_yuy2 */ if (srcFormat == PIX_FMT_YUV420P || srcFormat == PIX_FMT_YUVA420P) { if (dstFormat == PIX_FMT_YUYV422) - c->swScale= planarToYuy2Wrapper; + c->swScale = planarToYuy2Wrapper; else if (dstFormat == PIX_FMT_UYVY422) - c->swScale= planarToUyvyWrapper; + c->swScale = planarToUyvyWrapper; } } - if(srcFormat == PIX_FMT_YUYV422 && 
(dstFormat == PIX_FMT_YUV420P || dstFormat == PIX_FMT_YUVA420P)) - c->swScale= yuyvToYuv420Wrapper; - if(srcFormat == PIX_FMT_UYVY422 && (dstFormat == PIX_FMT_YUV420P || dstFormat == PIX_FMT_YUVA420P)) - c->swScale= uyvyToYuv420Wrapper; - if(srcFormat == PIX_FMT_YUYV422 && dstFormat == PIX_FMT_YUV422P) - c->swScale= yuyvToYuv422Wrapper; - if(srcFormat == PIX_FMT_UYVY422 && dstFormat == PIX_FMT_YUV422P) - c->swScale= uyvyToYuv422Wrapper; + if (srcFormat == PIX_FMT_YUYV422 && + (dstFormat == PIX_FMT_YUV420P || dstFormat == PIX_FMT_YUVA420P)) + c->swScale = yuyvToYuv420Wrapper; + if (srcFormat == PIX_FMT_UYVY422 && + (dstFormat == PIX_FMT_YUV420P || dstFormat == PIX_FMT_YUVA420P)) + c->swScale = uyvyToYuv420Wrapper; + if (srcFormat == PIX_FMT_YUYV422 && dstFormat == PIX_FMT_YUV422P) + c->swScale = yuyvToYuv422Wrapper; + if (srcFormat == PIX_FMT_UYVY422 && dstFormat == PIX_FMT_YUV422P) + c->swScale = uyvyToYuv422Wrapper; /* simple copy */ - if ( srcFormat == dstFormat - || (srcFormat == PIX_FMT_YUVA420P && dstFormat == PIX_FMT_YUV420P) - || (srcFormat == PIX_FMT_YUV420P && dstFormat == PIX_FMT_YUVA420P) - || (isPlanarYUV(srcFormat) && isGray(dstFormat)) - || (isPlanarYUV(dstFormat) && isGray(srcFormat)) - || (isGray(dstFormat) && isGray(srcFormat)) - || (isPlanarYUV(srcFormat) && isPlanarYUV(dstFormat) - && c->chrDstHSubSample == c->chrSrcHSubSample - && c->chrDstVSubSample == c->chrSrcVSubSample - && dstFormat != PIX_FMT_NV12 && dstFormat != PIX_FMT_NV21 - && srcFormat != PIX_FMT_NV12 && srcFormat != PIX_FMT_NV21)) + if ( srcFormat == dstFormat || + (srcFormat == PIX_FMT_YUVA420P && dstFormat == PIX_FMT_YUV420P) || + (srcFormat == PIX_FMT_YUV420P && dstFormat == PIX_FMT_YUVA420P) || + (isPlanarYUV(srcFormat) && isGray(dstFormat)) || + (isPlanarYUV(dstFormat) && isGray(srcFormat)) || + (isGray(dstFormat) && isGray(srcFormat)) || + (isPlanarYUV(srcFormat) && isPlanarYUV(dstFormat) && + c->chrDstHSubSample == c->chrSrcHSubSample && + c->chrDstVSubSample == c->chrSrcVSubSample && + dstFormat != PIX_FMT_NV12 && dstFormat != PIX_FMT_NV21 && + srcFormat != PIX_FMT_NV12 && srcFormat != PIX_FMT_NV21)) { if (isPacked(c->srcFormat)) - c->swScale= packedCopyWrapper; + c->swScale = packedCopyWrapper; else /* Planar YUV or gray */ - c->swScale= planarCopyWrapper; + c->swScale = planarCopyWrapper; } if (ARCH_BFIN) @@ -814,15 +859,15 @@ void ff_get_unscaled_swscale(SwsContext *c) ff_swscale_get_unscaled_altivec(c); } -static void reset_ptr(const uint8_t* src[], int format) +static void reset_ptr(const uint8_t *src[], int format) { - if(!isALPHA(format)) - src[3]=NULL; + if (!isALPHA(format)) + src[3] = NULL; if (!isPlanar(format)) { - src[3]=src[2]=NULL; + src[3] = src[2] = NULL; if (!usePal(format)) - src[1]= NULL; + src[1] = NULL; } } @@ -845,13 +890,15 @@ static int check_image_pointers(uint8_t *data[4], enum PixelFormat pix_fmt, * swscale wrapper, so we don't need to export the SwsContext. * Assumes planar YUV to be in YUV order instead of YVU. 
*/ -int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t* const srcSlice[], - const int srcStride[], int srcSliceY, int srcSliceH, - uint8_t* const dst[], const int dstStride[]) +int attribute_align_arg sws_scale(struct SwsContext *c, + const uint8_t * const srcSlice[], + const int srcStride[], int srcSliceY, + int srcSliceH, uint8_t *const dst[], + const int dstStride[]) { int i; - const uint8_t* src2[4]= {srcSlice[0], srcSlice[1], srcSlice[2], srcSlice[3]}; - uint8_t* dst2[4]= {dst[0], dst[1], dst[2], dst[3]}; + const uint8_t *src2[4] = { srcSlice[0], srcSlice[1], srcSlice[2], srcSlice[3] }; + uint8_t *dst2[4] = { dst[0], dst[1], dst[2], dst[3] }; // do not mess up sliceDir if we have a "trailing" 0-size slice if (srcSliceH == 0) @@ -875,63 +922,64 @@ int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t* const src } if (usePal(c->srcFormat)) { - for (i=0; i<256; i++) { - int p, r, g, b,y,u,v; - if(c->srcFormat == PIX_FMT_PAL8) { - p=((const uint32_t*)(srcSlice[1]))[i]; - r= (p>>16)&0xFF; - g= (p>> 8)&0xFF; - b= p &0xFF; - } else if(c->srcFormat == PIX_FMT_RGB8) { - r= (i>>5 )*36; - g= ((i>>2)&7)*36; - b= (i&3 )*85; - } else if(c->srcFormat == PIX_FMT_BGR8) { - b= (i>>6 )*85; - g= ((i>>3)&7)*36; - r= (i&7 )*36; - } else if(c->srcFormat == PIX_FMT_RGB4_BYTE) { - r= (i>>3 )*255; - g= ((i>>1)&3)*85; - b= (i&1 )*255; - } else if(c->srcFormat == PIX_FMT_GRAY8 || c->srcFormat == PIX_FMT_Y400A) { + for (i = 0; i < 256; i++) { + int p, r, g, b, y, u, v; + if (c->srcFormat == PIX_FMT_PAL8) { + p = ((const uint32_t *)(srcSlice[1]))[i]; + r = (p >> 16) & 0xFF; + g = (p >> 8) & 0xFF; + b = p & 0xFF; + } else if (c->srcFormat == PIX_FMT_RGB8) { + r = ( i >> 5 ) * 36; + g = ((i >> 2) & 7) * 36; + b = ( i & 3) * 85; + } else if (c->srcFormat == PIX_FMT_BGR8) { + b = ( i >> 6 ) * 85; + g = ((i >> 3) & 7) * 36; + r = ( i & 7) * 36; + } else if (c->srcFormat == PIX_FMT_RGB4_BYTE) { + r = ( i >> 3 ) * 255; + g = ((i >> 1) & 3) * 85; + b = ( i & 1) * 255; + } else if (c->srcFormat == PIX_FMT_GRAY8 || + c->srcFormat == PIX_FMT_Y400A) { r = g = b = i; } else { assert(c->srcFormat == PIX_FMT_BGR4_BYTE); - b= (i>>3 )*255; - g= ((i>>1)&3)*85; - r= (i&1 )*255; + b = ( i >> 3 ) * 255; + g = ((i >> 1) & 3) * 85; + r = ( i & 1) * 255; } - y= av_clip_uint8((RY*r + GY*g + BY*b + ( 33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT); - u= av_clip_uint8((RU*r + GU*g + BU*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT); - v= av_clip_uint8((RV*r + GV*g + BV*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT); - c->pal_yuv[i]= y + (u<<8) + (v<<16); + y = av_clip_uint8((RY * r + GY * g + BY * b + ( 33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT); + u = av_clip_uint8((RU * r + GU * g + BU * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT); + v = av_clip_uint8((RV * r + GV * g + BV * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT); + c->pal_yuv[i] = y + (u << 8) + (v << 16); - switch(c->dstFormat) { + switch (c->dstFormat) { case PIX_FMT_BGR32: #if !HAVE_BIGENDIAN case PIX_FMT_RGB24: #endif - c->pal_rgb[i]= r + (g<<8) + (b<<16); + c->pal_rgb[i] = r + (g << 8) + (b << 16); break; case PIX_FMT_BGR32_1: #if HAVE_BIGENDIAN case PIX_FMT_BGR24: #endif - c->pal_rgb[i]= (r + (g<<8) + (b<<16)) << 8; + c->pal_rgb[i] = (r + (g << 8) + (b << 16)) << 8; break; case PIX_FMT_RGB32_1: #if HAVE_BIGENDIAN case PIX_FMT_RGB24: #endif - c->pal_rgb[i]= (b + (g<<8) + (r<<16)) << 8; + c->pal_rgb[i] = (b + (g << 8) + (r << 16)) << 8; break; case PIX_FMT_RGB32: #if !HAVE_BIGENDIAN case PIX_FMT_BGR24: #endif default: - c->pal_rgb[i]= 
b + (g<<8) + (r<<16); + c->pal_rgb[i] = b + (g << 8) + (r << 16); } } } @@ -939,62 +987,70 @@ int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t* const src // copy strides, so they can safely be modified if (c->sliceDir == 1) { // slices go from top to bottom - int srcStride2[4]= {srcStride[0], srcStride[1], srcStride[2], srcStride[3]}; - int dstStride2[4]= {dstStride[0], dstStride[1], dstStride[2], dstStride[3]}; + int srcStride2[4] = { srcStride[0], srcStride[1], srcStride[2], + srcStride[3] }; + int dstStride2[4] = { dstStride[0], dstStride[1], dstStride[2], + dstStride[3] }; reset_ptr(src2, c->srcFormat); - reset_ptr((const uint8_t**)dst2, c->dstFormat); + reset_ptr((const uint8_t **) dst2, c->dstFormat); /* reset slice direction at end of frame */ if (srcSliceY + srcSliceH == c->srcH) c->sliceDir = 0; - return c->swScale(c, src2, srcStride2, srcSliceY, srcSliceH, dst2, dstStride2); + return c->swScale(c, src2, srcStride2, srcSliceY, srcSliceH, dst2, + dstStride2); } else { // slices go from bottom to top => we flip the image internally - int srcStride2[4]= {-srcStride[0], -srcStride[1], -srcStride[2], -srcStride[3]}; - int dstStride2[4]= {-dstStride[0], -dstStride[1], -dstStride[2], -dstStride[3]}; + int srcStride2[4] = { -srcStride[0], -srcStride[1], -srcStride[2], + -srcStride[3] }; + int dstStride2[4] = { -dstStride[0], -dstStride[1], -dstStride[2], + -dstStride[3] }; - src2[0] += (srcSliceH-1)*srcStride[0]; + src2[0] += (srcSliceH - 1) * srcStride[0]; if (!usePal(c->srcFormat)) - src2[1] += ((srcSliceH>>c->chrSrcVSubSample)-1)*srcStride[1]; - src2[2] += ((srcSliceH>>c->chrSrcVSubSample)-1)*srcStride[2]; - src2[3] += (srcSliceH-1)*srcStride[3]; - dst2[0] += ( c->dstH -1)*dstStride[0]; - dst2[1] += ((c->dstH>>c->chrDstVSubSample)-1)*dstStride[1]; - dst2[2] += ((c->dstH>>c->chrDstVSubSample)-1)*dstStride[2]; - dst2[3] += ( c->dstH -1)*dstStride[3]; + src2[1] += ((srcSliceH >> c->chrSrcVSubSample) - 1) * srcStride[1]; + src2[2] += ((srcSliceH >> c->chrSrcVSubSample) - 1) * srcStride[2]; + src2[3] += (srcSliceH - 1) * srcStride[3]; + dst2[0] += ( c->dstH - 1) * dstStride[0]; + dst2[1] += ((c->dstH >> c->chrDstVSubSample) - 1) * dstStride[1]; + dst2[2] += ((c->dstH >> c->chrDstVSubSample) - 1) * dstStride[2]; + dst2[3] += ( c->dstH - 1) * dstStride[3]; reset_ptr(src2, c->srcFormat); - reset_ptr((const uint8_t**)dst2, c->dstFormat); + reset_ptr((const uint8_t **) dst2, c->dstFormat); /* reset slice direction at end of frame */ if (!srcSliceY) c->sliceDir = 0; - return c->swScale(c, src2, srcStride2, c->srcH-srcSliceY-srcSliceH, srcSliceH, dst2, dstStride2); + return c->swScale(c, src2, srcStride2, c->srcH-srcSliceY-srcSliceH, + srcSliceH, dst2, dstStride2); } } /* Convert the palette to the same packed 32-bit format as the palette */ -void sws_convertPalette8ToPacked32(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette) +void sws_convertPalette8ToPacked32(const uint8_t *src, uint8_t *dst, + int num_pixels, const uint8_t *palette) { int i; - for (i=0; i dst format: ABC */ -void sws_convertPalette8ToPacked24(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette) +void sws_convertPalette8ToPacked24(const uint8_t *src, uint8_t *dst, + int num_pixels, const uint8_t *palette) { int i; - for (i=0; i Date: Mon, 19 Dec 2011 03:00:36 +0100 Subject: Revert "h264: skip start code search if the size of the nal unit is known" This reverts commit 87eebb3454ff0cd6af6ebf9e1d31bdfd1c3b601b. 
--- libavcodec/h264.c | 11 ++--------- libavcodec/h264.h | 5 +---- libavcodec/h264_parser.c | 2 +- 3 files changed, 4 insertions(+), 14 deletions(-) diff --git a/libavcodec/h264.c b/libavcodec/h264.c index 5d4ce90cae..77acd7168f 100644 --- a/libavcodec/h264.c +++ b/libavcodec/h264.c @@ -137,10 +137,7 @@ int ff_h264_check_intra_pred_mode(H264Context *h, int mode){ return mode; } -const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src, - int *dst_length, int *consumed, int length, - int nalsize_known) -{ +const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src, int *dst_length, int *consumed, int length){ int i, si, di; uint8_t *dst; int bufidx; @@ -151,9 +148,6 @@ const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src, src++; length--; - if (nalsize_known) { - i = length; - } else #if HAVE_FAST_UNALIGNED # if HAVE_FAST_64BIT # define RS 7 @@ -3795,8 +3789,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){ hx = h->thread_context[context_count]; - ptr= ff_h264_decode_nal(hx, buf + buf_index, &dst_length, &consumed, - next_avc - buf_index, !!nalsize); + ptr= ff_h264_decode_nal(hx, buf + buf_index, &dst_length, &consumed, next_avc - buf_index); if (ptr==NULL || dst_length < 0){ return -1; } diff --git a/libavcodec/h264.h b/libavcodec/h264.h index 24da4f5eac..50255389fa 100644 --- a/libavcodec/h264.h +++ b/libavcodec/h264.h @@ -610,12 +610,9 @@ int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length); * @param consumed is the number of bytes used as input * @param length is the length of the array * @param dst_length is the number of decoded bytes FIXME here or a decode rbsp tailing? - * @param nalsize_known skip start code search if the size of the nalu is known * @return decoded bytes, might be src+1 if no escapes */ -const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src, - int *dst_length, int *consumed, int length, - int nalsize_known); +const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src, int *dst_length, int *consumed, int length); /** * Free any data that may have been allocated in the H264 context like SPS, PPS etc. 
diff --git a/libavcodec/h264_parser.c b/libavcodec/h264_parser.c index 1967eface3..826c17a0f1 100644 --- a/libavcodec/h264_parser.c +++ b/libavcodec/h264_parser.c @@ -144,7 +144,7 @@ static inline int parse_nal_units(AVCodecParserContext *s, src_length = 20; break; } - ptr= ff_h264_decode_nal(h, buf, &dst_length, &consumed, src_length, 0); + ptr= ff_h264_decode_nal(h, buf, &dst_length, &consumed, src_length); if (ptr==NULL || dst_length < 0) break; -- cgit v1.2.3 From 30bbd5cbc00f935df5293ab39261a19e50a38c38 Mon Sep 17 00:00:00 2001 From: Diego Biurrun Date: Sun, 18 Dec 2011 13:13:02 +0100 Subject: x86: conditionally compile dnxhd encoder optimizations --- libavcodec/x86/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libavcodec/x86/Makefile b/libavcodec/x86/Makefile index 6759a228c4..aa97942dba 100644 --- a/libavcodec/x86/Makefile +++ b/libavcodec/x86/Makefile @@ -31,6 +31,7 @@ YASM-OBJS-$(CONFIG_VC1_DECODER) += x86/vc1dsp_yasm.o MMX-OBJS-$(CONFIG_AC3DSP) += x86/ac3dsp_mmx.o YASM-OBJS-$(CONFIG_AC3DSP) += x86/ac3dsp.o MMX-OBJS-$(CONFIG_CAVS_DECODER) += x86/cavsdsp_mmx.o +MMX-OBJS-$(CONFIG_DNXHD_ENCODER) += x86/dnxhd_mmx.o MMX-OBJS-$(CONFIG_MPEGAUDIODSP) += x86/mpegaudiodec_mmx.o MMX-OBJS-$(CONFIG_ENCODERS) += x86/dsputilenc_mmx.o YASM-OBJS-$(CONFIG_ENCODERS) += x86/dsputilenc_yasm.o @@ -56,8 +57,7 @@ MMX-OBJS-$(HAVE_YASM) += x86/dsputil_yasm.o \ MMX-OBJS-$(CONFIG_FFT) += x86/fft.o -OBJS-$(HAVE_MMX) += x86/dnxhd_mmx.o \ - x86/dsputil_mmx.o \ +OBJS-$(HAVE_MMX) += x86/dsputil_mmx.o \ x86/fdct_mmx.o \ x86/fmtconvert_mmx.o \ x86/idct_mmx_xvid.o \ -- cgit v1.2.3 From 2fd291846b3382bce2a54a48cab2741f313faf0f Mon Sep 17 00:00:00 2001 From: Diego Biurrun Date: Mon, 19 Dec 2011 14:54:58 +0100 Subject: qcelpdec: cosmetics: drop some pointless parentheses --- libavcodec/qcelpdec.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/libavcodec/qcelpdec.c b/libavcodec/qcelpdec.c index 20a0484b42..cd14ebc151 100644 --- a/libavcodec/qcelpdec.c +++ b/libavcodec/qcelpdec.c @@ -122,9 +122,9 @@ static int decode_lspf(QCELPContext *q, float *lspf) const float *predictors; if (q->bitrate == RATE_OCTAVE || q->bitrate == I_F_Q) { - predictors = (q->prev_bitrate != RATE_OCTAVE && - q->prev_bitrate != I_F_Q ? - q->prev_lspf : q->predictor_lspf); + predictors = q->prev_bitrate != RATE_OCTAVE && + q->prev_bitrate != I_F_Q ? q->prev_lspf + : q->predictor_lspf; if (q->bitrate == RATE_OCTAVE) { q->octave_count++; @@ -136,14 +136,14 @@ static int decode_lspf(QCELPContext *q, float *lspf) + predictors[i] * QCELP_LSP_OCTAVE_PREDICTOR + (i + 1) * ((1 - QCELP_LSP_OCTAVE_PREDICTOR)/11); } - smooth = (q->octave_count < 10 ? .875 : 0.1); + smooth = q->octave_count < 10 ? .875 : 0.1; } else { erasure_coeff = QCELP_LSP_OCTAVE_PREDICTOR; assert(q->bitrate == I_F_Q); if(q->erasure_count > 1) - erasure_coeff *= (q->erasure_count < 4 ? 0.9 : 0.7); + erasure_coeff *= q->erasure_count < 4 ? 0.9 : 0.7; for(i = 0; i < 10; i++) { q->predictor_lspf[i] = @@ -156,11 +156,11 @@ static int decode_lspf(QCELPContext *q, float *lspf) // Check the stability of the LSP frequencies. 
lspf[0] = FFMAX(lspf[0], QCELP_LSP_SPREAD_FACTOR); for(i=1; i<10; i++) - lspf[i] = FFMAX(lspf[i], (lspf[i-1] + QCELP_LSP_SPREAD_FACTOR)); + lspf[i] = FFMAX(lspf[i], lspf[i - 1] + QCELP_LSP_SPREAD_FACTOR); - lspf[9] = FFMIN(lspf[9], (1.0 - QCELP_LSP_SPREAD_FACTOR)); + lspf[9] = FFMIN(lspf[9], 1.0 - QCELP_LSP_SPREAD_FACTOR); for(i=9; i>0; i--) - lspf[i-1] = FFMIN(lspf[i-1], (lspf[i] - QCELP_LSP_SPREAD_FACTOR)); + lspf[i - 1] = FFMIN(lspf[i - 1], lspf[i] - QCELP_LSP_SPREAD_FACTOR); // Low-pass filter the LSP frequencies. ff_weighted_vector_sumf(lspf, lspf, q->prev_lspf, smooth, 1.0-smooth, 10); -- cgit v1.2.3 From 25c2d76b68641443e560451205044f95e548d988 Mon Sep 17 00:00:00 2001 From: Asen Lekov Date: Mon, 19 Dec 2011 01:32:54 +0200 Subject: qcelpdec: K&R formatting cosmetics Signed-off-by: Diego Biurrun --- libavcodec/qcelpdec.c | 360 +++++++++++++++++++++++++------------------------- 1 file changed, 177 insertions(+), 183 deletions(-) diff --git a/libavcodec/qcelpdec.c b/libavcodec/qcelpdec.c index cd14ebc151..a3af2378f3 100644 --- a/libavcodec/qcelpdec.c +++ b/libavcodec/qcelpdec.c @@ -44,8 +44,7 @@ #undef NDEBUG #include -typedef enum -{ +typedef enum { I_F_Q = -1, /**< insufficient frame quality */ SILENCE, RATE_OCTAVE, @@ -54,8 +53,7 @@ typedef enum RATE_FULL } qcelp_packet_rate; -typedef struct -{ +typedef struct { AVFrame avframe; GetBitContext gb; qcelp_packet_rate bitrate; @@ -95,8 +93,8 @@ static av_cold int qcelp_decode_init(AVCodecContext *avctx) avctx->sample_fmt = AV_SAMPLE_FMT_FLT; - for(i=0; i<10; i++) - q->prev_lspf[i] = (i+1)/11.; + for (i = 0; i < 10; i++) + q->prev_lspf[i] = (i + 1) / 11.; avcodec_get_frame_defaults(&q->avframe); avctx->coded_frame = &q->avframe; @@ -129,12 +127,12 @@ static int decode_lspf(QCELPContext *q, float *lspf) if (q->bitrate == RATE_OCTAVE) { q->octave_count++; - for (i=0; i<10; i++) { + for (i = 0; i < 10; i++) { q->predictor_lspf[i] = lspf[i] = (q->frame.lspv[i] ? QCELP_LSP_SPREAD_FACTOR - : -QCELP_LSP_SPREAD_FACTOR) - + predictors[i] * QCELP_LSP_OCTAVE_PREDICTOR - + (i + 1) * ((1 - QCELP_LSP_OCTAVE_PREDICTOR)/11); + : -QCELP_LSP_SPREAD_FACTOR) + + predictors[i] * QCELP_LSP_OCTAVE_PREDICTOR + + (i + 1) * ((1 - QCELP_LSP_OCTAVE_PREDICTOR) / 11); } smooth = q->octave_count < 10 ? .875 : 0.1; } else { @@ -142,49 +140,49 @@ static int decode_lspf(QCELPContext *q, float *lspf) assert(q->bitrate == I_F_Q); - if(q->erasure_count > 1) + if (q->erasure_count > 1) erasure_coeff *= q->erasure_count < 4 ? 0.9 : 0.7; - for(i = 0; i < 10; i++) { + for (i = 0; i < 10; i++) { q->predictor_lspf[i] = - lspf[i] = (i + 1) * ( 1 - erasure_coeff)/11 - + erasure_coeff * predictors[i]; + lspf[i] = (i + 1) * (1 - erasure_coeff) / 11 + + erasure_coeff * predictors[i]; } smooth = 0.125; } // Check the stability of the LSP frequencies. lspf[0] = FFMAX(lspf[0], QCELP_LSP_SPREAD_FACTOR); - for(i=1; i<10; i++) + for (i = 1; i < 10; i++) lspf[i] = FFMAX(lspf[i], lspf[i - 1] + QCELP_LSP_SPREAD_FACTOR); lspf[9] = FFMIN(lspf[9], 1.0 - QCELP_LSP_SPREAD_FACTOR); - for(i=9; i>0; i--) + for (i = 9; i > 0; i--) lspf[i - 1] = FFMIN(lspf[i - 1], lspf[i] - QCELP_LSP_SPREAD_FACTOR); // Low-pass filter the LSP frequencies. 
- ff_weighted_vector_sumf(lspf, lspf, q->prev_lspf, smooth, 1.0-smooth, 10); + ff_weighted_vector_sumf(lspf, lspf, q->prev_lspf, smooth, 1.0 - smooth, 10); } else { q->octave_count = 0; tmp_lspf = 0.; for (i = 0; i < 5; i++) { - lspf[2*i+0] = tmp_lspf += qcelp_lspvq[i][q->frame.lspv[i]][0] * 0.0001; - lspf[2*i+1] = tmp_lspf += qcelp_lspvq[i][q->frame.lspv[i]][1] * 0.0001; + lspf[2 * i + 0] = tmp_lspf += qcelp_lspvq[i][q->frame.lspv[i]][0] * 0.0001; + lspf[2 * i + 1] = tmp_lspf += qcelp_lspvq[i][q->frame.lspv[i]][1] * 0.0001; } // Check for badly received packets. if (q->bitrate == RATE_QUARTER) { - if(lspf[9] <= .70 || lspf[9] >= .97) + if (lspf[9] <= .70 || lspf[9] >= .97) return -1; - for(i=3; i<10; i++) - if(fabs(lspf[i] - lspf[i-2]) < .08) + for (i = 3; i < 10; i++) + if (fabs(lspf[i] - lspf[i - 2]) < .08) return -1; } else { - if(lspf[9] <= .66 || lspf[9] >= .985) + if (lspf[9] <= .66 || lspf[9] >= .985) return -1; - for(i=4; i<10; i++) - if (fabs(lspf[i] - lspf[i-4]) < .0931) + for (i = 4; i < 10; i++) + if (fabs(lspf[i] - lspf[i - 4]) < .0931) return -1; } } @@ -199,72 +197,72 @@ static int decode_lspf(QCELPContext *q, float *lspf) * * TIA/EIA/IS-733 2.4.6.2 */ -static void decode_gain_and_index(QCELPContext *q, - float *gain) { - int i, subframes_count, g1[16]; +static void decode_gain_and_index(QCELPContext *q, float *gain) +{ + int i, subframes_count, g1[16]; float slope; if (q->bitrate >= RATE_QUARTER) { switch (q->bitrate) { - case RATE_FULL: subframes_count = 16; break; - case RATE_HALF: subframes_count = 4; break; - default: subframes_count = 5; + case RATE_FULL: subframes_count = 16; break; + case RATE_HALF: subframes_count = 4; break; + default: subframes_count = 5; } - for(i = 0; i < subframes_count; i++) { + for (i = 0; i < subframes_count; i++) { g1[i] = 4 * q->frame.cbgain[i]; - if (q->bitrate == RATE_FULL && !((i+1) & 3)) { - g1[i] += av_clip((g1[i-1] + g1[i-2] + g1[i-3]) / 3 - 6, 0, 32); + if (q->bitrate == RATE_FULL && !((i + 1) & 3)) { + g1[i] += av_clip((g1[i - 1] + g1[i - 2] + g1[i - 3]) / 3 - 6, 0, 32); } gain[i] = qcelp_g12ga[g1[i]]; if (q->frame.cbsign[i]) { gain[i] = -gain[i]; - q->frame.cindex[i] = (q->frame.cindex[i]-89) & 127; + q->frame.cindex[i] = (q->frame.cindex[i] - 89) & 127; } } - q->prev_g1[0] = g1[i-2]; - q->prev_g1[1] = g1[i-1]; - q->last_codebook_gain = qcelp_g12ga[g1[i-1]]; + q->prev_g1[0] = g1[i - 2]; + q->prev_g1[1] = g1[i - 1]; + q->last_codebook_gain = qcelp_g12ga[g1[i - 1]]; if (q->bitrate == RATE_QUARTER) { // Provide smoothing of the unvoiced excitation energy. 
- gain[7] = gain[4]; - gain[6] = 0.4*gain[3] + 0.6*gain[4]; - gain[5] = gain[3]; - gain[4] = 0.8*gain[2] + 0.2*gain[3]; - gain[3] = 0.2*gain[1] + 0.8*gain[2]; - gain[2] = gain[1]; - gain[1] = 0.6*gain[0] + 0.4*gain[1]; + gain[7] = gain[4]; + gain[6] = 0.4 * gain[3] + 0.6 * gain[4]; + gain[5] = gain[3]; + gain[4] = 0.8 * gain[2] + 0.2 * gain[3]; + gain[3] = 0.2 * gain[1] + 0.8 * gain[2]; + gain[2] = gain[1]; + gain[1] = 0.6 * gain[0] + 0.4 * gain[1]; } } else if (q->bitrate != SILENCE) { if (q->bitrate == RATE_OCTAVE) { - g1[0] = 2 * q->frame.cbgain[0] - + av_clip((q->prev_g1[0] + q->prev_g1[1]) / 2 - 5, 0, 54); + g1[0] = 2 * q->frame.cbgain[0] + + av_clip((q->prev_g1[0] + q->prev_g1[1]) / 2 - 5, 0, 54); subframes_count = 8; } else { assert(q->bitrate == I_F_Q); g1[0] = q->prev_g1[1]; switch (q->erasure_count) { - case 1 : break; - case 2 : g1[0] -= 1; break; - case 3 : g1[0] -= 2; break; - default: g1[0] -= 6; + case 1 : break; + case 2 : g1[0] -= 1; break; + case 3 : g1[0] -= 2; break; + default: g1[0] -= 6; } - if(g1[0] < 0) + if (g1[0] < 0) g1[0] = 0; subframes_count = 4; } // This interpolation is done to produce smoother background noise. - slope = 0.5*(qcelp_g12ga[g1[0]] - q->last_codebook_gain) / subframes_count; - for(i=1; i<=subframes_count; i++) - gain[i-1] = q->last_codebook_gain + slope * i; + slope = 0.5 * (qcelp_g12ga[g1[0]] - q->last_codebook_gain) / subframes_count; + for (i = 1; i <= subframes_count; i++) + gain[i - 1] = q->last_codebook_gain + slope * i; - q->last_codebook_gain = gain[i-2]; - q->prev_g1[0] = q->prev_g1[1]; - q->prev_g1[1] = g1[0]; + q->last_codebook_gain = gain[i - 2]; + q->prev_g1[0] = q->prev_g1[1]; + q->prev_g1[1] = g1[0]; } } @@ -279,13 +277,13 @@ static void decode_gain_and_index(QCELPContext *q, */ static int codebook_sanity_check_for_rate_quarter(const uint8_t *cbgain) { - int i, diff, prev_diff=0; + int i, diff, prev_diff = 0; - for(i=1; i<5; i++) { + for (i = 1; i < 5; i++) { diff = cbgain[i] - cbgain[i-1]; - if(FFABS(diff) > 10) + if (FFABS(diff) > 10) return -1; - else if(FFABS(diff - prev_diff) > 12) + else if (FFABS(diff - prev_diff) > 12) return -1; prev_diff = diff; } @@ -316,73 +314,74 @@ static int codebook_sanity_check_for_rate_quarter(const uint8_t *cbgain) static void compute_svector(QCELPContext *q, const float *gain, float *cdn_vector) { - int i, j, k; + int i, j, k; uint16_t cbseed, cindex; - float *rnd, tmp_gain, fir_filter_value; + float *rnd, tmp_gain, fir_filter_value; switch (q->bitrate) { - case RATE_FULL: - for (i = 0; i < 16; i++) { - tmp_gain = gain[i] * QCELP_RATE_FULL_CODEBOOK_RATIO; - cindex = -q->frame.cindex[i]; - for(j=0; j<10; j++) - *cdn_vector++ = tmp_gain * qcelp_rate_full_codebook[cindex++ & 127]; - } + case RATE_FULL: + for (i = 0; i < 16; i++) { + tmp_gain = gain[i] * QCELP_RATE_FULL_CODEBOOK_RATIO; + cindex = -q->frame.cindex[i]; + for (j = 0; j < 10; j++) + *cdn_vector++ = tmp_gain * qcelp_rate_full_codebook[cindex++ & 127]; + } break; - case RATE_HALF: - for (i = 0; i < 4; i++) { - tmp_gain = gain[i] * QCELP_RATE_HALF_CODEBOOK_RATIO; - cindex = -q->frame.cindex[i]; - for (j = 0; j < 40; j++) + case RATE_HALF: + for (i = 0; i < 4; i++) { + tmp_gain = gain[i] * QCELP_RATE_HALF_CODEBOOK_RATIO; + cindex = -q->frame.cindex[i]; + for (j = 0; j < 40; j++) *cdn_vector++ = tmp_gain * qcelp_rate_half_codebook[cindex++ & 127]; - } + } break; - case RATE_QUARTER: - cbseed = (0x0003 & q->frame.lspv[4])<<14 | - (0x003F & q->frame.lspv[3])<< 8 | - (0x0060 & q->frame.lspv[2])<< 1 | - (0x0007 & q->frame.lspv[1])<< 3 | - 
(0x0038 & q->frame.lspv[0])>> 3 ; - rnd = q->rnd_fir_filter_mem + 20; - for (i = 0; i < 8; i++) { - tmp_gain = gain[i] * (QCELP_SQRT1887 / 32768.0); - for (k = 0; k < 20; k++) { - cbseed = 521 * cbseed + 259; - *rnd = (int16_t)cbseed; + case RATE_QUARTER: + cbseed = (0x0003 & q->frame.lspv[4]) << 14 | + (0x003F & q->frame.lspv[3]) << 8 | + (0x0060 & q->frame.lspv[2]) << 1 | + (0x0007 & q->frame.lspv[1]) << 3 | + (0x0038 & q->frame.lspv[0]) >> 3; + rnd = q->rnd_fir_filter_mem + 20; + for (i = 0; i < 8; i++) { + tmp_gain = gain[i] * (QCELP_SQRT1887 / 32768.0); + for (k = 0; k < 20; k++) { + cbseed = 521 * cbseed + 259; + *rnd = (int16_t) cbseed; // FIR filter - fir_filter_value = 0.0; - for(j=0; j<10; j++) - fir_filter_value += qcelp_rnd_fir_coefs[j ] - * (rnd[-j ] + rnd[-20+j]); - - fir_filter_value += qcelp_rnd_fir_coefs[10] * rnd[-10]; - *cdn_vector++ = tmp_gain * fir_filter_value; - rnd++; - } + fir_filter_value = 0.0; + for (j = 0; j < 10; j++) + fir_filter_value += qcelp_rnd_fir_coefs[j] * + (rnd[-j] + rnd[-20+j]); + + fir_filter_value += qcelp_rnd_fir_coefs[10] * rnd[-10]; + *cdn_vector++ = tmp_gain * fir_filter_value; + rnd++; } - memcpy(q->rnd_fir_filter_mem, q->rnd_fir_filter_mem + 160, 20 * sizeof(float)); + } + memcpy(q->rnd_fir_filter_mem, q->rnd_fir_filter_mem + 160, + 20 * sizeof(float)); break; - case RATE_OCTAVE: - cbseed = q->first16bits; - for (i = 0; i < 8; i++) { - tmp_gain = gain[i] * (QCELP_SQRT1887 / 32768.0); - for (j = 0; j < 20; j++) { - cbseed = 521 * cbseed + 259; - *cdn_vector++ = tmp_gain * (int16_t)cbseed; - } + case RATE_OCTAVE: + cbseed = q->first16bits; + for (i = 0; i < 8; i++) { + tmp_gain = gain[i] * (QCELP_SQRT1887 / 32768.0); + for (j = 0; j < 20; j++) { + cbseed = 521 * cbseed + 259; + *cdn_vector++ = tmp_gain * (int16_t) cbseed; } + } break; - case I_F_Q: - cbseed = -44; // random codebook index - for (i = 0; i < 4; i++) { - tmp_gain = gain[i] * QCELP_RATE_FULL_CODEBOOK_RATIO; - for(j=0; j<40; j++) - *cdn_vector++ = tmp_gain * qcelp_rate_full_codebook[cbseed++ & 127]; - } + case I_F_Q: + cbseed = -44; // random codebook index + for (i = 0; i < 4; i++) { + tmp_gain = gain[i] * QCELP_RATE_FULL_CODEBOOK_RATIO; + for (j = 0; j < 40; j++) + *cdn_vector++ = tmp_gain * qcelp_rate_full_codebook[cbseed++ & 127]; + } break; - case SILENCE: - memset(cdn_vector, 0, 160 * sizeof(float)); + case SILENCE: + memset(cdn_vector, 0, 160 * sizeof(float)); break; } } @@ -396,8 +395,7 @@ static void compute_svector(QCELPContext *q, const float *gain, * * TIA/EIA/IS-733 2.4.8.3, 2.4.8.6 */ -static void apply_gain_ctrl(float *v_out, const float *v_ref, - const float *v_in) +static void apply_gain_ctrl(float *v_out, const float *v_ref, const float *v_in) { int i; @@ -429,8 +427,8 @@ static const float *do_pitchfilter(float memory[303], const float v_in[160], const float gain[4], const uint8_t *lag, const uint8_t pfrac[4]) { - int i, j; - float *v_lag, *v_out; + int i, j; + float *v_lag, *v_out; const float *v_len; v_out = memory + 143; // Output vector starts at memory[143]. @@ -440,9 +438,9 @@ static const float *do_pitchfilter(float memory[303], const float v_in[160], v_lag = memory + 143 + 40 * i - lag[i]; for (v_len = v_in + 40; v_in < v_len; v_in++) { if (pfrac[i]) { // If it is a fractional lag... 
- for(j=0, *v_out=0.; j<4; j++) - *v_out += qcelp_hammsinc_table[j] * (v_lag[j-4] + v_lag[3-j]); - }else + for (j = 0, *v_out = 0.; j < 4; j++) + *v_out += qcelp_hammsinc_table[j] * (v_lag[j - 4] + v_lag[3 - j]); + } else *v_out = *v_lag; *v_out = *v_in + gain[i] * *v_out; @@ -470,15 +468,13 @@ static const float *do_pitchfilter(float memory[303], const float v_in[160], */ static void apply_pitch_filters(QCELPContext *q, float *cdn_vector) { - int i; + int i; const float *v_synthesis_filtered, *v_pre_filtered; - if(q->bitrate >= RATE_HALF || - q->bitrate == SILENCE || - (q->bitrate == I_F_Q && (q->prev_bitrate >= RATE_HALF))) { - - if(q->bitrate >= RATE_HALF) { + if (q->bitrate >= RATE_HALF || q->bitrate == SILENCE || + (q->bitrate == I_F_Q && (q->prev_bitrate >= RATE_HALF))) { + if (q->bitrate >= RATE_HALF) { // Compute gain & lag for the whole frame. for (i = 0; i < 4; i++) { q->pitch_gain[i] = q->frame.plag[i] ? (q->frame.pgain[i] + 1) * 0.25 : 0.0; @@ -497,7 +493,7 @@ static void apply_pitch_filters(QCELPContext *q, float *cdn_vector) assert(q->bitrate == SILENCE); max_pitch_gain = 1.0; } - for(i=0; i<4; i++) + for (i = 0; i < 4; i++) q->pitch_gain[i] = FFMIN(q->pitch_gain[i], max_pitch_gain); memset(q->frame.pfrac, 0, sizeof(q->frame.pfrac)); @@ -509,18 +505,17 @@ static void apply_pitch_filters(QCELPContext *q, float *cdn_vector) q->pitch_lag, q->frame.pfrac); // pitch prefilter update - for(i=0; i<4; i++) + for (i = 0; i < 4; i++) q->pitch_gain[i] = 0.5 * FFMIN(q->pitch_gain[i], 1.0); - v_pre_filtered = do_pitchfilter(q->pitch_pre_filter_mem, - v_synthesis_filtered, - q->pitch_gain, q->pitch_lag, - q->frame.pfrac); + v_pre_filtered = do_pitchfilter(q->pitch_pre_filter_mem, + v_synthesis_filtered, + q->pitch_gain, q->pitch_lag, + q->frame.pfrac); apply_gain_ctrl(cdn_vector, v_synthesis_filtered, v_pre_filtered); } else { - memcpy(q->pitch_synthesis_filter_mem, cdn_vector + 17, - 143 * sizeof(float)); + memcpy(q->pitch_synthesis_filter_mem, cdn_vector + 17, 143 * sizeof(float)); memcpy(q->pitch_pre_filter_mem, cdn_vector + 17, 143 * sizeof(float)); memset(q->pitch_gain, 0, sizeof(q->pitch_gain)); memset(q->pitch_lag, 0, sizeof(q->pitch_lag)); @@ -543,15 +538,15 @@ static void lspf2lpc(const float *lspf, float *lpc) { double lsp[10]; double bandwidth_expansion_coeff = QCELP_BANDWIDTH_EXPANSION_COEFF; - int i; + int i; - for (i=0; i<10; i++) + for (i = 0; i < 10; i++) lsp[i] = cos(M_PI * lspf[i]); ff_acelp_lspd2lpc(lsp, lpc, 5); for (i = 0; i < 10; i++) { - lpc[i] *= bandwidth_expansion_coeff; + lpc[i] *= bandwidth_expansion_coeff; bandwidth_expansion_coeff *= QCELP_BANDWIDTH_EXPANSION_COEFF; } } @@ -573,9 +568,9 @@ static void interpolate_lpc(QCELPContext *q, const float *curr_lspf, float interpolated_lspf[10]; float weight; - if(q->bitrate >= RATE_QUARTER) + if (q->bitrate >= RATE_QUARTER) weight = 0.25 * (subframe_num + 1); - else if(q->bitrate == RATE_OCTAVE && !subframe_num) + else if (q->bitrate == RATE_OCTAVE && !subframe_num) weight = 0.625; else weight = 1.0; @@ -584,21 +579,21 @@ static void interpolate_lpc(QCELPContext *q, const float *curr_lspf, ff_weighted_vector_sumf(interpolated_lspf, curr_lspf, q->prev_lspf, weight, 1.0 - weight, 10); lspf2lpc(interpolated_lspf, lpc); - }else if(q->bitrate >= RATE_QUARTER || - (q->bitrate == I_F_Q && !subframe_num)) + } else if (q->bitrate >= RATE_QUARTER || + (q->bitrate == I_F_Q && !subframe_num)) lspf2lpc(curr_lspf, lpc); - else if(q->bitrate == SILENCE && !subframe_num) + else if (q->bitrate == SILENCE && !subframe_num) 
lspf2lpc(q->prev_lspf, lpc); } static qcelp_packet_rate buf_size2bitrate(const int buf_size) { switch (buf_size) { - case 35: return RATE_FULL; - case 17: return RATE_HALF; - case 8: return RATE_QUARTER; - case 4: return RATE_OCTAVE; - case 1: return SILENCE; + case 35: return RATE_FULL; + case 17: return RATE_HALF; + case 8: return RATE_QUARTER; + case 4: return RATE_OCTAVE; + case 1: return SILENCE; } return I_F_Q; @@ -616,8 +611,9 @@ static qcelp_packet_rate buf_size2bitrate(const int buf_size) * * TIA/EIA/IS-733 2.4.8.7.1 */ -static qcelp_packet_rate determine_bitrate(AVCodecContext *avctx, const int buf_size, - const uint8_t **buf) +static qcelp_packet_rate determine_bitrate(AVCodecContext *avctx, + const int buf_size, + const uint8_t **buf) { qcelp_packet_rate bitrate; @@ -639,7 +635,7 @@ static qcelp_packet_rate determine_bitrate(AVCodecContext *avctx, const int buf_ } else if ((bitrate = buf_size2bitrate(buf_size + 1)) >= 0) { av_log(avctx, AV_LOG_WARNING, "Bitrate byte is missing, guessing the bitrate from packet size.\n"); - }else + } else return I_F_Q; if (bitrate == SILENCE) { @@ -652,8 +648,8 @@ static qcelp_packet_rate determine_bitrate(AVCodecContext *avctx, const int buf_ static void warn_insufficient_frame_quality(AVCodecContext *avctx, const char *message) { - av_log(avctx, AV_LOG_WARNING, "Frame #%d, IFQ: %s\n", avctx->frame_number, - message); + av_log(avctx, AV_LOG_WARNING, "Frame #%d, IFQ: %s\n", + avctx->frame_number, message); } static void postfilter(QCELPContext *q, float *samples, float *lpc) @@ -675,23 +671,24 @@ static void postfilter(QCELPContext *q, float *samples, float *lpc) ff_celp_lp_zero_synthesis_filterf(zero_out, lpc_s, q->formant_mem + 10, 160, 10); - memcpy(pole_out, q->postfilter_synth_mem, sizeof(float) * 10); + memcpy(pole_out, q->postfilter_synth_mem, sizeof(float) * 10); ff_celp_lp_synthesis_filterf(pole_out + 10, lpc_p, zero_out, 160, 10); memcpy(q->postfilter_synth_mem, pole_out + 160, sizeof(float) * 10); ff_tilt_compensation(&q->postfilter_tilt_mem, 0.3, pole_out + 10, 160); ff_adaptive_gain_control(samples, pole_out + 10, - ff_dot_productf(q->formant_mem + 10, q->formant_mem + 10, 160), - 160, 0.9375, &q->postfilter_agc_mem); + ff_dot_productf(q->formant_mem + 10, + q->formant_mem + 10, 160), + 160, 0.9375, &q->postfilter_agc_mem); } static int qcelp_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; - int buf_size = avpkt->size; - QCELPContext *q = avctx->priv_data; + int buf_size = avpkt->size; + QCELPContext *q = avctx->priv_data; float *outbuffer; int i, ret; float quantized_lspf[10], lpc[10]; @@ -711,23 +708,23 @@ static int qcelp_decode_frame(AVCodecContext *avctx, void *data, goto erasure; } - if(q->bitrate == RATE_OCTAVE && - (q->first16bits = AV_RB16(buf)) == 0xFFFF) { + if (q->bitrate == RATE_OCTAVE && + (q->first16bits = AV_RB16(buf)) == 0xFFFF) { warn_insufficient_frame_quality(avctx, "Bitrate is 1/8 and first 16 bits are on."); goto erasure; } if (q->bitrate > SILENCE) { const QCELPBitmap *bitmaps = qcelp_unpacking_bitmaps_per_rate[q->bitrate]; - const QCELPBitmap *bitmaps_end = qcelp_unpacking_bitmaps_per_rate[q->bitrate] - + qcelp_unpacking_bitmaps_lengths[q->bitrate]; - uint8_t *unpacked_data = (uint8_t *)&q->frame; + const QCELPBitmap *bitmaps_end = qcelp_unpacking_bitmaps_per_rate[q->bitrate] + + qcelp_unpacking_bitmaps_lengths[q->bitrate]; + uint8_t *unpacked_data = (uint8_t *)&q->frame; - init_get_bits(&q->gb, buf, 8*buf_size); + init_get_bits(&q->gb, 
buf, 8 * buf_size); memset(&q->frame, 0, sizeof(QCELPFrame)); - for(; bitmaps < bitmaps_end; bitmaps++) + for (; bitmaps < bitmaps_end; bitmaps++) unpacked_data[bitmaps->index] |= get_bits(&q->gb, bitmaps->bitlen) << bitmaps->bitpos; // Check for erasures/blanks on rates 1, 1/4 and 1/8. @@ -735,8 +732,8 @@ static int qcelp_decode_frame(AVCodecContext *avctx, void *data, warn_insufficient_frame_quality(avctx, "Wrong data in reserved frame area."); goto erasure; } - if(q->bitrate == RATE_QUARTER && - codebook_sanity_check_for_rate_quarter(q->frame.cbgain)) { + if (q->bitrate == RATE_QUARTER && + codebook_sanity_check_for_rate_quarter(q->frame.cbgain)) { warn_insufficient_frame_quality(avctx, "Codebook gain sanity check failed."); goto erasure; } @@ -759,7 +756,6 @@ static int qcelp_decode_frame(AVCodecContext *avctx, void *data, goto erasure; } - apply_pitch_filters(q, outbuffer); if (q->bitrate == I_F_Q) { @@ -770,14 +766,13 @@ erasure: compute_svector(q, gain, outbuffer); decode_lspf(q, quantized_lspf); apply_pitch_filters(q, outbuffer); - }else + } else q->erasure_count = 0; formant_mem = q->formant_mem + 10; for (i = 0; i < 4; i++) { interpolate_lpc(q, quantized_lspf, lpc, i); - ff_celp_lp_synthesis_filterf(formant_mem, lpc, outbuffer + i * 40, 40, - 10); + ff_celp_lp_synthesis_filterf(formant_mem, lpc, outbuffer + i * 40, 40, 10); formant_mem += 40; } @@ -787,7 +782,7 @@ erasure: memcpy(q->formant_mem, q->formant_mem + 160, 10 * sizeof(float)); memcpy(q->prev_lspf, quantized_lspf, sizeof(q->prev_lspf)); - q->prev_bitrate = q->bitrate; + q->prev_bitrate = q->bitrate; *got_frame_ptr = 1; *(AVFrame *)data = q->avframe; @@ -795,14 +790,13 @@ erasure: return buf_size; } -AVCodec ff_qcelp_decoder = -{ - .name = "qcelp", - .type = AVMEDIA_TYPE_AUDIO, - .id = CODEC_ID_QCELP, - .init = qcelp_decode_init, - .decode = qcelp_decode_frame, - .capabilities = CODEC_CAP_DR1, +AVCodec ff_qcelp_decoder = { + .name = "qcelp", + .type = AVMEDIA_TYPE_AUDIO, + .id = CODEC_ID_QCELP, + .init = qcelp_decode_init, + .decode = qcelp_decode_frame, + .capabilities = CODEC_CAP_DR1, .priv_data_size = sizeof(QCELPContext), - .long_name = NULL_IF_CONFIG_SMALL("QCELP / PureVoice"), + .long_name = NULL_IF_CONFIG_SMALL("QCELP / PureVoice"), }; -- cgit v1.2.3 From c9aa4cfdb12dd3cff1195cb20947b7d43897eed7 Mon Sep 17 00:00:00 2001 From: Michael Niedermayer Date: Wed, 14 Dec 2011 00:38:23 +0100 Subject: avplay: clear pkt_temp when pkt is freed. Signed-off-by: Michael Niedermayer Signed-off-by: Marton Balint Signed-off-by: Justin Ruggles --- avplay.c | 1 + 1 file changed, 1 insertion(+) diff --git a/avplay.c b/avplay.c index 5fcfaa68e7..bf1ac1b3f2 100644 --- a/avplay.c +++ b/avplay.c @@ -2102,6 +2102,7 @@ static int audio_decode_frame(VideoState *is, double *pts_ptr) /* free the current packet */ if (pkt->data) av_free_packet(pkt); + memset(pkt_temp, 0, sizeof(*pkt_temp)); if (is->paused || is->audioq.abort_request) { return -1; -- cgit v1.2.3 From 1ee5b5e823e3fe7e5599036bb057bfbcb3aa260c Mon Sep 17 00:00:00 2001 From: Luca Barbato Date: Wed, 14 Dec 2011 20:27:11 +0100 Subject: lavu: add AVERROR_BUG error value It should be used to mark codepath that can be reached only through programming error. 
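As a minimal, standalone sketch of the intended usage (the pick_block_size() helper below is invented for illustration and is not part of the patch): a default branch that valid input should never reach returns AVERROR_BUG, and av_strerror() maps it to the human-readable string registered in libavutil/error.c:

    #include <stdio.h>
    #include "libavutil/error.h"

    /* hypothetical helper: every valid mode is handled explicitly,
     * so falling through to default can only happen through a bug */
    static int pick_block_size(int mode)
    {
        switch (mode) {
        case 0: return 4;
        case 1: return 8;
        default: return AVERROR_BUG;
        }
    }

    int main(void)
    {
        char msg[64];
        int ret = pick_block_size(7);
        if (ret < 0) {
            av_strerror(ret, msg, sizeof(msg));
            printf("%s\n", msg); /* prints the "Bug detected ..." text */
        }
        return 0;
    }
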
--- libavutil/error.c | 1 + libavutil/error.h | 1 + 2 files changed, 2 insertions(+) diff --git a/libavutil/error.c b/libavutil/error.c index ddcc038650..a330e9f99c 100644 --- a/libavutil/error.c +++ b/libavutil/error.c @@ -38,6 +38,7 @@ int av_strerror(int errnum, char *errbuf, size_t errbuf_size) case AVERROR_PATCHWELCOME: errstr = "Not yet implemented in Libav, patches welcome"; break; case AVERROR_PROTOCOL_NOT_FOUND:errstr = "Protocol not found" ; break; case AVERROR_STREAM_NOT_FOUND: errstr = "Stream not found" ; break; + case AVERROR_BUG: errstr = "Bug detected, please report the issue" ; break; } if (errstr) { diff --git a/libavutil/error.h b/libavutil/error.h index 8ed77342ef..2db65cb83f 100644 --- a/libavutil/error.h +++ b/libavutil/error.h @@ -57,6 +57,7 @@ #define AVERROR_PATCHWELCOME (-MKTAG( 'P','A','W','E')) ///< Not yet implemented in Libav, patches welcome #define AVERROR_PROTOCOL_NOT_FOUND (-MKTAG(0xF8,'P','R','O')) ///< Protocol not found #define AVERROR_STREAM_NOT_FOUND (-MKTAG(0xF8,'S','T','R')) ///< Stream not found +#define AVERROR_BUG (-MKTAG( 'B','U','G',' ')) ///< Bug detected, please report the issue /** * Put a description of the AVERROR code errnum in errbuf. -- cgit v1.2.3 From 02e8f03296d29949a7cffc8fa3e704b0efa66f17 Mon Sep 17 00:00:00 2001 From: Luca Barbato Date: Sun, 2 Oct 2011 23:05:29 +0200 Subject: segment: introduce segmented chain muxer It behaves similarly to image2 muxer --- Changelog | 1 + doc/muxers.texi | 32 ++++++ libavformat/Makefile | 1 + libavformat/allformats.c | 1 + libavformat/segment.c | 273 +++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 308 insertions(+) create mode 100644 libavformat/segment.c diff --git a/Changelog b/Changelog index 5f5154a4bb..58568c2c3d 100644 --- a/Changelog +++ b/Changelog @@ -109,6 +109,7 @@ easier to use. The changes are: - Dxtory capture format decoder - v410 QuickTime uncompressed 4:4:4 10-bit encoder and decoder - OpenMG Audio muxer +- Simple segmenting muxer version 0.7: diff --git a/doc/muxers.texi b/doc/muxers.texi index 17b1de3694..5a609c8b9a 100644 --- a/doc/muxers.texi +++ b/doc/muxers.texi @@ -90,6 +90,7 @@ avconv -i INPUT -c:a pcm_u8 -c:v mpeg2video -f framecrc - See also the @ref{crc} muxer. +@anchor{image2} @section image2 Image file muxer. @@ -267,4 +268,35 @@ For example a 3D WebM clip can be created using the following command line: avconv -i sample_left_right_clip.mpg -an -c:v libvpx -metadata STEREO_MODE=left_right -y stereo_clip.webm @end example +@section segment + +Basic stream segmenter. + +The segmenter muxer outputs streams to a number of separate files of nearly +fixed duration. Output filename pattern can be set in a fashion similar to +@ref{image2}. + +Every segment starts with a video keyframe, if a video stream is present. +The segment muxer works best with a single constant frame rate video. + +Optionally it can generate a flat list of the created segments, one segment +per line. + +@table @option +@item segment_format @var{format} +Override the inner container format, by default it is guessed by the filename +extension. +@item segment_time @var{t} +Set segment duration to @var{t} seconds. +@item segment_list @var{name} +Generate also a listfile named @var{name}. +@item segment_list_size @var{size} +Overwrite the listfile once it reaches @var{size} entries. 
+@end table + +@example +avconv -i in.mkv -c copy -map 0 -f segment -list out.list out%03d.nut +@end example + + @c man end MUXERS diff --git a/libavformat/Makefile b/libavformat/Makefile index ff6140b0c9..c3a4c0d07b 100644 --- a/libavformat/Makefile +++ b/libavformat/Makefile @@ -268,6 +268,7 @@ OBJS-$(CONFIG_SAP_DEMUXER) += sapdec.o OBJS-$(CONFIG_SAP_MUXER) += sapenc.o rtpenc_chain.o OBJS-$(CONFIG_SDP_DEMUXER) += rtsp.o OBJS-$(CONFIG_SEGAFILM_DEMUXER) += segafilm.o +OBJS-$(CONFIG_SEGMENT_MUXER) += segment.o OBJS-$(CONFIG_SHORTEN_DEMUXER) += rawdec.o OBJS-$(CONFIG_SIFF_DEMUXER) += siff.o OBJS-$(CONFIG_SMACKER_DEMUXER) += smacker.o diff --git a/libavformat/allformats.c b/libavformat/allformats.c index c75f1bacae..083ee08040 100644 --- a/libavformat/allformats.c +++ b/libavformat/allformats.c @@ -197,6 +197,7 @@ void av_register_all(void) av_register_rdt_dynamic_payload_handlers(); #endif REGISTER_DEMUXER (SEGAFILM, segafilm); + REGISTER_MUXER (SEGMENT, segment); REGISTER_DEMUXER (SHORTEN, shorten); REGISTER_DEMUXER (SIFF, siff); REGISTER_DEMUXER (SMACKER, smacker); diff --git a/libavformat/segment.c b/libavformat/segment.c new file mode 100644 index 0000000000..89ae62d312 --- /dev/null +++ b/libavformat/segment.c @@ -0,0 +1,273 @@ +/* + * Generic segmenter + * Copyright (c) 2011, Luca Barbato + * + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include + +#include "avformat.h" +#include "internal.h" + +#include "libavutil/log.h" +#include "libavutil/opt.h" +#include "libavutil/avstring.h" +#include "libavutil/parseutils.h" +#include "libavutil/mathematics.h" + +typedef struct { + const AVClass *class; /**< Class for private options. */ + int number; + AVFormatContext *avf; + char *format; /**< Set by a private option. */ + char *list; /**< Set by a private option. */ + float time; /**< Set by a private option. */ + int size; /**< Set by a private option. 
*/ + int64_t offset_time; + int64_t recording_time; + int has_video; + AVIOContext *pb; +} SegmentContext; + +static int segment_start(AVFormatContext *s) +{ + SegmentContext *c = s->priv_data; + AVFormatContext *oc = c->avf; + int err = 0; + + if (av_get_frame_filename(oc->filename, sizeof(oc->filename), + s->filename, c->number++) < 0) + return AVERROR(EINVAL); + + if ((err = avio_open2(&oc->pb, oc->filename, AVIO_FLAG_WRITE, + &s->interrupt_callback, NULL)) < 0) + return err; + + if (!oc->priv_data && oc->oformat->priv_data_size > 0) { + oc->priv_data = av_mallocz(oc->oformat->priv_data_size); + if (!oc->priv_data) { + avio_close(oc->pb); + return AVERROR(ENOMEM); + } + if (oc->oformat->priv_class) { + *(const AVClass**)oc->priv_data = oc->oformat->priv_class; + av_opt_set_defaults(oc->priv_data); + } + } + + if ((err = oc->oformat->write_header(oc)) < 0) { + goto fail; + } + + return 0; + +fail: + avio_close(oc->pb); + av_freep(&oc->priv_data); + + return err; +} + +static int segment_end(AVFormatContext *oc) +{ + int ret = 0; + + if (oc->oformat->write_trailer) + ret = oc->oformat->write_trailer(oc); + + avio_close(oc->pb); + if (oc->oformat->priv_class) + av_opt_free(oc->priv_data); + av_freep(&oc->priv_data); + + return ret; +} + +static int seg_write_header(AVFormatContext *s) +{ + SegmentContext *seg = s->priv_data; + AVFormatContext *oc; + int ret, i; + + seg->number = 0; + seg->offset_time = 0; + seg->recording_time = seg->time * 1000000; + + if (seg->list) + if ((ret = avio_open2(&seg->pb, seg->list, AVIO_FLAG_WRITE, + &s->interrupt_callback, NULL)) < 0) + return ret; + + for (i = 0; i< s->nb_streams; i++) + seg->has_video += + (s->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO); + + if (seg->has_video > 1) + av_log(s, AV_LOG_WARNING, + "More than a single video stream present, " + "expect issues decoding it.\n"); + + oc = avformat_alloc_context(); + + if (!oc) { + ret = AVERROR(ENOMEM); + goto fail; + } + + oc->oformat = av_guess_format(seg->format, s->filename, NULL); + + if (!oc->oformat) { + ret = AVERROR_MUXER_NOT_FOUND; + goto fail; + } + if (oc->oformat->flags & AVFMT_NOFILE) { + av_log(s, AV_LOG_ERROR, "format %s not supported.\n", + oc->oformat->name); + ret = AVERROR(EINVAL); + goto fail; + } + + seg->avf = oc; + + oc->streams = s->streams; + oc->nb_streams = s->nb_streams; + + if (av_get_frame_filename(oc->filename, sizeof(oc->filename), + s->filename, seg->number++) < 0) { + ret = AVERROR(EINVAL); + goto fail; + } + + if ((ret = avio_open2(&oc->pb, oc->filename, AVIO_FLAG_WRITE, + &s->interrupt_callback, NULL)) < 0) + goto fail; + + if ((ret = avformat_write_header(oc, NULL)) < 0) { + avio_close(oc->pb); + goto fail; + } + + if (seg->list) { + avio_printf(seg->pb, "%s\n", oc->filename); + avio_flush(seg->pb); + } + +fail: + if (ret) { + oc->streams = NULL; + oc->nb_streams = 0; + if (seg->list) + avio_close(seg->pb); + avformat_free_context(oc); + } + return ret; +} + +static int seg_write_packet(AVFormatContext *s, AVPacket *pkt) +{ + SegmentContext *seg = s->priv_data; + AVFormatContext *oc = seg->avf; + AVStream *st = oc->streams[pkt->stream_index]; + int64_t end_pts = seg->recording_time * seg->number; + int ret; + + if ((seg->has_video && st->codec->codec_type == AVMEDIA_TYPE_VIDEO) && + av_compare_ts(pkt->pts, st->time_base, + end_pts, AV_TIME_BASE_Q) >= 0 && + pkt->flags & AV_PKT_FLAG_KEY) { + + av_log(s, AV_LOG_DEBUG, "Next segment starts at %d %"PRId64"\n", + pkt->stream_index, pkt->pts); + + ret = segment_end(oc); + + if (!ret) + ret = 
segment_start(s); + + if (ret) + goto fail; + + if (seg->list) { + avio_printf(seg->pb, "%s\n", oc->filename); + avio_flush(seg->pb); + if (!(seg->number % seg->size)) { + avio_close(seg->pb); + if ((ret = avio_open2(&seg->pb, seg->list, AVIO_FLAG_WRITE, + &s->interrupt_callback, NULL)) < 0) + goto fail; + + } + } + } + + ret = oc->oformat->write_packet(oc, pkt); + +fail: + if (ret < 0) { + oc->streams = NULL; + oc->nb_streams = 0; + if (seg->list) + avio_close(seg->pb); + avformat_free_context(oc); + } + + return ret; +} + +static int seg_write_trailer(struct AVFormatContext *s) +{ + SegmentContext *seg = s->priv_data; + AVFormatContext *oc = seg->avf; + int ret = segment_end(oc); + if (seg->list) + avio_close(seg->pb); + oc->streams = NULL; + oc->nb_streams = 0; + avformat_free_context(oc); + return ret; +} + +#define OFFSET(x) offsetof(SegmentContext, x) +#define E AV_OPT_FLAG_ENCODING_PARAM +static const AVOption options[] = { + { "segment_format", "container format used for the segments", OFFSET(format), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, E }, + { "segment_time", "segment length in seconds", OFFSET(time), AV_OPT_TYPE_FLOAT, {.dbl = 2}, 0, FLT_MAX, E }, + { "segment_list", "output the segment list", OFFSET(list), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, E }, + { "segment_list_size", "maximum number of playlist entries", OFFSET(size), AV_OPT_TYPE_INT, {.dbl = 5}, 0, INT_MAX, E }, + { NULL }, +}; + +static const AVClass seg_class = { + .class_name = "segment muxer", + .item_name = av_default_item_name, + .option = options, + .version = LIBAVUTIL_VERSION_INT, +}; + + +AVOutputFormat ff_segment_muxer = { + .name = "segment", + .long_name = NULL_IF_CONFIG_SMALL("segment muxer"), + .priv_data_size = sizeof(SegmentContext), + .flags = AVFMT_GLOBALHEADER | AVFMT_NOFILE, + .write_header = seg_write_header, + .write_packet = seg_write_packet, + .write_trailer = seg_write_trailer, + .priv_class = &seg_class, +}; -- cgit v1.2.3 From 1c668624725318b62e1ce9adfdbd0417da0a1633 Mon Sep 17 00:00:00 2001 From: Luca Barbato Date: Sat, 17 Dec 2011 14:08:11 +0100 Subject: mpegts: rename payload_index to payload_size It holds the size of the current payload. 
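The buffering this field backs is a plain accumulate-and-flush pattern; a simplified, self-contained sketch of that pattern (MAX_PAYLOAD and the helper names are illustrative stand-ins, only the payload/payload_size fields mirror the muxer):

    #include <string.h>

    #define MAX_PAYLOAD 2930 /* stand-in for DEFAULT_PES_PAYLOAD_SIZE */

    struct stream {
        unsigned char payload[MAX_PAYLOAD];
        int payload_size; /* bytes currently buffered, hence "size" */
    };

    static void flush_pes(struct stream *st)
    {
        /* a real muxer would emit a PES packet from st->payload here */
        st->payload_size = 0;
    }

    static void queue_bytes(struct stream *st, const unsigned char *buf, int size)
    {
        /* for brevity this sketch assumes size <= MAX_PAYLOAD */
        if (st->payload_size + size > MAX_PAYLOAD)
            flush_pes(st); /* buffered data would overflow: write it out first */
        memcpy(st->payload + st->payload_size, buf, size);
        st->payload_size += size;
    }
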
--- libavformat/mpegtsenc.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/libavformat/mpegtsenc.c b/libavformat/mpegtsenc.c index 32a759c55b..57cfe4552b 100644 --- a/libavformat/mpegtsenc.c +++ b/libavformat/mpegtsenc.c @@ -201,7 +201,7 @@ typedef struct MpegTSWriteStream { struct MpegTSService *service; int pid; /* stream associated pid */ int cc; - int payload_index; + int payload_size; int first_pts_check; ///< first pts check needed int64_t payload_pts; int64_t payload_dts; @@ -1006,21 +1006,21 @@ static int mpegts_write_packet(AVFormatContext *s, AVPacket *pkt) return 0; } - if (ts_st->payload_index + size > DEFAULT_PES_PAYLOAD_SIZE) { - mpegts_write_pes(s, st, ts_st->payload, ts_st->payload_index, + if (ts_st->payload_size + size > DEFAULT_PES_PAYLOAD_SIZE) { + mpegts_write_pes(s, st, ts_st->payload, ts_st->payload_size, ts_st->payload_pts, ts_st->payload_dts, ts_st->payload_flags & AV_PKT_FLAG_KEY); - ts_st->payload_index = 0; + ts_st->payload_size = 0; } - if (!ts_st->payload_index) { + if (!ts_st->payload_size) { ts_st->payload_pts = pts; ts_st->payload_dts = dts; ts_st->payload_flags = pkt->flags; } - memcpy(ts_st->payload + ts_st->payload_index, buf, size); - ts_st->payload_index += size; + memcpy(ts_st->payload + ts_st->payload_size, buf, size); + ts_st->payload_size += size; av_free(data); @@ -1039,8 +1039,8 @@ static int mpegts_write_end(AVFormatContext *s) for(i = 0; i < s->nb_streams; i++) { st = s->streams[i]; ts_st = st->priv_data; - if (ts_st->payload_index > 0) { - mpegts_write_pes(s, st, ts_st->payload, ts_st->payload_index, + if (ts_st->payload_size > 0) { + mpegts_write_pes(s, st, ts_st->payload, ts_st->payload_size, ts_st->payload_pts, ts_st->payload_dts, ts_st->payload_flags & AV_PKT_FLAG_KEY); } -- cgit v1.2.3 From 00aad121d8a6f365641345a8321bdaac1ff80649 Mon Sep 17 00:00:00 2001 From: Aneesh Dogra Date: Tue, 20 Dec 2011 00:00:30 +0530 Subject: xl: Fix overreads MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Martin Storsjö --- libavcodec/xl.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/libavcodec/xl.c b/libavcodec/xl.c index 197b0c24a2..0ebc9467e0 100644 --- a/libavcodec/xl.c +++ b/libavcodec/xl.c @@ -68,6 +68,12 @@ static int decode_frame(AVCodecContext *avctx, V = a->pic.data[2]; stride = avctx->width - 4; + + if (buf_size < avctx->width * avctx->height) { + av_log(avctx, AV_LOG_ERROR, "Packet is too small\n"); + return AVERROR_INVALIDDATA; + } + for (i = 0; i < avctx->height; i++) { /* lines are stored in reversed order */ buf += stride; -- cgit v1.2.3 From 0a6aff69366cb60d252ae46bd1d21d4b2074fa71 Mon Sep 17 00:00:00 2001 From: Aneesh Dogra Date: Tue, 20 Dec 2011 01:38:19 +0530 Subject: vc1: Handle WVC1 interlaced stream MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Martin Storsjö --- libavcodec/vc1dec.c | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c index 1151318a47..701a3da956 100644 --- a/libavcodec/vc1dec.c +++ b/libavcodec/vc1dec.c @@ -5425,8 +5425,8 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data, MpegEncContext *s = &v->s; AVFrame *pict = data; uint8_t *buf2 = NULL; - uint8_t *buf_field2 = NULL; const uint8_t *buf_start = buf; + uint8_t *tmp; int mb_height, n_slices1; struct { uint8_t *buf; @@ -5492,9 +5492,6 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data, 
slices[n_slices].mby_start = s->mb_height >> 1; n_slices1 = n_slices - 1; // index of the last slice of the first field n_slices++; - // not necessary, ad hoc until I find a way to handle WVC1i - buf_field2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE); - vc1_unescape_buffer(start + 4, size, buf_field2); break; } case VC1_CODE_ENTRYPOINT: /* it should be before frame data */ @@ -5522,14 +5519,26 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data, } } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */ const uint8_t *divider; + int buf_size3; divider = find_next_marker(buf, buf + buf_size); if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) { av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n"); goto err; } else { // found field marker, unescape second field - buf_field2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE); - vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, buf_field2); + tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1)); + if (!tmp) + goto err; + slices = tmp; + slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE); + if (!slices[n_slices].buf) + goto err; + buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf); + init_get_bits(&slices[n_slices].gb, slices[n_slices].buf, + buf_size3 << 3); + slices[n_slices].mby_start = s->mb_height >> 1; + n_slices1 = n_slices - 1; + n_slices++; } buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2); } else { @@ -5702,10 +5711,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data, s->gb = slices[i].gb; } if (v->field_mode) { - av_free(buf_field2); v->second_field = 0; - } - if (v->field_mode) { if (s->pict_type == AV_PICTURE_TYPE_B) { memcpy(v->mv_f_base, v->mv_f_next_base, 2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2)); @@ -5760,7 +5766,6 @@ err: for (i = 0; i < n_slices; i++) av_free(slices[i].buf); av_free(slices); - av_free(buf_field2); return -1; } -- cgit v1.2.3 From 729ebb2f185244b0ff06d48edbbbbb02ceb4ed4e Mon Sep 17 00:00:00 2001 From: Janne Grunau Date: Mon, 19 Dec 2011 18:11:44 +0100 Subject: h264: clear trailing bits in partially parsed NAL units Trailing bits are likely to be non-zero if the NAL unit is truncated. Clearing the bits make overreads of the bitstream less likely in this case. Fixes playback of http://streams.videolan.org/streams/mp4/Mr_MrsSmith-h264_aac.mp4 which has a forbidden byte sequence of 0x00 0x00 0x00 in it SPS. --- libavcodec/h264.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/libavcodec/h264.c b/libavcodec/h264.c index 77acd7168f..a9a10513e3 100644 --- a/libavcodec/h264.c +++ b/libavcodec/h264.c @@ -3759,7 +3759,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){ int consumed; int dst_length; int bit_length; - const uint8_t *ptr; + uint8_t *ptr; int i, nalsize = 0; int err; @@ -3809,6 +3809,9 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){ } if (h->is_avc && (nalsize != consumed) && nalsize){ + // set trailing bits in the last partial byte to zero + if (bit_length & 7) + ptr[bit_length >> 3] = ptr[bit_length >> 3] & (0xff << 8 - (bit_length & 7)); av_log(h->s.avctx, AV_LOG_DEBUG, "AVC: Consumed only %d bytes instead of %d\n", consumed, nalsize); } -- cgit v1.2.3
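For reference, the masking step added above behaves as follows in isolation (standalone sketch; clear_trailing_bits() is merely an illustrative wrapper around the expression used in decode_nal_units()):

    #include <stdint.h>
    #include <stdio.h>

    /* keep the first bit_length bits, zero the rest of the last byte */
    static void clear_trailing_bits(uint8_t *buf, int bit_length)
    {
        if (bit_length & 7)
            buf[bit_length >> 3] &= 0xff << (8 - (bit_length & 7));
    }

    int main(void)
    {
        uint8_t buf[2] = { 0xab, 0xff };
        clear_trailing_bits(buf, 12); /* 12 bits kept -> 0xab 0xf0 */
        printf("%02x %02x\n", buf[0], buf[1]);
        return 0;
    }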