Diffstat (limited to 'libswscale/swscale.c')
-rw-r--r--  libswscale/swscale.c | 529
1 file changed, 343 insertions(+), 186 deletions(-)
diff --git a/libswscale/swscale.c b/libswscale/swscale.c
index f5c4e88688..d53af2771d 100644
--- a/libswscale/swscale.c
+++ b/libswscale/swscale.c
@@ -1,20 +1,20 @@
/*
* Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -80,7 +80,7 @@ untested special converters
(x)==PIX_FMT_PAL8 \
|| (x)==PIX_FMT_YUYV422 \
|| (x)==PIX_FMT_UYVY422 \
- || (x)==PIX_FMT_Y400A \
+ || (x)==PIX_FMT_GRAY8A \
|| isAnyRGB(x) \
)
@@ -121,6 +121,63 @@ add BGR4 output support
write special BGR->BGR scaler
*/
+#if ARCH_X86
+DECLARE_ASM_CONST(8, uint64_t, bF8)= 0xF8F8F8F8F8F8F8F8LL;
+DECLARE_ASM_CONST(8, uint64_t, bFC)= 0xFCFCFCFCFCFCFCFCLL;
+DECLARE_ASM_CONST(8, uint64_t, w10)= 0x0010001000100010LL;
+DECLARE_ASM_CONST(8, uint64_t, w02)= 0x0002000200020002LL;
+DECLARE_ASM_CONST(8, uint64_t, bm00001111)=0x00000000FFFFFFFFLL;
+DECLARE_ASM_CONST(8, uint64_t, bm00000111)=0x0000000000FFFFFFLL;
+DECLARE_ASM_CONST(8, uint64_t, bm11111000)=0xFFFFFFFFFF000000LL;
+DECLARE_ASM_CONST(8, uint64_t, bm01010101)=0x00FF00FF00FF00FFLL;
+
+const DECLARE_ALIGNED(8, uint64_t, ff_dither4)[2] = {
+ 0x0103010301030103LL,
+ 0x0200020002000200LL,};
+
+const DECLARE_ALIGNED(8, uint64_t, ff_dither8)[2] = {
+ 0x0602060206020602LL,
+ 0x0004000400040004LL,};
+
+DECLARE_ASM_CONST(8, uint64_t, b16Mask)= 0x001F001F001F001FLL;
+DECLARE_ASM_CONST(8, uint64_t, g16Mask)= 0x07E007E007E007E0LL;
+DECLARE_ASM_CONST(8, uint64_t, r16Mask)= 0xF800F800F800F800LL;
+DECLARE_ASM_CONST(8, uint64_t, b15Mask)= 0x001F001F001F001FLL;
+DECLARE_ASM_CONST(8, uint64_t, g15Mask)= 0x03E003E003E003E0LL;
+DECLARE_ASM_CONST(8, uint64_t, r15Mask)= 0x7C007C007C007C00LL;
+
+DECLARE_ALIGNED(8, const uint64_t, ff_M24A) = 0x00FF0000FF0000FFLL;
+DECLARE_ALIGNED(8, const uint64_t, ff_M24B) = 0xFF0000FF0000FF00LL;
+DECLARE_ALIGNED(8, const uint64_t, ff_M24C) = 0x0000FF0000FF0000LL;
+
+#ifdef FAST_BGR2YV12
+DECLARE_ALIGNED(8, const uint64_t, ff_bgr2YCoeff) = 0x000000210041000DULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_bgr2UCoeff) = 0x0000FFEEFFDC0038ULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_bgr2VCoeff) = 0x00000038FFD2FFF8ULL;
+#else
+DECLARE_ALIGNED(8, const uint64_t, ff_bgr2YCoeff) = 0x000020E540830C8BULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_bgr2UCoeff) = 0x0000ED0FDAC23831ULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_bgr2VCoeff) = 0x00003831D0E6F6EAULL;
+#endif /* FAST_BGR2YV12 */
+DECLARE_ALIGNED(8, const uint64_t, ff_bgr2YOffset) = 0x1010101010101010ULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_bgr2UVOffset) = 0x8080808080808080ULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_w1111) = 0x0001000100010001ULL;
+
+DECLARE_ASM_CONST(8, uint64_t, ff_bgr24toY1Coeff) = 0x0C88000040870C88ULL;
+DECLARE_ASM_CONST(8, uint64_t, ff_bgr24toY2Coeff) = 0x20DE4087000020DEULL;
+DECLARE_ASM_CONST(8, uint64_t, ff_rgb24toY1Coeff) = 0x20DE0000408720DEULL;
+DECLARE_ASM_CONST(8, uint64_t, ff_rgb24toY2Coeff) = 0x0C88408700000C88ULL;
+DECLARE_ASM_CONST(8, uint64_t, ff_bgr24toYOffset) = 0x0008400000084000ULL;
+
+DECLARE_ASM_CONST(8, uint64_t, ff_bgr24toUV)[2][4] = {
+ {0x38380000DAC83838ULL, 0xECFFDAC80000ECFFULL, 0xF6E40000D0E3F6E4ULL, 0x3838D0E300003838ULL},
+ {0xECFF0000DAC8ECFFULL, 0x3838DAC800003838ULL, 0x38380000D0E33838ULL, 0xF6E4D0E30000F6E4ULL},
+};
+
+DECLARE_ASM_CONST(8, uint64_t, ff_bgr24toUVOffset)= 0x0040400000404000ULL;
+
+#endif /* ARCH_X86 */
+
DECLARE_ALIGNED(8, static const uint8_t, dither_2x2_4)[2][8]={
{ 1, 3, 1, 3, 1, 3, 1, 3, },
{ 2, 0, 2, 0, 2, 0, 2, 0, },
@@ -209,6 +266,99 @@ DECLARE_ALIGNED(8, const uint8_t, dither_8x8_220)[8][8]={
};
#endif
+DECLARE_ALIGNED(8, const uint8_t, dithers)[8][8][8]={
+{
+ { 0, 1, 0, 1, 0, 1, 0, 1,},
+ { 1, 0, 1, 0, 1, 0, 1, 0,},
+ { 0, 1, 0, 1, 0, 1, 0, 1,},
+ { 1, 0, 1, 0, 1, 0, 1, 0,},
+ { 0, 1, 0, 1, 0, 1, 0, 1,},
+ { 1, 0, 1, 0, 1, 0, 1, 0,},
+ { 0, 1, 0, 1, 0, 1, 0, 1,},
+ { 1, 0, 1, 0, 1, 0, 1, 0,},
+},{
+ { 1, 2, 1, 2, 1, 2, 1, 2,},
+ { 3, 0, 3, 0, 3, 0, 3, 0,},
+ { 1, 2, 1, 2, 1, 2, 1, 2,},
+ { 3, 0, 3, 0, 3, 0, 3, 0,},
+ { 1, 2, 1, 2, 1, 2, 1, 2,},
+ { 3, 0, 3, 0, 3, 0, 3, 0,},
+ { 1, 2, 1, 2, 1, 2, 1, 2,},
+ { 3, 0, 3, 0, 3, 0, 3, 0,},
+},{
+ { 2, 4, 3, 5, 2, 4, 3, 5,},
+ { 6, 0, 7, 1, 6, 0, 7, 1,},
+ { 3, 5, 2, 4, 3, 5, 2, 4,},
+ { 7, 1, 6, 0, 7, 1, 6, 0,},
+ { 2, 4, 3, 5, 2, 4, 3, 5,},
+ { 6, 0, 7, 1, 6, 0, 7, 1,},
+ { 3, 5, 2, 4, 3, 5, 2, 4,},
+ { 7, 1, 6, 0, 7, 1, 6, 0,},
+},{
+ { 4, 8, 7, 11, 4, 8, 7, 11,},
+ { 12, 0, 15, 3, 12, 0, 15, 3,},
+ { 6, 10, 5, 9, 6, 10, 5, 9,},
+ { 14, 2, 13, 1, 14, 2, 13, 1,},
+ { 4, 8, 7, 11, 4, 8, 7, 11,},
+ { 12, 0, 15, 3, 12, 0, 15, 3,},
+ { 6, 10, 5, 9, 6, 10, 5, 9,},
+ { 14, 2, 13, 1, 14, 2, 13, 1,},
+},{
+ { 9, 17, 15, 23, 8, 16, 14, 22,},
+ { 25, 1, 31, 7, 24, 0, 30, 6,},
+ { 13, 21, 11, 19, 12, 20, 10, 18,},
+ { 29, 5, 27, 3, 28, 4, 26, 2,},
+ { 8, 16, 14, 22, 9, 17, 15, 23,},
+ { 24, 0, 30, 6, 25, 1, 31, 7,},
+ { 12, 20, 10, 18, 13, 21, 11, 19,},
+ { 28, 4, 26, 2, 29, 5, 27, 3,},
+},{
+ { 18, 34, 30, 46, 17, 33, 29, 45,},
+ { 50, 2, 62, 14, 49, 1, 61, 13,},
+ { 26, 42, 22, 38, 25, 41, 21, 37,},
+ { 58, 10, 54, 6, 57, 9, 53, 5,},
+ { 16, 32, 28, 44, 19, 35, 31, 47,},
+ { 48, 0, 60, 12, 51, 3, 63, 15,},
+ { 24, 40, 20, 36, 27, 43, 23, 39,},
+ { 56, 8, 52, 4, 59, 11, 55, 7,},
+},{
+ { 18, 34, 30, 46, 17, 33, 29, 45,},
+ { 50, 2, 62, 14, 49, 1, 61, 13,},
+ { 26, 42, 22, 38, 25, 41, 21, 37,},
+ { 58, 10, 54, 6, 57, 9, 53, 5,},
+ { 16, 32, 28, 44, 19, 35, 31, 47,},
+ { 48, 0, 60, 12, 51, 3, 63, 15,},
+ { 24, 40, 20, 36, 27, 43, 23, 39,},
+ { 56, 8, 52, 4, 59, 11, 55, 7,},
+},{
+ { 36, 68, 60, 92, 34, 66, 58, 90,},
+ { 100, 4,124, 28, 98, 2,122, 26,},
+ { 52, 84, 44, 76, 50, 82, 42, 74,},
+ { 116, 20,108, 12,114, 18,106, 10,},
+ { 32, 64, 56, 88, 38, 70, 62, 94,},
+ { 96, 0,120, 24,102, 6,126, 30,},
+ { 48, 80, 40, 72, 54, 86, 46, 78,},
+ { 112, 16,104, 8,118, 22,110, 14,},
+}};
+
+uint16_t dither_scale[15][16]={
+{ 2, 3, 3, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,},
+{ 2, 3, 7, 7, 13, 13, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,},
+{ 3, 3, 4, 15, 15, 29, 57, 57, 57, 113, 113, 113, 113, 113, 113, 113,},
+{ 3, 4, 4, 5, 31, 31, 61, 121, 241, 241, 241, 241, 481, 481, 481, 481,},
+{ 3, 4, 5, 5, 6, 63, 63, 125, 249, 497, 993, 993, 993, 993, 993, 1985,},
+{ 3, 5, 6, 6, 6, 7, 127, 127, 253, 505, 1009, 2017, 4033, 4033, 4033, 4033,},
+{ 3, 5, 6, 7, 7, 7, 8, 255, 255, 509, 1017, 2033, 4065, 8129,16257,16257,},
+{ 3, 5, 6, 8, 8, 8, 8, 9, 511, 511, 1021, 2041, 4081, 8161,16321,32641,},
+{ 3, 5, 7, 8, 9, 9, 9, 9, 10, 1023, 1023, 2045, 4089, 8177,16353,32705,},
+{ 3, 5, 7, 8, 10, 10, 10, 10, 10, 11, 2047, 2047, 4093, 8185,16369,32737,},
+{ 3, 5, 7, 8, 10, 11, 11, 11, 11, 11, 12, 4095, 4095, 8189,16377,32753,},
+{ 3, 5, 7, 9, 10, 12, 12, 12, 12, 12, 12, 13, 8191, 8191,16381,32761,},
+{ 3, 5, 7, 9, 10, 12, 13, 13, 13, 13, 13, 13, 14,16383,16383,32765,},
+{ 3, 5, 7, 9, 10, 12, 14, 14, 14, 14, 14, 14, 14, 15,32767,32767,},
+{ 3, 5, 7, 9, 11, 12, 14, 15, 15, 15, 15, 15, 15, 15, 16,65535,},
+};
+
static av_always_inline void yuv2yuvX16inC_template(const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
const int16_t **alpSrc, uint16_t *dest, uint16_t *uDest, uint16_t *vDest, uint16_t *aDest,
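
For reference, the scale and shift that the DITHER_COPY macro (added further down) derives from these tables work out, for a 10-bit to 8-bit plane copy, to scale = dither_scale[7][9] = 511 and shift = 2 + dither_scale[8][7] = 11, with dithers[src_depth-9] = dithers[1] supplying offsets 0..3. The helper below is only an illustrative sketch with those two lookups folded into constants; dither_10_to_8 is a hypothetical name, not code from this patch.

#include <stdint.h>

/* scale = dither_scale[dst_depth-1][src_depth-1] = dither_scale[7][9] = 511
 * shift = src_depth-dst_depth + dither_scale[src_depth-2][dst_depth-1] = 2 + 9 = 11
 * dither matrix = dithers[src_depth-9] = dithers[1], entries 0..3 */
static uint8_t dither_10_to_8(uint16_t v, uint8_t d)
{
    /* v: 0..1023 source sample, d: dithers[1][y&7][x&7] for this pixel position */
    return (uint8_t)(((v + d) * 511) >> 11);   /* (1023+3)*511 >> 11 == 255 */
}
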
@@ -271,54 +421,77 @@ static av_always_inline void yuv2yuvX16inC_template(const int16_t *lumFilter, co
}
}
-#define yuv2NBPS(bits, BE_LE, is_be) \
-static void yuv2yuvX ## bits ## BE_LE ## _c(const int16_t *lumFilter, \
- const int16_t **lumSrc, int lumFilterSize, \
- const int16_t *chrFilter, const int16_t **chrSrc, \
- int chrFilterSize, const int16_t **alpSrc, \
- uint16_t *dest, uint16_t *uDest, uint16_t *vDest, \
- uint16_t *aDest, int dstW, int chrDstW) \
-{ \
- yuv2yuvX16inC_template(lumFilter, lumSrc, lumFilterSize, \
- chrFilter, chrSrc, chrFilterSize, \
- alpSrc, \
- dest, uDest, vDest, aDest, \
- dstW, chrDstW, is_be, bits); \
+static av_always_inline void yuv2yuvXNinC_template(const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
+ const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
+ const int16_t **alpSrc, uint16_t *dest, uint16_t *uDest, uint16_t *vDest, uint16_t *aDest,
+ int dstW, int chrDstW, int big_endian, int depth)
+{
+ //FIXME Optimize (just quickly written not optimized..)
+ int i;
+
+ for (i = 0; i < dstW; i++) {
+ int val = 1 << (26-depth);
+ int j;
+
+ for (j = 0; j < lumFilterSize; j++)
+ val += lumSrc[j][i] * lumFilter[j];
+
+ if (big_endian) {
+ AV_WB16(&dest[i], av_clip(val >> (27-depth), 0, (1<<depth)-1));
+ } else {
+ AV_WL16(&dest[i], av_clip(val >> (27-depth), 0, (1<<depth)-1));
+ }
+ }
+
+ if (uDest) {
+ for (i = 0; i < chrDstW; i++) {
+ int u = 1 << (26-depth);
+ int v = 1 << (26-depth);
+ int j;
+
+ for (j = 0; j < chrFilterSize; j++) {
+ u += chrSrc[j][i ] * chrFilter[j];
+ v += chrSrc[j][i + VOFW] * chrFilter[j];
+ }
+
+ if (big_endian) {
+ AV_WB16(&uDest[i], av_clip(u >> (27-depth), 0, (1<<depth)-1));
+ AV_WB16(&vDest[i], av_clip(v >> (27-depth), 0, (1<<depth)-1));
+ } else {
+ AV_WL16(&uDest[i], av_clip(u >> (27-depth), 0, (1<<depth)-1));
+ AV_WL16(&vDest[i], av_clip(v >> (27-depth), 0, (1<<depth)-1));
+ }
+ }
+ }
}
-yuv2NBPS( 9, BE, 1);
-yuv2NBPS( 9, LE, 0);
-yuv2NBPS(10, BE, 1);
-yuv2NBPS(10, LE, 0);
-yuv2NBPS(16, BE, 1);
-yuv2NBPS(16, LE, 0);
static inline void yuv2yuvX16inC(const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
const int16_t **alpSrc, uint16_t *dest, uint16_t *uDest, uint16_t *vDest, uint16_t *aDest, int dstW, int chrDstW,
enum PixelFormat dstFormat)
{
-#define conv16(bits) \
- if (isBE(dstFormat)) { \
- yuv2yuvX ## bits ## BE_c(lumFilter, lumSrc, lumFilterSize, \
- chrFilter, chrSrc, chrFilterSize, \
- alpSrc, \
- dest, uDest, vDest, aDest, \
- dstW, chrDstW); \
- } else { \
- yuv2yuvX ## bits ## LE_c(lumFilter, lumSrc, lumFilterSize, \
- chrFilter, chrSrc, chrFilterSize, \
- alpSrc, \
- dest, uDest, vDest, aDest, \
- dstW, chrDstW); \
- }
- if (is16BPS(dstFormat)) {
- conv16(16);
- } else if (av_pix_fmt_descriptors[dstFormat].comp[0].depth_minus1 == 8) {
- conv16(9);
+ if (isNBPS(dstFormat)) {
+ const int depth = av_pix_fmt_descriptors[dstFormat].comp[0].depth_minus1+1;
+ yuv2yuvXNinC_template(lumFilter, lumSrc, lumFilterSize,
+ chrFilter, chrSrc, chrFilterSize,
+ alpSrc,
+ dest, uDest, vDest, aDest,
+ dstW, chrDstW, isBE(dstFormat), depth);
} else {
- conv16(10);
+ if (isBE(dstFormat)) {
+ yuv2yuvX16inC_template(lumFilter, lumSrc, lumFilterSize,
+ chrFilter, chrSrc, chrFilterSize,
+ alpSrc,
+ dest, uDest, vDest, aDest,
+ dstW, chrDstW, 1, 16);
+ } else {
+ yuv2yuvX16inC_template(lumFilter, lumSrc, lumFilterSize,
+ chrFilter, chrSrc, chrFilterSize,
+ alpSrc,
+ dest, uDest, vDest, aDest,
+ dstW, chrDstW, 0, 16);
+ }
}
-#undef conv16
}
static inline void yuv2yuvXinC(const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
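
The replacement template keeps the fixed-point convention implied by its shifts: intermediate samples with 15 significant bits are multiplied by Q12 filter coefficients, a rounding bias of 1<<(26-depth) is added, the sum is shifted down by 27-depth and clipped to the destination range, and the big_endian flag picks AV_WB16 or AV_WL16. A minimal per-pixel sketch of that arithmetic follows; filter_one_sample is a hypothetical helper operating on the taps already gathered for one output pixel, not code from this patch.

#include <stdint.h>
#include "libavutil/common.h"   /* av_clip() */

static uint16_t filter_one_sample(const int16_t *taps, const int16_t *coeffs,
                                  int n, int depth)
{
    int val = 1 << (26 - depth);          /* rounding bias, half of the final shift */
    int j;
    for (j = 0; j < n; j++)
        val += taps[j] * coeffs[j];       /* 15-bit samples times Q12 coefficients */
    return av_clip(val >> (27 - depth), 0, (1 << depth) - 1);
}
/* e.g. depth 10, a single tap of 4096 (unity in Q12) and sample 16384
 * (mid-scale of the 15-bit range) yields 512, mid-scale of 0..1023. */
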
@@ -1133,6 +1306,16 @@ BGR2UV(uint16_t, bgr15ToUV, 0, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, RU<<10, GU<<
BGR2UV(uint16_t, rgb16ToUV, 0, 0, 0, 0, 0xF800, 0x07E0, 0x001F, RU , GU<<5, BU<<11, RV , GV<<5, BV<<11, RGB2YUV_SHIFT+8)
BGR2UV(uint16_t, rgb15ToUV, 0, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, RU , GU<<5, BU<<10, RV , GV<<5, BV<<10, RGB2YUV_SHIFT+7)
+static inline void palToA(uint8_t *dst, const uint8_t *src, long width, uint32_t *pal)
+{
+ int i;
+ for (i=0; i<width; i++) {
+ int d= src[i];
+
+ dst[i]= pal[d] >> 24;
+ }
+}
+
static inline void palToY(uint8_t *dst, const uint8_t *src, long width, uint32_t *pal)
{
int i;
@@ -1182,9 +1365,9 @@ static inline void monoblack2Y(uint8_t *dst, const uint8_t *src, long width, uin
#if CONFIG_RUNTIME_CPUDETECT
# define COMPILE_C 1
# if ARCH_X86
-# define COMPILE_MMX HAVE_MMX
-# define COMPILE_MMX2 HAVE_MMX2
-# define COMPILE_3DNOW HAVE_AMD3DNOW
+# define COMPILE_MMX 1
+# define COMPILE_MMX2 1
+# define COMPILE_3DNOW 1
# elif ARCH_PPC
# define COMPILE_ALTIVEC HAVE_ALTIVEC
# endif
@@ -1227,14 +1410,17 @@ static inline void monoblack2Y(uint8_t *dst, const uint8_t *src, long width, uin
#define COMPILE_TEMPLATE_AMD3DNOW 0
#define COMPILE_TEMPLATE_ALTIVEC 0
+#if COMPILE_C
+#define RENAME(a) a ## _C
#include "swscale_template.c"
+#endif
#if COMPILE_ALTIVEC
#undef RENAME
#undef COMPILE_TEMPLATE_ALTIVEC
#define COMPILE_TEMPLATE_ALTIVEC 1
#define RENAME(a) a ## _altivec
-#include "ppc/swscale_template.c"
+#include "swscale_template.c"
#endif
#if ARCH_X86
@@ -1249,7 +1435,7 @@ static inline void monoblack2Y(uint8_t *dst, const uint8_t *src, long width, uin
#define COMPILE_TEMPLATE_MMX2 0
#define COMPILE_TEMPLATE_AMD3DNOW 0
#define RENAME(a) a ## _MMX
-#include "x86/swscale_template.c"
+#include "swscale_template.c"
#endif
//MMX2 versions
@@ -1262,7 +1448,7 @@ static inline void monoblack2Y(uint8_t *dst, const uint8_t *src, long width, uin
#define COMPILE_TEMPLATE_MMX2 1
#define COMPILE_TEMPLATE_AMD3DNOW 0
#define RENAME(a) a ## _MMX2
-#include "x86/swscale_template.c"
+#include "swscale_template.c"
#endif
//3DNOW versions
@@ -1275,36 +1461,44 @@ static inline void monoblack2Y(uint8_t *dst, const uint8_t *src, long width, uin
#define COMPILE_TEMPLATE_MMX2 0
#define COMPILE_TEMPLATE_AMD3DNOW 1
#define RENAME(a) a ## _3DNow
-#include "x86/swscale_template.c"
+#include "swscale_template.c"
#endif
#endif //ARCH_X86
SwsFunc ff_getSwsFunc(SwsContext *c)
{
- sws_init_swScale_c(c);
-
#if CONFIG_RUNTIME_CPUDETECT
+ int flags = c->flags;
+
#if ARCH_X86
// ordered per speed fastest first
- if (c->flags & SWS_CPU_CAPS_MMX2) {
+ if (flags & SWS_CPU_CAPS_MMX2) {
sws_init_swScale_MMX2(c);
return swScale_MMX2;
- } else if (c->flags & SWS_CPU_CAPS_3DNOW) {
+ } else if (flags & SWS_CPU_CAPS_3DNOW) {
sws_init_swScale_3DNow(c);
return swScale_3DNow;
- } else if (c->flags & SWS_CPU_CAPS_MMX) {
+ } else if (flags & SWS_CPU_CAPS_MMX) {
sws_init_swScale_MMX(c);
return swScale_MMX;
+ } else {
+ sws_init_swScale_C(c);
+ return swScale_C;
}
#else
#if COMPILE_ALTIVEC
- if (c->flags & SWS_CPU_CAPS_ALTIVEC) {
+ if (flags & SWS_CPU_CAPS_ALTIVEC) {
sws_init_swScale_altivec(c);
return swScale_altivec;
+ } else {
+ sws_init_swScale_C(c);
+ return swScale_C;
}
#endif
+ sws_init_swScale_C(c);
+ return swScale_C;
#endif /* ARCH_X86 */
#else //CONFIG_RUNTIME_CPUDETECT
#if COMPILE_TEMPLATE_MMX2
@@ -1319,10 +1513,11 @@ SwsFunc ff_getSwsFunc(SwsContext *c)
#elif COMPILE_TEMPLATE_ALTIVEC
sws_init_swScale_altivec(c);
return swScale_altivec;
+#else
+ sws_init_swScale_C(c);
+ return swScale_C;
#endif
#endif //!CONFIG_RUNTIME_CPUDETECT
-
- return swScale_c;
}
static void copyPlane(const uint8_t *src, int srcStride,
@@ -1491,7 +1686,7 @@ static int palToRgbWrapper(SwsContext *c, const uint8_t* src[], int srcStride[],
uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY;
const uint8_t *srcPtr= src[0];
- if (srcFormat == PIX_FMT_Y400A) {
+ if (srcFormat == PIX_FMT_GRAY8A) {
switch (dstFormat) {
case PIX_FMT_RGB32 : conv = gray8aToPacked32; break;
case PIX_FMT_BGR32 : conv = gray8aToPacked32; break;
@@ -1610,7 +1805,7 @@ static int rgbToRgbWrapper(SwsContext *c, const uint8_t* src[], int srcStride[],
if ((dstFormat == PIX_FMT_RGB32_1 || dstFormat == PIX_FMT_BGR32_1) && !isRGBA32(srcFormat))
dstPtr += ALT32_CORR;
- if (dstStride[0]*srcBpp == srcStride[0]*dstBpp && srcStride[0] > 0)
+ if (dstStride[0]*srcBpp == srcStride[0]*dstBpp && srcStride[0] > 0 && !(srcStride[0]%srcBpp))
conv(srcPtr, dstPtr + dstStride[0]*srcSliceY, srcSliceH*srcStride[0]);
else {
int i;
@@ -1682,6 +1877,28 @@ static int packedCopyWrapper(SwsContext *c, const uint8_t* src[], int srcStride[
return srcSliceH;
}
+#define DITHER_COPY(dst, dstStride, src, srcStride, bswap, dbswap)\
+ uint16_t scale= dither_scale[dst_depth-1][src_depth-1];\
+ int shift= src_depth-dst_depth + dither_scale[src_depth-2][dst_depth-1];\
+ for (i = 0; i < height; i++) {\
+ uint8_t *dither= dithers[src_depth-9][i&7];\
+ for (j = 0; j < length-7; j+=8){\
+ dst[j+0] = dbswap((bswap(src[j+0]) + dither[0])*scale>>shift);\
+ dst[j+1] = dbswap((bswap(src[j+1]) + dither[1])*scale>>shift);\
+ dst[j+2] = dbswap((bswap(src[j+2]) + dither[2])*scale>>shift);\
+ dst[j+3] = dbswap((bswap(src[j+3]) + dither[3])*scale>>shift);\
+ dst[j+4] = dbswap((bswap(src[j+4]) + dither[4])*scale>>shift);\
+ dst[j+5] = dbswap((bswap(src[j+5]) + dither[5])*scale>>shift);\
+ dst[j+6] = dbswap((bswap(src[j+6]) + dither[6])*scale>>shift);\
+ dst[j+7] = dbswap((bswap(src[j+7]) + dither[7])*scale>>shift);\
+ }\
+ for (; j < length; j++)\
+ dst[j] = dbswap((bswap(src[j]) + dither[j&7])*scale>>shift);\
+ dst += dstStride;\
+ src += srcStride;\
+ }
+
+
static int planarCopyWrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
int srcSliceH, uint8_t* dst[], int dstStride[])
{
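
Expanded for the simplest instantiation used below, DITHER_COPY(dstPtr, dstStride[plane], srcPtr2, srcStride[plane]/2, , ), i.e. a native-endian 16-bit-per-sample source reduced to an 8-bit destination, the macro behaves like the plain loop sketched here. This is illustrative only: the unrolling is omitted, dither_copy_plane is a hypothetical name, and the dithers / dither_scale tables added earlier are assumed to be in scope.

static void dither_copy_plane(uint8_t *dst, int dstStride /* bytes */,
                              const uint16_t *src, int srcStride /* samples */,
                              int length, int height,
                              int src_depth, int dst_depth)
{
    uint16_t scale = dither_scale[dst_depth-1][src_depth-1];
    int      shift = src_depth-dst_depth + dither_scale[src_depth-2][dst_depth-1];
    int i, j;

    for (i = 0; i < height; i++) {
        const uint8_t *dither = dithers[src_depth-9][i&7];
        for (j = 0; j < length; j++)
            dst[j] = (src[j] + dither[j&7]) * scale >> shift;
        dst += dstStride;
        src += srcStride;
    }
}
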
@@ -1701,141 +1918,72 @@ static int planarCopyWrapper(SwsContext *c, const uint8_t* src[], int srcStride[
length*=2;
fillPlane(dst[plane], dstStride[plane], length, height, y, (plane==3) ? 255 : 128);
} else {
- if(is9_OR_10BPS(c->srcFormat)) {
+ if(isNBPS(c->srcFormat) || isNBPS(c->dstFormat)
+ || (is16BPS(c->srcFormat) != is16BPS(c->dstFormat))
+ ) {
const int src_depth = av_pix_fmt_descriptors[c->srcFormat].comp[plane].depth_minus1+1;
const int dst_depth = av_pix_fmt_descriptors[c->dstFormat].comp[plane].depth_minus1+1;
const uint16_t *srcPtr2 = (const uint16_t*)srcPtr;
+ uint16_t *dstPtr2 = (uint16_t*)dstPtr;
- if (is16BPS(c->dstFormat)) {
- uint16_t *dstPtr2 = (uint16_t*)dstPtr;
-#define COPY9_OR_10TO16(rfunc, wfunc) \
- for (i = 0; i < height; i++) { \
- for (j = 0; j < length; j++) { \
- int srcpx = rfunc(&srcPtr2[j]); \
- wfunc(&dstPtr2[j], (srcpx<<(16-src_depth)) | (srcpx>>(2*src_depth-16))); \
- } \
- dstPtr2 += dstStride[plane]/2; \
- srcPtr2 += srcStride[plane]/2; \
- }
- if (isBE(c->dstFormat)) {
- if (isBE(c->srcFormat)) {
- COPY9_OR_10TO16(AV_RB16, AV_WB16);
- } else {
- COPY9_OR_10TO16(AV_RL16, AV_WB16);
- }
+ if (dst_depth == 8) {
+ if(isBE(c->srcFormat) == HAVE_BIGENDIAN){
+ DITHER_COPY(dstPtr, dstStride[plane], srcPtr2, srcStride[plane]/2, , )
} else {
- if (isBE(c->srcFormat)) {
- COPY9_OR_10TO16(AV_RB16, AV_WL16);
- } else {
- COPY9_OR_10TO16(AV_RL16, AV_WL16);
- }
- }
- } else if (is9_OR_10BPS(c->dstFormat)) {
- uint16_t *dstPtr2 = (uint16_t*)dstPtr;
-#define COPY9_OR_10TO9_OR_10(loop) \
- for (i = 0; i < height; i++) { \
- for (j = 0; j < length; j++) { \
- loop; \
- } \
- dstPtr2 += dstStride[plane]/2; \
- srcPtr2 += srcStride[plane]/2; \
- }
-#define COPY9_OR_10TO9_OR_10_2(rfunc, wfunc) \
- if (dst_depth > src_depth) { \
- COPY9_OR_10TO9_OR_10(int srcpx = rfunc(&srcPtr2[j]); \
- wfunc(&dstPtr2[j], (srcpx << 1) | (srcpx >> 9))); \
- } else if (dst_depth < src_depth) { \
- COPY9_OR_10TO9_OR_10(wfunc(&dstPtr2[j], rfunc(&srcPtr2[j]) >> 1)); \
- } else { \
- COPY9_OR_10TO9_OR_10(wfunc(&dstPtr2[j], rfunc(&srcPtr2[j]))); \
+ DITHER_COPY(dstPtr, dstStride[plane], srcPtr2, srcStride[plane]/2, av_bswap16, )
}
- if (isBE(c->dstFormat)) {
- if (isBE(c->srcFormat)) {
- COPY9_OR_10TO9_OR_10_2(AV_RB16, AV_WB16);
+ } else if (src_depth == 8) {
+ for (i = 0; i < height; i++) {
+ if(isBE(c->dstFormat)){
+ for (j = 0; j < length; j++)
+ AV_WB16(&dstPtr2[j], (srcPtr[j]<<(dst_depth-8)) |
+ (srcPtr[j]>>(2*8-dst_depth)));
} else {
- COPY9_OR_10TO9_OR_10_2(AV_RL16, AV_WB16);
+ for (j = 0; j < length; j++)
+ AV_WL16(&dstPtr2[j], (srcPtr[j]<<(dst_depth-8)) |
+ (srcPtr[j]>>(2*8-dst_depth)));
}
- } else {
- if (isBE(c->srcFormat)) {
- COPY9_OR_10TO9_OR_10_2(AV_RB16, AV_WL16);
+ dstPtr2 += dstStride[plane]/2;
+ srcPtr += srcStride[plane];
+ }
+ } else if (src_depth <= dst_depth) {
+ for (i = 0; i < height; i++) {
+#define COPY_UP(r,w) \
+ for (j = 0; j < length; j++){ \
+ unsigned int v= r(&srcPtr2[j]);\
+ w(&dstPtr2[j], (v<<(dst_depth-src_depth)) | \
+ (v>>(2*src_depth-dst_depth)));\
+ }
+ if(isBE(c->srcFormat)){
+ if(isBE(c->dstFormat)){
+ COPY_UP(AV_RB16, AV_WB16)
+ } else {
+ COPY_UP(AV_RB16, AV_WL16)
+ }
} else {
- COPY9_OR_10TO9_OR_10_2(AV_RL16, AV_WL16);
+ if(isBE(c->dstFormat)){
+ COPY_UP(AV_RL16, AV_WB16)
+ } else {
+ COPY_UP(AV_RL16, AV_WL16)
+ }
}
+ dstPtr2 += dstStride[plane]/2;
+ srcPtr2 += srcStride[plane]/2;
}
} else {
- // FIXME Maybe dither instead.
-#define COPY9_OR_10TO8(rfunc) \
- for (i = 0; i < height; i++) { \
- for (j = 0; j < length; j++) { \
- dstPtr[j] = rfunc(&srcPtr2[j])>>(src_depth-8); \
- } \
- dstPtr += dstStride[plane]; \
- srcPtr2 += srcStride[plane]/2; \
- }
- if (isBE(c->srcFormat)) {
- COPY9_OR_10TO8(AV_RB16);
- } else {
- COPY9_OR_10TO8(AV_RL16);
- }
- }
- } else if(is9_OR_10BPS(c->dstFormat)) {
- const int dst_depth = av_pix_fmt_descriptors[c->dstFormat].comp[plane].depth_minus1+1;
- uint16_t *dstPtr2 = (uint16_t*)dstPtr;
-
- if (is16BPS(c->srcFormat)) {
- const uint16_t *srcPtr2 = (const uint16_t*)srcPtr;
-#define COPY16TO9_OR_10(rfunc, wfunc) \
- for (i = 0; i < height; i++) { \
- for (j = 0; j < length; j++) { \
- wfunc(&dstPtr2[j], rfunc(&srcPtr2[j])>>(16-dst_depth)); \
- } \
- dstPtr2 += dstStride[plane]/2; \
- srcPtr2 += srcStride[plane]/2; \
- }
- if (isBE(c->dstFormat)) {
- if (isBE(c->srcFormat)) {
- COPY16TO9_OR_10(AV_RB16, AV_WB16);
+ if(isBE(c->srcFormat) == HAVE_BIGENDIAN){
+ if(isBE(c->dstFormat) == HAVE_BIGENDIAN){
+ DITHER_COPY(dstPtr2, dstStride[plane]/2, srcPtr2, srcStride[plane]/2, , )
} else {
- COPY16TO9_OR_10(AV_RL16, AV_WB16);
+ DITHER_COPY(dstPtr2, dstStride[plane]/2, srcPtr2, srcStride[plane]/2, , av_bswap16)
}
- } else {
- if (isBE(c->srcFormat)) {
- COPY16TO9_OR_10(AV_RB16, AV_WL16);
+ }else{
+ if(isBE(c->dstFormat) == HAVE_BIGENDIAN){
+ DITHER_COPY(dstPtr2, dstStride[plane]/2, srcPtr2, srcStride[plane]/2, av_bswap16, )
} else {
- COPY16TO9_OR_10(AV_RL16, AV_WL16);
+ DITHER_COPY(dstPtr2, dstStride[plane]/2, srcPtr2, srcStride[plane]/2, av_bswap16, av_bswap16)
}
}
- } else /* 8bit */ {
-#define COPY8TO9_OR_10(wfunc) \
- for (i = 0; i < height; i++) { \
- for (j = 0; j < length; j++) { \
- const int srcpx = srcPtr[j]; \
- wfunc(&dstPtr2[j], (srcpx<<(dst_depth-8)) | (srcpx >> (16-dst_depth))); \
- } \
- dstPtr2 += dstStride[plane]/2; \
- srcPtr += srcStride[plane]; \
- }
- if (isBE(c->dstFormat)) {
- COPY8TO9_OR_10(AV_WB16);
- } else {
- COPY8TO9_OR_10(AV_WL16);
- }
- }
- } else if(is16BPS(c->srcFormat) && !is16BPS(c->dstFormat)) {
- if (!isBE(c->srcFormat)) srcPtr++;
- for (i=0; i<height; i++) {
- for (j=0; j<length; j++) dstPtr[j] = srcPtr[j<<1];
- srcPtr+= srcStride[plane];
- dstPtr+= dstStride[plane];
- }
- } else if(!is16BPS(c->srcFormat) && is16BPS(c->dstFormat)) {
- for (i=0; i<height; i++) {
- for (j=0; j<length; j++) {
- dstPtr[ j<<1 ] = srcPtr[j];
- dstPtr[(j<<1)+1] = srcPtr[j];
- }
- srcPtr+= srcStride[plane];
- dstPtr+= dstStride[plane];
}
} else if(is16BPS(c->srcFormat) && is16BPS(c->dstFormat)
&& isBE(c->srcFormat) != isBE(c->dstFormat)) {
@@ -2061,9 +2209,10 @@ int sws_scale(SwsContext *c, const uint8_t* const src[], const int srcStride[],
if (usePal(c->srcFormat)) {
for (i=0; i<256; i++) {
- int p, r, g, b,y,u,v;
+ int p, r, g, b, y, u, v, a = 0xff;
if(c->srcFormat == PIX_FMT_PAL8) {
p=((const uint32_t*)(src[1]))[i];
+ a= (p>>24)&0xFF;
r= (p>>16)&0xFF;
g= (p>> 8)&0xFF;
b= p &0xFF;
@@ -2079,7 +2228,7 @@ int sws_scale(SwsContext *c, const uint8_t* const src[], const int srcStride[],
r= (i>>3 )*255;
g= ((i>>1)&3)*85;
b= (i&1 )*255;
- } else if(c->srcFormat == PIX_FMT_GRAY8 || c->srcFormat == PIX_FMT_Y400A) {
+ } else if(c->srcFormat == PIX_FMT_GRAY8 || c->srcFormat == PIX_FMT_GRAY8A) {
r = g = b = i;
} else {
assert(c->srcFormat == PIX_FMT_BGR4_BYTE);
@@ -2090,33 +2239,33 @@ int sws_scale(SwsContext *c, const uint8_t* const src[], const int srcStride[],
y= av_clip_uint8((RY*r + GY*g + BY*b + ( 33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
u= av_clip_uint8((RU*r + GU*g + BU*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
v= av_clip_uint8((RV*r + GV*g + BV*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
- c->pal_yuv[i]= y + (u<<8) + (v<<16);
+ c->pal_yuv[i]= y + (u<<8) + (v<<16) + (a<<24);
switch(c->dstFormat) {
case PIX_FMT_BGR32:
#if !HAVE_BIGENDIAN
case PIX_FMT_RGB24:
#endif
- c->pal_rgb[i]= r + (g<<8) + (b<<16);
+ c->pal_rgb[i]= r + (g<<8) + (b<<16) + (a<<24);
break;
case PIX_FMT_BGR32_1:
#if HAVE_BIGENDIAN
case PIX_FMT_BGR24:
#endif
- c->pal_rgb[i]= (r + (g<<8) + (b<<16)) << 8;
+ c->pal_rgb[i]= a + (r<<8) + (g<<16) + (b<<24);
break;
case PIX_FMT_RGB32_1:
#if HAVE_BIGENDIAN
case PIX_FMT_RGB24:
#endif
- c->pal_rgb[i]= (b + (g<<8) + (r<<16)) << 8;
+ c->pal_rgb[i]= a + (b<<8) + (g<<16) + (r<<24);
break;
case PIX_FMT_RGB32:
#if !HAVE_BIGENDIAN
case PIX_FMT_BGR24:
#endif
default:
- c->pal_rgb[i]= b + (g<<8) + (r<<16);
+ c->pal_rgb[i]= b + (g<<8) + (r<<16) + (a<<24);
}
}
}
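
With the alpha byte now carried through, each PAL8 entry p (packed as 0xAARRGGBB) is repacked per destination format as shown above. As a worked example, p = 0x80FF8040 (a=0x80, r=0xFF, g=0x80, b=0x40) gives pal_rgb[i] = 0x804080FF in the PIX_FMT_BGR32 branch. The helper below merely restates that one packing; pack_bgr32_pal is a hypothetical name, not part of the patch.

#include <stdint.h>

static uint32_t pack_bgr32_pal(uint32_t p)   /* p: PAL8 entry, 0xAARRGGBB */
{
    uint32_t a = (p >> 24) & 0xFF, r = (p >> 16) & 0xFF,
             g = (p >>  8) & 0xFF, b =  p        & 0xFF;
    return r + (g << 8) + (b << 16) + (a << 24);   /* PIX_FMT_BGR32 branch above */
}
/* pack_bgr32_pal(0x80FF8040) == 0x804080FF */
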
@@ -2161,6 +2310,14 @@ int sws_scale(SwsContext *c, const uint8_t* const src[], const int srcStride[],
}
}
+#if LIBSWSCALE_VERSION_MAJOR < 1
+int sws_scale_ordered(SwsContext *c, const uint8_t* const src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+ return sws_scale(c, src, srcStride, srcSliceY, srcSliceH, dst, dstStride);
+}
+#endif
+
/* Convert the palette to the same packed 32-bit format as the palette */
void sws_convertPalette8ToPacked32(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
{