From c12f7b2d2cf45b18dbabb207bcf142e5f170c773 Mon Sep 17 00:00:00 2001
From: Ramiro Polla
Date: Tue, 14 Sep 2010 13:12:11 +0000
Subject: rgb2rgb: don't misuse HAVE_* defines

Introduce and use COMPILE_TEMPLATE_* instead.

Originally committed as revision 32241 to svn://svn.mplayerhq.hu/mplayer/trunk/libswscale
---
 libswscale/rgb2rgb_template.c | 134 +++++++++++++++++++++---------------------
 1 file changed, 67 insertions(+), 67 deletions(-)

diff --git a/libswscale/rgb2rgb_template.c b/libswscale/rgb2rgb_template.c
index 34eeb3cfe7..6e7436651b 100644
--- a/libswscale/rgb2rgb_template.c
+++ b/libswscale/rgb2rgb_template.c
@@ -33,30 +33,30 @@
 #undef MMREG_SIZE
 #undef PAVGB
 
-#if HAVE_SSE2
+#if COMPILE_TEMPLATE_SSE2
 #define MMREG_SIZE 16
 #else
 #define MMREG_SIZE 8
 #endif
 
-#if HAVE_AMD3DNOW
+#if COMPILE_TEMPLATE_AMD3DNOW
 #define PREFETCH "prefetch"
 #define PAVGB "pavgusb"
-#elif HAVE_MMX2
+#elif COMPILE_TEMPLATE_MMX2
 #define PREFETCH "prefetchnta"
 #define PAVGB "pavgb"
 #else
 #define PREFETCH " # nop"
 #endif
 
-#if HAVE_AMD3DNOW
+#if COMPILE_TEMPLATE_AMD3DNOW
 /* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
 #define EMMS "femms"
 #else
 #define EMMS "emms"
 #endif
 
-#if HAVE_MMX2
+#if COMPILE_TEMPLATE_MMX2
 #define MOVNTQ "movntq"
 #define SFENCE "sfence"
 #else
@@ -69,11 +69,11 @@ static inline void RENAME(rgb24tobgr32)(const uint8_t *src, uint8_t *dst, long s
     uint8_t *dest = dst;
     const uint8_t *s = src;
     const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     const uint8_t *mm_end;
 #endif
     end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
     mm_end = end - 23;
     __asm__ volatile("movq %0, %%mm7"::"m"(mask32a):"memory");
@@ -164,11 +164,11 @@ static inline void RENAME(rgb32tobgr24)(const uint8_t *src, uint8_t *dst, long s
     uint8_t *dest = dst;
     const uint8_t *s = src;
     const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     const uint8_t *mm_end;
 #endif
     end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
     mm_end = end - 31;
     while (s < mm_end) {
@@ -222,7 +222,7 @@ static inline void RENAME(rgb15to16)(const uint8_t *src, uint8_t *dst, long src_
     register const uint8_t *end;
     const uint8_t *mm_end;
     end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(PREFETCH" %0"::"m"(*s));
     __asm__ volatile("movq %0, %%mm4"::"m"(mask15s));
     mm_end = end - 15;
@@ -268,7 +268,7 @@ static inline void RENAME(rgb16to15)(const uint8_t *src, uint8_t *dst, long src_
     register const uint8_t *end;
     const uint8_t *mm_end;
     end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(PREFETCH" %0"::"m"(*s));
     __asm__ volatile("movq %0, %%mm7"::"m"(mask15rg));
     __asm__ volatile("movq %0, %%mm6"::"m"(mask15b));
@@ -316,12 +316,12 @@ static inline void RENAME(rgb32to16)(const uint8_t *src, uint8_t *dst, long src_
 {
     const uint8_t *s = src;
     const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     const uint8_t *mm_end;
 #endif
     uint16_t *d = (uint16_t *)dst;
     end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     mm_end = end - 15;
 #if 1 //is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster)
     __asm__ volatile(
@@ -412,12 +412,12 @@ static inline void RENAME(rgb32tobgr16)(const uint8_t *src, uint8_t *dst, long s
 {
     const uint8_t *s = src;
     const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     const uint8_t *mm_end;
 #endif
     uint16_t *d = (uint16_t *)dst;
     end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
     __asm__ volatile(
         "movq %0, %%mm7 \n\t"
@@ -471,12 +471,12 @@ static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, long src_
 {
     const uint8_t *s = src;
     const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     const uint8_t *mm_end;
 #endif
     uint16_t *d = (uint16_t *)dst;
     end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     mm_end = end - 15;
 #if 1 //is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster)
     __asm__ volatile(
@@ -567,12 +567,12 @@ static inline void RENAME(rgb32tobgr15)(const uint8_t *src, uint8_t *dst, long s
 {
     const uint8_t *s = src;
     const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     const uint8_t *mm_end;
 #endif
     uint16_t *d = (uint16_t *)dst;
     end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
     __asm__ volatile(
         "movq %0, %%mm7 \n\t"
@@ -626,12 +626,12 @@ static inline void RENAME(rgb24tobgr16)(const uint8_t *src, uint8_t *dst, long s
 {
     const uint8_t *s = src;
     const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     const uint8_t *mm_end;
 #endif
     uint16_t *d = (uint16_t *)dst;
     end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
     __asm__ volatile(
         "movq %0, %%mm7 \n\t"
@@ -687,12 +687,12 @@ static inline void RENAME(rgb24to16)(const uint8_t *src, uint8_t *dst, long src_
 {
     const uint8_t *s = src;
     const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     const uint8_t *mm_end;
 #endif
     uint16_t *d = (uint16_t *)dst;
     end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
     __asm__ volatile(
         "movq %0, %%mm7 \n\t"
@@ -748,12 +748,12 @@ static inline void RENAME(rgb24tobgr15)(const uint8_t *src, uint8_t *dst, long s
 {
     const uint8_t *s = src;
     const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     const uint8_t *mm_end;
 #endif
     uint16_t *d = (uint16_t *)dst;
     end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
     __asm__ volatile(
         "movq %0, %%mm7 \n\t"
@@ -809,12 +809,12 @@ static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, long src_
 {
     const uint8_t *s = src;
     const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     const uint8_t *mm_end;
 #endif
     uint16_t *d = (uint16_t *)dst;
     end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
     __asm__ volatile(
         "movq %0, %%mm7 \n\t"
@@ -890,13 +890,13 @@ static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, long src_
 static inline void RENAME(rgb15tobgr24)(const uint8_t *src, uint8_t *dst, long src_size)
 {
     const uint16_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     const uint16_t *mm_end;
 #endif
     uint8_t *d = dst;
     const uint16_t *s = (const uint16_t*)src;
     end = s + src_size/2;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
     mm_end = end - 7;
     while (s < mm_end) {
@@ -997,13 +997,13 @@ static inline void RENAME(rgb15tobgr24)(const uint8_t *src, uint8_t *dst, long s
 static inline void RENAME(rgb16tobgr24)(const uint8_t *src, uint8_t *dst, long src_size)
 {
     const uint16_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     const uint16_t *mm_end;
 #endif
     uint8_t *d = (uint8_t *)dst;
     const uint16_t *s = (const uint16_t *)src;
     end = s + src_size/2;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
     mm_end = end - 7;
     while (s < mm_end) {
@@ -1122,13 +1122,13 @@ static inline void RENAME(rgb16tobgr24)(const uint8_t *src, uint8_t *dst, long s
 static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, long src_size)
 {
     const uint16_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     const uint16_t *mm_end;
 #endif
     uint8_t *d = dst;
     const uint16_t *s = (const uint16_t *)src;
     end = s + src_size/2;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
     __asm__ volatile("pxor %%mm7,%%mm7 \n\t":::"memory");
     __asm__ volatile("pcmpeqd %%mm6,%%mm6 \n\t":::"memory");
@@ -1175,13 +1175,13 @@ static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, long src_
 static inline void RENAME(rgb16to32)(const uint8_t *src, uint8_t *dst, long src_size)
 {
     const uint16_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     const uint16_t *mm_end;
 #endif
     uint8_t *d = dst;
     const uint16_t *s = (const uint16_t*)src;
     end = s + src_size/2;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
     __asm__ volatile("pxor %%mm7,%%mm7 \n\t":::"memory");
     __asm__ volatile("pcmpeqd %%mm6,%%mm6 \n\t":::"memory");
@@ -1230,7 +1230,7 @@ static inline void RENAME(shuffle_bytes_2103)(const uint8_t *src, uint8_t *dst,
     x86_reg idx = 15 - src_size;
     const uint8_t *s = src-idx;
     uint8_t *d = dst-idx;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(
         "test %0, %0 \n\t"
         "jns 2f \n\t"
@@ -1244,7 +1244,7 @@ static inline void RENAME(shuffle_bytes_2103)(const uint8_t *src, uint8_t *dst,
         PREFETCH" 32(%1, %0) \n\t"
         "movq (%1, %0), %%mm0 \n\t"
         "movq 8(%1, %0), %%mm1 \n\t"
-# if HAVE_MMX2
+# if COMPILE_TEMPLATE_MMX2
         "pshufw $177, %%mm0, %%mm3 \n\t"
         "pshufw $177, %%mm1, %%mm5 \n\t"
         "pand %%mm7, %%mm0 \n\t"
@@ -1292,7 +1292,7 @@ static inline void RENAME(shuffle_bytes_2103)(const uint8_t *src, uint8_t *dst,
 static inline void RENAME(rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long src_size)
 {
     unsigned i;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     x86_reg mmx_size= 23 - src_size;
     __asm__ volatile (
         "test %%"REG_a", %%"REG_a" \n\t"
@@ -1365,7 +1365,7 @@ static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *u
     long y;
     const x86_reg chromWidth= width>>1;
     for (y=0; y<height; y++) {
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     const x86_reg chromWidth= width>>1;
     for (y=0; y
     const x86_reg chromWidth= width>>1;
     for (y=0; y
     const x86_reg chromWidth= width>>1;
     for (y=0; y
     const x86_reg chromWidth= width>>1;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     for (y=0; y
>1);
        uint8_t* d=dst1+dstStride1*y;
        x=0;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
        for (;x
>1);
        uint8_t* d=dst2+dstStride2*y;
        x=0;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
        for (;x
>2);
        uint8_t* d=dst+dstStride*y;
        x=0;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
        for (;x
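
Note on the rationale: the HAVE_* macros come from config.h and describe what the build host supports for the whole tree, so a template file that gets included several times cannot use them as a per-instantiation switch without shadowing the configuration. The COMPILE_TEMPLATE_* macros are instead (re)defined by the including file before each inclusion. A minimal sketch of that pattern, assuming an including file in the style of rgb2rgb.c; the _C/_MMX/_MMX2 suffixes and the exact set of flavors below are illustrative, not taken from this patch:

/* Hypothetical instantiation file (e.g. rgb2rgb.c): compile the template
 * once per CPU flavor, with every COMPILE_TEMPLATE_* macro pinned to 0
 * or 1 so that "#if COMPILE_TEMPLATE_*" picks the right code path
 * independently of the build-wide HAVE_* flags. */

/* Plain C version. */
#undef  RENAME
#undef  COMPILE_TEMPLATE_MMX
#undef  COMPILE_TEMPLATE_MMX2
#undef  COMPILE_TEMPLATE_AMD3DNOW
#undef  COMPILE_TEMPLATE_SSE2
#define COMPILE_TEMPLATE_MMX      0
#define COMPILE_TEMPLATE_MMX2     0
#define COMPILE_TEMPLATE_AMD3DNOW 0
#define COMPILE_TEMPLATE_SSE2     0
#define RENAME(a) a ## _C
#include "rgb2rgb_template.c"

/* MMX version: same template, different macro settings and suffix. */
#undef  RENAME
#undef  COMPILE_TEMPLATE_MMX
#define COMPILE_TEMPLATE_MMX      1
#define RENAME(a) a ## _MMX
#include "rgb2rgb_template.c"

/* MMX2 version (keeps COMPILE_TEMPLATE_MMX set, since MMX2 implies MMX). */
#undef  RENAME
#undef  COMPILE_TEMPLATE_MMX2
#define COMPILE_TEMPLATE_MMX2     1
#define RENAME(a) a ## _MMX2
#include "rgb2rgb_template.c"

Keeping the per-instantiation switches in their own COMPILE_TEMPLATE_* namespace means the config.h HAVE_* values never have to be redefined locally, which is the misuse the subject line refers to.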