From 67d80a54217f93a50b7a52449fad12215b43c9e8 Mon Sep 17 00:00:00 2001
From: "Ronald S. Bultje" <rsbultje@gmail.com>
Date: Thu, 2 Jun 2011 20:04:04 -0700
Subject: swscale: split out ppc _template.c files from main swscale.c.

---
 libswscale/ppc/swscale_altivec.c          | 419 ++++++++++++++++++++++++++++++
 libswscale/ppc/swscale_altivec_template.c | 409 -----------------------------
 libswscale/ppc/yuv2rgb_altivec.c          |   1 +
 libswscale/ppc/yuv2rgb_altivec.h          |  34 +++
 4 files changed, 454 insertions(+), 409 deletions(-)
 create mode 100644 libswscale/ppc/swscale_altivec.c
 delete mode 100644 libswscale/ppc/swscale_altivec_template.c
 create mode 100644 libswscale/ppc/yuv2rgb_altivec.h

diff --git a/libswscale/ppc/swscale_altivec.c b/libswscale/ppc/swscale_altivec.c
new file mode 100644
index 0000000000..acfdc94cd8
--- /dev/null
+++ b/libswscale/ppc/swscale_altivec.c
@@ -0,0 +1,419 @@
+/*
+ * AltiVec-enhanced yuv2yuvX
+ *
+ * Copyright (C) 2004 Romain Dolbeau <romain@dolbeau.org>
+ * based on the equivalent C code in swscale.c
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <inttypes.h>
+#include "config.h"
+#include "libswscale/swscale.h"
+#include "libswscale/swscale_internal.h"
+#include "libavutil/cpu.h"
+#include "yuv2rgb_altivec.h"
+
+#define vzero vec_splat_s32(0)
+
+static inline void
+altivec_packIntArrayToCharArray(int *val, uint8_t* dest, int dstW)
+{
+    register int i;
+    vector unsigned int altivec_vectorShiftInt19 =
+        vec_add(vec_splat_u32(10), vec_splat_u32(9));
+    if ((unsigned int)dest % 16) {
+        /* badly aligned store, we force store alignment */
+        /* and will handle load misalignment on val w/ vec_perm */
+        vector unsigned char perm1;
+        vector signed int v1;
+        for (i = 0 ; (i < dstW) && (((unsigned int)dest + i) % 16) ; i++) {
+            int t = val[i] >> 19;
+            dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t);
+        }
+        perm1 = vec_lvsl(i << 2, val);
+        v1 = vec_ld(i << 2, val);
+        for ( ; i < (dstW - 15); i+=16) {
+            int offset = i << 2;
+            vector signed int v2 = vec_ld(offset + 16, val);
+            vector signed int v3 = vec_ld(offset + 32, val);
+            vector signed int v4 = vec_ld(offset + 48, val);
+            vector signed int v5 = vec_ld(offset + 64, val);
+            vector signed int v12 = vec_perm(v1, v2, perm1);
+            vector signed int v23 = vec_perm(v2, v3, perm1);
+            vector signed int v34 = vec_perm(v3, v4, perm1);
+            vector signed int v45 = vec_perm(v4, v5, perm1);
+
+            vector signed int vA = vec_sra(v12, altivec_vectorShiftInt19);
+            vector signed int vB = vec_sra(v23, altivec_vectorShiftInt19);
+            vector signed int vC = vec_sra(v34, altivec_vectorShiftInt19);
+            vector signed int vD = vec_sra(v45, altivec_vectorShiftInt19);
+            vector unsigned short vs1 = vec_packsu(vA, vB);
+            vector unsigned short vs2 = vec_packsu(vC, vD);
+            vector unsigned char vf = vec_packsu(vs1, vs2);
+            vec_st(vf, i, dest);
+            v1 = v5;
+        }
+    } else { // dest is properly aligned, great
+        for (i = 0; i < (dstW - 15); i+=16) {
+            int offset = i << 2;
+            vector signed int v1 = vec_ld(offset, val);
+            vector signed int v2 = vec_ld(offset + 16, val);
+            vector signed int v3 = vec_ld(offset + 32, val);
+            vector signed int v4 = vec_ld(offset + 48, val);
+            vector signed int v5 = vec_sra(v1, altivec_vectorShiftInt19);
+            vector signed int v6 = vec_sra(v2, altivec_vectorShiftInt19);
+            vector signed int v7 = vec_sra(v3, altivec_vectorShiftInt19);
+            vector signed int v8 = vec_sra(v4, altivec_vectorShiftInt19);
+            vector unsigned short vs1 = vec_packsu(v5, v6);
+            vector unsigned short vs2 = vec_packsu(v7, v8);
+            vector unsigned char vf = vec_packsu(vs1, vs2);
+            vec_st(vf, i, dest);
+        }
+    }
+    for ( ; i < dstW ; i++) {
+        int t = val[i] >> 19;
+        dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t);
+    }
+}
+
+static void
+yuv2yuvX_altivec_real(SwsContext *c,
+                      const int16_t *lumFilter, const int16_t **lumSrc,
+                      int lumFilterSize, const int16_t *chrFilter,
+                      const int16_t **chrUSrc, const int16_t **chrVSrc,
+                      int chrFilterSize, const int16_t **alpSrc,
+                      uint8_t *dest, uint8_t *uDest,
+                      uint8_t *vDest, uint8_t *aDest,
+                      int dstW, int chrDstW)
+{
+    const vector signed int vini = {(1 << 18), (1 << 18), (1 << 18), (1 << 18)};
+    register int i, j;
+    {
+        DECLARE_ALIGNED(16, int, val)[dstW];
+
+        for (i = 0; i < (dstW -7); i+=4) {
+            vec_st(vini, i << 2, val);
+        }
+        for (; i < dstW; i++) {
+            val[i] = (1 << 18);
+        }
+
+        for (j = 0; j < lumFilterSize; j++) {
+            vector signed short l1, vLumFilter = vec_ld(j << 1, lumFilter);
+            vector unsigned char perm, perm0 = vec_lvsl(j << 1, lumFilter);
+            vLumFilter = vec_perm(vLumFilter, vLumFilter, perm0);
+            vLumFilter = vec_splat(vLumFilter, 0); // lumFilter[j] is loaded 8 times in vLumFilter
+
+            perm = vec_lvsl(0, lumSrc[j]);
+            l1 = vec_ld(0, lumSrc[j]);
+
+            for (i = 0; i < (dstW - 7); i+=8) {
+                int offset = i << 2;
+                vector signed short l2 = vec_ld((i << 1) + 16, lumSrc[j]);
+
+                vector signed int v1 = vec_ld(offset, val);
+                vector signed int v2 = vec_ld(offset + 16, val);
+
+                vector signed short ls = vec_perm(l1, l2, perm); // lumSrc[j][i] ... lumSrc[j][i+7]
+
+                vector signed int i1 = vec_mule(vLumFilter, ls);
+                vector signed int i2 = vec_mulo(vLumFilter, ls);
+
+                vector signed int vf1 = vec_mergeh(i1, i2);
+                vector signed int vf2 = vec_mergel(i1, i2); // lumSrc[j][i] * lumFilter[j] ... lumSrc[j][i+7] * lumFilter[j]
+
+                vector signed int vo1 = vec_add(v1, vf1);
+                vector signed int vo2 = vec_add(v2, vf2);
+
+                vec_st(vo1, offset, val);
+                vec_st(vo2, offset + 16, val);
+
+                l1 = l2;
+            }
+            for ( ; i < dstW; i++) {
+                val[i] += lumSrc[j][i] * lumFilter[j];
+            }
+        }
+        altivec_packIntArrayToCharArray(val, dest, dstW);
+    }
+    if (uDest != 0) {
+        DECLARE_ALIGNED(16, int, u)[chrDstW];
+        DECLARE_ALIGNED(16, int, v)[chrDstW];
+
+        for (i = 0; i < (chrDstW -7); i+=4) {
+            vec_st(vini, i << 2, u);
+            vec_st(vini, i << 2, v);
+        }
+        for (; i < chrDstW; i++) {
+            u[i] = (1 << 18);
+            v[i] = (1 << 18);
+        }
+
+        for (j = 0; j < chrFilterSize; j++) {
+            vector signed short l1, l1_V, vChrFilter = vec_ld(j << 1, chrFilter);
+            vector unsigned char perm, perm0 = vec_lvsl(j << 1, chrFilter);
+            vChrFilter = vec_perm(vChrFilter, vChrFilter, perm0);
+            vChrFilter = vec_splat(vChrFilter, 0); // chrFilter[j] is loaded 8 times in vChrFilter
+
+            perm = vec_lvsl(0, chrUSrc[j]);
+            l1 = vec_ld(0, chrUSrc[j]);
+            l1_V = vec_ld(0, chrVSrc[j]);
+
+            for (i = 0; i < (chrDstW - 7); i+=8) {
+                int offset = i << 2;
+                vector signed short l2 = vec_ld((i << 1) + 16, chrUSrc[j]);
+                vector signed short l2_V = vec_ld((i << 1) + 16, chrVSrc[j]);
+
+                vector signed int v1 = vec_ld(offset, u);
+                vector signed int v2 = vec_ld(offset + 16, u);
+                vector signed int v1_V = vec_ld(offset, v);
+                vector signed int v2_V = vec_ld(offset + 16, v);
+
+                vector signed short ls = vec_perm(l1, l2, perm); // chrUSrc[j][i] ... chrUSrc[j][i+7]
+                vector signed short ls_V = vec_perm(l1_V, l2_V, perm); // chrVSrc[j][i] ... chrVSrc[j][i+7]
+
+                vector signed int i1 = vec_mule(vChrFilter, ls);
+                vector signed int i2 = vec_mulo(vChrFilter, ls);
+                vector signed int i1_V = vec_mule(vChrFilter, ls_V);
+                vector signed int i2_V = vec_mulo(vChrFilter, ls_V);
+
+                vector signed int vf1 = vec_mergeh(i1, i2);
+                vector signed int vf2 = vec_mergel(i1, i2); // chrUSrc[j][i] * chrFilter[j] ... chrUSrc[j][i+7] * chrFilter[j]
+                vector signed int vf1_V = vec_mergeh(i1_V, i2_V);
+                vector signed int vf2_V = vec_mergel(i1_V, i2_V); // chrVSrc[j][i] * chrFilter[j] ... chrVSrc[j][i+7] * chrFilter[j]
+
+                vector signed int vo1 = vec_add(v1, vf1);
+                vector signed int vo2 = vec_add(v2, vf2);
+                vector signed int vo1_V = vec_add(v1_V, vf1_V);
+                vector signed int vo2_V = vec_add(v2_V, vf2_V);
+
+                vec_st(vo1, offset, u);
+                vec_st(vo2, offset + 16, u);
+                vec_st(vo1_V, offset, v);
+                vec_st(vo2_V, offset + 16, v);
+
+                l1 = l2;
+                l1_V = l2_V;
+            }
+            for ( ; i < chrDstW; i++) {
+                u[i] += chrUSrc[j][i] * chrFilter[j];
+                v[i] += chrVSrc[j][i] * chrFilter[j];
+            }
+        }
+        altivec_packIntArrayToCharArray(u, uDest, chrDstW);
+        altivec_packIntArrayToCharArray(v, vDest, chrDstW);
+    }
+}
+
+static void hScale_altivec_real(int16_t *dst, int dstW,
+                                const uint8_t *src, int srcW,
+                                int xInc, const int16_t *filter,
+                                const int16_t *filterPos, int filterSize)
+{
+    register int i;
+    DECLARE_ALIGNED(16, int, tempo)[4];
+
+    if (filterSize % 4) {
+        for (i=0; i<dstW; i++) {
+            register int j;
+            register int srcPos = filterPos[i];
+            register int val = 0;
+            for (j=0; j<filterSize; j++) {
+                val += ((int)src[srcPos + j])*filter[filterSize*i + j];
+            }
+            dst[i] = FFMIN(val>>7, (1<<15)-1);
+        }
+    }
+    else
+    switch (filterSize) {
+    case 4:
+    {
+        for (i=0; i<dstW; i++) {
+            register int srcPos = filterPos[i];
+
+            vector unsigned char src_v0 = vec_ld(srcPos, src);
+            vector unsigned char src_v1, src_vF;
+            vector signed short src_v, filter_v;
+            vector signed int val_vEven, val_s;
+            if ((((int)src + srcPos)% 16) > 12) {
+                src_v1 = vec_ld(srcPos + 16, src);
+            }
+            src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));
+
+            src_v = // vec_unpackh sign-extends...
+                (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+            // now put our elements in the even slots
+            src_v = vec_mergeh(src_v, (vector signed short)vzero);
+
+            filter_v = vec_ld(i << 3, filter);
+            // The 3 above is 2 (filterSize == 4) + 1 (sizeof(short) == 2).
+
+            // The neat trick: We only care for half the elements,
+            // high or low depending on (i<<3)%16 (it's 0 or 8 here),
+            // and we're going to use vec_mule, so we choose
+            // carefully how to "unpack" the elements into the even slots.
+            if ((i << 3) % 16)
+                filter_v = vec_mergel(filter_v, (vector signed short)vzero);
+            else
+                filter_v = vec_mergeh(filter_v, (vector signed short)vzero);
+
+            val_vEven = vec_mule(src_v, filter_v);
+            val_s = vec_sums(val_vEven, vzero);
+            vec_st(val_s, 0, tempo);
+            dst[i] = FFMIN(tempo[3]>>7, (1<<15)-1);
+        }
+    }
+    break;
+
+    case 8:
+    {
+        for (i=0; i<dstW; i++) {
+            register int srcPos = filterPos[i];
+
+            vector unsigned char src_v0 = vec_ld(srcPos, src);
+            vector unsigned char src_v1, src_vF;
+            vector signed short src_v, filter_v;
+            vector signed int val_v, val_s;
+            if ((((int)src + srcPos)% 16) > 8) {
+                src_v1 = vec_ld(srcPos + 16, src);
+            }
+            src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));
+
+            src_v = // vec_unpackh sign-extends...
+                (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+            filter_v = vec_ld(i << 4, filter);
+            // the 4 above is 3 (filterSize == 8) + 1 (sizeof(short) == 2)
+
+            val_v = vec_msums(src_v, filter_v, (vector signed int)vzero);
+            val_s = vec_sums(val_v, vzero);
+            vec_st(val_s, 0, tempo);
+            dst[i] = FFMIN(tempo[3]>>7, (1<<15)-1);
+        }
+    }
+    break;
+
+    case 16:
+    {
+        for (i=0; i<dstW; i++) {
+            register int srcPos = filterPos[i];
+
+            vector unsigned char src_v0 = vec_ld(srcPos, src);
+            vector unsigned char src_v1 = vec_ld(srcPos + 16, src);
+            vector unsigned char src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));
+
+            vector signed short src_vA = // vec_unpackh sign-extends...
+                (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+            vector signed short src_vB = // vec_unpackh sign-extends...
+                (vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF));
+
+            vector signed short filter_v0 = vec_ld(i << 5, filter);
+            vector signed short filter_v1 = vec_ld((i << 5) + 16, filter);
+            // the 5 above are 4 (filterSize == 16) + 1 (sizeof(short) == 2)
+
+            vector signed int val_acc = vec_msums(src_vA, filter_v0, (vector signed int)vzero);
+            vector signed int val_v = vec_msums(src_vB, filter_v1, val_acc);
+
+            vector signed int val_s = vec_sums(val_v, vzero);
+
+            vec_st(val_s, 0, tempo);
+            dst[i] = FFMIN(tempo[3]>>7, (1<<15)-1);
+        }
+    }
+    break;
+
+    default:
+    {
+        for (i=0; i<dstW; i++) {
+            register int j;
+            register int srcPos = filterPos[i];
+
+            vector signed int val_s, val_v = (vector signed int)vzero;
+            vector signed short filter_v0R = vec_ld(i * 2 * filterSize, filter);
+            vector unsigned char permF = vec_lvsl((i * 2 * filterSize), filter);
+
+            vector unsigned char src_v0 = vec_ld(srcPos, src);
+            vector unsigned char permS = vec_lvsl(srcPos, src);
+
+            for (j = 0 ; j < filterSize - 15; j += 16) {
+                vector unsigned char src_v1 = vec_ld(srcPos + j + 16, src);
+                vector unsigned char src_vF = vec_perm(src_v0, src_v1, permS);
+
+                vector signed short src_vA = // vec_unpackh sign-extends...
+                    (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+                vector signed short src_vB = // vec_unpackh sign-extends...
+                    (vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF));
+
+                vector signed short filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter);
+                vector signed short filter_v2R = vec_ld((i * 2 * filterSize) + (j * 2) + 32, filter);
+                vector signed short filter_v0 = vec_perm(filter_v0R, filter_v1R, permF);
+                vector signed short filter_v1 = vec_perm(filter_v1R, filter_v2R, permF);
+
+                vector signed int val_acc = vec_msums(src_vA, filter_v0, val_v);
+                val_v = vec_msums(src_vB, filter_v1, val_acc);
+
+                filter_v0R = filter_v2R;
+                src_v0 = src_v1;
+            }
+
+            if (j < filterSize-7) {
+                // loading src_v0 is useless if we load 16 pixels already
+                // so load it here
+                vector unsigned char src_v1, src_vF;
+                vector signed short src_v, filter_v1R, filter_v;
+
+                // it could be a problem if this is a last load of src
+                // but if filterSize % 16 != 0, there is room left
+                if ((((int)src + srcPos)% 16) > 8) {
+                    src_v1 = vec_ld(srcPos + j + 16, src);
+                }
+                src_vF = vec_perm(src_v0, src_v1, permS);
+
+                src_v = // vec_unpackh sign-extends...
+                    (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+                // loading filter_v0R is useless, it's already done above
+                //vector signed short filter_v0R = vec_ld((i * 2 * filterSize) + j, filter);
+                filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter);
+                filter_v = vec_perm(filter_v0R, filter_v1R, permF);
+
+                val_v = vec_msums(src_v, filter_v, val_v);
+            }
+
+            val_s = vec_sums(val_v, vzero);
+
+            vec_st(val_s, 0, tempo);
+            dst[i] = FFMIN(tempo[3]>>7, (1<<15)-1);
+        }
+
+    }
+    }
+}
+
+void ff_sws_init_swScale_altivec(SwsContext *c)
+{
+    if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
+        return;
+
+    c->yuv2yuvX = yuv2yuvX_altivec_real;
+
+    /* The following list of supported dstFormat values should
+     * match what's found in the body of ff_yuv2packedX_altivec() */
+    if (!(c->flags & SWS_BITEXACT) && !c->alpPixBuf &&
+        (c->dstFormat==PIX_FMT_ABGR || c->dstFormat==PIX_FMT_BGRA ||
+         c->dstFormat==PIX_FMT_BGR24 || c->dstFormat==PIX_FMT_RGB24 ||
+         c->dstFormat==PIX_FMT_RGBA || c->dstFormat==PIX_FMT_ARGB)) {
+        c->yuv2packedX = ff_yuv2packedX_altivec;
+    }
+}
diff --git a/libswscale/ppc/swscale_altivec_template.c b/libswscale/ppc/swscale_altivec_template.c
deleted file mode 100644
index 3c31c3e130..0000000000
--- a/libswscale/ppc/swscale_altivec_template.c
+++ /dev/null
@@ -1,409 +0,0 @@
-/*
- * AltiVec-enhanced yuv2yuvX
- *
- * Copyright (C) 2004 Romain Dolbeau <romain@dolbeau.org>
- * based on the equivalent C code in swscale.c
- *
- * This file is part of Libav.
- *
- * Libav is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * Libav is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#define vzero vec_splat_s32(0)
-
-static inline void
-altivec_packIntArrayToCharArray(int *val, uint8_t* dest, int dstW)
-{
-    register int i;
-    vector unsigned int altivec_vectorShiftInt19 =
-        vec_add(vec_splat_u32(10), vec_splat_u32(9));
-    if ((unsigned int)dest % 16) {
-        /* badly aligned store, we force store alignment */
-        /* and will handle load misalignment on val w/ vec_perm */
-        vector unsigned char perm1;
-        vector signed int v1;
-        for (i = 0 ; (i < dstW) && (((unsigned int)dest + i) % 16) ; i++) {
-            int t = val[i] >> 19;
-            dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t);
-        }
-        perm1 = vec_lvsl(i << 2, val);
-        v1 = vec_ld(i << 2, val);
-        for ( ; i < (dstW - 15); i+=16) {
-            int offset = i << 2;
-            vector signed int v2 = vec_ld(offset + 16, val);
-            vector signed int v3 = vec_ld(offset + 32, val);
-            vector signed int v4 = vec_ld(offset + 48, val);
-            vector signed int v5 = vec_ld(offset + 64, val);
-            vector signed int v12 = vec_perm(v1, v2, perm1);
-            vector signed int v23 = vec_perm(v2, v3, perm1);
-            vector signed int v34 = vec_perm(v3, v4, perm1);
-            vector signed int v45 = vec_perm(v4, v5, perm1);
-
-            vector signed int vA = vec_sra(v12, altivec_vectorShiftInt19);
-            vector signed int vB = vec_sra(v23, altivec_vectorShiftInt19);
-            vector signed int vC = vec_sra(v34, altivec_vectorShiftInt19);
-            vector signed int vD = vec_sra(v45, altivec_vectorShiftInt19);
-            vector unsigned short vs1 = vec_packsu(vA, vB);
-            vector unsigned short vs2 = vec_packsu(vC, vD);
-            vector unsigned char vf = vec_packsu(vs1, vs2);
-            vec_st(vf, i, dest);
-            v1 = v5;
-        }
-    } else { // dest is properly aligned, great
-        for (i = 0; i < (dstW - 15); i+=16) {
-            int offset = i << 2;
-            vector signed int v1 = vec_ld(offset, val);
-            vector signed int v2 = vec_ld(offset + 16, val);
-            vector signed int v3 = vec_ld(offset + 32, val);
-            vector signed int v4 = vec_ld(offset + 48, val);
-            vector signed int v5 = vec_sra(v1, altivec_vectorShiftInt19);
-            vector signed int v6 = vec_sra(v2, altivec_vectorShiftInt19);
-            vector signed int v7 = vec_sra(v3, altivec_vectorShiftInt19);
-            vector signed int v8 = vec_sra(v4, altivec_vectorShiftInt19);
-            vector unsigned short vs1 = vec_packsu(v5, v6);
-            vector unsigned short vs2 = vec_packsu(v7, v8);
-            vector unsigned char vf = vec_packsu(vs1, vs2);
-            vec_st(vf, i, dest);
-        }
-    }
-    for ( ; i < dstW ; i++) {
-        int t = val[i] >> 19;
-        dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t);
-    }
-}
-
-static void
-yuv2yuvX_altivec_real(SwsContext *c,
-                      const int16_t *lumFilter, const int16_t **lumSrc,
-                      int lumFilterSize, const int16_t *chrFilter,
-                      const int16_t **chrUSrc, const int16_t **chrVSrc,
-                      int chrFilterSize, const int16_t **alpSrc,
-                      uint8_t *dest, uint8_t *uDest,
-                      uint8_t *vDest, uint8_t *aDest,
-                      int dstW, int chrDstW)
-{
-    const vector signed int vini = {(1 << 18), (1 << 18), (1 << 18), (1 << 18)};
-    register int i, j;
-    {
-        DECLARE_ALIGNED(16, int, val)[dstW];
-
-        for (i = 0; i < (dstW -7); i+=4) {
-            vec_st(vini, i << 2, val);
-        }
-        for (; i < dstW; i++) {
-            val[i] = (1 << 18);
-        }
-
-        for (j = 0; j < lumFilterSize; j++) {
-            vector signed short l1, vLumFilter = vec_ld(j << 1, lumFilter);
-            vector unsigned char perm, perm0 = vec_lvsl(j << 1, lumFilter);
-            vLumFilter = vec_perm(vLumFilter, vLumFilter, perm0);
-            vLumFilter = vec_splat(vLumFilter, 0); // lumFilter[j] is loaded 8 times in vLumFilter
-
-            perm = vec_lvsl(0, lumSrc[j]);
-            l1 = vec_ld(0, lumSrc[j]);
-
-            for (i = 0; i < (dstW - 7); i+=8) {
-                int offset = i << 2;
-                vector signed short l2 = vec_ld((i << 1) + 16, lumSrc[j]);
-
-                vector signed int v1 = vec_ld(offset, val);
-                vector signed int v2 = vec_ld(offset + 16, val);
-
-                vector signed short ls = vec_perm(l1, l2, perm); // lumSrc[j][i] ... lumSrc[j][i+7]
-
-                vector signed int i1 = vec_mule(vLumFilter, ls);
-                vector signed int i2 = vec_mulo(vLumFilter, ls);
-
-                vector signed int vf1 = vec_mergeh(i1, i2);
-                vector signed int vf2 = vec_mergel(i1, i2); // lumSrc[j][i] * lumFilter[j] ... lumSrc[j][i+7] * lumFilter[j]
-
-                vector signed int vo1 = vec_add(v1, vf1);
-                vector signed int vo2 = vec_add(v2, vf2);
-
-                vec_st(vo1, offset, val);
-                vec_st(vo2, offset + 16, val);
-
-                l1 = l2;
-            }
-            for ( ; i < dstW; i++) {
-                val[i] += lumSrc[j][i] * lumFilter[j];
-            }
-        }
-        altivec_packIntArrayToCharArray(val, dest, dstW);
-    }
-    if (uDest != 0) {
-        DECLARE_ALIGNED(16, int, u)[chrDstW];
-        DECLARE_ALIGNED(16, int, v)[chrDstW];
-
-        for (i = 0; i < (chrDstW -7); i+=4) {
-            vec_st(vini, i << 2, u);
-            vec_st(vini, i << 2, v);
-        }
-        for (; i < chrDstW; i++) {
-            u[i] = (1 << 18);
-            v[i] = (1 << 18);
-        }
-
-        for (j = 0; j < chrFilterSize; j++) {
-            vector signed short l1, l1_V, vChrFilter = vec_ld(j << 1, chrFilter);
-            vector unsigned char perm, perm0 = vec_lvsl(j << 1, chrFilter);
-            vChrFilter = vec_perm(vChrFilter, vChrFilter, perm0);
-            vChrFilter = vec_splat(vChrFilter, 0); // chrFilter[j] is loaded 8 times in vChrFilter
-
-            perm = vec_lvsl(0, chrUSrc[j]);
-            l1 = vec_ld(0, chrUSrc[j]);
-            l1_V = vec_ld(0, chrVSrc[j]);
-
-            for (i = 0; i < (chrDstW - 7); i+=8) {
-                int offset = i << 2;
-                vector signed short l2 = vec_ld((i << 1) + 16, chrUSrc[j]);
-                vector signed short l2_V = vec_ld((i << 1) + 16, chrVSrc[j]);
-
-                vector signed int v1 = vec_ld(offset, u);
-                vector signed int v2 = vec_ld(offset + 16, u);
-                vector signed int v1_V = vec_ld(offset, v);
-                vector signed int v2_V = vec_ld(offset + 16, v);
-
-                vector signed short ls = vec_perm(l1, l2, perm); // chrUSrc[j][i] ... chrUSrc[j][i+7]
-                vector signed short ls_V = vec_perm(l1_V, l2_V, perm); // chrVSrc[j][i] ... chrVSrc[j][i+7]
-
-                vector signed int i1 = vec_mule(vChrFilter, ls);
-                vector signed int i2 = vec_mulo(vChrFilter, ls);
-                vector signed int i1_V = vec_mule(vChrFilter, ls_V);
-                vector signed int i2_V = vec_mulo(vChrFilter, ls_V);
-
-                vector signed int vf1 = vec_mergeh(i1, i2);
-                vector signed int vf2 = vec_mergel(i1, i2); // chrUSrc[j][i] * chrFilter[j] ... chrUSrc[j][i+7] * chrFilter[j]
-                vector signed int vf1_V = vec_mergeh(i1_V, i2_V);
-                vector signed int vf2_V = vec_mergel(i1_V, i2_V); // chrVSrc[j][i] * chrFilter[j] ... chrVSrc[j][i+7] * chrFilter[j]
-
-                vector signed int vo1 = vec_add(v1, vf1);
-                vector signed int vo2 = vec_add(v2, vf2);
-                vector signed int vo1_V = vec_add(v1_V, vf1_V);
-                vector signed int vo2_V = vec_add(v2_V, vf2_V);
-
-                vec_st(vo1, offset, u);
-                vec_st(vo2, offset + 16, u);
-                vec_st(vo1_V, offset, v);
-                vec_st(vo2_V, offset + 16, v);
-
-                l1 = l2;
-                l1_V = l2_V;
-            }
-            for ( ; i < chrDstW; i++) {
-                u[i] += chrUSrc[j][i] * chrFilter[j];
-                v[i] += chrVSrc[j][i] * chrFilter[j];
-            }
-        }
-        altivec_packIntArrayToCharArray(u, uDest, chrDstW);
-        altivec_packIntArrayToCharArray(v, vDest, chrDstW);
-    }
-}
-
-static inline void hScale_altivec_real(int16_t *dst, int dstW,
-                                       const uint8_t *src, int srcW,
-                                       int xInc, const int16_t *filter,
-                                       const int16_t *filterPos, int filterSize)
-{
-    register int i;
-    DECLARE_ALIGNED(16, int, tempo)[4];
-
-    if (filterSize % 4) {
-        for (i=0; i<dstW; i++) {
-            register int j;
-            register int srcPos = filterPos[i];
-            register int val = 0;
-            for (j=0; j<filterSize; j++) {
-                val += ((int)src[srcPos + j])*filter[filterSize*i + j];
-            }
-            dst[i] = FFMIN(val>>7, (1<<15)-1);
-        }
-    }
-    else
-    switch (filterSize) {
-    case 4:
-    {
-        for (i=0; i<dstW; i++) {
-            register int srcPos = filterPos[i];
-
-            vector unsigned char src_v0 = vec_ld(srcPos, src);
-            vector unsigned char src_v1, src_vF;
-            vector signed short src_v, filter_v;
-            vector signed int val_vEven, val_s;
-            if ((((int)src + srcPos)% 16) > 12) {
-                src_v1 = vec_ld(srcPos + 16, src);
-            }
-            src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));
-
-            src_v = // vec_unpackh sign-extends...
-                (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
-            // now put our elements in the even slots
-            src_v = vec_mergeh(src_v, (vector signed short)vzero);
-
-            filter_v = vec_ld(i << 3, filter);
-            // The 3 above is 2 (filterSize == 4) + 1 (sizeof(short) == 2).
-
-            // The neat trick: We only care for half the elements,
-            // high or low depending on (i<<3)%16 (it's 0 or 8 here),
-            // and we're going to use vec_mule, so we choose
-            // carefully how to "unpack" the elements into the even slots.
-            if ((i << 3) % 16)
-                filter_v = vec_mergel(filter_v, (vector signed short)vzero);
-            else
-                filter_v = vec_mergeh(filter_v, (vector signed short)vzero);
-
-            val_vEven = vec_mule(src_v, filter_v);
-            val_s = vec_sums(val_vEven, vzero);
-            vec_st(val_s, 0, tempo);
-            dst[i] = FFMIN(tempo[3]>>7, (1<<15)-1);
-        }
-    }
-    break;
-
-    case 8:
-    {
-        for (i=0; i<dstW; i++) {
-            register int srcPos = filterPos[i];
-
-            vector unsigned char src_v0 = vec_ld(srcPos, src);
-            vector unsigned char src_v1, src_vF;
-            vector signed short src_v, filter_v;
-            vector signed int val_v, val_s;
-            if ((((int)src + srcPos)% 16) > 8) {
-                src_v1 = vec_ld(srcPos + 16, src);
-            }
-            src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));
-
-            src_v = // vec_unpackh sign-extends...
-                (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
-            filter_v = vec_ld(i << 4, filter);
-            // the 4 above is 3 (filterSize == 8) + 1 (sizeof(short) == 2)
-
-            val_v = vec_msums(src_v, filter_v, (vector signed int)vzero);
-            val_s = vec_sums(val_v, vzero);
-            vec_st(val_s, 0, tempo);
-            dst[i] = FFMIN(tempo[3]>>7, (1<<15)-1);
-        }
-    }
-    break;
-
-    case 16:
-    {
-        for (i=0; i<dstW; i++) {
-            register int srcPos = filterPos[i];
-
-            vector unsigned char src_v0 = vec_ld(srcPos, src);
-            vector unsigned char src_v1 = vec_ld(srcPos + 16, src);
-            vector unsigned char src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));
-
-            vector signed short src_vA = // vec_unpackh sign-extends...
-                (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
-            vector signed short src_vB = // vec_unpackh sign-extends...
-                (vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF));
-
-            vector signed short filter_v0 = vec_ld(i << 5, filter);
-            vector signed short filter_v1 = vec_ld((i << 5) + 16, filter);
-            // the 5 above are 4 (filterSize == 16) + 1 (sizeof(short) == 2)
-
-            vector signed int val_acc = vec_msums(src_vA, filter_v0, (vector signed int)vzero);
-            vector signed int val_v = vec_msums(src_vB, filter_v1, val_acc);
-
-            vector signed int val_s = vec_sums(val_v, vzero);
-
-            vec_st(val_s, 0, tempo);
-            dst[i] = FFMIN(tempo[3]>>7, (1<<15)-1);
-        }
-    }
-    break;
-
-    default:
-    {
-        for (i=0; i<dstW; i++) {
-            register int j;
-            register int srcPos = filterPos[i];
-
-            vector signed int val_s, val_v = (vector signed int)vzero;
-            vector signed short filter_v0R = vec_ld(i * 2 * filterSize, filter);
-            vector unsigned char permF = vec_lvsl((i * 2 * filterSize), filter);
-
-            vector unsigned char src_v0 = vec_ld(srcPos, src);
-            vector unsigned char permS = vec_lvsl(srcPos, src);
-
-            for (j = 0 ; j < filterSize - 15; j += 16) {
-                vector unsigned char src_v1 = vec_ld(srcPos + j + 16, src);
-                vector unsigned char src_vF = vec_perm(src_v0, src_v1, permS);
-
-                vector signed short src_vA = // vec_unpackh sign-extends...
-                    (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
-                vector signed short src_vB = // vec_unpackh sign-extends...
-                    (vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF));
-
-                vector signed short filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter);
-                vector signed short filter_v2R = vec_ld((i * 2 * filterSize) + (j * 2) + 32, filter);
-                vector signed short filter_v0 = vec_perm(filter_v0R, filter_v1R, permF);
-                vector signed short filter_v1 = vec_perm(filter_v1R, filter_v2R, permF);
-
-                vector signed int val_acc = vec_msums(src_vA, filter_v0, val_v);
-                val_v = vec_msums(src_vB, filter_v1, val_acc);
-
-                filter_v0R = filter_v2R;
-                src_v0 = src_v1;
-            }
-
-            if (j < filterSize-7) {
-                // loading src_v0 is useless if we load 16 pixels already
-                // so load it here
-                vector unsigned char src_v1, src_vF;
-                vector signed short src_v, filter_v1R, filter_v;
-
-                // it could be a problem if this is a last load of src
-                // but if filterSize % 16 != 0, there is room left
-                if ((((int)src + srcPos)% 16) > 8) {
-                    src_v1 = vec_ld(srcPos + j + 16, src);
-                }
-                src_vF = vec_perm(src_v0, src_v1, permS);
-
-                src_v = // vec_unpackh sign-extends...
-                    (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
-                // loading filter_v0R is useless, it's already done above
-                //vector signed short filter_v0R = vec_ld((i * 2 * filterSize) + j, filter);
-                filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter);
-                filter_v = vec_perm(filter_v0R, filter_v1R, permF);
-
-                val_v = vec_msums(src_v, filter_v, val_v);
-            }
-
-            val_s = vec_sums(val_v, vzero);
-
-            vec_st(val_s, 0, tempo);
-            dst[i] = FFMIN(tempo[3]>>7, (1<<15)-1);
-        }
-
-    }
-    }
-}
-
-static void RENAME(sws_init_swScale)(SwsContext *c)
-{
-    c->yuv2yuvX = yuv2yuvX_altivec_real;
-
-    /* The following list of supported dstFormat values should
-     * match what's found in the body of ff_yuv2packedX_altivec() */
-    if (!(c->flags & SWS_BITEXACT) && !c->alpPixBuf &&
-        (c->dstFormat==PIX_FMT_ABGR || c->dstFormat==PIX_FMT_BGRA ||
-         c->dstFormat==PIX_FMT_BGR24 || c->dstFormat==PIX_FMT_RGB24 ||
-         c->dstFormat==PIX_FMT_RGBA || c->dstFormat==PIX_FMT_ARGB)) {
-        c->yuv2packedX = ff_yuv2packedX_altivec;
-    }
-}
diff --git a/libswscale/ppc/yuv2rgb_altivec.c b/libswscale/ppc/yuv2rgb_altivec.c
index 4b2bdafca0..476db22489 100644
--- a/libswscale/ppc/yuv2rgb_altivec.c
+++ b/libswscale/ppc/yuv2rgb_altivec.c
@@ -95,6 +95,7 @@ adjustment.
 #include "libswscale/swscale.h"
 #include "libswscale/swscale_internal.h"
 #include "libavutil/cpu.h"
+#include "yuv2rgb_altivec.h"
 
 #undef PROFILE_THE_BEAST
 #undef INC_SCALING
diff --git a/libswscale/ppc/yuv2rgb_altivec.h b/libswscale/ppc/yuv2rgb_altivec.h
new file mode 100644
index 0000000000..b54a856905
--- /dev/null
+++ b/libswscale/ppc/yuv2rgb_altivec.h
@@ -0,0 +1,34 @@
+/*
+ * AltiVec-enhanced yuv2yuvX
+ *
+ * Copyright (C) 2004 Romain Dolbeau <romain@dolbeau.org>
+ * based on the equivalent C code in swscale.c
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef PPC_YUV2RGB_ALTIVEC_H
+#define PPC_YUV2RGB_ALTIVEC_H 1
+
+void ff_yuv2packedX_altivec(SwsContext *c, const int16_t *lumFilter,
+                            const int16_t **lumSrc, int lumFilterSize,
+                            const int16_t *chrFilter, const int16_t **chrUSrc,
+                            const int16_t **chrVSrc, int chrFilterSize,
+                            const int16_t **alpSrc, uint8_t *dest,
+                            int dstW, int dstY);
+
+#endif /* PPC_YUV2RGB_ALTIVEC_H */