author    Diego Biurrun <diego@biurrun.de>    2008-07-20 18:58:30 +0000
committer Diego Biurrun <diego@biurrun.de>    2008-07-20 18:58:30 +0000
commit    e3905ce0afe91ad1422af83334d06d52e4e8fc80 (patch)
tree      4b5c16c164776efb5db27f1361bb63df5c2615a4 /libavcodec/ppc/h264_altivec.c
parent    41f5c62f5cdf17c74d7d3822cfa8db1da734719a (diff)
cosmetics: Reformat PPC code in libavcodec according to style guidelines.
This includes indentation changes, comment reformatting, consistent brace placement and some prettyprinting. Originally committed as revision 14316 to svn://svn.ffmpeg.org/ffmpeg/trunk
Diffstat (limited to 'libavcodec/ppc/h264_altivec.c')
-rw-r--r--  libavcodec/ppc/h264_altivec.c  12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/libavcodec/ppc/h264_altivec.c b/libavcodec/ppc/h264_altivec.c
index 09a739bdb8..04dad2e33d 100644
--- a/libavcodec/ppc/h264_altivec.c
+++ b/libavcodec/ppc/h264_altivec.c
@@ -196,7 +196,7 @@ void put_no_rnd_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride
const vec_s16_t vD = vec_splat((vec_s16_t)vABCD, 7);
LOAD_ZERO;
const vec_s16_t v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4));
- const vec_u16_t v6us = vec_splat_u16(6);
+ const vec_u16_t v6us = vec_splat_u16(6);
register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
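Aside from the alignment fix, this hunk's context shows the usual AltiVec trick for building a per-lane constant that does not fit the 5-bit literal accepted by vec_splat_s16()/vec_splat_u16(): the value 28 (v28ss) is composed as (1 << 5) - 4 in every lane. A minimal standalone sketch of the same idiom, using plain AltiVec types (vec_s16_t/vec_u16_t are FFmpeg typedefs for these) and an illustrative helper name, compiled with -maltivec on a PowerPC target:

#include <altivec.h>

/* Splat the constant 28 into every 16-bit lane.  vec_splat_s16() only
 * accepts literals in [-16, 15], so 28 is synthesized as (1 << 5) - 4. */
static vector signed short splat16_28(void)
{
    return vec_sub(vec_sl(vec_splat_s16(1), vec_splat_u16(5)),
                   vec_splat_s16(4));
}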
@@ -392,8 +392,8 @@ static inline void avg_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
#define avg_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) avg_pixels16_l2(d,s1,s2,ds,s1s,16,h)
*/
- H264_MC(put_, 16, altivec)
- H264_MC(avg_, 16, altivec)
+H264_MC(put_, 16, altivec)
+H264_MC(avg_, 16, altivec)
/****************************************************************************
@@ -685,9 +685,9 @@ static inline void write16x4(uint8_t *dst, int dst_stride,
r15 = vec_mergel(r3, r7); /*3,7,11,15 set 1*/ \
\
/*Third merge*/ \
- r0 = vec_mergeh(r8, r12); /*0,2,4,6,8,10,12,14 set 0*/ \
- r1 = vec_mergel(r8, r12); /*0,2,4,6,8,10,12,14 set 1*/ \
- r2 = vec_mergeh(r9, r13); /*0,2,4,6,8,10,12,14 set 2*/ \
+ r0 = vec_mergeh(r8, r12); /*0,2,4,6,8,10,12,14 set 0*/ \
+ r1 = vec_mergel(r8, r12); /*0,2,4,6,8,10,12,14 set 1*/ \
+ r2 = vec_mergeh(r9, r13); /*0,2,4,6,8,10,12,14 set 2*/ \
r4 = vec_mergeh(r10, r14); /*1,3,5,7,9,11,13,15 set 0*/ \
r5 = vec_mergel(r10, r14); /*1,3,5,7,9,11,13,15 set 1*/ \
r6 = vec_mergeh(r11, r15); /*1,3,5,7,9,11,13,15 set 2*/ \
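The reindented lines above are the third pass of a merge-based transpose: vec_mergeh()/vec_mergel() interleave the high and low halves of two vectors, and repeating the merges log2(N) times transposes an NxN block. A minimal sketch of the same pattern on a hypothetical 4x4 block of 32-bit lanes (the macro in the hunk applies the idea across sixteen vector registers, r0..r15):

#include <altivec.h>

/* Transpose a 4x4 block of 32-bit lanes in two merge passes.
 * Lane comments use row,column indices of the input block. */
static void transpose4x4(vector unsigned int r[4])
{
    vector unsigned int t0 = vec_mergeh(r[0], r[2]); /* 00,20,01,21 */
    vector unsigned int t1 = vec_mergel(r[0], r[2]); /* 02,22,03,23 */
    vector unsigned int t2 = vec_mergeh(r[1], r[3]); /* 10,30,11,31 */
    vector unsigned int t3 = vec_mergel(r[1], r[3]); /* 12,32,13,33 */

    r[0] = vec_mergeh(t0, t2); /* column 0: 00,10,20,30 */
    r[1] = vec_mergel(t0, t2); /* column 1: 01,11,21,31 */
    r[2] = vec_mergeh(t1, t3); /* column 2: 02,12,22,32 */
    r[3] = vec_mergel(t1, t3); /* column 3: 03,13,23,33 */
}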