author    Diego Biurrun <diego@biurrun.de>    2008-07-20 18:58:30 +0000
committer Diego Biurrun <diego@biurrun.de>    2008-07-20 18:58:30 +0000
commit    e3905ce0afe91ad1422af83334d06d52e4e8fc80 (patch)
tree      4b5c16c164776efb5db27f1361bb63df5c2615a4 /libavcodec/ppc/imgresample_altivec.c
parent    41f5c62f5cdf17c74d7d3822cfa8db1da734719a (diff)
cosmetics: Reformat PPC code in libavcodec according to style guidelines.
This includes indentation changes, comment reformatting, consistent brace placement and some prettyprinting. Originally committed as revision 14316 to svn://svn.ffmpeg.org/ffmpeg/trunk
Diffstat (limited to 'libavcodec/ppc/imgresample_altivec.c')
-rw-r--r--    libavcodec/ppc/imgresample_altivec.c    21
1 file changed, 7 insertions, 14 deletions
diff --git a/libavcodec/ppc/imgresample_altivec.c b/libavcodec/ppc/imgresample_altivec.c
index 3b161c5a6d..b38e41b0f8 100644
--- a/libavcodec/ppc/imgresample_altivec.c
+++ b/libavcodec/ppc/imgresample_altivec.c
@@ -46,8 +46,7 @@ void v_resample16_altivec(uint8_t *dst, int dst_width, const uint8_t *src,
vector signed short zeros, sumhv, sumlv;
s = src;
- for(i=0;i<4;i++)
- {
+ for(i=0;i<4;i++) {
/*
The vec_madds later on does an implicit >>15 on the result.
Since FILTER_BITS is 8, and we have 15 bits of magnitude in
@@ -86,13 +85,11 @@ void v_resample16_altivec(uint8_t *dst, int dst_width, const uint8_t *src,
/* Do our altivec resampling on 16 pixels at once. */
while(dst_width>=16) {
- /*
- Read 16 (potentially unaligned) bytes from each of
+ /* Read 16 (potentially unaligned) bytes from each of
4 lines into 4 vectors, and split them into shorts.
       Interleave the multiply/accumulate for the resample
filter with the loads to hide the 3 cycle latency
- the vec_madds have.
- */
+ the vec_madds have. */
tv = (vector unsigned char *) &s[0 * wrap];
tmp = vec_perm(tv[0], tv[1], vec_lvsl(0, &s[i * wrap]));
srchv[0].v = (vector signed short) vec_mergeh(zero, tmp);
@@ -121,10 +118,8 @@ void v_resample16_altivec(uint8_t *dst, int dst_width, const uint8_t *src,
sumhv = vec_madds(srchv[3].v, fv[3].v, sumhv);
sumlv = vec_madds(srclv[3].v, fv[3].v, sumlv);
- /*
- Pack the results into our destination vector,
- and do an aligned write of that back to memory.
- */
+ /* Pack the results into our destination vector,
+ and do an aligned write of that back to memory. */
dstv = vec_packsu(sumhv, sumlv) ;
vec_st(dstv, 0, (vector unsigned char *) dst);
@@ -133,10 +128,8 @@ void v_resample16_altivec(uint8_t *dst, int dst_width, const uint8_t *src,
dst_width-=16;
}
- /*
- If there are any leftover pixels, resample them
- with the slow scalar method.
- */
+ /* If there are any leftover pixels, resample them
+ with the slow scalar method. */
while(dst_width>0) {
sum = s[0 * wrap] * filter[0] +
s[1 * wrap] * filter[1] +
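
As an aside (not part of this commit or the patched file): the hunks above rely on the classic AltiVec unaligned-load idiom, loading two aligned blocks and permuting the wanted bytes into place with a vec_lvsl mask. A minimal sketch of that idiom follows, assuming <altivec.h> on a PPC target with AltiVec; the helper name load_unaligned16 and the src parameter are hypothetical, introduced only for illustration.

    #include <altivec.h>
    #include <stdint.h>

    /* Hypothetical helper, not from the commit: fetch 16 possibly
       unaligned bytes, the same pattern the resampler uses for each
       source line. */
    static vector unsigned char load_unaligned16(const uint8_t *src)
    {
        /* vec_ld ignores the low 4 address bits, so read the two
           aligned 16-byte blocks that straddle src... */
        vector unsigned char lo = vec_ld(0,  src);
        vector unsigned char hi = vec_ld(15, src);
        /* ...then let vec_perm shift the wanted bytes into place,
           using the permute mask vec_lvsl derives from the
           misalignment of src. */
        return vec_perm(lo, hi, vec_lvsl(0, src));
    }

From there, as the patch context shows, the code widens the loaded bytes to signed shorts with vec_mergeh against a zero vector before feeding them to vec_madds, and packs the accumulated sums back to bytes with vec_packsu for an aligned vec_st to the destination.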