Diffstat (limited to 'libavutil/ppc/util_altivec.h')
-rw-r--r--  libavutil/ppc/util_altivec.h  43
1 file changed, 35 insertions(+), 8 deletions(-)
diff --git a/libavutil/ppc/util_altivec.h b/libavutil/ppc/util_altivec.h
index 5624ac5523..2f0c6eb902 100644
--- a/libavutil/ppc/util_altivec.h
+++ b/libavutil/ppc/util_altivec.h
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -85,29 +85,56 @@ do { \
} while (0)
+#if HAVE_BIGENDIAN
+#define VEC_LD(offset,b) \
+ vec_perm(vec_ld(offset, b), vec_ld((offset)+15, b), vec_lvsl(offset, b))
+#else
+#define VEC_LD(offset,b) \
+ vec_vsx_ld(offset, b)
+#endif
+
/** @brief loads an unaligned vector from @a *src at byte offset @a offset
    and returns it */
-static inline vector unsigned char unaligned_load(int offset, uint8_t *src)
+#if HAVE_BIGENDIAN
+static inline vector unsigned char unaligned_load(int offset, const uint8_t *src)
{
register vector unsigned char first = vec_ld(offset, src);
register vector unsigned char second = vec_ld(offset+15, src);
register vector unsigned char mask = vec_lvsl(offset, src);
return vec_perm(first, second, mask);
}
+#else
+#define unaligned_load(a,b) VEC_LD(a,b)
+#endif
+
/**
 * loads a vector with known misalignment
* @param perm_vec the align permute vector to combine the two loads from lvsl
*/
-static inline vec_u8 load_with_perm_vec(int offset, uint8_t *src, vec_u8 perm_vec)
+static inline vec_u8 load_with_perm_vec(int offset, const uint8_t *src, vec_u8 perm_vec)
{
vec_u8 a = vec_ld(offset, src);
vec_u8 b = vec_ld(offset+15, src);
return vec_perm(a, b, perm_vec);
}
-#define vec_unaligned_load(b) \
- vec_perm(vec_ld(0, b), vec_ld(15, b), vec_lvsl(0, b));
+#define vec_unaligned_load(b) VEC_LD(0, b)
+
+#if HAVE_BIGENDIAN
+#define VEC_MERGEH(a, b) vec_mergeh(a, b)
+#define VEC_MERGEL(a, b) vec_mergel(a, b)
+#else
+#define VEC_MERGEH(a, b) vec_mergeh(b, a)
+#define VEC_MERGEL(a, b) vec_mergel(b, a)
+#endif
+
+#if HAVE_BIGENDIAN
+#define VEC_ST(a,b,c) vec_st(a,b,c)
+#else
+#define VEC_ST(a,b,c) vec_vsx_st(a,b,c)
+#endif
+
#endif /* HAVE_ALTIVEC */
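
Taken together, the new wrappers let the same AltiVec source build on both big- and little-endian POWER: VEC_LD and VEC_ST expand to the classic vec_lvsl + vec_perm unaligned-load idiom (and aligned vec_st) on big-endian, and to single VSX unaligned accesses (vec_vsx_ld / vec_vsx_st) on little-endian, while VEC_MERGEH and VEC_MERGEL swap their operands on little-endian so the interleave comes out in the same memory order either way. A minimal usage sketch follows; it is not part of the patch, copy16 and zip_lo are hypothetical helpers, and it assumes an FFmpeg tree (so config.h defines HAVE_BIGENDIAN) built with -maltivec, plus -mvsx on little-endian POWER.

/* Sketch only: copies 16 bytes from a possibly unaligned src to a
 * 16-byte-aligned dst through the wrappers above. */
#include <stdint.h>
#include "libavutil/ppc/util_altivec.h"

static void copy16(uint8_t *dst, const uint8_t *src)
{
    /* lvsl + vec_perm load on big-endian, vec_vsx_ld on little-endian */
    vector unsigned char v = unaligned_load(0, src);
    /* vec_st on big-endian (dst must be 16-byte aligned),
     * vec_vsx_st on little-endian */
    VEC_ST(v, 0, dst);
}

/* Sketch only: interleaves the first 8 bytes of a and b so that, viewed
 * in memory order, the result is a0 b0 a1 b1 ... on either endianness;
 * producing that fixed memory-order interleave is exactly what the
 * operand swap in the little-endian VEC_MERGEH is for. */
static vector unsigned char zip_lo(vector unsigned char a, vector unsigned char b)
{
    return VEC_MERGEH(a, b);
}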