author    Jason Garrett-Glaser <jason@x264.com>    2011-02-16 10:20:54 -0800
committer Jason Garrett-Glaser <jason@x264.com>    2011-02-17 15:25:25 -0800
commit    eb3755a5aa65da685d81399cfae4bd35e4a178b6 (patch)
tree      503ba2342f1cd61e10ee5ed310f3a839fd9a65e8 /libavutil/arm/intmath.h
parent    bcf4568f183055331415ba230e82af6d59faac1c (diff)
Force inlining of avutil common routines
On some versions of gcc, these functions weren't always inlined because some files hit gcc's inlining limit. This is generally bad, as most of these functions are smaller when inlined than when called.
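For context: av_always_inline is FFmpeg's wrapper around the compiler's always_inline hint, which overrides gcc's inlining heuristics rather than merely suggesting inlining the way plain inline does. A minimal sketch of such a macro, under the assumption of a GCC-compatible compiler (the real definition lives in libavutil/attributes.h and also gates on the gcc version):

    /* Illustrative sketch, not the exact FFmpeg definition. On GCC-compatible
     * compilers, always_inline forces inlining even when the function would
     * otherwise fall outside the translation unit's inline budget. */
    #ifdef __GNUC__
    #    define av_always_inline __attribute__((always_inline)) inline
    #else
    #    define av_always_inline inline
    #endif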
Diffstat (limited to 'libavutil/arm/intmath.h')
-rw-r--r--  libavutil/arm/intmath.h | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/libavutil/arm/intmath.h b/libavutil/arm/intmath.h
index 2c0aa3b11c..8f03d4bf90 100644
--- a/libavutil/arm/intmath.h
+++ b/libavutil/arm/intmath.h
@@ -31,7 +31,7 @@
 #if HAVE_ARMV6
 
 #define FASTDIV FASTDIV
-static inline av_const int FASTDIV(int a, int b)
+static av_always_inline av_const int FASTDIV(int a, int b)
 {
     int r, t;
     __asm__ volatile("cmp %3, #2 \n\t"
@@ -43,7 +43,7 @@ static inline av_const int FASTDIV(int a, int b)
 }
 
 #define av_clip_uint8 av_clip_uint8_arm
-static inline av_const uint8_t av_clip_uint8_arm(int a)
+static av_always_inline av_const uint8_t av_clip_uint8_arm(int a)
 {
     unsigned x;
     __asm__ volatile ("usat %0, #8, %1" : "=r"(x) : "r"(a));
@@ -51,7 +51,7 @@ static inline av_const uint8_t av_clip_uint8_arm(int a)
 }
 
 #define av_clip_int8 av_clip_int8_arm
-static inline av_const uint8_t av_clip_int8_arm(int a)
+static av_always_inline av_const uint8_t av_clip_int8_arm(int a)
 {
     unsigned x;
     __asm__ volatile ("ssat %0, #8, %1" : "=r"(x) : "r"(a));
@@ -59,7 +59,7 @@ static inline av_const uint8_t av_clip_int8_arm(int a)
 }
 
 #define av_clip_uint16 av_clip_uint16_arm
-static inline av_const uint16_t av_clip_uint16_arm(int a)
+static av_always_inline av_const uint16_t av_clip_uint16_arm(int a)
 {
     unsigned x;
     __asm__ volatile ("usat %0, #16, %1" : "=r"(x) : "r"(a));
@@ -67,7 +67,7 @@ static inline av_const uint16_t av_clip_uint16_arm(int a)
 }
 
 #define av_clip_int16 av_clip_int16_arm
-static inline av_const int16_t av_clip_int16_arm(int a)
+static av_always_inline av_const int16_t av_clip_int16_arm(int a)
 {
     int x;
     __asm__ volatile ("ssat %0, #16, %1" : "=r"(x) : "r"(a));
@@ -77,7 +77,7 @@ static inline av_const int16_t av_clip_int16_arm(int a)
 #else /* HAVE_ARMV6 */
 
 #define FASTDIV FASTDIV
-static inline av_const int FASTDIV(int a, int b)
+static av_always_inline av_const int FASTDIV(int a, int b)
 {
     int r, t;
     __asm__ volatile("umull %1, %0, %2, %3"
@@ -88,7 +88,7 @@ static inline av_const int FASTDIV(int a, int b)
 #endif /* HAVE_ARMV6 */
 
 #define av_clipl_int32 av_clipl_int32_arm
-static inline av_const int32_t av_clipl_int32_arm(int64_t a)
+static av_always_inline av_const int32_t av_clipl_int32_arm(int64_t a)
 {
     int x, y;
     __asm__ volatile ("adds %1, %R2, %Q2, lsr #31 \n\t"