author     Diego Pettenò <flameeyes@gmail.com>    2008-10-16 13:34:09 +0000
committer  Diego Pettenò <flameeyes@gmail.com>    2008-10-16 13:34:09 +0000
commit     be449fca79a3b0394143f0a77c99784e65868d9f (patch)
tree       5c5b2bbfe648467292b30cc501265e556acab101 /libavutil
parent     a14b362fc650a5e036d413033d9709a526662d89 (diff)
Convert asm keyword into __asm__.
Neither the asm() nor the __asm__() keyword is part of the C99 standard. GCC accepts the former in C89 mode, but not in C99 mode unless GNU extensions are turned on (with -fasm). The latter form is accepted in either mode as an extension, without requiring further command-line options. The Sun Studio C99 compiler likewise rejects asm() while accepting __asm__(), albeit with warnings that it is not valid C99 syntax.

Originally committed as revision 15627 to svn://svn.ffmpeg.org/ffmpeg/trunk
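As a minimal standalone sketch of what the change amounts to (not part of the commit; the function name swap16 and the main() check are added here purely for illustration), this is the x86 branch of bswap_16() written with the portable __asm__ spelling. It builds with gcc -std=c89 as well as gcc -std=c99, whereas the plain asm() spelling would require -fasm or a GNU dialect under C99:

/* Sketch: byte-swap a 16-bit value using the __asm__ keyword,
 * which GCC accepts in both C89 and C99 modes without -fasm. */
#include <stdint.h>

static inline uint16_t swap16(uint16_t x)
{
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
    __asm__("rorw $8, %0" : "+r"(x));     /* rotate the low 16-bit word by 8 bits */
#else
    x = (uint16_t)((x >> 8) | (x << 8));  /* portable fallback */
#endif
    return x;
}

int main(void)
{
    return swap16(0x1234) == 0x3412 ? 0 : 1;  /* 0 on success */
}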
Diffstat (limited to 'libavutil')
-rw-r--r--  libavutil/bswap.h    | 20
-rw-r--r--  libavutil/common.h   | 10
-rw-r--r--  libavutil/internal.h | 10
3 files changed, 20 insertions(+), 20 deletions(-)
diff --git a/libavutil/bswap.h b/libavutil/bswap.h
index 18fa594f94..8e6b1b20f1 100644
--- a/libavutil/bswap.h
+++ b/libavutil/bswap.h
@@ -33,11 +33,11 @@
static av_always_inline av_const uint16_t bswap_16(uint16_t x)
{
#if defined(ARCH_X86)
- asm("rorw $8, %0" : "+r"(x));
+ __asm__("rorw $8, %0" : "+r"(x));
#elif defined(ARCH_SH4)
- asm("swap.b %0,%0" : "=r"(x) : "0"(x));
+ __asm__("swap.b %0,%0" : "=r"(x) : "0"(x));
#elif defined(HAVE_ARMV6)
- asm("rev16 %0, %0" : "+r"(x));
+ __asm__("rev16 %0, %0" : "+r"(x));
#else
x= (x>>8) | (x<<8);
#endif
@@ -48,30 +48,30 @@ static av_always_inline av_const uint32_t bswap_32(uint32_t x)
{
#if defined(ARCH_X86)
#ifdef HAVE_BSWAP
- asm("bswap %0" : "+r" (x));
+ __asm__("bswap %0" : "+r" (x));
#else
- asm("rorw $8, %w0 \n\t"
+ __asm__("rorw $8, %w0 \n\t"
"rorl $16, %0 \n\t"
"rorw $8, %w0"
: "+r"(x));
#endif
#elif defined(ARCH_SH4)
- asm("swap.b %0,%0\n"
+ __asm__("swap.b %0,%0\n"
"swap.w %0,%0\n"
"swap.b %0,%0\n"
: "=r"(x) : "0"(x));
#elif defined(HAVE_ARMV6)
- asm("rev %0, %0" : "+r"(x));
+ __asm__("rev %0, %0" : "+r"(x));
#elif defined(ARCH_ARMV4L)
uint32_t t;
- asm ("eor %1, %0, %0, ror #16 \n\t"
+ __asm__ ("eor %1, %0, %0, ror #16 \n\t"
"bic %1, %1, #0xFF0000 \n\t"
"mov %0, %0, ror #8 \n\t"
"eor %0, %0, %1, lsr #8 \n\t"
: "+r"(x), "+r"(t));
#elif defined(ARCH_BFIN)
unsigned tmp;
- asm("%1 = %0 >> 8 (V); \n\t"
+ __asm__("%1 = %0 >> 8 (V); \n\t"
"%0 = %0 << 8 (V); \n\t"
"%0 = %0 | %1; \n\t"
"%0 = PACK(%0.L, %0.H); \n\t"
@@ -90,7 +90,7 @@ static inline uint64_t av_const bswap_64(uint64_t x)
x= ((x<<16)&0xFFFF0000FFFF0000ULL) | ((x>>16)&0x0000FFFF0000FFFFULL);
return (x>>32) | (x<<32);
#elif defined(ARCH_X86_64)
- asm("bswap %0": "=r" (x) : "0" (x));
+ __asm__("bswap %0": "=r" (x) : "0" (x));
return x;
#else
union {
diff --git a/libavutil/common.h b/libavutil/common.h
index 9c78804273..7ceaf33890 100644
--- a/libavutil/common.h
+++ b/libavutil/common.h
@@ -154,7 +154,7 @@ static inline av_const int mid_pred(int a, int b, int c)
{
#ifdef HAVE_CMOV
int i=b;
- asm volatile(
+ __asm__ volatile(
"cmp %2, %1 \n\t"
"cmovg %1, %0 \n\t"
"cmovg %2, %1 \n\t"
@@ -327,7 +327,7 @@ static inline av_pure int ff_get_fourcc(const char *s){
static inline uint64_t read_time(void)
{
uint64_t a, d;
- asm volatile("rdtsc\n\t"
+ __asm__ volatile("rdtsc\n\t"
: "=a" (a), "=d" (d));
return (d << 32) | (a & 0xffffffff);
}
@@ -335,7 +335,7 @@ static inline uint64_t read_time(void)
static inline long long read_time(void)
{
long long l;
- asm volatile("rdtsc\n\t"
+ __asm__ volatile("rdtsc\n\t"
: "=A" (l));
return l;
}
@@ -349,7 +349,7 @@ static inline uint64_t read_time(void)
} p;
unsigned long long c;
} t;
- asm volatile ("%0=cycles; %1=cycles2;" : "=d" (t.p.lo), "=d" (t.p.hi));
+ __asm__ volatile ("%0=cycles; %1=cycles2;" : "=d" (t.p.lo), "=d" (t.p.hi));
return t.c;
}
#else //FIXME check ppc64
@@ -358,7 +358,7 @@ static inline uint64_t read_time(void)
uint32_t tbu, tbl, temp;
/* from section 2.2.1 of the 32-bit PowerPC PEM */
- asm volatile(
+ __asm__ volatile(
"1:\n"
"mftbu %2\n"
"mftb %0\n"
diff --git a/libavutil/internal.h b/libavutil/internal.h
index 0eb25d5f6c..aaecab77a5 100644
--- a/libavutil/internal.h
+++ b/libavutil/internal.h
@@ -130,7 +130,7 @@ extern const uint32_t ff_inverse[256];
# define FASTDIV(a,b) \
({\
int ret,dmy;\
- asm volatile(\
+ __asm__ volatile(\
"mull %3"\
:"=d"(ret),"=a"(dmy)\
:"1"(a),"g"(ff_inverse[b])\
@@ -141,7 +141,7 @@ extern const uint32_t ff_inverse[256];
static inline av_const int FASTDIV(int a, int b)
{
int r;
- asm volatile("cmp %2, #0 \n\t"
+ __asm__ volatile("cmp %2, #0 \n\t"
"smmul %0, %1, %2 \n\t"
"rsblt %0, %0, #0 \n\t"
: "=r"(r) : "r"(a), "r"(ff_inverse[b]));
@@ -151,7 +151,7 @@ static inline av_const int FASTDIV(int a, int b)
# define FASTDIV(a,b) \
({\
int ret,dmy;\
- asm volatile(\
+ __asm__ volatile(\
"umull %1, %0, %2, %3"\
:"=&r"(ret),"=&r"(dmy)\
:"r"(a),"r"(ff_inverse[b])\
@@ -190,7 +190,7 @@ static inline av_const unsigned int ff_sqrt(unsigned int a)
#if defined(ARCH_X86)
#define MASK_ABS(mask, level)\
- asm volatile(\
+ __asm__ volatile(\
"cltd \n\t"\
"xorl %1, %0 \n\t"\
"subl %1, %0 \n\t"\
@@ -204,7 +204,7 @@ static inline av_const unsigned int ff_sqrt(unsigned int a)
#ifdef HAVE_CMOV
#define COPY3_IF_LT(x,y,a,b,c,d)\
-asm volatile (\
+__asm__ volatile (\
"cmpl %0, %3 \n\t"\
"cmovl %3, %0 \n\t"\
"cmovl %4, %1 \n\t"\