Diffstat (limited to 'libavcodec/x86/motion_est_mmx.c')
 libavcodec/x86/motion_est_mmx.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/libavcodec/x86/motion_est_mmx.c b/libavcodec/x86/motion_est_mmx.c
index e1314aef4b..fefef41058 100644
--- a/libavcodec/x86/motion_est_mmx.c
+++ b/libavcodec/x86/motion_est_mmx.c
@@ -38,7 +38,7 @@ static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
x86_reg len= -(stride*h);
__asm__ volatile(
- ASMALIGN(4)
+ ".p2align 4 \n\t"
"1: \n\t"
"movq (%1, %%"REG_a"), %%mm0 \n\t"
"movq (%2, %%"REG_a"), %%mm2 \n\t"
@@ -73,7 +73,7 @@ static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
static inline void sad8_1_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
__asm__ volatile(
- ASMALIGN(4)
+ ".p2align 4 \n\t"
"1: \n\t"
"movq (%1), %%mm0 \n\t"
"movq (%1, %3), %%mm1 \n\t"
@@ -95,7 +95,7 @@ static int sad16_sse2(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)
int ret;
__asm__ volatile(
"pxor %%xmm2, %%xmm2 \n\t"
- ASMALIGN(4)
+ ".p2align 4 \n\t"
"1: \n\t"
"movdqu (%1), %%xmm0 \n\t"
"movdqu (%1, %4), %%xmm1 \n\t"
@@ -119,7 +119,7 @@ static int sad16_sse2(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)
static inline void sad8_x2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
__asm__ volatile(
- ASMALIGN(4)
+ ".p2align 4 \n\t"
"1: \n\t"
"movq (%1), %%mm0 \n\t"
"movq (%1, %3), %%mm1 \n\t"
@@ -143,7 +143,7 @@ static inline void sad8_y2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h
__asm__ volatile(
"movq (%1), %%mm0 \n\t"
"add %3, %1 \n\t"
- ASMALIGN(4)
+ ".p2align 4 \n\t"
"1: \n\t"
"movq (%1), %%mm1 \n\t"
"movq (%1, %3), %%mm2 \n\t"
@@ -170,7 +170,7 @@ static inline void sad8_4_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
"movq (%1), %%mm0 \n\t"
"pavgb 1(%1), %%mm0 \n\t"
"add %3, %1 \n\t"
- ASMALIGN(4)
+ ".p2align 4 \n\t"
"1: \n\t"
"movq (%1), %%mm1 \n\t"
"movq (%1,%3), %%mm2 \n\t"
@@ -197,7 +197,7 @@ static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int
{
x86_reg len= -(stride*h);
__asm__ volatile(
- ASMALIGN(4)
+ ".p2align 4 \n\t"
"1: \n\t"
"movq (%1, %%"REG_a"), %%mm0 \n\t"
"movq (%2, %%"REG_a"), %%mm1 \n\t"
@@ -245,7 +245,7 @@ static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
"punpckhbw %%mm7, %%mm3 \n\t"
"paddw %%mm2, %%mm0 \n\t"
"paddw %%mm3, %%mm1 \n\t"
- ASMALIGN(4)
+ ".p2align 4 \n\t"
"1: \n\t"
"movq (%2, %%"REG_a"), %%mm2 \n\t"
"movq 1(%2, %%"REG_a"), %%mm4 \n\t"