author    Diego Biurrun <diego@biurrun.de>    2013-04-23 17:10:59 +0200
committer Diego Biurrun <diego@biurrun.de>    2013-05-02 11:16:45 +0200
commit    9b3a04d30691e85b77e63f75f5f26a93c3a000cd (patch)
tree      cd02a593b7fb29be55410c4f39af7dbb02b8960e /libavcodec/x86/fpel_mmx.c
parent    0671adbb18f79e834d5c1e68a322d75e4524a8dc (diff)
x86: Move duplicated put_pixels{8|16}_mmx functions into their own file
Diffstat (limited to 'libavcodec/x86/fpel_mmx.c')
-rw-r--r--  libavcodec/x86/fpel_mmx.c | 94
1 file changed, 94 insertions(+), 0 deletions(-)
diff --git a/libavcodec/x86/fpel_mmx.c b/libavcodec/x86/fpel_mmx.c
new file mode 100644
index 0000000000..bb8b788024
--- /dev/null
+++ b/libavcodec/x86/fpel_mmx.c
@@ -0,0 +1,94 @@
+/*
+ * MMX-optimized avg/put pixel routines
+ *
+ * Copyright (c) 2000, 2001 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "config.h"
+#include "dsputil_mmx.h"
+
+#if HAVE_MMX_INLINE
+
+void ff_put_pixels8_mmx(uint8_t *block, const uint8_t *pixels,
+ ptrdiff_t line_size, int h)
+{
+ __asm__ volatile (
+ "lea (%3, %3), %%"REG_a" \n\t"
+ ".p2align 3 \n\t"
+ "1: \n\t"
+ "movq (%1 ), %%mm0 \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "movq (%1 ), %%mm0 \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ : "+g"(h), "+r"(pixels), "+r"(block)
+ : "r"((x86_reg)line_size)
+ : "%"REG_a, "memory"
+ );
+}
+
+void ff_put_pixels16_mmx(uint8_t *block, const uint8_t *pixels,
+ ptrdiff_t line_size, int h)
+{
+ __asm__ volatile (
+ "lea (%3, %3), %%"REG_a" \n\t"
+ ".p2align 3 \n\t"
+ "1: \n\t"
+ "movq (%1 ), %%mm0 \n\t"
+ "movq 8(%1 ), %%mm4 \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq 8(%1, %3), %%mm5 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm4, 8(%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "movq %%mm5, 8(%2, %3) \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "movq (%1 ), %%mm0 \n\t"
+ "movq 8(%1 ), %%mm4 \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq 8(%1, %3), %%mm5 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm4, 8(%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "movq %%mm5, 8(%2, %3) \n\t"
+ "add %%"REG_a", %1 \n\t"
+ "add %%"REG_a", %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ : "+g"(h), "+r"(pixels), "+r"(block)
+ : "r"((x86_reg)line_size)
+ : "%"REG_a, "memory"
+ );
+}
+
+#endif /* HAVE_MMX_INLINE */
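
For reference, below is a minimal plain-C sketch of what the two MMX routines above compute; the put_pixels*_c_ref names are hypothetical and not part of this commit. Each call copies h rows of 8 or 16 pixels from pixels to block, using the same line_size stride for source and destination. Note that the inline-asm loops process four rows per iteration ("subl $4, %0"), so they assume h is a multiple of 4; the C sketch handles one row at a time for clarity.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical plain-C reference for ff_put_pixels8_mmx:
 * copy h rows of 8 bytes, advancing both pointers by line_size per row
 * (one memcpy here corresponds to one movq load/store pair above). */
static void put_pixels8_c_ref(uint8_t *block, const uint8_t *pixels,
                              ptrdiff_t line_size, int h)
{
    for (int i = 0; i < h; i++) {
        memcpy(block, pixels, 8);
        pixels += line_size;
        block  += line_size;
    }
}

/* Hypothetical plain-C reference for ff_put_pixels16_mmx:
 * each row is copied as two 8-byte halves, matching the paired movq
 * accesses at offsets 0 and 8 in the asm loop. */
static void put_pixels16_c_ref(uint8_t *block, const uint8_t *pixels,
                               ptrdiff_t line_size, int h)
{
    for (int i = 0; i < h; i++) {
        memcpy(block,     pixels,     8);
        memcpy(block + 8, pixels + 8, 8);
        pixels += line_size;
        block  += line_size;
    }
}

Unrolling the asm to four rows per jnz amortizes the loop-counter update and branch over more loads and stores, which is the main point of the MMX versions over a straightforward per-row copy like the sketch.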