summary refs log tree commit diff
path: root/libavcodec/i386/dsputil_h264_template_mmx.c
diff options
context:
space:
mode:
Diffstat (limited to 'libavcodec/i386/dsputil_h264_template_mmx.c')
-rw-r--r--  libavcodec/i386/dsputil_h264_template_mmx.c  65
1 file changed, 64 insertions(+), 1 deletion(-)
diff --git a/libavcodec/i386/dsputil_h264_template_mmx.c b/libavcodec/i386/dsputil_h264_template_mmx.c
index d52938ccf4..c159767ab2 100644
--- a/libavcodec/i386/dsputil_h264_template_mmx.c
+++ b/libavcodec/i386/dsputil_h264_template_mmx.c
@@ -240,7 +240,7 @@ static void H264_CHROMA_MC8_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*
}
}
-static void H264_CHROMA_MC4_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
+static void H264_CHROMA_MC4_TMPL(uint8_t *dst/*align 4*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
DECLARE_ALIGNED_8(uint64_t, AA);
DECLARE_ALIGNED_8(uint64_t, DD);
@@ -319,3 +319,66 @@ static void H264_CHROMA_MC4_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*
dst += stride;
}
}
+
+#ifdef H264_CHROMA_MC2_TMPL
+/* Motion compensation for a 2-pixel-wide H.264 chroma block (MMX + pshufw).
+ * Each output pixel is the bilinear blend
+ *   dst[i] = (A*src[i] + B*src[i+1] + C*src[i+stride] + D*src[i+stride+1] + 32) >> 6
+ * The four weights are packed pairwise into mm5/mm6 so one pmaddwd per row
+ * produces both output pixels at once.
+ * NOTE(review): H264_CHROMA_OP4 and ff_pw_32 are supplied by the file that
+ * includes this template (put vs. avg flavours) -- not visible in this hunk. */
+static void H264_CHROMA_MC2_TMPL(uint8_t *dst/*align 2*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
+{
+ /* CD packs C=(8-x)*y in the low word and D=x*y in the high word;
+  * AB packs A=(8-x)*(8-y) in the low word and B=x*(8-y) in the high word.
+  * A+B+C+D == 64, hence the "+32, >>6" rounding at the bottom. */
+ int CD=((1<<16)-1)*x*y + 8*y;
+ int AB=((8<<16)-8)*x + 64 - CD;
+ int i;
+
+ asm volatile(
+ /* mm5 = {A,B,A,B} */
+ /* mm6 = {C,D,C,D} */
+ "movd %0, %%mm5\n\t"
+ "movd %1, %%mm6\n\t"
+ "punpckldq %%mm5, %%mm5\n\t"
+ "punpckldq %%mm6, %%mm6\n\t"
+ "pxor %%mm7, %%mm7\n\t" /* mm7 = 0; used below to zero-extend bytes to words */
+ :: "r"(AB), "r"(CD));
+
+ asm volatile(
+ /* mm0 = src[0,1,1,2] as words: the first row, pre-loaded once here and
+  * then reused each iteration as the previous row inside the loop */
+ "movd %0, %%mm0\n\t"
+ "punpcklbw %%mm7, %%mm0\n\t"
+ "pshufw $0x94, %%mm0, %%mm0\n\t"
+ :: "m"(src[0]));
+
+ for(i=0; i<h; i++) {
+ asm volatile(
+ /* mm1 = A * src[0,1] + B * src[1,2] */
+ "movq %%mm0, %%mm1\n\t"
+ "pmaddwd %%mm5, %%mm1\n\t"
+ ::);
+
+ /* advance to the next row; its pixels serve both as this iteration's
+  * bottom contribution (C/D) and next iteration's top contribution (A/B) */
+ src += stride;
+ asm volatile(
+ /* mm0 = src[0,1,1,2] */
+ "movd %0, %%mm0\n\t"
+ "punpcklbw %%mm7, %%mm0\n\t"
+ "pshufw $0x94, %%mm0, %%mm0\n\t"
+ :: "m"(src[0]));
+
+ asm volatile(
+ /* mm1 += C * src[0,1] + D * src[1,2] */
+ "movq %%mm0, %%mm2\n\t"
+ "pmaddwd %%mm6, %%mm2\n\t"
+ "paddw %%mm2, %%mm1\n\t"
+ ::);
+
+ asm volatile(
+ /* dst[0,1] = pack((mm1 + 32) >> 6) */
+ "paddw %1, %%mm1\n\t"
+ "psrlw $6, %%mm1\n\t"
+ "packssdw %%mm7, %%mm1\n\t"
+ "packuswb %%mm7, %%mm1\n\t"
+ /* writes garbage to the right of dst.
+ * ok because partitions are processed from left to right. */
+ H264_CHROMA_OP4(%0, %%mm1, %%mm3)
+ "movd %%mm1, %0\n\t"
+ /* NOTE(review): eax is declared clobbered, presumably for some expansion
+  * of H264_CHROMA_OP4 -- confirm against the including file */
+ : "=m" (dst[0]) : "m" (ff_pw_32) : "eax");
+ dst += stride;
+ }
+}
+#endif
+