/*
 * Loongson SIMD optimized h264dsp
 *
 * Copyright (c) 2015 Loongson Technology Corporation Limited
 * Copyright (c) 2015 Zhou Xiaoyong
 *                    Zhang Shuangshuang
 *                    Heiher
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/bit_depth_template.c"
#include "h264dsp_mips.h"

void ff_h264_add_pixels4_8_mmi(uint8_t *dst, int16_t *src, int stride)
{
    __asm__ volatile (
        "xor $f0, $f0, $f0 \r\n"
        "ldc1 $f2, 0(%[src]) \r\n"
        "ldc1 $f4, 8(%[src]) \r\n"
        "ldc1 $f6, 16(%[src]) \r\n"
        "ldc1 $f8, 24(%[src]) \r\n"
        "lwc1 $f10, 0(%[dst0]) \r\n"
        "lwc1 $f12, 0(%[dst1]) \r\n"
        "lwc1 $f14, 0(%[dst2]) \r\n"
        "lwc1 $f16, 0(%[dst3]) \r\n"
        "punpcklbh $f10, $f10, $f0 \r\n"
        "punpcklbh $f12, $f12, $f0 \r\n"
        "punpcklbh $f14, $f14, $f0 \r\n"
        "punpcklbh $f16, $f16, $f0 \r\n"
        "paddh $f2, $f2, $f10 \r\n"
        "paddh $f4, $f4, $f12 \r\n"
        "paddh $f6, $f6, $f14 \r\n"
        "paddh $f8, $f8, $f16 \r\n"
        "packushb $f2, $f2, $f0 \r\n"
        "packushb $f4, $f4, $f0 \r\n"
        "packushb $f6, $f6, $f0 \r\n"
        "packushb $f8, $f8, $f0 \r\n"
        "swc1 $f2, 0(%[dst0]) \r\n"
        "swc1 $f4, 0(%[dst1]) \r\n"
        "swc1 $f6, 0(%[dst2]) \r\n"
        "swc1 $f8, 0(%[dst3]) \r\n"
        ::[dst0]"r"(dst),[dst1]"r"(dst+stride),[dst2]"r"(dst+2*stride),
          [dst3]"r"(dst+3*stride),[src]"r"(src)
        : "$f0","$f2","$f4","$f6","$f8","$f10","$f12","$f14","$f16"
    );

    memset(src, 0, 32);
}

void ff_h264_idct_add_8_mmi(uint8_t *dst, int16_t *block, int stride)
{
    __asm__ volatile (
        "dli $8, 1 \r\n"
        "ldc1 $f0, 0(%[block]) \r\n"
        "dmtc1 $8, $f16 \r\n"
        "ldc1 $f2, 8(%[block]) \r\n"
        "dli $8, 6 \r\n"
        "ldc1 $f4, 16(%[block]) \r\n"
        "dmtc1 $8, $f18 \r\n"
        "psrah $f8, $f2, $f16 \r\n"
        "ldc1 $f6, 24(%[block]) \r\n"
        "psrah $f10, $f6, $f16 \r\n"
        "psubh $f8, $f8, $f6 \r\n"
        "paddh $f10, $f10, $f2 \r\n"
        "paddh $f20, $f4, $f0 \r\n"
        "psubh $f0, $f0, $f4 \r\n"
        "paddh $f22, $f10, $f20 \r\n"
        "psubh $f4, $f20, $f10 \r\n"
        "paddh $f20, $f8, $f0 \r\n"
        "psubh $f0, $f0, $f8 \r\n"
        "punpckhhw $f2, $f22, $f20 \r\n"
        "punpcklhw $f10, $f22, $f20 \r\n"
        "punpckhhw $f8, $f0, $f4 \r\n"
        "punpcklhw $f0, $f0, $f4 \r\n"
        "punpckhwd $f4, $f10, $f0 \r\n"
        "punpcklwd $f10, $f10, $f0 \r\n"
        "punpcklwd $f20, $f2, $f8 \r\n"
        "punpckhwd $f0, $f2, $f8 \r\n"
        "paddh $f10, $f10, %[ff_pw_32] \r\n"
        "psrah $f8, $f4, $f16 \r\n"
        "psrah $f6, $f0, $f16 \r\n"
        "psubh $f8, $f8, $f0 \r\n"
        "paddh $f6, $f6, $f4 \r\n"
        "paddh $f2, $f20, $f10 \r\n"
        "psubh $f10, $f10, $f20 \r\n"
        "paddh $f20, $f6, $f2 \r\n"
        "psubh $f2, $f2, $f6 \r\n"
        "paddh $f22, $f8, $f10 \r\n"
        "xor $f14, $f14, $f14 \r\n"
        "psubh $f10, $f10, $f8 \r\n"
        "sdc1 $f14, 0(%[block]) \r\n"
        "sdc1 $f14, 8(%[block]) \r\n"
        "sdc1 $f14, 16(%[block]) \r\n"
        "sdc1 $f14, 24(%[block]) \r\n"
        "lwc1 $f4, 0(%[dst]) \r\n"
        "psrah $f6, $f20, $f18 \r\n"
        "gslwxc1 $f0, 0(%[dst], %[stride]) \r\n"
        "psrah $f8, $f22, $f18 \r\n"
        "punpcklbh $f4, $f4, $f14 \r\n"
        "punpcklbh $f0, $f0, $f14 \r\n"
        "paddh $f4, $f4, $f6 \r\n"
        "paddh $f0, $f0, $f8 \r\n"
        "packushb $f4, $f4, $f14 \r\n"
        "packushb $f0, $f0, $f14 \r\n"
        "swc1 $f4, 0(%[dst]) \r\n"
        "gsswxc1 $f0, 0(%[dst], %[stride]) \r\n"
        "daddu %[dst], %[dst], %[stride] \r\n"
        "daddu %[dst], %[dst], %[stride] \r\n"
        "lwc1 $f4, 0(%[dst]) \r\n"
        "psrah $f10, $f10, $f18 \r\n"
        "gslwxc1 $f0, 0(%[dst], %[stride]) \r\n"
        "psrah $f2, $f2, $f18 \r\n"
        "punpcklbh $f4, $f4, $f14 \r\n"
        "punpcklbh $f0, $f0, $f14 \r\n"
        "paddh $f4, $f4, $f10 \r\n"
        "paddh $f0, $f0, $f2 \r\n"
        "packushb $f4, $f4, $f14 \r\n"
        "swc1 $f4, 0(%[dst]) \r\n"
        "packushb $f0, $f0, $f14 \r\n"
        "gsswxc1 $f0, 0(%[dst], %[stride]) \r\n"
        ::[dst]"r"(dst),[block]"r"(block),[stride]"r"((uint64_t)stride),
          [ff_pw_32]"f"(ff_pw_32)
        : "$8","$f0","$f2","$f4","$f6","$f8","$f10","$f12","$f14","$f16",
          "$f18","$f20","$f22"
    );

    memset(block, 0, 32);
}
void ff_h264_idct8_add_8_mmi(uint8_t *dst, int16_t *block, int stride)
{
    __asm__ volatile (
        "lhu $10, 0x0(%[block]) \r\n"
        "daddiu $29, $29, -0x20 \r\n"
        "daddiu $10, $10, 0x20 \r\n"
        "ldc1 $f2, 0x10(%[block]) \r\n"
        "sh $10, 0x0(%[block]) \r\n"
        "ldc1 $f4, 0x20(%[block]) \r\n"
        "dli $10, 0x1 \r\n"
        "ldc1 $f6, 0x30(%[block]) \r\n"
        "dmtc1 $10, $f16 \r\n"
        "ldc1 $f10, 0x50(%[block]) \r\n"
        "ldc1 $f12, 0x60(%[block]) \r\n"
        "ldc1 $f14, 0x70(%[block]) \r\n"
        "mov.d $f0, $f2 \r\n"
        "psrah $f2, $f2, $f16 \r\n"
        "psrah $f8, $f10, $f16 \r\n"
        "paddh $f2, $f2, $f0 \r\n"
        "paddh $f8, $f8, $f10 \r\n"
        "paddh $f2, $f2, $f10 \r\n"
        "paddh $f8, $f8, $f14 \r\n"
        "paddh $f2, $f2, $f6 \r\n"
        "psubh $f8, $f8, $f0 \r\n"
        "psubh $f0, $f0, $f6 \r\n"
        "psubh $f10, $f10, $f6 \r\n"
        "psrah $f6, $f6, $f16 \r\n"
        "paddh $f0, $f0, $f14 \r\n"
        "psubh $f10, $f10, $f14 \r\n"
        "psrah $f14, $f14, $f16 \r\n"
        "psubh $f0, $f0, $f6 \r\n"
        "dli $10, 0x2 \r\n"
        "psubh $f10, $f10, $f14 \r\n"
        "dmtc1 $10, $f18 \r\n"
        "mov.d $f14, $f2 \r\n"
        "psrah $f2, $f2, $f18 \r\n"
        "psrah $f6, $f8, $f18 \r\n"
        "paddh $f6, $f6, $f0 \r\n"
        "psrah $f0, $f0, $f18 \r\n"
        "paddh $f2, $f2, $f10 \r\n"
        "psrah $f10, $f10, $f18 \r\n"
        "psubh $f0, $f0, $f8 \r\n"
        "psubh $f14, $f14, $f10 \r\n"
        "mov.d $f10, $f12 \r\n"
        "psrah $f12, $f12, $f16 \r\n"
        "psrah $f8, $f4, $f16 \r\n"
        "paddh $f12, $f12, $f4 \r\n"
        "psubh $f8, $f8, $f10 \r\n"
        "ldc1 $f4, 0x0(%[block]) \r\n"
        "ldc1 $f10, 0x40(%[block]) \r\n"
        "paddh $f10, $f10, $f4 \r\n"
        "paddh $f4, $f4, $f4 \r\n"
        "paddh $f12, $f12, $f10 \r\n"
        "psubh $f4, $f4, $f10 \r\n"
        "paddh $f10, $f10, $f10 \r\n"
        "paddh $f8, $f8, $f4 \r\n"
        "psubh $f10, $f10, $f12 \r\n"
        "paddh $f4, $f4, $f4 \r\n"
        "paddh $f14, $f14, $f12 \r\n"
        "psubh $f4, $f4, $f8 \r\n"
        "paddh $f12, $f12, $f12 \r\n"
        "paddh $f0, $f0, $f8 \r\n"
        "psubh $f12, $f12, $f14 \r\n"
        "paddh $f8, $f8, $f8 \r\n"
        "paddh $f6, $f6, $f4 \r\n"
        "psubh $f8, $f8, $f0 \r\n"
        "paddh $f4, $f4, $f4 \r\n"
        "paddh $f2, $f2, $f10 \r\n"
        "psubh $f4, $f4, $f6 \r\n"
        "paddh $f10, $f10, $f10 \r\n"
        "sdc1 $f12, 0x0(%[block]) \r\n"
        "psubh $f10, $f10, $f2 \r\n"
        "punpckhhw $f12, $f14, $f0 \r\n"
        "punpcklhw $f14, $f14, $f0 \r\n"
        "punpckhhw $f0, $f6, $f2 \r\n"
        "punpcklhw $f6, $f6, $f2 \r\n"
        "punpckhwd $f2, $f14, $f6 \r\n"
        "punpcklwd $f14, $f14, $f6 \r\n"
        "punpckhwd $f6, $f12, $f0 \r\n"
        "punpcklwd $f12, $f12, $f0 \r\n"
        "ldc1 $f0, 0x0(%[block]) \r\n"
        "sdc1 $f14, 0x0($29) \r\n"
        "sdc1 $f2, 0x10($29) \r\n"
        "dmfc1 $8, $f12 \r\n"
        "dmfc1 $11, $f6 \r\n"
        "punpckhhw $f6, $f10, $f4 \r\n"
        "punpcklhw $f10, $f10, $f4 \r\n"
        "punpckhhw $f4, $f8, $f0 \r\n"
        "punpcklhw $f8, $f8, $f0 \r\n"
        "punpckhwd $f0, $f10, $f8 \r\n"
        "punpcklwd $f10, $f10, $f8 \r\n"
        "punpckhwd $f8, $f6, $f4 \r\n"
        "punpcklwd $f6, $f6, $f4 \r\n"
        "sdc1 $f10, 0x8($29) \r\n"
        "sdc1 $f0, 0x18($29) \r\n"
        "dmfc1 $9, $f6 \r\n"
        "dmfc1 $12, $f8 \r\n"
        "ldc1 $f2, 0x18(%[block]) \r\n"
        "ldc1 $f12, 0x28(%[block]) \r\n"
        "ldc1 $f4, 0x38(%[block]) \r\n"
        "ldc1 $f0, 0x58(%[block]) \r\n"
        "ldc1 $f6, 0x68(%[block]) \r\n"
        "ldc1 $f8, 0x78(%[block]) \r\n"
        "mov.d $f14, $f2 \r\n"
        "psrah $f10, $f0, $f16 \r\n"
        "psrah $f2, $f2, $f16 \r\n"
        "paddh $f10, $f10, $f0 \r\n"
        "paddh $f2, $f2, $f14 \r\n"
        "paddh $f10, $f10, $f8 \r\n"
        "paddh $f2, $f2, $f0 \r\n"
        "psubh $f10, $f10, $f14 \r\n"
        "paddh $f2, $f2, $f4 \r\n"
        "psubh $f14, $f14, $f4 \r\n"
        "psubh $f0, $f0, $f4 \r\n"
        "psrah $f4, $f4, $f16 \r\n"
        "paddh $f14, $f14, $f8 \r\n"
        "psubh $f0, $f0, $f8 \r\n"
        "psrah $f8, $f8, $f16 \r\n"
        "psubh $f14, $f14, $f4 \r\n"
        "psubh $f0, $f0, $f8 \r\n"
        "mov.d $f8, $f2 \r\n"
        "psrah $f4, $f10, $f18 \r\n"
        "psrah $f2, $f2, $f18 \r\n"
        "paddh $f4, $f4, $f14 \r\n"
        "psrah $f14, $f14, $f18 \r\n"
        "paddh $f2, $f2, $f0 \r\n"
        "psrah $f0, $f0, $f18 \r\n"
        "psubh $f14, $f14, $f10 \r\n"
        "psubh $f8, $f8, $f0 \r\n"
        "mov.d $f0, $f6 \r\n"
        "psrah $f6, $f6, $f16 \r\n"
        "psrah $f10, $f12, $f16 \r\n"
        "paddh $f6, $f6, $f12 \r\n"
        "psubh $f10, $f10, $f0 \r\n"
        "ldc1 $f12, 0x8(%[block]) \r\n"
        "ldc1 $f0, 0x48(%[block]) \r\n"
        "paddh $f0, $f0, $f12 \r\n"
        "paddh $f12, $f12, $f12 \r\n"
        "paddh $f6, $f6, $f0 \r\n"
        "psubh $f12, $f12, $f0 \r\n"
        "paddh $f0, $f0, $f0 \r\n"
        "paddh $f10, $f10, $f12 \r\n"
        "psubh $f0, $f0, $f6 \r\n"
        "paddh $f12, $f12, $f12 \r\n"
        "paddh $f8, $f8, $f6 \r\n"
        "psubh $f12, $f12, $f10 \r\n"
        "paddh $f6, $f6, $f6 \r\n"
        "paddh $f14, $f14, $f10 \r\n"
        "psubh $f6, $f6, $f8 \r\n"
        "paddh $f10, $f10, $f10 \r\n"
        "paddh $f4, $f4, $f12 \r\n"
        "psubh $f10, $f10, $f14 \r\n"
        "paddh $f12, $f12, $f12 \r\n"
        "paddh $f2, $f2, $f0 \r\n"
        "psubh $f12, $f12, $f4 \r\n"
        "paddh $f0, $f0, $f0 \r\n"
        "sdc1 $f6, 0x8(%[block]) \r\n"
        "psubh $f0, $f0, $f2 \r\n"
        "punpckhhw $f6, $f8, $f14 \r\n"
        "punpcklhw $f8, $f8, $f14 \r\n"
        "punpckhhw $f14, $f4, $f2 \r\n"
        "punpcklhw $f4, $f4, $f2 \r\n"
        "punpckhwd $f2, $f8, $f4 \r\n"
        "punpcklwd $f8, $f8, $f4 \r\n"
        "punpckhwd $f4, $f6, $f14 \r\n"
        "punpcklwd $f6, $f6, $f14 \r\n"
        "ldc1 $f14, 0x8(%[block]) \r\n"
        "dmfc1 $13, $f8 \r\n"
        "dmfc1 $15, $f2 \r\n"
        "mov.d $f24, $f6 \r\n"
        "mov.d $f28, $f4 \r\n"
        "punpckhhw $f4, $f0, $f12 \r\n"
        "punpcklhw $f0, $f0, $f12 \r\n"
        "punpckhhw $f12, $f10, $f14 \r\n"
        "punpcklhw $f10, $f10, $f14 \r\n"
        "punpckhwd $f14, $f0, $f10 \r\n"
        "punpcklwd $f0, $f0, $f10 \r\n"
        "punpckhwd $f10, $f4, $f12 \r\n"
        "punpcklwd $f4, $f4, $f12 \r\n"
        "dmfc1 $14, $f0 \r\n"
        "mov.d $f22, $f14 \r\n"
        "mov.d $f26, $f4 \r\n"
        "mov.d $f30, $f10 \r\n"
        "daddiu $10, %[dst], 0x4 \r\n"
        "dmtc1 $15, $f14 \r\n"
        "dmtc1 $11, $f12 \r\n"
        "ldc1 $f2, 0x10($29) \r\n"
        "dmtc1 $8, $f6 \r\n"
        "mov.d $f8, $f2 \r\n"
        "psrah $f2, $f2, $f16 \r\n"
        "psrah $f0, $f14, $f16 \r\n"
        "paddh $f2, $f2, $f8 \r\n"
        "paddh $f0, $f0, $f14 \r\n"
        "paddh $f2, $f2, $f14 \r\n"
        "paddh $f0, $f0, $f28 \r\n"
        "paddh $f2, $f2, $f12 \r\n"
        "psubh $f0, $f0, $f8 \r\n"
        "psubh $f8, $f8, $f12 \r\n"
        "psubh $f14, $f14, $f12 \r\n"
        "psrah $f12, $f12, $f16 \r\n"
        "paddh $f8, $f8, $f28 \r\n"
        "psubh $f14, $f14, $f28 \r\n"
        "psrah $f10, $f28, $f16 \r\n"
        "psubh $f8, $f8, $f12 \r\n"
        "psubh $f14, $f14, $f10 \r\n"
        "mov.d $f10, $f2 \r\n"
        "psrah $f2, $f2, $f18 \r\n"
        "psrah $f12, $f0, $f18 \r\n"
        "paddh $f2, $f2, $f14 \r\n"
        "paddh $f12, $f12, $f8 \r\n"
        "psrah $f8, $f8, $f18 \r\n"
        "psrah $f14, $f14, $f18 \r\n"
        "psubh $f8, $f8, $f0 \r\n"
        "psubh $f10, $f10, $f14 \r\n"
        "mov.d $f14, $f24 \r\n"
        "psrah $f4, $f24, $f16 \r\n"
        "psrah $f0, $f6, $f16 \r\n"
        "paddh $f4, $f4, $f6 \r\n"
        "psubh $f0, $f0, $f14 \r\n"
        "ldc1 $f6, 0x0($29) \r\n"
        "dmtc1 $13, $f14 \r\n"
        "paddh $f14, $f14, $f6 \r\n"
        "paddh $f6, $f6, $f6 \r\n"
        "paddh $f4, $f4, $f14 \r\n"
        "psubh $f6, $f6, $f14 \r\n"
        "paddh $f14, $f14, $f14 \r\n"
        "paddh $f0, $f0, $f6 \r\n"
        "psubh $f14, $f14, $f4 \r\n"
        "paddh $f6, $f6, $f6 \r\n"
        "paddh $f10, $f10, $f4 \r\n"
        "psubh $f6, $f6, $f0 \r\n"
        "paddh $f4, $f4, $f4 \r\n"
        "paddh $f8, $f8, $f0 \r\n"
        "psubh $f4, $f4, $f10 \r\n"
        "paddh $f0, $f0, $f0 \r\n"
        "paddh $f12, $f12, $f6 \r\n"
        "psubh $f0, $f0, $f8 \r\n"
        "paddh $f6, $f6, $f6 \r\n"
        "paddh $f2, $f2, $f14 \r\n"
        "psubh $f6, $f6, $f12 \r\n"
        "paddh $f14, $f14, $f14 \r\n"
        "sdc1 $f6, 0x0($29) \r\n"
        "psubh $f14, $f14, $f2 \r\n"
        "sdc1 $f0, 0x10($29) \r\n"
        "dmfc1 $8, $f4 \r\n"
        "xor $f4, $f4, $f4 \r\n"
        "sdc1 $f4, 0x0(%[block]) \r\n"
        "sdc1 $f4, 0x8(%[block]) \r\n"
        "sdc1 $f4, 0x10(%[block]) \r\n"
        "sdc1 $f4, 0x18(%[block]) \r\n"
        "sdc1 $f4, 0x20(%[block]) \r\n"
        "sdc1 $f4, 0x28(%[block]) \r\n"
        "sdc1 $f4, 0x30(%[block]) \r\n"
        "sdc1 $f4, 0x38(%[block]) \r\n"
        "sdc1 $f4, 0x40(%[block]) \r\n"
        "sdc1 $f4, 0x48(%[block]) \r\n"
        "sdc1 $f4, 0x50(%[block]) \r\n"
        "sdc1 $f4, 0x58(%[block]) \r\n"
        "sdc1 $f4, 0x60(%[block]) \r\n"
        "sdc1 $f4, 0x68(%[block]) \r\n"
        "sdc1 $f4, 0x70(%[block]) \r\n"
        "sdc1 $f4, 0x78(%[block]) \r\n"
        "dli $11, 0x6 \r\n"
        "lwc1 $f6, 0x0(%[dst]) \r\n"
        "dmtc1 $11, $f20 \r\n"
        "gslwxc1 $f0, 0x0(%[dst], %[stride]) \r\n"
        "psrah $f10, $f10, $f20 \r\n"
        "psrah $f8, $f8, $f20 \r\n"
        "punpcklbh $f6, $f6, $f4 \r\n"
        "punpcklbh $f0, $f0, $f4 \r\n"
        "paddh $f6, $f6, $f10 \r\n"
        "paddh $f0, $f0, $f8 \r\n"
        "packushb $f6, $f6, $f4 \r\n"
        "packushb $f0, $f0, $f4 \r\n"
        "swc1 $f6, 0x0(%[dst]) \r\n"
        "gsswxc1 $f0, 0x0(%[dst], %[stride]) \r\n"
        "daddu %[dst], %[dst], %[stride] \r\n"
        "daddu %[dst], %[dst], %[stride] \r\n"
        "lwc1 $f6, 0x0(%[dst]) \r\n"
        "gslwxc1 $f0, 0x0(%[dst], %[stride]) \r\n"
        "psrah $f12, $f12, $f20 \r\n"
        "psrah $f2, $f2, $f20 \r\n"
        "punpcklbh $f6, $f6, $f4 \r\n"
        "punpcklbh $f0, $f0, $f4 \r\n"
        "paddh $f6, $f6, $f12 \r\n"
        "paddh $f0, $f0, $f2 \r\n"
        "packushb $f6, $f6, $f4 \r\n"
        "packushb $f0, $f0, $f4 \r\n"
        "swc1 $f6, 0x0(%[dst]) \r\n"
        "gsswxc1 $f0, 0x0(%[dst], %[stride]) \r\n"
        "ldc1 $f10, 0x0($29) \r\n"
        "ldc1 $f8, 0x10($29) \r\n"
        "dmtc1 $8, $f12 \r\n"
        "daddu %[dst], %[dst], %[stride] \r\n"
        "daddu %[dst], %[dst], %[stride] \r\n"
        "lwc1 $f6, 0x0(%[dst]) \r\n"
        "gslwxc1 $f0, 0x0(%[dst], %[stride]) \r\n"
        "psrah $f14, $f14, $f20 \r\n"
        "psrah $f10, $f10, $f20 \r\n"
        "punpcklbh $f6, $f6, $f4 \r\n"
        "punpcklbh $f0, $f0, $f4 \r\n"
        "paddh $f6, $f6, $f14 \r\n"
        "paddh $f0, $f0, $f10 \r\n"
        "packushb $f6, $f6, $f4 \r\n"
        "packushb $f0, $f0, $f4 \r\n"
        "swc1 $f6, 0x0(%[dst]) \r\n"
        "gsswxc1 $f0, 0x0(%[dst], %[stride]) \r\n"
        "daddu %[dst], %[dst], %[stride] \r\n"
        "daddu %[dst], %[dst], %[stride] \r\n"
        "lwc1 $f6, 0x0(%[dst]) \r\n"
        "gslwxc1 $f0, 0x0(%[dst], %[stride]) \r\n"
        "psrah $f8, $f8, $f20 \r\n"
        "psrah $f12, $f12, $f20 \r\n"
        "punpcklbh $f6, $f6, $f4 \r\n"
        "punpcklbh $f0, $f0, $f4 \r\n"
        "paddh $f6, $f6, $f8 \r\n"
        "paddh $f0, $f0, $f12 \r\n"
        "packushb $f6, $f6, $f4 \r\n"
        "packushb $f0, $f0, $f4 \r\n"
        "swc1 $f6, 0x0(%[dst]) \r\n"
        "gsswxc1 $f0, 0x0(%[dst], %[stride]) \r\n"
        "dmtc1 $12, $f2 \r\n"
        "dmtc1 $9, $f12 \r\n"
        "ldc1 $f8, 0x18($29) \r\n"
        "mov.d $f10, $f8 \r\n"
        "psrah $f8, $f8, $f16 \r\n"
        "psrah $f14, $f22, $f16 \r\n"
        "paddh $f14, $f14, $f22 \r\n"
        "paddh $f8, $f8, $f10 \r\n"
        "paddh $f14, $f14, $f30 \r\n"
        "paddh $f8, $f8, $f22 \r\n"
        "psubh $f14, $f14, $f10 \r\n"
        "paddh $f8, $f8, $f2 \r\n"
        "psubh $f10, $f10, $f2 \r\n"
        "psubh $f6, $f22, $f2 \r\n"
        "psrah $f2, $f2, $f16 \r\n"
        "paddh $f10, $f10, $f30 \r\n"
        "psubh $f6, $f6, $f30 \r\n"
        "psrah $f4, $f30, $f16 \r\n"
        "psubh $f10, $f10, $f2 \r\n"
        "psubh $f6, $f6, $f4 \r\n"
        "mov.d $f4, $f8 \r\n"
        "psrah $f8, $f8, $f18 \r\n"
        "psrah $f2, $f14, $f18 \r\n"
        "paddh $f8, $f8, $f6 \r\n"
        "paddh $f2, $f2, $f10 \r\n"
        "psrah $f10, $f10, $f18 \r\n"
        "psrah $f6, $f6, $f18 \r\n"
        "psubh $f10, $f10, $f14 \r\n"
        "psubh $f4, $f4, $f6 \r\n"
        "mov.d $f6, $f26 \r\n"
        "psrah $f0, $f26, $f16 \r\n"
        "psrah $f14, $f12, $f16 \r\n"
        "paddh $f0, $f0, $f12 \r\n"
        "psubh $f14, $f14, $f6 \r\n"
        "ldc1 $f12, 0x8($29) \r\n"
        "dmtc1 $14, $f6 \r\n"
        "paddh $f6, $f6, $f12 \r\n"
        "paddh $f12, $f12, $f12 \r\n"
        "paddh $f0, $f0, $f6 \r\n"
        "psubh $f12, $f12, $f6 \r\n"
        "paddh $f6, $f6, $f6 \r\n"
        "paddh $f14, $f14, $f12 \r\n"
        "psubh $f6, $f6, $f0 \r\n"
        "paddh $f12, $f12, $f12 \r\n"
        "paddh $f4, $f4, $f0 \r\n"
        "psubh $f12, $f12, $f14 \r\n"
        "paddh $f0, $f0, $f0 \r\n"
        "paddh $f10, $f10, $f14 \r\n"
        "psubh $f0, $f0, $f4 \r\n"
        "paddh $f14, $f14, $f14 \r\n"
        "paddh $f2, $f2, $f12 \r\n"
        "psubh $f14, $f14, $f10 \r\n"
        "paddh $f12, $f12, $f12 \r\n"
        "paddh $f8, $f8, $f6 \r\n"
        "psubh $f12, $f12, $f2 \r\n"
        "paddh $f6, $f6, $f6 \r\n"
        "sdc1 $f12, 0x8($29) \r\n"
        "psubh $f6, $f6, $f8 \r\n"
        "sdc1 $f14, 0x18($29) \r\n"
        "dmfc1 $9, $f0 \r\n"
        "xor $f0, $f0, $f0 \r\n"
        "lwc1 $f12, 0x0($10) \r\n"
        "gslwxc1 $f14, 0x0($10, %[stride]) \r\n"
        "psrah $f4, $f4, $f20 \r\n"
        "psrah $f10, $f10, $f20 \r\n"
        "punpcklbh $f12, $f12, $f0 \r\n"
        "punpcklbh $f14, $f14, $f0 \r\n"
        "paddh $f12, $f12, $f4 \r\n"
        "paddh $f14, $f14, $f10 \r\n"
        "packushb $f12, $f12, $f0 \r\n"
        "packushb $f14, $f14, $f0 \r\n"
        "swc1 $f12, 0x0($10) \r\n"
        "gsswxc1 $f14, 0x0($10, %[stride]) \r\n"
        "daddu $10, $10, %[stride] \r\n"
        "daddu $10, $10, %[stride] \r\n"
        "lwc1 $f12, 0x0($10) \r\n"
        "gslwxc1 $f14, 0x0($10, %[stride]) \r\n"
        "psrah $f2, $f2, $f20 \r\n"
        "psrah $f8, $f8, $f20 \r\n"
        "punpcklbh $f12, $f12, $f0 \r\n"
        "punpcklbh $f14, $f14, $f0 \r\n"
        "paddh $f12, $f12, $f2 \r\n"
        "paddh $f14, $f14, $f8 \r\n"
        "packushb $f12, $f12, $f0 \r\n"
        "packushb $f14, $f14, $f0 \r\n"
        "swc1 $f12, 0x0($10) \r\n"
        "gsswxc1 $f14, 0x0($10, %[stride]) \r\n"
        "ldc1 $f4, 0x8($29) \r\n"
        "ldc1 $f10, 0x18($29) \r\n"
        "daddu $10, $10, %[stride] \r\n"
        "dmtc1 $9, $f2 \r\n"
        "daddu $10, $10, %[stride] \r\n"
        "lwc1 $f12, 0x0($10) \r\n"
        "gslwxc1 $f14, 0x0($10, %[stride]) \r\n"
        "psrah $f6, $f6, $f20 \r\n"
        "psrah $f4, $f4, $f20 \r\n"
        "punpcklbh $f12, $f12, $f0 \r\n"
        "punpcklbh $f14, $f14, $f0 \r\n"
        "paddh $f12, $f12, $f6 \r\n"
        "paddh $f14, $f14, $f4 \r\n"
        "packushb $f12, $f12, $f0 \r\n"
        "packushb $f14, $f14, $f0 \r\n"
        "swc1 $f12, 0x0($10) \r\n"
        "gsswxc1 $f14, 0x0($10, %[stride]) \r\n"
        "daddu $10, $10, %[stride] \r\n"
        "daddu $10, $10, %[stride] \r\n"
        "lwc1 $f12, 0x0($10) \r\n"
        "gslwxc1 $f14, 0x0($10, %[stride]) \r\n"
        "psrah $f10, $f10, $f20 \r\n"
        "psrah $f2, $f2, $f20 \r\n"
        "punpcklbh $f12, $f12, $f0 \r\n"
        "punpcklbh $f14, $f14, $f0 \r\n"
        "paddh $f12, $f12, $f10 \r\n"
        "paddh $f14, $f14, $f2 \r\n"
        "packushb $f12, $f12, $f0 \r\n"
        "packushb $f14, $f14, $f0 \r\n"
        "swc1 $f12, 0x0($10) \r\n"
        "gsswxc1 $f14, 0x0($10, %[stride]) \r\n"
        "daddiu $29, $29, 0x20 \r\n"
        ::[dst]"r"(dst),[block]"r"(block),[stride]"r"((uint64_t)stride)
        : "$8","$9","$10","$11","$12","$13","$14","$15","$29","$f0","$f2",
          "$f4","$f6","$f8","$f10","$f12","$f14","$f16","$f18","$f20","$f22",
          "$f24","$f26","$f28","$f30"
    );

    memset(block, 0, 128);
}
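/*
 * DC-only inverse transform + add: every pixel of the block gets the same
 * correction dc = (block[0] + 32) >> 6.  Instead of unpacking the pixels
 * to 16 bits, the two functions below keep them packed and use saturating
 * byte arithmetic: $f0 holds max(dc, 0) replicated in every byte and $f2
 * holds max(-dc, 0), so "paddusb" with $f0 followed by "psubusb" with $f2
 * adds the signed dc with unsigned saturation.  ff_h264_idct_dc_add_8_mmi
 * handles a 4x4 block (word loads/stores), ff_h264_idct8_dc_add_8_mmi an
 * 8x8 block (doubleword accesses, two passes of four rows).
 */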
void ff_h264_idct_dc_add_8_mmi(uint8_t *dst, int16_t *block, int stride)
{
    __asm__ volatile (
        "lh $8, 0x0(%[block]) \r\n"
        "sd $0, 0x0(%[block]) \r\n"
        "daddiu $8, $8, 0x20 \r\n"
        "daddu $10, %[stride], %[stride] \r\n"
        "dsra $8, $8, 0x6 \r\n"
        "xor $f2, $f2, $f2 \r\n"
        "mtc1 $8, $f0 \r\n"
        "pshufh $f0, $f0, $f2 \r\n"
        "daddu $8, $10, %[stride] \r\n"
        "psubh $f2, $f2, $f0 \r\n"
        "packushb $f0, $f0, $f0 \r\n"
        "packushb $f2, $f2, $f2 \r\n"
        "lwc1 $f4, 0x0(%[dst]) \r\n"
        "gslwxc1 $f6, 0x0(%[dst], %[stride]) \r\n"
        "gslwxc1 $f8, 0x0(%[dst], $10) \r\n"
        "gslwxc1 $f10, 0x0(%[dst], $8) \r\n"
        "paddusb $f4, $f4, $f0 \r\n"
        "paddusb $f6, $f6, $f0 \r\n"
        "paddusb $f8, $f8, $f0 \r\n"
        "paddusb $f10, $f10, $f0 \r\n"
        "psubusb $f4, $f4, $f2 \r\n"
        "psubusb $f6, $f6, $f2 \r\n"
        "psubusb $f8, $f8, $f2 \r\n"
        "psubusb $f10, $f10, $f2 \r\n"
        "swc1 $f4, 0x0(%[dst]) \r\n"
        "gsswxc1 $f6, 0x0(%[dst], %[stride]) \r\n"
        "gsswxc1 $f8, 0x0(%[dst], $10) \r\n"
        "gsswxc1 $f10, 0x0(%[dst], $8) \r\n"
        ::[dst]"r"(dst),[block]"r"(block),[stride]"r"((uint64_t)stride)
        : "$8","$10","$f0","$f2","$f4","$f6","$f8","$f10"
    );
}

void ff_h264_idct8_dc_add_8_mmi(uint8_t *dst, int16_t *block, int stride)
{
    __asm__ volatile (
        "lh $8, 0x0(%[block]) \r\n"
        "sd $0, 0x0(%[block]) \r\n"
        "daddiu $8, $8, 0x20 \r\n"
        "daddu $10, %[stride], %[stride] \r\n"
        "dsra $8, $8, 0x6 \r\n"
        "xor $f2, $f2, $f2 \r\n"
        "mtc1 $8, $f0 \r\n"
        "pshufh $f0, $f0, $f2 \r\n"
        "daddu $8, $10, %[stride] \r\n"
        "psubh $f2, $f2, $f0 \r\n"
        "packushb $f0, $f0, $f0 \r\n"
        "packushb $f2, $f2, $f2 \r\n"
        "ldc1 $f4, 0x0(%[dst]) \r\n"
        "gsldxc1 $f6, 0x0(%[dst], %[stride]) \r\n"
        "gsldxc1 $f8, 0x0(%[dst], $10) \r\n"
        "gsldxc1 $f10, 0x0(%[dst], $8) \r\n"
        "paddusb $f4, $f4, $f0 \r\n"
        "paddusb $f6, $f6, $f0 \r\n"
        "paddusb $f8, $f8, $f0 \r\n"
        "paddusb $f10, $f10, $f0 \r\n"
        "psubusb $f4, $f4, $f2 \r\n"
        "psubusb $f6, $f6, $f2 \r\n"
        "psubusb $f8, $f8, $f2 \r\n"
        "psubusb $f10, $f10, $f2 \r\n"
        "sdc1 $f4, 0x0(%[dst]) \r\n"
        "gssdxc1 $f6, 0x0(%[dst], %[stride]) \r\n"
        "gssdxc1 $f8, 0x0(%[dst], $10) \r\n"
        "daddu $9, $10, $10 \r\n"
        "gssdxc1 $f10, 0x0(%[dst], $8) \r\n"
        "daddu %[dst], %[dst], $9 \r\n"
        "ldc1 $f4, 0x0(%[dst]) \r\n"
        "gsldxc1 $f6, 0x0(%[dst], %[stride]) \r\n"
        "gsldxc1 $f8, 0x0(%[dst], $10) \r\n"
        "gsldxc1 $f10, 0x0(%[dst], $8) \r\n"
        "paddusb $f4, $f4, $f0 \r\n"
        "paddusb $f6, $f6, $f0 \r\n"
        "paddusb $f8, $f8, $f0 \r\n"
        "paddusb $f10, $f10, $f0 \r\n"
        "psubusb $f4, $f4, $f2 \r\n"
        "psubusb $f6, $f6, $f2 \r\n"
        "psubusb $f8, $f8, $f2 \r\n"
        "psubusb $f10, $f10, $f2 \r\n"
        "sdc1 $f4, 0x0(%[dst]) \r\n"
        "gssdxc1 $f6, 0x0(%[dst], %[stride]) \r\n"
        "gssdxc1 $f8, 0x0(%[dst], $10) \r\n"
        "gssdxc1 $f10, 0x0(%[dst], $8) \r\n"
        ::[dst]"r"(dst),[block]"r"(block),[stride]"r"((uint64_t)stride)
        : "$8","$9","$10","$f0","$f2","$f4","$f6","$f8","$f10"
    );
}

void ff_h264_idct_add16_8_mmi(uint8_t *dst, const int *block_offset,
        int16_t *block, int stride, const uint8_t nnzc[15*8])
{
    int i;
    for(i=0; i<16; i++){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && ((int16_t*)block)[i*16])
                ff_h264_idct_dc_add_8_mmi(dst + block_offset[i],
                        block + i*16, stride);
            else
                ff_h264_idct_add_8_mmi(dst + block_offset[i],
                        block + i*16, stride);
        }
    }
}

void ff_h264_idct_add16intra_8_mmi(uint8_t *dst, const int *block_offset,
        int16_t *block, int stride, const uint8_t nnzc[15*8])
{
    int i;
    for(i=0; i<16; i++){
        if(nnzc[ scan8[i] ])
            ff_h264_idct_add_8_mmi(dst + block_offset[i], block + i*16,
                    stride);
        else if(((int16_t*)block)[i*16])
            ff_h264_idct_dc_add_8_mmi(dst + block_offset[i], block + i*16,
                    stride);
    }
}

void ff_h264_idct8_add4_8_mmi(uint8_t *dst, const int *block_offset,
        int16_t *block, int stride, const uint8_t nnzc[15*8])
{
    int i;
    for(i=0; i<16; i+=4){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && ((int16_t*)block)[i*16])
                ff_h264_idct8_dc_add_8_mmi(dst + block_offset[i],
                        block + i*16, stride);
            else
                ff_h264_idct8_add_8_mmi(dst + block_offset[i],
                        block + i*16, stride);
        }
    }
}
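/*
 * ff_h264_idct_add8_8_mmi is the chroma counterpart of the add16 wrappers
 * above: it walks the two chroma planes (j = 1, 2, i.e. dest[j-1]) and,
 * for each 4x4 block that has coefficients, picks either the DC-only or
 * the full 4x4 transform, exactly as ff_h264_idct_add16intra_8_mmi does
 * for luma.
 */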
void ff_h264_idct_add8_8_mmi(uint8_t **dest, const int *block_offset,
        int16_t *block, int stride, const uint8_t nnzc[15*8])
{
    int i, j;
    for(j=1; j<3; j++){
        for(i=j*16; i<j*16+4; i++){
            if(nnzc[ scan8[i] ])
                ff_h264_idct_add_8_mmi(dest[j-1] + block_offset[i],
                        block + i*16, stride);
            else if(((int16_t*)block)[i*16])
                ff_h264_idct_dc_add_8_mmi(dest[j-1] + block_offset[i],
                        block + i*16, stride);
        }
    }
}

/*
 * A stretch of the file is missing at this point; only the tail of a
 * chroma DC dequant function survives below.
 */
        block[  0]= (t[0]*qmul + 128) >> 8;
        block[ 32]= (t[1]*qmul + 128) >> 8;
        block[ 64]= (t[2]*qmul + 128) >> 8;
        block[ 96]= (t[3]*qmul + 128) >> 8;
        block[ 16]= (t[4]*qmul + 128) >> 8;
        block[ 48]= (t[5]*qmul + 128) >> 8;
        block[ 80]= (t[6]*qmul + 128) >> 8;
        block[112]= (t[7]*qmul + 128) >> 8;
}

void ff_h264_chroma_dc_dequant_idct_8_mmi(int16_t *block, int qmul)
{
    int a,b,c,d;

    d = block[0] - block[16];
    a = block[0] + block[16];
    b = block[32] - block[48];
    c = block[32] + block[48];

    block[0] = ((a+c)*qmul) >> 7;
    block[16]= ((d+b)*qmul) >> 7;
    block[32]= ((a-c)*qmul) >> 7;
    block[48]= ((d-b)*qmul) >> 7;
}

void ff_h264_weight_pixels16_8_mmi(uint8_t *block, int stride, int height,
        int log2_denom, int weight, int offset)
{
    int y;

    offset <<= log2_denom;

    if (log2_denom)
        offset += 1 << (log2_denom - 1);

    for (y=0; y<height; y++) {
        /*
         * The body of this loop, together with the remaining
         * weight/biweight functions and the deblocking helper functions
         * that followed it, is missing from this copy of the file.
         */

void ff_deblock_v_luma_8_mmi(uint8_t *pix, int stride, int alpha, int beta,
        int8_t *tc0)
{
    if ((tc0[0] & tc0[1]) >= 0)
        ff_deblock_v8_luma_8_mmi(pix + 0, stride, alpha, beta, tc0);
    if ((tc0[2] & tc0[3]) >= 0)
        ff_deblock_v8_luma_8_mmi(pix + 8, stride, alpha, beta, tc0 + 2);
}

void ff_deblock_v_luma_intra_8_mmi(uint8_t *pix, int stride, int alpha,
        int beta)
{
    ff_deblock_v8_luma_intra_8_mmi(pix + 0, stride, alpha, beta);
    ff_deblock_v8_luma_intra_8_mmi(pix + 8, stride, alpha, beta);
}
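/*
 * Horizontal luma deblocking.  ff_deblock_h_luma_8_mmi does not filter
 * across rows directly: the first asm block transposes the strip of
 * pixels around the vertical edge into the local "stack" buffer (laid
 * out with a fixed stride of 0x10), that buffer is then run through the
 * vertical filter ff_deblock_v_luma_8_mmi, and the second asm block
 * transposes the filtered middle columns back into the picture.
 */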
void ff_deblock_h_luma_8_mmi(uint8_t *pix, int stride, int alpha, int beta,
        int8_t *tc0)
{
    uint64_t stack[0xd];

    __asm__ volatile (
        "daddu $15, %[stride], %[stride] \r\n"
        "daddiu $8, %[pix], -0x4 \r\n"
        "daddu $9, %[stride], $15 \r\n"
        "gsldlc1 $f0, 0x7($8) \r\n"
        "gsldrc1 $f0, 0x0($8) \r\n"
        "daddu $12, $8, %[stride] \r\n"
        "daddu $10, $8, $9 \r\n"
        "gsldlc1 $f2, 0x7($12) \r\n"
        "daddu $11, $8, $15 \r\n"
        "gsldrc1 $f2, 0x0($12) \r\n"
        "gsldlc1 $f4, 0x7($11) \r\n"
        "gsldrc1 $f4, 0x0($11) \r\n"
        "gsldlc1 $f6, 0x7($10) \r\n"
        "daddu $12, $10, %[stride] \r\n"
        "gsldrc1 $f6, 0x0($10) \r\n"
        "gsldlc1 $f8, 0x7($12) \r\n"
        "daddu $11, $10, $15 \r\n"
        "gsldrc1 $f8, 0x0($12) \r\n"
        "gsldlc1 $f10, 0x7($11) \r\n"
        "daddu $12, $10, $9 \r\n"
        "gsldrc1 $f10, 0x0($11) \r\n"
        "gsldlc1 $f12, 0x7($12) \r\n"
        "gsldrc1 $f12, 0x0($12) \r\n"
        "daddu $14, $15, $15 \r\n"
        "punpckhbh $f14, $f0, $f2 \r\n"
        "punpcklbh $f0, $f0, $f2 \r\n"
        "punpckhbh $f2, $f4, $f6 \r\n"
        "punpcklbh $f4, $f4, $f6 \r\n"
        "punpckhbh $f6, $f8, $f10 \r\n"
        "punpcklbh $f8, $f8, $f10 \r\n"
        "daddu $12, $10, $14 \r\n"
        "sdc1 $f2, 0x10+%[stack] \r\n"
        "gsldlc1 $f16, 0x7($12) \r\n"
        "gsldrc1 $f16, 0x0($12) \r\n"
        "daddu $13, $14, $14 \r\n"
        "punpckhbh $f10, $f12, $f16 \r\n"
        "punpcklbh $f12, $f12, $f16 \r\n"
        "punpckhhw $f2, $f0, $f4 \r\n"
        "punpcklhw $f0, $f0, $f4 \r\n"
        "punpckhhw $f4, $f8, $f12 \r\n"
        "punpcklhw $f8, $f8, $f12 \r\n"
        "ldc1 $f16, 0x10+%[stack] \r\n"
        "punpckhwd $f0, $f0, $f8 \r\n"
        "sdc1 $f0, 0x0+%[stack] \r\n"
        "punpckhhw $f12, $f14, $f16 \r\n"
        "punpcklhw $f14, $f14, $f16 \r\n"
        "punpckhhw $f0, $f6, $f10 \r\n"
        "punpcklhw $f6, $f6, $f10 \r\n"
        "punpcklwd $f12, $f12, $f0 \r\n"
        "punpckhwd $f10, $f14, $f6 \r\n"
        "punpcklwd $f14, $f14, $f6 \r\n"
        "punpckhwd $f6, $f2, $f4 \r\n"
        "punpcklwd $f2, $f2, $f4 \r\n"
        "sdc1 $f2, 0x10+%[stack] \r\n"
        "sdc1 $f6, 0x20+%[stack] \r\n"
        "sdc1 $f14, 0x30+%[stack] \r\n"
        "sdc1 $f10, 0x40+%[stack] \r\n"
        "sdc1 $f12, 0x50+%[stack] \r\n"
        "daddu $8, $8, $13 \r\n"
        "daddu $10, $10, $13 \r\n"
        "gsldlc1 $f0, 0x7($8) \r\n"
        "daddu $12, $8, %[stride] \r\n"
        "gsldrc1 $f0, 0x0($8) \r\n"
        "gsldlc1 $f2, 0x7($12) \r\n"
        "daddu $11, $8, $15 \r\n"
        "gsldrc1 $f2, 0x0($12) \r\n"
        "gsldlc1 $f4, 0x7($11) \r\n"
        "gsldrc1 $f4, 0x0($11) \r\n"
        "gsldlc1 $f6, 0x7($10) \r\n"
        "daddu $12, $10, %[stride] \r\n"
        "gsldrc1 $f6, 0x0($10) \r\n"
        "gsldlc1 $f8, 0x7($12) \r\n"
        "daddu $11, $10, $15 \r\n"
        "gsldrc1 $f8, 0x0($12) \r\n"
        "gsldlc1 $f10, 0x7($11) \r\n"
        "daddu $12, $10, $9 \r\n"
        "gsldrc1 $f10, 0x0($11) \r\n"
        "gsldlc1 $f12, 0x7($12) \r\n"
        "gsldrc1 $f12, 0x0($12) \r\n"
        "punpckhbh $f14, $f0, $f2 \r\n"
        "punpcklbh $f0, $f0, $f2 \r\n"
        "punpckhbh $f2, $f4, $f6 \r\n"
        "punpcklbh $f4, $f4, $f6 \r\n"
        "punpckhbh $f6, $f8, $f10 \r\n"
        "punpcklbh $f8, $f8, $f10 \r\n"
        "daddu $12, $10, $14 \r\n"
        "sdc1 $f2, 0x18+%[stack] \r\n"
        "gsldlc1 $f16, 0x7($12) \r\n"
        "gsldrc1 $f16, 0x0($12) \r\n"
        "punpckhhw $f2, $f0, $f4 \r\n"
        "punpckhbh $f10, $f12, $f16 \r\n"
        "punpcklbh $f12, $f12, $f16 \r\n"
        "punpcklhw $f0, $f0, $f4 \r\n"
        "punpckhhw $f4, $f8, $f12 \r\n"
        "punpcklhw $f8, $f8, $f12 \r\n"
        "punpckhwd $f0, $f0, $f8 \r\n"
        "ldc1 $f16, 0x18+%[stack] \r\n"
        "sdc1 $f0, 0x8+%[stack] \r\n"
        "punpckhhw $f12, $f14, $f16 \r\n"
        "punpcklhw $f14, $f14, $f16 \r\n"
        "punpckhhw $f0, $f6, $f10 \r\n"
        "punpcklhw $f6, $f6, $f10 \r\n"
        "punpckhwd $f10, $f14, $f6 \r\n"
        "punpcklwd $f14, $f14, $f6 \r\n"
        "punpckhwd $f6, $f2, $f4 \r\n"
        "punpcklwd $f2, $f2, $f4 \r\n"
        "punpcklwd $f12, $f12, $f0 \r\n"
        "sdc1 $f2, 0x18+%[stack] \r\n"
        "sdc1 $f6, 0x28+%[stack] \r\n"
        "sdc1 $f14, 0x38+%[stack] \r\n"
        "sdc1 $f10, 0x48+%[stack] \r\n"
        "sdc1 $f12, 0x58+%[stack] \r\n"
        ::[pix]"r"(pix),[stride]"r"((int64_t)stride),[stack]"m"(stack[0])
        : "$8","$9","$10","$11","$12","$13","$14","$15","$f0","$f2","$f4",
          "$f6","$f8","$f10","$f12","$f14","$f16"
    );

    ff_deblock_v_luma_8_mmi((uint8_t *) &stack[6], 0x10, alpha, beta, tc0);

    __asm__ volatile (
        "daddu $15, %[stride], %[stride] \r\n"
        "daddiu $8, %[pix], -0x2 \r\n"
        "daddu $14, $15, $15 \r\n"
        "daddu $9, $15, %[stride] \r\n"
        "daddu $13, $14, $14 \r\n"
        "daddu $10, $8, $9 \r\n"
        "ldc1 $f0, 0x10+%[stack] \r\n"
        "ldc1 $f2, 0x20+%[stack] \r\n"
        "ldc1 $f4, 0x30+%[stack] \r\n"
        "ldc1 $f6, 0x40+%[stack] \r\n"
        "punpckhwd $f8, $f0, $f0 \r\n"
        "punpckhwd $f10, $f2, $f2 \r\n"
        "punpckhwd $f12, $f4, $f4 \r\n"
        "punpcklbh $f0, $f0, $f2 \r\n"
        "punpcklbh $f4, $f4, $f6 \r\n"
        "punpcklhw $f2, $f0, $f4 \r\n"
        "punpckhhw $f0, $f0, $f4 \r\n"
        "gsswlc1 $f2, 0x3($8) \r\n"
        "gsswrc1 $f2, 0x0($8) \r\n"
        "daddu $12, $8, %[stride] \r\n"
        "punpckhwd $f2, $f2, $f2 \r\n"
        "daddu $11, $8, $15 \r\n"
        "gsswlc1 $f2, 0x3($12) \r\n"
        "gsswrc1 $f2, 0x0($12) \r\n"
        "gsswlc1 $f0, 0x3($11) \r\n"
        "gsswrc1 $f0, 0x0($11) \r\n"
        "punpckhwd $f0, $f0, $f0 \r\n"
        "punpckhwd $f6, $f6, $f6 \r\n"
        "gsswlc1 $f0, 0x3($10) \r\n"
        "gsswrc1 $f0, 0x0($10) \r\n"
        "punpcklbh $f8, $f8, $f10 \r\n"
        "punpcklbh $f12, $f12, $f6 \r\n"
        "punpcklhw $f10, $f8, $f12 \r\n"
        "daddu $12, $10, %[stride] \r\n"
        "punpckhhw $f8, $f8, $f12 \r\n"
        "gsswlc1 $f10, 0x3($12) \r\n"
        "gsswrc1 $f10, 0x0($12) \r\n"
        "daddu $12, $10, $15 \r\n"
        "punpckhwd $f10, $f10, $f10 \r\n"
        "daddu $11, $10, $9 \r\n"
        "gsswlc1 $f10, 0x3($12) \r\n"
        "gsswrc1 $f10, 0x0($12) \r\n"
        "gsswlc1 $f8, 0x3($11) \r\n"
        "gsswrc1 $f8, 0x0($11) \r\n"
        "daddu $12, $10, $14 \r\n"
        "punpckhwd $f8, $f8, $f8 \r\n"
        "daddu $8, $8, $13 \r\n"
        "gsswlc1 $f8, 0x3($12) \r\n"
        "gsswrc1 $f8, 0x0($12) \r\n"
        "daddu $10, $10, $13 \r\n"
        "ldc1 $f0, 0x18+%[stack] \r\n"
        "ldc1 $f2, 0x28+%[stack] \r\n"
        "ldc1 $f4, 0x38+%[stack] \r\n"
        "ldc1 $f6, 0x48+%[stack] \r\n"
        "daddu $15, %[stride], %[stride] \r\n"
        "punpckhwd $f8, $f0, $f0 \r\n"
        "daddu $14, $15, $15 \r\n"
        "punpckhwd $f10, $f2, $f2 \r\n"
        "punpckhwd $f12, $f4, $f4 \r\n"
        "punpcklbh $f0, $f0, $f2 \r\n"
        "punpcklbh $f4, $f4, $f6 \r\n"
        "daddu $12, $8, %[stride] \r\n"
        "punpcklhw $f2, $f0, $f4 \r\n"
        "punpckhhw $f0, $f0, $f4 \r\n"
        "gsswlc1 $f2, 0x3($8) \r\n"
        "gsswrc1 $f2, 0x0($8) \r\n"
        "punpckhwd $f2, $f2, $f2 \r\n"
        "daddu $11, $8, $15 \r\n"
        "gsswlc1 $f2, 0x3($12) \r\n"
        "gsswrc1 $f2, 0x0($12) \r\n"
        "gsswlc1 $f0, 0x3($11) \r\n"
        "gsswrc1 $f0, 0x0($11) \r\n"
        "punpckhwd $f0, $f0, $f0 \r\n"
        "punpckhwd $f6, $f6, $f6 \r\n"
        "gsswlc1 $f0, 0x3($10) \r\n"
        "gsswrc1 $f0, 0x0($10) \r\n"
        "punpcklbh $f8, $f8, $f10 \r\n"
        "punpcklbh $f12, $f12, $f6 \r\n"
        "daddu $12, $10, %[stride] \r\n"
        "punpcklhw $f10, $f8, $f12 \r\n"
        "punpckhhw $f8, $f8, $f12 \r\n"
        "gsswlc1 $f10, 0x3($12) \r\n"
        "gsswrc1 $f10, 0x0($12) \r\n"
        "daddu $12, $10, $15 \r\n"
        "punpckhwd $f10, $f10, $f10 \r\n"
        "daddu $11, $10, $9 \r\n"
        "gsswlc1 $f10, 0x3($12) \r\n"
        "gsswrc1 $f10, 0x0($12) \r\n"
        "gsswlc1 $f8, 0x3($11) \r\n"
        "gsswrc1 $f8, 0x0($11) \r\n"
        "daddu $12, $10, $14 \r\n"
        "punpckhwd $f8, $f8, $f8 \r\n"
        "gsswlc1 $f8, 0x3($12) \r\n"
        "gsswrc1 $f8, 0x0($12) \r\n"
        ::[pix]"r"(pix),[stride]"r"((int64_t)stride),[stack]"m"(stack[0])
        : "$8","$9","$10","$11","$12","$13","$14","$15","$f0","$f2","$f4",
          "$f6","$f8","$f10","$f12","$f14","$f16"
    );
}
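/*
 * The intra variant of the horizontal luma filter uses the same transpose
 * trick: the pixels are transposed into ptmp[] (stride 0x10),
 * ff_deblock_v_luma_intra_8_mmi filters them in place, and they are
 * transposed back.  Because the intra filter may modify up to three
 * pixels on each side of the edge, full 8-byte columns are written back
 * (gssdlc1/gssdrc1) instead of the 4-byte stores used above, and the
 * pointers computed before the call are kept in pdat[] across it.
 */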
void ff_deblock_h_luma_intra_8_mmi(uint8_t *pix, int stride, int alpha,
        int beta)
{
    uint64_t ptmp[0x11];
    uint64_t pdat[4];

    __asm__ volatile (
        "daddu $12, %[stride], %[stride] \r\n"
        "daddiu $10, %[pix], -0x4 \r\n"
        "daddu $11, $12, %[stride] \r\n"
        "daddu $13, $12, $12 \r\n"
        "daddu $9, $10, $11 \r\n"
        "daddu $8, $10, %[stride] \r\n"
        "gsldlc1 $f0, 0x7($10) \r\n"
        "gsldrc1 $f0, 0x0($10) \r\n"
        "daddu $14, $10, $12 \r\n"
        "gsldlc1 $f2, 0x7($8) \r\n"
        "gsldrc1 $f2, 0x0($8) \r\n"
        "gsldlc1 $f4, 0x7($14) \r\n"
        "gsldrc1 $f4, 0x0($14) \r\n"
        "daddu $8, $9, %[stride] \r\n"
        "gsldlc1 $f6, 0x7($9) \r\n"
        "gsldrc1 $f6, 0x0($9) \r\n"
        "daddu $14, $9, $12 \r\n"
        "gsldlc1 $f8, 0x7($8) \r\n"
        "gsldrc1 $f8, 0x0($8) \r\n"
        "daddu $8, $9, $11 \r\n"
        "gsldlc1 $f10, 0x7($14) \r\n"
        "gsldrc1 $f10, 0x0($14) \r\n"
        "gsldlc1 $f12, 0x7($8) \r\n"
        "gsldrc1 $f12, 0x0($8) \r\n"
        "daddu $8, $9, $13 \r\n"
        "punpckhbh $f14, $f0, $f2 \r\n"
        "punpcklbh $f0, $f0, $f2 \r\n"
        "punpckhbh $f2, $f4, $f6 \r\n"
        "punpcklbh $f4, $f4, $f6 \r\n"
        "punpckhbh $f6, $f8, $f10 \r\n"
        "punpcklbh $f8, $f8, $f10 \r\n"
        "gsldlc1 $f16, 0x7($8) \r\n"
        "gsldrc1 $f16, 0x0($8) \r\n"
        "punpckhbh $f10, $f12, $f16 \r\n"
        "punpcklbh $f12, $f12, $f16 \r\n"
        "sdc1 $f6, 0x0+%[ptmp] \r\n"
        "punpckhhw $f6, $f0, $f4 \r\n"
        "punpcklhw $f0, $f0, $f4 \r\n"
        "punpckhhw $f4, $f8, $f12 \r\n"
        "punpcklhw $f8, $f8, $f12 \r\n"
        "punpckhhw $f12, $f14, $f2 \r\n"
        "punpcklhw $f14, $f14, $f2 \r\n"
        "sdc1 $f4, 0x20+%[ptmp] \r\n"
        "ldc1 $f4, 0x0+%[ptmp] \r\n"
        "punpckhhw $f2, $f4, $f10 \r\n"
        "punpcklhw $f4, $f4, $f10 \r\n"
        "punpckhwd $f10, $f0, $f8 \r\n"
        "punpcklwd $f0, $f0, $f8 \r\n"
        "punpckhwd $f8, $f14, $f4 \r\n"
        "punpcklwd $f14, $f14, $f4 \r\n"
        "sdc1 $f0, 0x0+%[ptmp] \r\n"
        "sdc1 $f10, 0x10+%[ptmp] \r\n"
        "sdc1 $f14, 0x40+%[ptmp] \r\n"
        "sdc1 $f8, 0x50+%[ptmp] \r\n"
        "ldc1 $f16, 0x20+%[ptmp] \r\n"
        "punpckhwd $f0, $f6, $f16 \r\n"
        "punpcklwd $f6, $f6, $f16 \r\n"
        "punpckhwd $f10, $f12, $f2 \r\n"
        "punpcklwd $f12, $f12, $f2 \r\n"
        "daddu $8, $13, $13 \r\n"
        "sdc1 $f6, 0x20+%[ptmp] \r\n"
        "sdc1 $f0, 0x30+%[ptmp] \r\n"
        "sdc1 $f12, 0x60+%[ptmp] \r\n"
        "sdc1 $f10, 0x70+%[ptmp] \r\n"
        "daddu $10, $10, $8 \r\n"
        "daddu $9, $9, $8 \r\n"
        "daddu $8, $10, %[stride] \r\n"
        "gsldlc1 $f0, 0x7($10) \r\n"
        "gsldrc1 $f0, 0x0($10) \r\n"
        "daddu $14, $10, $12 \r\n"
        "gsldlc1 $f2, 0x7($8) \r\n"
        "gsldrc1 $f2, 0x0($8) \r\n"
        "gsldlc1 $f4, 0x7($14) \r\n"
        "gsldrc1 $f4, 0x0($14) \r\n"
        "daddu $8, $9, %[stride] \r\n"
        "gsldlc1 $f6, 0x7($9) \r\n"
        "gsldrc1 $f6, 0x0($9) \r\n"
        "daddu $14, $9, $12 \r\n"
        "gsldlc1 $f8, 0x7($8) \r\n"
        "gsldrc1 $f8, 0x0($8) \r\n"
        "daddu $8, $9, $11 \r\n"
        "gsldlc1 $f10, 0x7($14) \r\n"
        "gsldrc1 $f10, 0x0($14) \r\n"
        "gsldlc1 $f12, 0x7($8) \r\n"
        "gsldrc1 $f12, 0x0($8) \r\n"
        "daddu $8, $9, $13 \r\n"
        "punpckhbh $f14, $f0, $f2 \r\n"
        "punpcklbh $f0, $f0, $f2 \r\n"
        "punpckhbh $f2, $f4, $f6 \r\n"
        "punpcklbh $f4, $f4, $f6 \r\n"
        "punpckhbh $f6, $f8, $f10 \r\n"
        "punpcklbh $f8, $f8, $f10 \r\n"
        "gsldlc1 $f16, 0x7($8) \r\n"
        "gsldrc1 $f16, 0x0($8) \r\n"
        "punpckhbh $f10, $f12, $f16 \r\n"
        "punpcklbh $f12, $f12, $f16 \r\n"
        "sdc1 $f6, 0x8+%[ptmp] \r\n"
        "punpckhhw $f6, $f0, $f4 \r\n"
        "punpcklhw $f0, $f0, $f4 \r\n"
        "punpckhhw $f4, $f8, $f12 \r\n"
        "punpcklhw $f8, $f8, $f12 \r\n"
        "punpckhhw $f12, $f14, $f2 \r\n"
        "punpcklhw $f14, $f14, $f2 \r\n"
        "sdc1 $f4, 0x28+%[ptmp] \r\n"
        "ldc1 $f4, 0x8+%[ptmp] \r\n"
        "punpckhhw $f2, $f4, $f10 \r\n"
        "punpcklhw $f4, $f4, $f10 \r\n"
        "punpckhwd $f10, $f0, $f8 \r\n"
        "punpcklwd $f0, $f0, $f8 \r\n"
        "punpckhwd $f8, $f14, $f4 \r\n"
        "punpcklwd $f14, $f14, $f4 \r\n"
        "sdc1 $f0, 0x8+%[ptmp] \r\n"
        "sdc1 $f10, 0x18+%[ptmp] \r\n"
        "sdc1 $f14, 0x48+%[ptmp] \r\n"
        "sdc1 $f8, 0x58+%[ptmp] \r\n"
        "ldc1 $f16, 0x28+%[ptmp] \r\n"
        "punpckhwd $f0, $f6, $f16 \r\n"
        "punpcklwd $f6, $f6, $f16 \r\n"
        "punpckhwd $f10, $f12, $f2 \r\n"
        "punpcklwd $f12, $f12, $f2 \r\n"
        "sdc1 $f6, 0x28+%[ptmp] \r\n"
        "sdc1 $f0, 0x38+%[ptmp] \r\n"
        "sdc1 $f12, 0x68+%[ptmp] \r\n"
        "sdc1 $f10, 0x78+%[ptmp] \r\n"
        "sd $10, 0x00+%[pdat] \r\n"
        "sd $11, 0x08+%[pdat] \r\n"
        "sd $12, 0x10+%[pdat] \r\n"
        "sd $13, 0x18+%[pdat] \r\n"
        ::[pix]"r"(pix),[stride]"r"((uint64_t)stride),[ptmp]"m"(ptmp[0]),
          [pdat]"m"(pdat[0])
        : "$8","$9","$10","$11","$12","$13","$14","$f0","$f2","$f4","$f6",
          "$f8","$f10","$f12","$f14","$f16"
    );

    ff_deblock_v_luma_intra_8_mmi((uint8_t *) &ptmp[8], 0x10, alpha, beta);

    __asm__ volatile (
        "ld $10, 0x00+%[pdat] \r\n"
        "ld $11, 0x08+%[pdat] \r\n"
        "ld $12, 0x10+%[pdat] \r\n"
        "ld $13, 0x18+%[pdat] \r\n"
        "daddu $9, $10, $11 \r\n"
        "ldc1 $f0, 0x8+%[ptmp] \r\n"
        "ldc1 $f2, 0x18+%[ptmp] \r\n"
        "ldc1 $f4, 0x28+%[ptmp] \r\n"
        "ldc1 $f6, 0x38+%[ptmp] \r\n"
        "ldc1 $f8, 0x48+%[ptmp] \r\n"
        "ldc1 $f10, 0x58+%[ptmp] \r\n"
        "ldc1 $f12, 0x68+%[ptmp] \r\n"
        "punpckhbh $f14, $f0, $f2 \r\n"
        "punpcklbh $f0, $f0, $f2 \r\n"
        "punpckhbh $f2, $f4, $f6 \r\n"
        "punpcklbh $f4, $f4, $f6 \r\n"
        "punpckhbh $f6, $f8, $f10 \r\n"
        "punpcklbh $f8, $f8, $f10 \r\n"
        "ldc1 $f16, 0x78+%[ptmp] \r\n"
        "punpckhbh $f10, $f12, $f16 \r\n"
        "punpcklbh $f12, $f12, $f16 \r\n"
        "gssdlc1 $f6, 0x7($10) \r\n"
        "gssdrc1 $f6, 0x0($10) \r\n"
        "daddu $8, $10, $12 \r\n"
        "punpckhhw $f6, $f0, $f4 \r\n"
        "punpcklhw $f0, $f0, $f4 \r\n"
        "punpckhhw $f4, $f8, $f12 \r\n"
        "punpcklhw $f8, $f8, $f12 \r\n"
        "punpckhhw $f12, $f14, $f2 \r\n"
        "punpcklhw $f14, $f14, $f2 \r\n"
        "gssdlc1 $f4, 0x7($8) \r\n"
        "gssdrc1 $f4, 0x0($8) \r\n"
        "gsldlc1 $f4, 0x7($10) \r\n"
        "gsldrc1 $f4, 0x0($10) \r\n"
        "punpckhhw $f2, $f4, $f10 \r\n"
        "punpcklhw $f4, $f4, $f10 \r\n"
        "punpckhwd $f10, $f0, $f8 \r\n"
        "punpcklwd $f0, $f0, $f8 \r\n"
        "punpckhwd $f8, $f14, $f4 \r\n"
        "punpcklwd $f14, $f14, $f4 \r\n"
        "daddu $8, $10, %[stride] \r\n"
        "gssdlc1 $f0, 0x7($10) \r\n"
        "gssdrc1 $f0, 0x0($10) \r\n"
        "daddu $14, $9, %[stride] \r\n"
        "gssdlc1 $f10, 0x7($8) \r\n"
        "gssdrc1 $f10, 0x0($8) \r\n"
        "daddu $8, $9, $12 \r\n"
        "gssdlc1 $f14, 0x7($14) \r\n"
        "gssdrc1 $f14, 0x0($14) \r\n"
        "daddu $14, $10, $12 \r\n"
        "gssdlc1 $f8, 0x7($8) \r\n"
        "gssdrc1 $f8, 0x0($8) \r\n"
        "gsldlc1 $f16, 0x7($14) \r\n"
        "gsldrc1 $f16, 0x0($14) \r\n"
        "daddu $8, $10, $12 \r\n"
        "punpckhwd $f0, $f6, $f16 \r\n"
        "punpcklwd $f6, $f6, $f16 \r\n"
        "punpckhwd $f10, $f12, $f2 \r\n"
        "punpcklwd $f12, $f12, $f2 \r\n"
        "gssdlc1 $f6, 0x7($8) \r\n"
        "gssdrc1 $f6, 0x0($8) \r\n"
        "daddu $8, $9, $11 \r\n"
        "gssdlc1 $f0, 0x7($9) \r\n"
        "gssdrc1 $f0, 0x0($9) \r\n"
        "daddu $14, $9, $13 \r\n"
        "gssdlc1 $f12, 0x7($8) \r\n"
        "gssdrc1 $f12, 0x0($8) \r\n"
        "daddu $8, $13, $13 \r\n"
        "gssdlc1 $f10, 0x7($14) \r\n"
        "gssdrc1 $f10, 0x0($14) \r\n"
        "dsubu $10, $10, $8 \r\n"
        "dsubu $9, $9, $8 \r\n"
        "ldc1 $f0, 0x0+%[ptmp] \r\n"
        "ldc1 $f2, 0x10+%[ptmp] \r\n"
        "ldc1 $f4, 0x20+%[ptmp] \r\n"
        "ldc1 $f6, 0x30+%[ptmp] \r\n"
        "ldc1 $f8, 0x40+%[ptmp] \r\n"
        "ldc1 $f10, 0x50+%[ptmp] \r\n"
        "ldc1 $f12, 0x60+%[ptmp] \r\n"
        "punpckhbh $f14, $f0, $f2 \r\n"
        "punpcklbh $f0, $f0, $f2 \r\n"
        "punpckhbh $f2, $f4, $f6 \r\n"
        "punpcklbh $f4, $f4, $f6 \r\n"
        "punpckhbh $f6, $f8, $f10 \r\n"
        "punpcklbh $f8, $f8, $f10 \r\n"
        "ldc1 $f16, 0x70+%[ptmp] \r\n"
        "punpckhbh $f10, $f12, $f16 \r\n"
        "punpcklbh $f12, $f12, $f16 \r\n"
        "gssdlc1 $f6, 0x7($10) \r\n"
        "gssdrc1 $f6, 0x0($10) \r\n"
        "daddu $8, $10, $12 \r\n"
        "punpckhhw $f6, $f0, $f4 \r\n"
        "punpcklhw $f0, $f0, $f4 \r\n"
        "punpckhhw $f4, $f8, $f12 \r\n"
        "punpcklhw $f8, $f8, $f12 \r\n"
        "punpckhhw $f12, $f14, $f2 \r\n"
        "punpcklhw $f14, $f14, $f2 \r\n"
        "gssdlc1 $f4, 0x7($8) \r\n"
        "gssdrc1 $f4, 0x0($8) \r\n"
        "gsldlc1 $f4, 0x7($10) \r\n"
        "gsldrc1 $f4, 0x0($10) \r\n"
        "punpckhhw $f2, $f4, $f10 \r\n"
        "punpcklhw $f4, $f4, $f10 \r\n"
        "punpckhwd $f10, $f0, $f8 \r\n"
        "punpcklwd $f0, $f0, $f8 \r\n"
        "punpckhwd $f8, $f14, $f4 \r\n"
        "punpcklwd $f14, $f14, $f4 \r\n"
        "daddu $8, $10, %[stride] \r\n"
        "gssdlc1 $f0, 0x7($10) \r\n"
        "gssdrc1 $f0, 0x0($10) \r\n"
        "daddu $14, $9, %[stride] \r\n"
        "gssdlc1 $f10, 0x7($8) \r\n"
        "gssdrc1 $f10, 0x0($8) \r\n"
        "daddu $8, $9, $12 \r\n"
        "gssdlc1 $f14, 0x7($14) \r\n"
        "gssdrc1 $f14, 0x0($14) \r\n"
        "daddu $14, $10, $12 \r\n"
        "gssdlc1 $f8, 0x7($8) \r\n"
        "gssdrc1 $f8, 0x0($8) \r\n"
        "gsldlc1 $f16, 0x7($14) \r\n"
        "gsldrc1 $f16, 0x0($14) \r\n"
        "daddu $8, $10, $12 \r\n"
        "punpckhwd $f0, $f6, $f16 \r\n"
        "punpcklwd $f6, $f6, $f16 \r\n"
        "punpckhwd $f10, $f12, $f2 \r\n"
        "punpcklwd $f12, $f12, $f2 \r\n"
        "gssdlc1 $f6, 0x7($8) \r\n"
        "gssdrc1 $f6, 0x0($8) \r\n"
        "daddu $8, $9, $11 \r\n"
        "gssdlc1 $f0, 0x7($9) \r\n"
        "gssdrc1 $f0, 0x0($9) \r\n"
        "daddu $14, $9, $13 \r\n"
        "gssdlc1 $f12, 0x7($8) \r\n"
        "gssdrc1 $f12, 0x0($8) \r\n"
        "gssdlc1 $f10, 0x7($14) \r\n"
        "gssdrc1 $f10, 0x0($14) \r\n"
        ::[pix]"r"(pix),[stride]"r"((uint64_t)stride),[ptmp]"m"(ptmp[0]),
          [pdat]"m"(pdat[0])
        : "$8","$9","$10","$11","$12","$13","$14","$f0","$f2","$f4","$f6",
          "$f8","$f10","$f12","$f14","$f16"
    );
}