Diffstat (limited to 'libavcodec/mips/fmtconvert_mips.c')
-rw-r--r--  libavcodec/mips/fmtconvert_mips.c  201
1 file changed, 0 insertions, 201 deletions
diff --git a/libavcodec/mips/fmtconvert_mips.c b/libavcodec/mips/fmtconvert_mips.c
index 8a0265f070..a785c9eea3 100644
--- a/libavcodec/mips/fmtconvert_mips.c
+++ b/libavcodec/mips/fmtconvert_mips.c
@@ -52,203 +52,6 @@
#include "libavcodec/fmtconvert.h"
#if HAVE_INLINE_ASM
-#if HAVE_MIPSDSPR1
-static void float_to_int16_mips(int16_t *dst, const float *src, long len)
-{
- const float *src_end = src + len;
- int ret0, ret1, ret2, ret3, ret4, ret5, ret6, ret7;
- float src0, src1, src2, src3, src4, src5, src6, src7;
-
- /*
- * loop is 8 times unrolled in assembler in order to achieve better performance
- */
- __asm__ volatile(
- "beq %[len], $zero, fti16_end%= \n\t"
- "fti16_lp%=: \n\t"
- "lwc1 %[src0], 0(%[src]) \n\t"
- "lwc1 %[src1], 4(%[src]) \n\t"
- "lwc1 %[src2], 8(%[src]) \n\t"
- "lwc1 %[src3], 12(%[src]) \n\t"
- "cvt.w.s %[src0], %[src0] \n\t"
- "cvt.w.s %[src1], %[src1] \n\t"
- "cvt.w.s %[src2], %[src2] \n\t"
- "cvt.w.s %[src3], %[src3] \n\t"
- "mfc1 %[ret0], %[src0] \n\t"
- "mfc1 %[ret1], %[src1] \n\t"
- "mfc1 %[ret2], %[src2] \n\t"
- "mfc1 %[ret3], %[src3] \n\t"
- "lwc1 %[src4], 16(%[src]) \n\t"
- "lwc1 %[src5], 20(%[src]) \n\t"
- "lwc1 %[src6], 24(%[src]) \n\t"
- "lwc1 %[src7], 28(%[src]) \n\t"
- "cvt.w.s %[src4], %[src4] \n\t"
- "cvt.w.s %[src5], %[src5] \n\t"
- "cvt.w.s %[src6], %[src6] \n\t"
- "cvt.w.s %[src7], %[src7] \n\t"
- "addiu %[src], 32 \n\t"
- "shll_s.w %[ret0], %[ret0], 16 \n\t"
- "shll_s.w %[ret1], %[ret1], 16 \n\t"
- "shll_s.w %[ret2], %[ret2], 16 \n\t"
- "shll_s.w %[ret3], %[ret3], 16 \n\t"
- "srl %[ret0], %[ret0], 16 \n\t"
- "srl %[ret1], %[ret1], 16 \n\t"
- "srl %[ret2], %[ret2], 16 \n\t"
- "srl %[ret3], %[ret3], 16 \n\t"
- "sh %[ret0], 0(%[dst]) \n\t"
- "sh %[ret1], 2(%[dst]) \n\t"
- "sh %[ret2], 4(%[dst]) \n\t"
- "sh %[ret3], 6(%[dst]) \n\t"
- "mfc1 %[ret4], %[src4] \n\t"
- "mfc1 %[ret5], %[src5] \n\t"
- "mfc1 %[ret6], %[src6] \n\t"
- "mfc1 %[ret7], %[src7] \n\t"
- "shll_s.w %[ret4], %[ret4], 16 \n\t"
- "shll_s.w %[ret5], %[ret5], 16 \n\t"
- "shll_s.w %[ret6], %[ret6], 16 \n\t"
- "shll_s.w %[ret7], %[ret7], 16 \n\t"
- "srl %[ret4], %[ret4], 16 \n\t"
- "srl %[ret5], %[ret5], 16 \n\t"
- "srl %[ret6], %[ret6], 16 \n\t"
- "srl %[ret7], %[ret7], 16 \n\t"
- "sh %[ret4], 8(%[dst]) \n\t"
- "sh %[ret5], 10(%[dst]) \n\t"
- "sh %[ret6], 12(%[dst]) \n\t"
- "sh %[ret7], 14(%[dst]) \n\t"
- "addiu %[dst], 16 \n\t"
- "bne %[src], %[src_end], fti16_lp%= \n\t"
- "fti16_end%=: \n\t"
- : [ret0]"=&r"(ret0), [ret1]"=&r"(ret1), [ret2]"=&r"(ret2), [ret3]"=&r"(ret3),
- [ret4]"=&r"(ret4), [ret5]"=&r"(ret5), [ret6]"=&r"(ret6), [ret7]"=&r"(ret7),
- [src0]"=&f"(src0), [src1]"=&f"(src1), [src2]"=&f"(src2), [src3]"=&f"(src3),
- [src4]"=&f"(src4), [src5]"=&f"(src5), [src6]"=&f"(src6), [src7]"=&f"(src7),
- [src]"+r"(src), [dst]"+r"(dst)
- : [src_end]"r"(src_end), [len]"r"(len)
- : "memory"
- );
-}
-
-static void float_to_int16_interleave_mips(int16_t *dst, const float **src, long len,
- int channels)
-{
- int c, ch2 = channels <<1;
- int ret0, ret1, ret2, ret3, ret4, ret5, ret6, ret7;
- float src0, src1, src2, src3, src4, src5, src6, src7;
- int16_t *dst_ptr0, *dst_ptr1, *dst_ptr2, *dst_ptr3;
- int16_t *dst_ptr4, *dst_ptr5, *dst_ptr6, *dst_ptr7;
- const float *src_ptr, *src_ptr2, *src_end;
-
- if (channels == 2) {
- src_ptr = &src[0][0];
- src_ptr2 = &src[1][0];
- src_end = src_ptr + len;
-
- __asm__ volatile (
- "fti16i2_lp%=: \n\t"
- "lwc1 %[src0], 0(%[src_ptr]) \n\t"
- "lwc1 %[src1], 0(%[src_ptr2]) \n\t"
- "addiu %[src_ptr], 4 \n\t"
- "cvt.w.s $f9, %[src0] \n\t"
- "cvt.w.s $f10, %[src1] \n\t"
- "mfc1 %[ret0], $f9 \n\t"
- "mfc1 %[ret1], $f10 \n\t"
- "shll_s.w %[ret0], %[ret0], 16 \n\t"
- "shll_s.w %[ret1], %[ret1], 16 \n\t"
- "addiu %[src_ptr2], 4 \n\t"
- "srl %[ret0], %[ret0], 16 \n\t"
- "srl %[ret1], %[ret1], 16 \n\t"
- "sh %[ret0], 0(%[dst]) \n\t"
- "sh %[ret1], 2(%[dst]) \n\t"
- "addiu %[dst], 4 \n\t"
- "bne %[src_ptr], %[src_end], fti16i2_lp%= \n\t"
- : [ret0]"=&r"(ret0), [ret1]"=&r"(ret1),
- [src0]"=&f"(src0), [src1]"=&f"(src1),
- [src_ptr]"+r"(src_ptr), [src_ptr2]"+r"(src_ptr2),
- [dst]"+r"(dst)
- : [src_end]"r"(src_end)
- : "memory"
- );
- } else {
- for (c = 0; c < channels; c++) {
- src_ptr = &src[c][0];
- dst_ptr0 = &dst[c];
- src_end = src_ptr + len;
- /*
- * loop is 8 times unrolled in assembler in order to achieve better performance
- */
- __asm__ volatile(
- "fti16i_lp%=: \n\t"
- "lwc1 %[src0], 0(%[src_ptr]) \n\t"
- "lwc1 %[src1], 4(%[src_ptr]) \n\t"
- "lwc1 %[src2], 8(%[src_ptr]) \n\t"
- "lwc1 %[src3], 12(%[src_ptr]) \n\t"
- "cvt.w.s %[src0], %[src0] \n\t"
- "cvt.w.s %[src1], %[src1] \n\t"
- "cvt.w.s %[src2], %[src2] \n\t"
- "cvt.w.s %[src3], %[src3] \n\t"
- "mfc1 %[ret0], %[src0] \n\t"
- "mfc1 %[ret1], %[src1] \n\t"
- "mfc1 %[ret2], %[src2] \n\t"
- "mfc1 %[ret3], %[src3] \n\t"
- "lwc1 %[src4], 16(%[src_ptr]) \n\t"
- "lwc1 %[src5], 20(%[src_ptr]) \n\t"
- "lwc1 %[src6], 24(%[src_ptr]) \n\t"
- "lwc1 %[src7], 28(%[src_ptr]) \n\t"
- "addu %[dst_ptr1], %[dst_ptr0], %[ch2] \n\t"
- "addu %[dst_ptr2], %[dst_ptr1], %[ch2] \n\t"
- "addu %[dst_ptr3], %[dst_ptr2], %[ch2] \n\t"
- "addu %[dst_ptr4], %[dst_ptr3], %[ch2] \n\t"
- "addu %[dst_ptr5], %[dst_ptr4], %[ch2] \n\t"
- "addu %[dst_ptr6], %[dst_ptr5], %[ch2] \n\t"
- "addu %[dst_ptr7], %[dst_ptr6], %[ch2] \n\t"
- "addiu %[src_ptr], 32 \n\t"
- "cvt.w.s %[src4], %[src4] \n\t"
- "cvt.w.s %[src5], %[src5] \n\t"
- "cvt.w.s %[src6], %[src6] \n\t"
- "cvt.w.s %[src7], %[src7] \n\t"
- "shll_s.w %[ret0], %[ret0], 16 \n\t"
- "shll_s.w %[ret1], %[ret1], 16 \n\t"
- "shll_s.w %[ret2], %[ret2], 16 \n\t"
- "shll_s.w %[ret3], %[ret3], 16 \n\t"
- "srl %[ret0], %[ret0], 16 \n\t"
- "srl %[ret1], %[ret1], 16 \n\t"
- "srl %[ret2], %[ret2], 16 \n\t"
- "srl %[ret3], %[ret3], 16 \n\t"
- "sh %[ret0], 0(%[dst_ptr0]) \n\t"
- "sh %[ret1], 0(%[dst_ptr1]) \n\t"
- "sh %[ret2], 0(%[dst_ptr2]) \n\t"
- "sh %[ret3], 0(%[dst_ptr3]) \n\t"
- "mfc1 %[ret4], %[src4] \n\t"
- "mfc1 %[ret5], %[src5] \n\t"
- "mfc1 %[ret6], %[src6] \n\t"
- "mfc1 %[ret7], %[src7] \n\t"
- "shll_s.w %[ret4], %[ret4], 16 \n\t"
- "shll_s.w %[ret5], %[ret5], 16 \n\t"
- "shll_s.w %[ret6], %[ret6], 16 \n\t"
- "shll_s.w %[ret7], %[ret7], 16 \n\t"
- "srl %[ret4], %[ret4], 16 \n\t"
- "srl %[ret5], %[ret5], 16 \n\t"
- "srl %[ret6], %[ret6], 16 \n\t"
- "srl %[ret7], %[ret7], 16 \n\t"
- "sh %[ret4], 0(%[dst_ptr4]) \n\t"
- "sh %[ret5], 0(%[dst_ptr5]) \n\t"
- "sh %[ret6], 0(%[dst_ptr6]) \n\t"
- "sh %[ret7], 0(%[dst_ptr7]) \n\t"
- "addu %[dst_ptr0], %[dst_ptr7], %[ch2] \n\t"
- "bne %[src_ptr], %[src_end], fti16i_lp%= \n\t"
- : [ret0]"=&r"(ret0), [ret1]"=&r"(ret1), [ret2]"=&r"(ret2), [ret3]"=&r"(ret3),
- [ret4]"=&r"(ret4), [ret5]"=&r"(ret5), [ret6]"=&r"(ret6), [ret7]"=&r"(ret7),
- [src0]"=&f"(src0), [src1]"=&f"(src1), [src2]"=&f"(src2), [src3]"=&f"(src3),
- [src4]"=&f"(src4), [src5]"=&f"(src5), [src6]"=&f"(src6), [src7]"=&f"(src7),
- [dst_ptr1]"=&r"(dst_ptr1), [dst_ptr2]"=&r"(dst_ptr2), [dst_ptr3]"=&r"(dst_ptr3),
- [dst_ptr4]"=&r"(dst_ptr4), [dst_ptr5]"=&r"(dst_ptr5), [dst_ptr6]"=&r"(dst_ptr6),
- [dst_ptr7]"=&r"(dst_ptr7), [dst_ptr0]"+r"(dst_ptr0), [src_ptr]"+r"(src_ptr)
- : [ch2]"r"(ch2), [src_end]"r"(src_end)
- : "memory"
- );
- }
- }
-}
-#endif /* HAVE_MIPSDSPR1 */
static void int32_to_float_fmul_scalar_mips(float *dst, const int *src,
float mul, int len)
@@ -333,10 +136,6 @@ static void int32_to_float_fmul_scalar_mips(float *dst, const int *src,
av_cold void ff_fmt_convert_init_mips(FmtConvertContext *c)
{
#if HAVE_INLINE_ASM
-#if HAVE_MIPSDSPR1
- c->float_to_int16_interleave = float_to_int16_interleave_mips;
- c->float_to_int16 = float_to_int16_mips;
-#endif
c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_mips;
#endif
}
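
For reference, the MIPSDSPR1 routines removed above implement the float_to_int16 and float_to_int16_interleave hooks of FmtConvertContext: each sample is converted with cvt.w.s and then clamped to the 16-bit range by the saturating shll_s.w/srl pair, and the interleave variant writes channel c into every channels-th output slot. Below is a minimal scalar sketch of that behaviour, assuming the input floats are already scaled to the 16-bit range and that lrintf's rounding mode matches cvt.w.s (round-to-nearest by default); the helper names are illustrative only and not part of the FFmpeg tree. The unrolled assembly paths also appear to assume len is a multiple of 8.

#include <math.h>
#include <stdint.h>

/* Saturate a rounded sample to the int16_t range, as shll_s.w by 16
 * followed by srl by 16 does in the removed assembly. */
static inline int16_t sat16(long v)
{
    if (v < -32768) return -32768;
    if (v >  32767) return  32767;
    return (int16_t)v;
}

/* Scalar equivalent of float_to_int16_mips() (illustrative only). */
static void float_to_int16_ref(int16_t *dst, const float *src, long len)
{
    for (long i = 0; i < len; i++)
        dst[i] = sat16(lrintf(src[i]));   /* cvt.w.s + saturation */
}

/* Scalar equivalent of float_to_int16_interleave_mips() (illustrative only):
 * channel c lands at dst[i * channels + c], matching the ch2 stride used
 * by the assembly. */
static void float_to_int16_interleave_ref(int16_t *dst, const float **src,
                                          long len, int channels)
{
    for (int c = 0; c < channels; c++)
        for (long i = 0; i < len; i++)
            dst[i * channels + c] = sat16(lrintf(src[c][i]));
}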