Diffstat (limited to 'libavcodec/mips')
-rw-r--r--  libavcodec/mips/Makefile                   |   21
-rw-r--r--  libavcodec/mips/aaccoder_mips.c            | 2498
-rw-r--r--  libavcodec/mips/aacdec_mips.c              |  831
-rw-r--r--  libavcodec/mips/aacdec_mips.h              |  249
-rw-r--r--  libavcodec/mips/aacpsdsp_mips.c            |  459
-rw-r--r--  libavcodec/mips/aacpsy_mips.h              |  230
-rw-r--r--  libavcodec/mips/aacsbr_mips.c              |  618
-rw-r--r--  libavcodec/mips/aacsbr_mips.h              |  493
-rw-r--r--  libavcodec/mips/ac3dsp_mips.c              |  412
-rw-r--r--  libavcodec/mips/acelp_filters_mips.c       |  216
-rw-r--r--  libavcodec/mips/acelp_vectors_mips.c       |  101
-rw-r--r--  libavcodec/mips/amrwbdec_mips.c            |  187
-rw-r--r--  libavcodec/mips/amrwbdec_mips.h            |   62
-rw-r--r--  libavcodec/mips/celp_filters_mips.c        |  288
-rw-r--r--  libavcodec/mips/celp_math_mips.c           |   89
-rw-r--r--  libavcodec/mips/compute_antialias_fixed.h  |  250
-rw-r--r--  libavcodec/mips/compute_antialias_float.h  |  185
-rw-r--r--  libavcodec/mips/fft_init_table.c           |   67
-rw-r--r--  libavcodec/mips/fft_mips.c                 |  534
-rw-r--r--  libavcodec/mips/fft_table.h                |   63
-rw-r--r--  libavcodec/mips/fmtconvert_mips.c          |  342
-rw-r--r--  libavcodec/mips/iirfilter_mips.c           |  204
-rw-r--r--  libavcodec/mips/lsp_mips.h                 |  109
-rw-r--r--  libavcodec/mips/mathops.h                  |    8
-rw-r--r--  libavcodec/mips/mpegaudiodsp_mips_fixed.c  |  907
-rw-r--r--  libavcodec/mips/mpegaudiodsp_mips_float.c  | 1250
-rw-r--r--  libavcodec/mips/sbrdsp_mips.c              |  940
27 files changed, 11609 insertions(+), 4 deletions(-)
diff --git a/libavcodec/mips/Makefile b/libavcodec/mips/Makefile
new file mode 100644
index 0000000000..a2ea412305
--- /dev/null
+++ b/libavcodec/mips/Makefile
@@ -0,0 +1,21 @@
+MIPSFPU-OBJS-$(CONFIG_AMRNB_DECODER)  += mips/acelp_filters_mips.o  \
+                                         mips/celp_filters_mips.o   \
+                                         mips/celp_math_mips.o      \
+                                         mips/acelp_vectors_mips.o
+MIPSFPU-OBJS-$(CONFIG_AMRWB_DECODER)  += mips/acelp_filters_mips.o  \
+                                         mips/celp_filters_mips.o   \
+                                         mips/amrwbdec_mips.o       \
+                                         mips/celp_math_mips.o      \
+                                         mips/acelp_vectors_mips.o
+MIPSFPU-OBJS-$(CONFIG_MPEGAUDIODSP)   += mips/mpegaudiodsp_mips_float.o
+MIPSDSPR1-OBJS-$(CONFIG_MPEGAUDIODSP) += mips/mpegaudiodsp_mips_fixed.o
+OBJS-$(CONFIG_FFT)                    += mips/fft_init_table.o
+MIPSFPU-OBJS-$(CONFIG_FFT)            += mips/fft_mips.o
+MIPSFPU-OBJS                          += mips/fmtconvert_mips.o
+OBJS-$(CONFIG_AC3DSP)                 += mips/ac3dsp_mips.o
+OBJS-$(CONFIG_AAC_DECODER)            += mips/aacdec_mips.o         \
+                                         mips/aacsbr_mips.o         \
+                                         mips/sbrdsp_mips.o         \
+                                         mips/aacpsdsp_mips.o
+MIPSDSPR1-OBJS-$(CONFIG_AAC_ENCODER)  += mips/aaccoder_mips.o
+MIPSFPU-OBJS-$(CONFIG_AAC_ENCODER)    += mips/iirfilter_mips.o
diff --git a/libavcodec/mips/aaccoder_mips.c b/libavcodec/mips/aaccoder_mips.c
new file mode 100644
index 0000000000..d6210d12ec
--- /dev/null
+++ b/libavcodec/mips/aaccoder_mips.c
@@ -0,0 +1,2498 @@
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Author: Stanislav Ocovaj (socovaj@mips.com)
+ * Szabolcs Pal (sabolc@mips.com)
+ *
+ * AAC coefficient encoder optimized for the MIPS floating-point architecture
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Reference: libavcodec/aaccoder.c
+ */
+
+#include "libavutil/libm.h"
+
+#include <float.h>
+#include "libavutil/mathematics.h"
+#include "libavcodec/avcodec.h"
+#include "libavcodec/put_bits.h"
+#include "libavcodec/aac.h"
+#include "libavcodec/aacenc.h"
+#include "libavcodec/aactab.h"
+
+#if HAVE_INLINE_ASM
+typedef struct BandCodingPath {
+ int prev_idx;
+ float cost;
+ int run;
+} BandCodingPath;
+
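+/*
+ * Bits needed to code a section run length, indexed by run length.
+ * Long windows code runs in 5-bit groups with an all-ones escape
+ * (hence the 5/10/15 steps); short windows use 3-bit groups.
+ */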
+static const uint8_t run_value_bits_long[64] = {
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 15
+};
+
+static const uint8_t run_value_bits_short[16] = {
+ 3, 3, 3, 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 9
+};
+
+static const uint8_t *run_value_bits[2] = {
+ run_value_bits_long, run_value_bits_short
+};
+
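+/*
+ * Sign bits per codeword for the unsigned codebooks: one sign bit is
+ * transmitted for every nonzero coefficient, so each entry is the
+ * count of nonzero values in the corresponding codebook vector.
+ */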
+static const uint8_t uquad_sign_bits[81] = {
+ 0, 1, 1, 1, 2, 2, 1, 2, 2,
+ 1, 2, 2, 2, 3, 3, 2, 3, 3,
+ 1, 2, 2, 2, 3, 3, 2, 3, 3,
+ 1, 2, 2, 2, 3, 3, 2, 3, 3,
+ 2, 3, 3, 3, 4, 4, 3, 4, 4,
+ 2, 3, 3, 3, 4, 4, 3, 4, 4,
+ 1, 2, 2, 2, 3, 3, 2, 3, 3,
+ 2, 3, 3, 3, 4, 4, 3, 4, 4,
+ 2, 3, 3, 3, 4, 4, 3, 4, 4
+};
+
+static const uint8_t upair7_sign_bits[64] = {
+ 0, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2,
+};
+
+static const uint8_t upair12_sign_bits[169] = {
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
+};
+
+static const uint8_t esc_sign_bits[289] = {
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
+};
+
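+/*
+ * Vector version of |x|^(3/4), computed as sqrt(|x| * sqrt(|x|)) to
+ * avoid powf(); size is assumed to be a multiple of 4, which holds
+ * for AAC scalefactor band sizes.
+ */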
+static void abs_pow34_v(float *out, const float *in, const int size) {
+#ifndef USE_REALLY_FULL_SEARCH
+ int i;
+ float a, b, c, d;
+ float ax, bx, cx, dx;
+
+ for (i = 0; i < size; i += 4) {
+ a = fabsf(in[i ]);
+ b = fabsf(in[i+1]);
+ c = fabsf(in[i+2]);
+ d = fabsf(in[i+3]);
+
+ ax = sqrtf(a);
+ bx = sqrtf(b);
+ cx = sqrtf(c);
+ dx = sqrtf(d);
+
+ a = a * ax;
+ b = b * bx;
+ c = c * cx;
+ d = d * dx;
+
+ out[i ] = sqrtf(a);
+ out[i+1] = sqrtf(b);
+ out[i+2] = sqrtf(c);
+ out[i+3] = sqrtf(d);
+ }
+#endif /* USE_REALLY_FULL_SEARCH */
+}
+
+static float find_max_val(int group_len, int swb_size, const float *scaled) {
+ float maxval = 0.0f;
+ int w2, i;
+ for (w2 = 0; w2 < group_len; w2++) {
+ for (i = 0; i < swb_size; i++) {
+ maxval = FFMAX(maxval, scaled[w2*128+i]);
+ }
+ }
+ return maxval;
+}
+
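+/*
+ * Pick the smallest codebook able to represent a band: quantize the
+ * band maximum with scalefactor sf (0.4054f is the standard AAC
+ * rounding offset) and return the first codebook whose value range
+ * covers it; 11 (ESC) covers everything above 12.
+ */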
+static int find_min_book(float maxval, int sf) {
+ float Q = ff_aac_pow2sf_tab[POW_SF2_ZERO - sf + SCALE_ONE_POS - SCALE_DIV_512];
+ float Q34 = sqrtf(Q * sqrtf(Q));
+ int qmaxval, cb;
+ qmaxval = maxval * Q34 + 0.4054f;
+ if (qmaxval == 0) cb = 0;
+ else if (qmaxval == 1) cb = 1;
+ else if (qmaxval == 2) cb = 3;
+ else if (qmaxval <= 4) cb = 5;
+ else if (qmaxval <= 7) cb = 7;
+ else if (qmaxval <= 12) cb = 9;
+ else cb = 11;
+ return cb;
+}
+
+/**
+ * Quantize-and-encode functions, derived from the template in
+ * libavcodec/aaccoder.c and specialized per codebook.
+ */
+static void quantize_and_encode_band_cost_SQUAD_mips(struct AACEncContext *s,
+ PutBitContext *pb, const float *in,
+ const float *scaled, int size, int scale_idx,
+ int cb, const float lambda, const float uplim,
+ int *bits)
+{
+ const float Q34 = ff_aac_pow34sf_tab[POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512];
+ int i;
+ int qc1, qc2, qc3, qc4;
+
+ uint8_t *p_bits = (uint8_t *)ff_aac_spectral_bits[cb-1];
+ uint16_t *p_codes = (uint16_t *)ff_aac_spectral_codes[cb-1];
+
+ abs_pow34_v(s->scoefs, in, size);
+ scaled = s->scoefs;
+ for (i = 0; i < size; i += 4) {
+ int curidx;
+ int *in_int = (int *)&in[i];
+
+ qc1 = scaled[i ] * Q34 + 0.4054f;
+ qc2 = scaled[i+1] * Q34 + 0.4054f;
+ qc3 = scaled[i+2] * Q34 + 0.4054f;
+ qc4 = scaled[i+3] * Q34 + 0.4054f;
+
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+
+ "slt %[qc1], $zero, %[qc1] \n\t"
+ "slt %[qc2], $zero, %[qc2] \n\t"
+ "slt %[qc3], $zero, %[qc3] \n\t"
+ "slt %[qc4], $zero, %[qc4] \n\t"
+ "lw $t0, 0(%[in_int]) \n\t"
+ "lw $t1, 4(%[in_int]) \n\t"
+ "lw $t2, 8(%[in_int]) \n\t"
+ "lw $t3, 12(%[in_int]) \n\t"
+ "srl $t0, $t0, 31 \n\t"
+ "srl $t1, $t1, 31 \n\t"
+ "srl $t2, $t2, 31 \n\t"
+ "srl $t3, $t3, 31 \n\t"
+ "subu $t4, $zero, %[qc1] \n\t"
+ "subu $t5, $zero, %[qc2] \n\t"
+ "subu $t6, $zero, %[qc3] \n\t"
+ "subu $t7, $zero, %[qc4] \n\t"
+ "movn %[qc1], $t4, $t0 \n\t"
+ "movn %[qc2], $t5, $t1 \n\t"
+ "movn %[qc3], $t6, $t2 \n\t"
+ "movn %[qc4], $t7, $t3 \n\t"
+
+ ".set pop \n\t"
+
+ : [qc1]"+r"(qc1), [qc2]"+r"(qc2),
+ [qc3]"+r"(qc3), [qc4]"+r"(qc4)
+ : [in_int]"r"(in_int)
+ : "t0", "t1", "t2", "t3",
+ "t4", "t5", "t6", "t7",
+ "memory"
+ );
+
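+ /*
+ * Pack the four quantized values, each in {-1, 0, 1}, into a
+ * base-3 codeword index; the +40 offset maps the signed range
+ * [-40, 40] onto the table range [0, 80].
+ */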
+ curidx = qc1;
+ curidx *= 3;
+ curidx += qc2;
+ curidx *= 3;
+ curidx += qc3;
+ curidx *= 3;
+ curidx += qc4;
+ curidx += 40;
+
+ put_bits(pb, p_bits[curidx], p_codes[curidx]);
+ }
+}
+
+static void quantize_and_encode_band_cost_UQUAD_mips(struct AACEncContext *s,
+ PutBitContext *pb, const float *in,
+ const float *scaled, int size, int scale_idx,
+ int cb, const float lambda, const float uplim,
+ int *bits)
+{
+ const float Q34 = ff_aac_pow34sf_tab[POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512];
+ int i;
+ int qc1, qc2, qc3, qc4;
+
+ uint8_t *p_bits = (uint8_t *)ff_aac_spectral_bits[cb-1];
+ uint16_t *p_codes = (uint16_t *)ff_aac_spectral_codes[cb-1];
+
+ abs_pow34_v(s->scoefs, in, size);
+ scaled = s->scoefs;
+ for (i = 0; i < size; i += 4) {
+ int curidx, sign, count;
+ int *in_int = (int *)&in[i];
+ uint8_t v_bits;
+ unsigned int v_codes;
+
+ qc1 = scaled[i ] * Q34 + 0.4054f;
+ qc2 = scaled[i+1] * Q34 + 0.4054f;
+ qc3 = scaled[i+2] * Q34 + 0.4054f;
+ qc4 = scaled[i+3] * Q34 + 0.4054f;
+
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+
+ "ori $t4, $zero, 2 \n\t"
+ "ori %[sign], $zero, 0 \n\t"
+ "slt $t0, $t4, %[qc1] \n\t"
+ "slt $t1, $t4, %[qc2] \n\t"
+ "slt $t2, $t4, %[qc3] \n\t"
+ "slt $t3, $t4, %[qc4] \n\t"
+ "movn %[qc1], $t4, $t0 \n\t"
+ "movn %[qc2], $t4, $t1 \n\t"
+ "movn %[qc3], $t4, $t2 \n\t"
+ "movn %[qc4], $t4, $t3 \n\t"
+ "lw $t0, 0(%[in_int]) \n\t"
+ "lw $t1, 4(%[in_int]) \n\t"
+ "lw $t2, 8(%[in_int]) \n\t"
+ "lw $t3, 12(%[in_int]) \n\t"
+ "slt $t0, $t0, $zero \n\t"
+ "movn %[sign], $t0, %[qc1] \n\t"
+ "slt $t1, $t1, $zero \n\t"
+ "slt $t2, $t2, $zero \n\t"
+ "slt $t3, $t3, $zero \n\t"
+ "sll $t0, %[sign], 1 \n\t"
+ "or $t0, $t0, $t1 \n\t"
+ "movn %[sign], $t0, %[qc2] \n\t"
+ "slt $t4, $zero, %[qc1] \n\t"
+ "slt $t1, $zero, %[qc2] \n\t"
+ "slt %[count], $zero, %[qc3] \n\t"
+ "sll $t0, %[sign], 1 \n\t"
+ "or $t0, $t0, $t2 \n\t"
+ "movn %[sign], $t0, %[qc3] \n\t"
+ "slt $t2, $zero, %[qc4] \n\t"
+ "addu %[count], %[count], $t4 \n\t"
+ "addu %[count], %[count], $t1 \n\t"
+ "sll $t0, %[sign], 1 \n\t"
+ "or $t0, $t0, $t3 \n\t"
+ "movn %[sign], $t0, %[qc4] \n\t"
+ "addu %[count], %[count], $t2 \n\t"
+
+ ".set pop \n\t"
+
+ : [qc1]"+r"(qc1), [qc2]"+r"(qc2),
+ [qc3]"+r"(qc3), [qc4]"+r"(qc4),
+ [sign]"=&r"(sign), [count]"=&r"(count)
+ : [in_int]"r"(in_int)
+ : "t0", "t1", "t2", "t3", "t4",
+ "memory"
+ );
+
+ curidx = qc1;
+ curidx *= 3;
+ curidx += qc2;
+ curidx *= 3;
+ curidx += qc3;
+ curidx *= 3;
+ curidx += qc4;
+
+ v_codes = (p_codes[curidx] << count) | (sign & ((1 << count) - 1));
+ v_bits = p_bits[curidx] + count;
+ put_bits(pb, v_bits, v_codes);
+ }
+}
+
+static void quantize_and_encode_band_cost_SPAIR_mips(struct AACEncContext *s,
+ PutBitContext *pb, const float *in,
+ const float *scaled, int size, int scale_idx,
+ int cb, const float lambda, const float uplim,
+ int *bits)
+{
+ const float Q34 = ff_aac_pow34sf_tab[POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512];
+ int i;
+ int qc1, qc2, qc3, qc4;
+
+ uint8_t *p_bits = (uint8_t *)ff_aac_spectral_bits[cb-1];
+ uint16_t *p_codes = (uint16_t *)ff_aac_spectral_codes[cb-1];
+
+ abs_pow34_v(s->scoefs, in, size);
+ scaled = s->scoefs;
+ for (i = 0; i < size; i += 4) {
+ int curidx, curidx2;
+ int *in_int = (int *)&in[i];
+ uint8_t v_bits;
+ unsigned int v_codes;
+
+ qc1 = scaled[i ] * Q34 + 0.4054f;
+ qc2 = scaled[i+1] * Q34 + 0.4054f;
+ qc3 = scaled[i+2] * Q34 + 0.4054f;
+ qc4 = scaled[i+3] * Q34 + 0.4054f;
+
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+
+ "ori $t4, $zero, 4 \n\t"
+ "slt $t0, $t4, %[qc1] \n\t"
+ "slt $t1, $t4, %[qc2] \n\t"
+ "slt $t2, $t4, %[qc3] \n\t"
+ "slt $t3, $t4, %[qc4] \n\t"
+ "movn %[qc1], $t4, $t0 \n\t"
+ "movn %[qc2], $t4, $t1 \n\t"
+ "movn %[qc3], $t4, $t2 \n\t"
+ "movn %[qc4], $t4, $t3 \n\t"
+ "lw $t0, 0(%[in_int]) \n\t"
+ "lw $t1, 4(%[in_int]) \n\t"
+ "lw $t2, 8(%[in_int]) \n\t"
+ "lw $t3, 12(%[in_int]) \n\t"
+ "srl $t0, $t0, 31 \n\t"
+ "srl $t1, $t1, 31 \n\t"
+ "srl $t2, $t2, 31 \n\t"
+ "srl $t3, $t3, 31 \n\t"
+ "subu $t4, $zero, %[qc1] \n\t"
+ "subu $t5, $zero, %[qc2] \n\t"
+ "subu $t6, $zero, %[qc3] \n\t"
+ "subu $t7, $zero, %[qc4] \n\t"
+ "movn %[qc1], $t4, $t0 \n\t"
+ "movn %[qc2], $t5, $t1 \n\t"
+ "movn %[qc3], $t6, $t2 \n\t"
+ "movn %[qc4], $t7, $t3 \n\t"
+
+ ".set pop \n\t"
+
+ : [qc1]"+r"(qc1), [qc2]"+r"(qc2),
+ [qc3]"+r"(qc3), [qc4]"+r"(qc4)
+ : [in_int]"r"(in_int)
+ : "t0", "t1", "t2", "t3",
+ "t4", "t5", "t6", "t7",
+ "memory"
+ );
+
+ curidx = 9 * qc1;
+ curidx += qc2 + 40;
+
+ curidx2 = 9 * qc3;
+ curidx2 += qc4 + 40;
+
+ v_codes = (p_codes[curidx] << p_bits[curidx2]) | (p_codes[curidx2]);
+ v_bits = p_bits[curidx] + p_bits[curidx2];
+ put_bits(pb, v_bits, v_codes);
+ }
+}
+
+static void quantize_and_encode_band_cost_UPAIR7_mips(struct AACEncContext *s,
+ PutBitContext *pb, const float *in,
+ const float *scaled, int size, int scale_idx,
+ int cb, const float lambda, const float uplim,
+ int *bits)
+{
+ const float Q34 = ff_aac_pow34sf_tab[POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512];
+ int i;
+ int qc1, qc2, qc3, qc4;
+
+ uint8_t *p_bits = (uint8_t*) ff_aac_spectral_bits[cb-1];
+ uint16_t *p_codes = (uint16_t*)ff_aac_spectral_codes[cb-1];
+
+ abs_pow34_v(s->scoefs, in, size);
+ scaled = s->scoefs;
+ for (i = 0; i < size; i += 4) {
+ int curidx, sign1, count1, sign2, count2;
+ int *in_int = (int *)&in[i];
+ uint8_t v_bits;
+ unsigned int v_codes;
+
+ qc1 = scaled[i ] * Q34 + 0.4054f;
+ qc2 = scaled[i+1] * Q34 + 0.4054f;
+ qc3 = scaled[i+2] * Q34 + 0.4054f;
+ qc4 = scaled[i+3] * Q34 + 0.4054f;
+
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+
+ "ori $t4, $zero, 7 \n\t"
+ "ori %[sign1], $zero, 0 \n\t"
+ "ori %[sign2], $zero, 0 \n\t"
+ "slt $t0, $t4, %[qc1] \n\t"
+ "slt $t1, $t4, %[qc2] \n\t"
+ "slt $t2, $t4, %[qc3] \n\t"
+ "slt $t3, $t4, %[qc4] \n\t"
+ "movn %[qc1], $t4, $t0 \n\t"
+ "movn %[qc2], $t4, $t1 \n\t"
+ "movn %[qc3], $t4, $t2 \n\t"
+ "movn %[qc4], $t4, $t3 \n\t"
+ "lw $t0, 0(%[in_int]) \n\t"
+ "lw $t1, 4(%[in_int]) \n\t"
+ "lw $t2, 8(%[in_int]) \n\t"
+ "lw $t3, 12(%[in_int]) \n\t"
+ "slt $t0, $t0, $zero \n\t"
+ "movn %[sign1], $t0, %[qc1] \n\t"
+ "slt $t2, $t2, $zero \n\t"
+ "movn %[sign2], $t2, %[qc3] \n\t"
+ "slt $t1, $t1, $zero \n\t"
+ "sll $t0, %[sign1], 1 \n\t"
+ "or $t0, $t0, $t1 \n\t"
+ "movn %[sign1], $t0, %[qc2] \n\t"
+ "slt $t3, $t3, $zero \n\t"
+ "sll $t0, %[sign2], 1 \n\t"
+ "or $t0, $t0, $t3 \n\t"
+ "movn %[sign2], $t0, %[qc4] \n\t"
+ "slt %[count1], $zero, %[qc1] \n\t"
+ "slt $t1, $zero, %[qc2] \n\t"
+ "slt %[count2], $zero, %[qc3] \n\t"
+ "slt $t2, $zero, %[qc4] \n\t"
+ "addu %[count1], %[count1], $t1 \n\t"
+ "addu %[count2], %[count2], $t2 \n\t"
+
+ ".set pop \n\t"
+
+ : [qc1]"+r"(qc1), [qc2]"+r"(qc2),
+ [qc3]"+r"(qc3), [qc4]"+r"(qc4),
+ [sign1]"=&r"(sign1), [count1]"=&r"(count1),
+ [sign2]"=&r"(sign2), [count2]"=&r"(count2)
+ : [in_int]"r"(in_int)
+ : "t0", "t1", "t2", "t3", "t4",
+ "memory"
+ );
+
+ curidx = 8 * qc1;
+ curidx += qc2;
+
+ v_codes = (p_codes[curidx] << count1) | sign1;
+ v_bits = p_bits[curidx] + count1;
+ put_bits(pb, v_bits, v_codes);
+
+ curidx = 8 * qc3;
+ curidx += qc4;
+
+ v_codes = (p_codes[curidx] << count2) | sign2;
+ v_bits = p_bits[curidx] + count2;
+ put_bits(pb, v_bits, v_codes);
+ }
+}
+
+static void quantize_and_encode_band_cost_UPAIR12_mips(struct AACEncContext *s,
+ PutBitContext *pb, const float *in,
+ const float *scaled, int size, int scale_idx,
+ int cb, const float lambda, const float uplim,
+ int *bits)
+{
+ const float Q34 = ff_aac_pow34sf_tab[POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512];
+ int i;
+ int qc1, qc2, qc3, qc4;
+
+ uint8_t *p_bits = (uint8_t*) ff_aac_spectral_bits[cb-1];
+ uint16_t *p_codes = (uint16_t*)ff_aac_spectral_codes[cb-1];
+
+ abs_pow34_v(s->scoefs, in, size);
+ scaled = s->scoefs;
+ for (i = 0; i < size; i += 4) {
+ int curidx, sign1, count1, sign2, count2;
+ int *in_int = (int *)&in[i];
+ uint8_t v_bits;
+ unsigned int v_codes;
+
+ qc1 = scaled[i ] * Q34 + 0.4054f;
+ qc2 = scaled[i+1] * Q34 + 0.4054f;
+ qc3 = scaled[i+2] * Q34 + 0.4054f;
+ qc4 = scaled[i+3] * Q34 + 0.4054f;
+
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+
+ "ori $t4, $zero, 12 \n\t"
+ "ori %[sign1], $zero, 0 \n\t"
+ "ori %[sign2], $zero, 0 \n\t"
+ "slt $t0, $t4, %[qc1] \n\t"
+ "slt $t1, $t4, %[qc2] \n\t"
+ "slt $t2, $t4, %[qc3] \n\t"
+ "slt $t3, $t4, %[qc4] \n\t"
+ "movn %[qc1], $t4, $t0 \n\t"
+ "movn %[qc2], $t4, $t1 \n\t"
+ "movn %[qc3], $t4, $t2 \n\t"
+ "movn %[qc4], $t4, $t3 \n\t"
+ "lw $t0, 0(%[in_int]) \n\t"
+ "lw $t1, 4(%[in_int]) \n\t"
+ "lw $t2, 8(%[in_int]) \n\t"
+ "lw $t3, 12(%[in_int]) \n\t"
+ "slt $t0, $t0, $zero \n\t"
+ "movn %[sign1], $t0, %[qc1] \n\t"
+ "slt $t2, $t2, $zero \n\t"
+ "movn %[sign2], $t2, %[qc3] \n\t"
+ "slt $t1, $t1, $zero \n\t"
+ "sll $t0, %[sign1], 1 \n\t"
+ "or $t0, $t0, $t1 \n\t"
+ "movn %[sign1], $t0, %[qc2] \n\t"
+ "slt $t3, $t3, $zero \n\t"
+ "sll $t0, %[sign2], 1 \n\t"
+ "or $t0, $t0, $t3 \n\t"
+ "movn %[sign2], $t0, %[qc4] \n\t"
+ "slt %[count1], $zero, %[qc1] \n\t"
+ "slt $t1, $zero, %[qc2] \n\t"
+ "slt %[count2], $zero, %[qc3] \n\t"
+ "slt $t2, $zero, %[qc4] \n\t"
+ "addu %[count1], %[count1], $t1 \n\t"
+ "addu %[count2], %[count2], $t2 \n\t"
+
+ ".set pop \n\t"
+
+ : [qc1]"+r"(qc1), [qc2]"+r"(qc2),
+ [qc3]"+r"(qc3), [qc4]"+r"(qc4),
+ [sign1]"=&r"(sign1), [count1]"=&r"(count1),
+ [sign2]"=&r"(sign2), [count2]"=&r"(count2)
+ : [in_int]"r"(in_int)
+ : "t0", "t1", "t2", "t3", "t4",
+ "memory"
+ );
+
+ curidx = 13 * qc1;
+ curidx += qc2;
+
+ v_codes = (p_codes[curidx] << count1) | sign1;
+ v_bits = p_bits[curidx] + count1;
+ put_bits(pb, v_bits, v_codes);
+
+ curidx = 13 * qc3;
+ curidx += qc4;
+
+ v_codes = (p_codes[curidx] << count2) | sign2;
+ v_bits = p_bits[curidx] + count2;
+ put_bits(pb, v_bits, v_codes);
+ }
+}
+
+static void quantize_and_encode_band_cost_ESC_mips(struct AACEncContext *s,
+ PutBitContext *pb, const float *in,
+ const float *scaled, int size, int scale_idx,
+ int cb, const float lambda, const float uplim,
+ int *bits)
+{
+ const float Q34 = ff_aac_pow34sf_tab[POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512];
+ int i;
+ int qc1, qc2, qc3, qc4;
+
+ uint8_t *p_bits = (uint8_t* )ff_aac_spectral_bits[cb-1];
+ uint16_t *p_codes = (uint16_t*)ff_aac_spectral_codes[cb-1];
+ float *p_vectors = (float* )ff_aac_codebook_vectors[cb-1];
+
+ abs_pow34_v(s->scoefs, in, size);
+ scaled = s->scoefs;
+
+ if (cb < 11) {
+ for (i = 0; i < size; i += 4) {
+ int curidx, curidx2, sign1, count1, sign2, count2;
+ int *in_int = (int *)&in[i];
+ uint8_t v_bits;
+ unsigned int v_codes;
+
+ qc1 = scaled[i ] * Q34 + 0.4054f;
+ qc2 = scaled[i+1] * Q34 + 0.4054f;
+ qc3 = scaled[i+2] * Q34 + 0.4054f;
+ qc4 = scaled[i+3] * Q34 + 0.4054f;
+
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+
+ "ori $t4, $zero, 16 \n\t"
+ "ori %[sign1], $zero, 0 \n\t"
+ "ori %[sign2], $zero, 0 \n\t"
+ "slt $t0, $t4, %[qc1] \n\t"
+ "slt $t1, $t4, %[qc2] \n\t"
+ "slt $t2, $t4, %[qc3] \n\t"
+ "slt $t3, $t4, %[qc4] \n\t"
+ "movn %[qc1], $t4, $t0 \n\t"
+ "movn %[qc2], $t4, $t1 \n\t"
+ "movn %[qc3], $t4, $t2 \n\t"
+ "movn %[qc4], $t4, $t3 \n\t"
+ "lw $t0, 0(%[in_int]) \n\t"
+ "lw $t1, 4(%[in_int]) \n\t"
+ "lw $t2, 8(%[in_int]) \n\t"
+ "lw $t3, 12(%[in_int]) \n\t"
+ "slt $t0, $t0, $zero \n\t"
+ "movn %[sign1], $t0, %[qc1] \n\t"
+ "slt $t2, $t2, $zero \n\t"
+ "movn %[sign2], $t2, %[qc3] \n\t"
+ "slt $t1, $t1, $zero \n\t"
+ "sll $t0, %[sign1], 1 \n\t"
+ "or $t0, $t0, $t1 \n\t"
+ "movn %[sign1], $t0, %[qc2] \n\t"
+ "slt $t3, $t3, $zero \n\t"
+ "sll $t0, %[sign2], 1 \n\t"
+ "or $t0, $t0, $t3 \n\t"
+ "movn %[sign2], $t0, %[qc4] \n\t"
+ "slt %[count1], $zero, %[qc1] \n\t"
+ "slt $t1, $zero, %[qc2] \n\t"
+ "slt %[count2], $zero, %[qc3] \n\t"
+ "slt $t2, $zero, %[qc4] \n\t"
+ "addu %[count1], %[count1], $t1 \n\t"
+ "addu %[count2], %[count2], $t2 \n\t"
+
+ ".set pop \n\t"
+
+ : [qc1]"+r"(qc1), [qc2]"+r"(qc2),
+ [qc3]"+r"(qc3), [qc4]"+r"(qc4),
+ [sign1]"=&r"(sign1), [count1]"=&r"(count1),
+ [sign2]"=&r"(sign2), [count2]"=&r"(count2)
+ : [in_int]"r"(in_int)
+ : "t0", "t1", "t2", "t3", "t4",
+ "memory"
+ );
+
+ curidx = 17 * qc1;
+ curidx += qc2;
+ curidx2 = 17 * qc3;
+ curidx2 += qc4;
+
+ v_codes = (p_codes[curidx] << count1) | sign1;
+ v_bits = p_bits[curidx] + count1;
+ put_bits(pb, v_bits, v_codes);
+
+ v_codes = (p_codes[curidx2] << count2) | sign2;
+ v_bits = p_bits[curidx2] + count2;
+ put_bits(pb, v_bits, v_codes);
+ }
+ } else {
+ for (i = 0; i < size; i += 4) {
+ int curidx, curidx2, sign1, count1, sign2, count2;
+ int *in_int = (int *)&in[i];
+ uint8_t v_bits;
+ unsigned int v_codes;
+ int c1, c2, c3, c4;
+
+ qc1 = scaled[i ] * Q34 + 0.4054f;
+ qc2 = scaled[i+1] * Q34 + 0.4054f;
+ qc3 = scaled[i+2] * Q34 + 0.4054f;
+ qc4 = scaled[i+3] * Q34 + 0.4054f;
+
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+
+ "ori $t4, $zero, 16 \n\t"
+ "ori %[sign1], $zero, 0 \n\t"
+ "ori %[sign2], $zero, 0 \n\t"
+ "shll_s.w %[c1], %[qc1], 18 \n\t"
+ "shll_s.w %[c2], %[qc2], 18 \n\t"
+ "shll_s.w %[c3], %[qc3], 18 \n\t"
+ "shll_s.w %[c4], %[qc4], 18 \n\t"
+ "srl %[c1], %[c1], 18 \n\t"
+ "srl %[c2], %[c2], 18 \n\t"
+ "srl %[c3], %[c3], 18 \n\t"
+ "srl %[c4], %[c4], 18 \n\t"
+ "slt $t0, $t4, %[qc1] \n\t"
+ "slt $t1, $t4, %[qc2] \n\t"
+ "slt $t2, $t4, %[qc3] \n\t"
+ "slt $t3, $t4, %[qc4] \n\t"
+ "movn %[qc1], $t4, $t0 \n\t"
+ "movn %[qc2], $t4, $t1 \n\t"
+ "movn %[qc3], $t4, $t2 \n\t"
+ "movn %[qc4], $t4, $t3 \n\t"
+ "lw $t0, 0(%[in_int]) \n\t"
+ "lw $t1, 4(%[in_int]) \n\t"
+ "lw $t2, 8(%[in_int]) \n\t"
+ "lw $t3, 12(%[in_int]) \n\t"
+ "slt $t0, $t0, $zero \n\t"
+ "movn %[sign1], $t0, %[qc1] \n\t"
+ "slt $t2, $t2, $zero \n\t"
+ "movn %[sign2], $t2, %[qc3] \n\t"
+ "slt $t1, $t1, $zero \n\t"
+ "sll $t0, %[sign1], 1 \n\t"
+ "or $t0, $t0, $t1 \n\t"
+ "movn %[sign1], $t0, %[qc2] \n\t"
+ "slt $t3, $t3, $zero \n\t"
+ "sll $t0, %[sign2], 1 \n\t"
+ "or $t0, $t0, $t3 \n\t"
+ "movn %[sign2], $t0, %[qc4] \n\t"
+ "slt %[count1], $zero, %[qc1] \n\t"
+ "slt $t1, $zero, %[qc2] \n\t"
+ "slt %[count2], $zero, %[qc3] \n\t"
+ "slt $t2, $zero, %[qc4] \n\t"
+ "addu %[count1], %[count1], $t1 \n\t"
+ "addu %[count2], %[count2], $t2 \n\t"
+
+ ".set pop \n\t"
+
+ : [qc1]"+r"(qc1), [qc2]"+r"(qc2),
+ [qc3]"+r"(qc3), [qc4]"+r"(qc4),
+ [sign1]"=&r"(sign1), [count1]"=&r"(count1),
+ [sign2]"=&r"(sign2), [count2]"=&r"(count2),
+ [c1]"=&r"(c1), [c2]"=&r"(c2),
+ [c3]"=&r"(c3), [c4]"=&r"(c4)
+ : [in_int]"r"(in_int)
+ : "t0", "t1", "t2", "t3", "t4",
+ "memory"
+ );
+
+ curidx = 17 * qc1;
+ curidx += qc2;
+
+ curidx2 = 17 * qc3;
+ curidx2 += qc4;
+
+ v_codes = (p_codes[curidx] << count1) | sign1;
+ v_bits = p_bits[curidx] + count1;
+ put_bits(pb, v_bits, v_codes);
+
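+ /*
+ * A codebook vector entry of 64.0f marks a coefficient clamped
+ * to the escape value 16: its magnitude c is then sent as an
+ * escape sequence of (len - 4) one bits, a zero, and the len
+ * low-order bits of c (the leading 1 is implicit), len * 2 - 3
+ * bits in total, where len = av_log2(c).
+ */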
+ if (p_vectors[curidx*2 ] == 64.0f) {
+ int len = av_log2(c1);
+ v_codes = (((1 << (len - 3)) - 2) << len) | (c1 & ((1 << len) - 1));
+ put_bits(pb, len * 2 - 3, v_codes);
+ }
+ if (p_vectors[curidx*2+1] == 64.0f) {
+ int len = av_log2(c2);
+ v_codes = (((1 << (len - 3)) - 2) << len) | (c2 & ((1 << len) - 1));
+ put_bits(pb, len*2-3, v_codes);
+ }
+
+ v_codes = (p_codes[curidx2] << count2) | sign2;
+ v_bits = p_bits[curidx2] + count2;
+ put_bits(pb, v_bits, v_codes);
+
+ if (p_vectors[curidx2*2 ] == 64.0f) {
+ int len = av_log2(c3);
+ v_codes = (((1 << (len - 3)) - 2) << len) | (c3 & ((1 << len) - 1));
+ put_bits(pb, len* 2 - 3, v_codes);
+ }
+ if (p_vectors[curidx2*2+1] == 64.0f) {
+ int len = av_log2(c4);
+ v_codes = (((1 << (len - 3)) - 2) << len) | (c4 & ((1 << len) - 1));
+ put_bits(pb, len * 2 - 3, v_codes);
+ }
+ }
+ }
+}
+
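+/*
+ * Encoder dispatch table indexed by codebook number; index 0 (ZERO)
+ * codes nothing, hence NULL. Paired codebooks share dimension and
+ * sign handling, so consecutive entries reuse one implementation.
+ */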
+static void (*const quantize_and_encode_band_cost_arr[])(struct AACEncContext *s,
+ PutBitContext *pb, const float *in,
+ const float *scaled, int size, int scale_idx,
+ int cb, const float lambda, const float uplim,
+ int *bits) = {
+ NULL,
+ quantize_and_encode_band_cost_SQUAD_mips,
+ quantize_and_encode_band_cost_SQUAD_mips,
+ quantize_and_encode_band_cost_UQUAD_mips,
+ quantize_and_encode_band_cost_UQUAD_mips,
+ quantize_and_encode_band_cost_SPAIR_mips,
+ quantize_and_encode_band_cost_SPAIR_mips,
+ quantize_and_encode_band_cost_UPAIR7_mips,
+ quantize_and_encode_band_cost_UPAIR7_mips,
+ quantize_and_encode_band_cost_UPAIR12_mips,
+ quantize_and_encode_band_cost_UPAIR12_mips,
+ quantize_and_encode_band_cost_ESC_mips,
+};
+
+#define quantize_and_encode_band_cost( \
+ s, pb, in, scaled, size, scale_idx, cb, \
+ lambda, uplim, bits) \
+ quantize_and_encode_band_cost_arr[cb]( \
+ s, pb, in, scaled, size, scale_idx, cb, \
+ lambda, uplim, bits)
+
+static void quantize_and_encode_band_mips(struct AACEncContext *s, PutBitContext *pb,
+ const float *in, int size, int scale_idx,
+ int cb, const float lambda)
+{
+ quantize_and_encode_band_cost(s, pb, in, NULL, size, scale_idx, cb, lambda,
+ INFINITY, NULL);
+}
+
+/**
+ * Bit-counting functions, derived from the same template and
+ * specialized per codebook; they return the number of bits needed
+ * to code a band.
+ */
+static float get_band_numbits_ZERO_mips(struct AACEncContext *s,
+ PutBitContext *pb, const float *in,
+ const float *scaled, int size, int scale_idx,
+ int cb, const float lambda, const float uplim,
+ int *bits)
+{
+ return 0;
+}
+
+static float get_band_numbits_SQUAD_mips(struct AACEncContext *s,
+ PutBitContext *pb, const float *in,
+ const float *scaled, int size, int scale_idx,
+ int cb, const float lambda, const float uplim,
+ int *bits)
+{
+ const float Q34 = ff_aac_pow34sf_tab[POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512];
+ int i;
+ int qc1, qc2, qc3, qc4;
+ int curbits = 0;
+
+ uint8_t *p_bits = (uint8_t *)ff_aac_spectral_bits[cb-1];
+
+ for (i = 0; i < size; i += 4) {
+ int curidx;
+ int *in_int = (int *)&in[i];
+
+ qc1 = scaled[i ] * Q34 + 0.4054f;
+ qc2 = scaled[i+1] * Q34 + 0.4054f;
+ qc3 = scaled[i+2] * Q34 + 0.4054f;
+ qc4 = scaled[i+3] * Q34 + 0.4054f;
+
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+
+ "slt %[qc1], $zero, %[qc1] \n\t"
+ "slt %[qc2], $zero, %[qc2] \n\t"
+ "slt %[qc3], $zero, %[qc3] \n\t"
+ "slt %[qc4], $zero, %[qc4] \n\t"
+ "lw $t0, 0(%[in_int]) \n\t"
+ "lw $t1, 4(%[in_int]) \n\t"
+ "lw $t2, 8(%[in_int]) \n\t"
+ "lw $t3, 12(%[in_int]) \n\t"
+ "srl $t0, $t0, 31 \n\t"
+ "srl $t1, $t1, 31 \n\t"
+ "srl $t2, $t2, 31 \n\t"
+ "srl $t3, $t3, 31 \n\t"
+ "subu $t4, $zero, %[qc1] \n\t"
+ "subu $t5, $zero, %[qc2] \n\t"
+ "subu $t6, $zero, %[qc3] \n\t"
+ "subu $t7, $zero, %[qc4] \n\t"
+ "movn %[qc1], $t4, $t0 \n\t"
+ "movn %[qc2], $t5, $t1 \n\t"
+ "movn %[qc3], $t6, $t2 \n\t"
+ "movn %[qc4], $t7, $t3 \n\t"
+
+ ".set pop \n\t"
+
+ : [qc1]"+r"(qc1), [qc2]"+r"(qc2),
+ [qc3]"+r"(qc3), [qc4]"+r"(qc4)
+ : [in_int]"r"(in_int)
+ : "t0", "t1", "t2", "t3",
+ "t4", "t5", "t6", "t7",
+ "memory"
+ );
+
+ curidx = qc1;
+ curidx *= 3;
+ curidx += qc2;
+ curidx *= 3;
+ curidx += qc3;
+ curidx *= 3;
+ curidx += qc4;
+ curidx += 40;
+
+ curbits += p_bits[curidx];
+ }
+ return curbits;
+}
+
+static float get_band_numbits_UQUAD_mips(struct AACEncContext *s,
+ PutBitContext *pb, const float *in,
+ const float *scaled, int size, int scale_idx,
+ int cb, const float lambda, const float uplim,
+ int *bits)
+{
+ const float Q34 = ff_aac_pow34sf_tab[POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512];
+ int i;
+ int curbits = 0;
+ int qc1, qc2, qc3, qc4;
+
+ uint8_t *p_bits = (uint8_t *)ff_aac_spectral_bits[cb-1];
+
+ for (i = 0; i < size; i += 4) {
+ int curidx;
+
+ qc1 = scaled[i ] * Q34 + 0.4054f;
+ qc2 = scaled[i+1] * Q34 + 0.4054f;
+ qc3 = scaled[i+2] * Q34 + 0.4054f;
+ qc4 = scaled[i+3] * Q34 + 0.4054f;
+
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+
+ "ori $t4, $zero, 2 \n\t"
+ "slt $t0, $t4, %[qc1] \n\t"
+ "slt $t1, $t4, %[qc2] \n\t"
+ "slt $t2, $t4, %[qc3] \n\t"
+ "slt $t3, $t4, %[qc4] \n\t"
+ "movn %[qc1], $t4, $t0 \n\t"
+ "movn %[qc2], $t4, $t1 \n\t"
+ "movn %[qc3], $t4, $t2 \n\t"
+ "movn %[qc4], $t4, $t3 \n\t"
+
+ ".set pop \n\t"
+
+ : [qc1]"+r"(qc1), [qc2]"+r"(qc2),
+ [qc3]"+r"(qc3), [qc4]"+r"(qc4)
+ :
+ : "t0", "t1", "t2", "t3", "t4"
+ );
+
+ curidx = qc1;
+ curidx *= 3;
+ curidx += qc2;
+ curidx *= 3;
+ curidx += qc3;
+ curidx *= 3;
+ curidx += qc4;
+
+ curbits += p_bits[curidx];
+ curbits += uquad_sign_bits[curidx];
+ }
+ return curbits;
+}
+
+static float get_band_numbits_SPAIR_mips(struct AACEncContext *s,
+ PutBitContext *pb, const float *in,
+ const float *scaled, int size, int scale_idx,
+ int cb, const float lambda, const float uplim,
+ int *bits)
+{
+ const float Q34 = ff_aac_pow34sf_tab[POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512];
+ int i;
+ int qc1, qc2, qc3, qc4;
+ int curbits = 0;
+
+ uint8_t *p_bits = (uint8_t*)ff_aac_spectral_bits[cb-1];
+
+ for (i = 0; i < size; i += 4) {
+ int curidx, curidx2;
+ int *in_int = (int *)&in[i];
+
+ qc1 = scaled[i ] * Q34 + 0.4054f;
+ qc2 = scaled[i+1] * Q34 + 0.4054f;
+ qc3 = scaled[i+2] * Q34 + 0.4054f;
+ qc4 = scaled[i+3] * Q34 + 0.4054f;
+
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+
+ "ori $t4, $zero, 4 \n\t"
+ "slt $t0, $t4, %[qc1] \n\t"
+ "slt $t1, $t4, %[qc2] \n\t"
+ "slt $t2, $t4, %[qc3] \n\t"
+ "slt $t3, $t4, %[qc4] \n\t"
+ "movn %[qc1], $t4, $t0 \n\t"
+ "movn %[qc2], $t4, $t1 \n\t"
+ "movn %[qc3], $t4, $t2 \n\t"
+ "movn %[qc4], $t4, $t3 \n\t"
+ "lw $t0, 0(%[in_int]) \n\t"
+ "lw $t1, 4(%[in_int]) \n\t"
+ "lw $t2, 8(%[in_int]) \n\t"
+ "lw $t3, 12(%[in_int]) \n\t"
+ "srl $t0, $t0, 31 \n\t"
+ "srl $t1, $t1, 31 \n\t"
+ "srl $t2, $t2, 31 \n\t"
+ "srl $t3, $t3, 31 \n\t"
+ "subu $t4, $zero, %[qc1] \n\t"
+ "subu $t5, $zero, %[qc2] \n\t"
+ "subu $t6, $zero, %[qc3] \n\t"
+ "subu $t7, $zero, %[qc4] \n\t"
+ "movn %[qc1], $t4, $t0 \n\t"
+ "movn %[qc2], $t5, $t1 \n\t"
+ "movn %[qc3], $t6, $t2 \n\t"
+ "movn %[qc4], $t7, $t3 \n\t"
+
+ ".set pop \n\t"
+
+ : [qc1]"+r"(qc1), [qc2]"+r"(qc2),
+ [qc3]"+r"(qc3), [qc4]"+r"(qc4)
+ : [in_int]"r"(in_int)
+ : "t0", "t1", "t2", "t3",
+ "t4", "t5", "t6", "t7",
+ "memory"
+ );
+
+ curidx = 9 * qc1;
+ curidx += qc2 + 40;
+
+ curidx2 = 9 * qc3;
+ curidx2 += qc4 + 40;
+
+ curbits += p_bits[curidx] + p_bits[curidx2];
+ }
+ return curbits;
+}
+
+static float get_band_numbits_UPAIR7_mips(struct AACEncContext *s,
+ PutBitContext *pb, const float *in,
+ const float *scaled, int size, int scale_idx,
+ int cb, const float lambda, const float uplim,
+ int *bits)
+{
+ const float Q34 = ff_aac_pow34sf_tab[POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512];
+ int i;
+ int qc1, qc2, qc3, qc4;
+ int curbits = 0;
+
+ uint8_t *p_bits = (uint8_t *)ff_aac_spectral_bits[cb-1];
+
+ for (i = 0; i < size; i += 4) {
+ int curidx, curidx2;
+
+ qc1 = scaled[i ] * Q34 + 0.4054f;
+ qc2 = scaled[i+1] * Q34 + 0.4054f;
+ qc3 = scaled[i+2] * Q34 + 0.4054f;
+ qc4 = scaled[i+3] * Q34 + 0.4054f;
+
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+
+ "ori $t4, $zero, 7 \n\t"
+ "slt $t0, $t4, %[qc1] \n\t"
+ "slt $t1, $t4, %[qc2] \n\t"
+ "slt $t2, $t4, %[qc3] \n\t"
+ "slt $t3, $t4, %[qc4] \n\t"
+ "movn %[qc1], $t4, $t0 \n\t"
+ "movn %[qc2], $t4, $t1 \n\t"
+ "movn %[qc3], $t4, $t2 \n\t"
+ "movn %[qc4], $t4, $t3 \n\t"
+
+ ".set pop \n\t"
+
+ : [qc1]"+r"(qc1), [qc2]"+r"(qc2),
+ [qc3]"+r"(qc3), [qc4]"+r"(qc4)
+ :
+ : "t0", "t1", "t2", "t3", "t4"
+ );
+
+ curidx = 8 * qc1;
+ curidx += qc2;
+
+ curidx2 = 8 * qc3;
+ curidx2 += qc4;
+
+ curbits += p_bits[curidx] +
+ upair7_sign_bits[curidx] +
+ p_bits[curidx2] +
+ upair7_sign_bits[curidx2];
+ }
+ return curbits;
+}
+
+static float get_band_numbits_UPAIR12_mips(struct AACEncContext *s,
+ PutBitContext *pb, const float *in,
+ const float *scaled, int size, int scale_idx,
+ int cb, const float lambda, const float uplim,
+ int *bits)
+{
+ const float Q34 = ff_aac_pow34sf_tab[POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512];
+ int i;
+ int qc1, qc2, qc3, qc4;
+ int curbits = 0;
+
+ uint8_t *p_bits = (uint8_t *)ff_aac_spectral_bits[cb-1];
+
+ for (i = 0; i < size; i += 4) {
+ int curidx, curidx2;
+
+ qc1 = scaled[i ] * Q34 + 0.4054f;
+ qc2 = scaled[i+1] * Q34 + 0.4054f;
+ qc3 = scaled[i+2] * Q34 + 0.4054f;
+ qc4 = scaled[i+3] * Q34 + 0.4054f;
+
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+
+ "ori $t4, $zero, 12 \n\t"
+ "slt $t0, $t4, %[qc1] \n\t"
+ "slt $t1, $t4, %[qc2] \n\t"
+ "slt $t2, $t4, %[qc3] \n\t"
+ "slt $t3, $t4, %[qc4] \n\t"
+ "movn %[qc1], $t4, $t0 \n\t"
+ "movn %[qc2], $t4, $t1 \n\t"
+ "movn %[qc3], $t4, $t2 \n\t"
+ "movn %[qc4], $t4, $t3 \n\t"
+
+ ".set pop \n\t"
+
+ : [qc1]"+r"(qc1), [qc2]"+r"(qc2),
+ [qc3]"+r"(qc3), [qc4]"+r"(qc4)
+ :
+ : "t0", "t1", "t2", "t3", "t4"
+ );
+
+ curidx = 13 * qc1;
+ curidx += qc2;
+
+ curidx2 = 13 * qc3;
+ curidx2 += qc4;
+
+ curbits += p_bits[curidx] +
+ p_bits[curidx2] +
+ upair12_sign_bits[curidx] +
+ upair12_sign_bits[curidx2];
+ }
+ return curbits;
+}
+
+static float get_band_numbits_ESC_mips(struct AACEncContext *s,
+ PutBitContext *pb, const float *in,
+ const float *scaled, int size, int scale_idx,
+ int cb, const float lambda, const float uplim,
+ int *bits)
+{
+ const float Q34 = ff_aac_pow34sf_tab[POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512];
+ int i;
+ int qc1, qc2, qc3, qc4;
+ int curbits = 0;
+
+ uint8_t *p_bits = (uint8_t*)ff_aac_spectral_bits[cb-1];
+
+ for (i = 0; i < size; i += 4) {
+ int curidx, curidx2;
+ int cond0, cond1, cond2, cond3;
+ int c1, c2, c3, c4;
+
+ qc1 = scaled[i ] * Q34 + 0.4054f;
+ qc2 = scaled[i+1] * Q34 + 0.4054f;
+ qc3 = scaled[i+2] * Q34 + 0.4054f;
+ qc4 = scaled[i+3] * Q34 + 0.4054f;
+
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+
+ "ori $t4, $zero, 15 \n\t"
+ "ori $t5, $zero, 16 \n\t"
+ "shll_s.w %[c1], %[qc1], 18 \n\t"
+ "shll_s.w %[c2], %[qc2], 18 \n\t"
+ "shll_s.w %[c3], %[qc3], 18 \n\t"
+ "shll_s.w %[c4], %[qc4], 18 \n\t"
+ "srl %[c1], %[c1], 18 \n\t"
+ "srl %[c2], %[c2], 18 \n\t"
+ "srl %[c3], %[c3], 18 \n\t"
+ "srl %[c4], %[c4], 18 \n\t"
+ "slt %[cond0], $t4, %[qc1] \n\t"
+ "slt %[cond1], $t4, %[qc2] \n\t"
+ "slt %[cond2], $t4, %[qc3] \n\t"
+ "slt %[cond3], $t4, %[qc4] \n\t"
+ "movn %[qc1], $t5, %[cond0] \n\t"
+ "movn %[qc2], $t5, %[cond1] \n\t"
+ "movn %[qc3], $t5, %[cond2] \n\t"
+ "movn %[qc4], $t5, %[cond3] \n\t"
+ "ori $t5, $zero, 31 \n\t"
+ "clz %[c1], %[c1] \n\t"
+ "clz %[c2], %[c2] \n\t"
+ "clz %[c3], %[c3] \n\t"
+ "clz %[c4], %[c4] \n\t"
+ "subu %[c1], $t5, %[c1] \n\t"
+ "subu %[c2], $t5, %[c2] \n\t"
+ "subu %[c3], $t5, %[c3] \n\t"
+ "subu %[c4], $t5, %[c4] \n\t"
+ "sll %[c1], %[c1], 1 \n\t"
+ "sll %[c2], %[c2], 1 \n\t"
+ "sll %[c3], %[c3], 1 \n\t"
+ "sll %[c4], %[c4], 1 \n\t"
+ "addiu %[c1], %[c1], -3 \n\t"
+ "addiu %[c2], %[c2], -3 \n\t"
+ "addiu %[c3], %[c3], -3 \n\t"
+ "addiu %[c4], %[c4], -3 \n\t"
+ "subu %[cond0], $zero, %[cond0] \n\t"
+ "subu %[cond1], $zero, %[cond1] \n\t"
+ "subu %[cond2], $zero, %[cond2] \n\t"
+ "subu %[cond3], $zero, %[cond3] \n\t"
+ "and %[c1], %[c1], %[cond0] \n\t"
+ "and %[c2], %[c2], %[cond1] \n\t"
+ "and %[c3], %[c3], %[cond2] \n\t"
+ "and %[c4], %[c4], %[cond3] \n\t"
+
+ ".set pop \n\t"
+
+ : [qc1]"+r"(qc1), [qc2]"+r"(qc2),
+ [qc3]"+r"(qc3), [qc4]"+r"(qc4),
+ [cond0]"=&r"(cond0), [cond1]"=&r"(cond1),
+ [cond2]"=&r"(cond2), [cond3]"=&r"(cond3),
+ [c1]"=&r"(c1), [c2]"=&r"(c2),
+ [c3]"=&r"(c3), [c4]"=&r"(c4)
+ :
+ : "t4", "t5"
+ );
+
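+ /*
+ * The clz sequence above turned each clipped magnitude into its
+ * escape-sequence length, 2 * av_log2(c) - 3 bits, and the final
+ * mask with -cond zeroes it for coefficients below the escape
+ * threshold.
+ */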
+ curidx = 17 * qc1;
+ curidx += qc2;
+
+ curidx2 = 17 * qc3;
+ curidx2 += qc4;
+
+ curbits += p_bits[curidx];
+ curbits += esc_sign_bits[curidx];
+ curbits += p_bits[curidx2];
+ curbits += esc_sign_bits[curidx2];
+
+ curbits += c1;
+ curbits += c2;
+ curbits += c3;
+ curbits += c4;
+ }
+ return curbits;
+}
+
+static float (*const get_band_numbits_arr[])(struct AACEncContext *s,
+ PutBitContext *pb, const float *in,
+ const float *scaled, int size, int scale_idx,
+ int cb, const float lambda, const float uplim,
+ int *bits) = {
+ get_band_numbits_ZERO_mips,
+ get_band_numbits_SQUAD_mips,
+ get_band_numbits_SQUAD_mips,
+ get_band_numbits_UQUAD_mips,
+ get_band_numbits_UQUAD_mips,
+ get_band_numbits_SPAIR_mips,
+ get_band_numbits_SPAIR_mips,
+ get_band_numbits_UPAIR7_mips,
+ get_band_numbits_UPAIR7_mips,
+ get_band_numbits_UPAIR12_mips,
+ get_band_numbits_UPAIR12_mips,
+ get_band_numbits_ESC_mips,
+};
+
+#define get_band_numbits( \
+ s, pb, in, scaled, size, scale_idx, cb, \
+ lambda, uplim, bits) \
+ get_band_numbits_arr[cb]( \
+ s, pb, in, scaled, size, scale_idx, cb, \
+ lambda, uplim, bits)
+
+static float quantize_band_cost_bits(struct AACEncContext *s, const float *in,
+ const float *scaled, int size, int scale_idx,
+ int cb, const float lambda, const float uplim,
+ int *bits)
+{
+ return get_band_numbits(s, NULL, in, scaled, size, scale_idx, cb, lambda, uplim, bits);
+}
+
+/**
+ * Band-cost functions, derived from the same template and specialized
+ * per codebook; they return the rate-distortion cost of coding a band.
+ */
+#if HAVE_MIPSFPU
+static float get_band_cost_ZERO_mips(struct AACEncContext *s,
+ PutBitContext *pb, const float *in,
+ const float *scaled, int size, int scale_idx,
+ int cb, const float lambda, const float uplim,
+ int *bits)
+{
+ int i;
+ float cost = 0;
+
+ for (i = 0; i < size; i += 4) {
+ cost += in[i ] * in[i ];
+ cost += in[i+1] * in[i+1];
+ cost += in[i+2] * in[i+2];
+ cost += in[i+3] * in[i+3];
+ }
+ if (bits)
+ *bits = 0;
+ return cost * lambda;
+}
+
+static float get_band_cost_SQUAD_mips(struct AACEncContext *s,
+ PutBitContext *pb, const float *in,
+ const float *scaled, int size, int scale_idx,
+ int cb, const float lambda, const float uplim,
+ int *bits)
+{
+ const float Q34 = ff_aac_pow34sf_tab[POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512];
+ const float IQ = ff_aac_pow2sf_tab [POW_SF2_ZERO + scale_idx - SCALE_ONE_POS + SCALE_DIV_512];
+ int i;
+ float cost = 0;
+ int qc1, qc2, qc3, qc4;
+ int curbits = 0;
+
+ uint8_t *p_bits = (uint8_t *)ff_aac_spectral_bits[cb-1];
+ float *p_codes = (float *)ff_aac_codebook_vectors[cb-1];
+
+ for (i = 0; i < size; i += 4) {
+ const float *vec;
+ int curidx;
+ int *in_int = (int *)&in[i];
+ float *in_pos = (float *)&in[i];
+ float di0, di1, di2, di3;
+
+ qc1 = scaled[i ] * Q34 + 0.4054f;
+ qc2 = scaled[i+1] * Q34 + 0.4054f;
+ qc3 = scaled[i+2] * Q34 + 0.4054f;
+ qc4 = scaled[i+3] * Q34 + 0.4054f;
+
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+
+ "slt %[qc1], $zero, %[qc1] \n\t"
+ "slt %[qc2], $zero, %[qc2] \n\t"
+ "slt %[qc3], $zero, %[qc3] \n\t"
+ "slt %[qc4], $zero, %[qc4] \n\t"
+ "lw $t0, 0(%[in_int]) \n\t"
+ "lw $t1, 4(%[in_int]) \n\t"
+ "lw $t2, 8(%[in_int]) \n\t"
+ "lw $t3, 12(%[in_int]) \n\t"
+ "srl $t0, $t0, 31 \n\t"
+ "srl $t1, $t1, 31 \n\t"
+ "srl $t2, $t2, 31 \n\t"
+ "srl $t3, $t3, 31 \n\t"
+ "subu $t4, $zero, %[qc1] \n\t"
+ "subu $t5, $zero, %[qc2] \n\t"
+ "subu $t6, $zero, %[qc3] \n\t"
+ "subu $t7, $zero, %[qc4] \n\t"
+ "movn %[qc1], $t4, $t0 \n\t"
+ "movn %[qc2], $t5, $t1 \n\t"
+ "movn %[qc3], $t6, $t2 \n\t"
+ "movn %[qc4], $t7, $t3 \n\t"
+
+ ".set pop \n\t"
+
+ : [qc1]"+r"(qc1), [qc2]"+r"(qc2),
+ [qc3]"+r"(qc3), [qc4]"+r"(qc4)
+ : [in_int]"r"(in_int)
+ : "t0", "t1", "t2", "t3",
+ "t4", "t5", "t6", "t7",
+ "memory"
+ );
+
+ curidx = qc1;
+ curidx *= 3;
+ curidx += qc2;
+ curidx *= 3;
+ curidx += qc3;
+ curidx *= 3;
+ curidx += qc4;
+ curidx += 40;
+
+ curbits += p_bits[curidx];
+ vec = &p_codes[curidx*4];
+
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+
+ "lwc1 $f0, 0(%[in_pos]) \n\t"
+ "lwc1 $f1, 0(%[vec]) \n\t"
+ "lwc1 $f2, 4(%[in_pos]) \n\t"
+ "lwc1 $f3, 4(%[vec]) \n\t"
+ "lwc1 $f4, 8(%[in_pos]) \n\t"
+ "lwc1 $f5, 8(%[vec]) \n\t"
+ "lwc1 $f6, 12(%[in_pos]) \n\t"
+ "lwc1 $f7, 12(%[vec]) \n\t"
+ "nmsub.s %[di0], $f0, $f1, %[IQ] \n\t"
+ "nmsub.s %[di1], $f2, $f3, %[IQ] \n\t"
+ "nmsub.s %[di2], $f4, $f5, %[IQ] \n\t"
+ "nmsub.s %[di3], $f6, $f7, %[IQ] \n\t"
+
+ ".set pop \n\t"
+
+ : [di0]"=&f"(di0), [di1]"=&f"(di1),
+ [di2]"=&f"(di2), [di3]"=&f"(di3)
+ : [in_pos]"r"(in_pos), [vec]"r"(vec),
+ [IQ]"f"(IQ)
+ : "$f0", "$f1", "$f2", "$f3",
+ "$f4", "$f5", "$f6", "$f7",
+ "memory"
+ );
+
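+ /*
+ * Each nmsub.s above computes in - vec * IQ in one fused
+ * operation, i.e. the dequantization error of one coefficient.
+ */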
+ cost += di0 * di0 + di1 * di1
+ + di2 * di2 + di3 * di3;
+ }
+
+ if (bits)
+ *bits = curbits;
+ return cost * lambda + curbits;
+}
+
+static float get_band_cost_UQUAD_mips(struct AACEncContext *s,
+ PutBitContext *pb, const float *in,
+ const float *scaled, int size, int scale_idx,
+ int cb, const float lambda, const float uplim,
+ int *bits)
+{
+ const float Q34 = ff_aac_pow34sf_tab[POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512];
+ const float IQ = ff_aac_pow2sf_tab [POW_SF2_ZERO + scale_idx - SCALE_ONE_POS + SCALE_DIV_512];
+ int i;
+ float cost = 0;
+ int curbits = 0;
+ int qc1, qc2, qc3, qc4;
+
+ uint8_t *p_bits = (uint8_t*)ff_aac_spectral_bits[cb-1];
+ float *p_codes = (float *)ff_aac_codebook_vectors[cb-1];
+
+ for (i = 0; i < size; i += 4) {
+ const float *vec;
+ int curidx;
+ float *in_pos = (float *)&in[i];
+ float di0, di1, di2, di3;
+
+ qc1 = scaled[i ] * Q34 + 0.4054f;
+ qc2 = scaled[i+1] * Q34 + 0.4054f;
+ qc3 = scaled[i+2] * Q34 + 0.4054f;
+ qc4 = scaled[i+3] * Q34 + 0.4054f;
+
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+
+ "ori $t4, $zero, 2 \n\t"
+ "slt $t0, $t4, %[qc1] \n\t"
+ "slt $t1, $t4, %[qc2] \n\t"
+ "slt $t2, $t4, %[qc3] \n\t"
+ "slt $t3, $t4, %[qc4] \n\t"
+ "movn %[qc1], $t4, $t0 \n\t"
+ "movn %[qc2], $t4, $t1 \n\t"
+ "movn %[qc3], $t4, $t2 \n\t"
+ "movn %[qc4], $t4, $t3 \n\t"
+
+ ".set pop \n\t"
+
+ : [qc1]"+r"(qc1), [qc2]"+r"(qc2),
+ [qc3]"+r"(qc3), [qc4]"+r"(qc4)
+ :
+ : "t0", "t1", "t2", "t3", "t4"
+ );
+
+ curidx = qc1;
+ curidx *= 3;
+ curidx += qc2;
+ curidx *= 3;
+ curidx += qc3;
+ curidx *= 3;
+ curidx += qc4;
+
+ curbits += p_bits[curidx];
+ curbits += uquad_sign_bits[curidx];
+ vec = &p_codes[curidx*4];
+
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+
+ "lwc1 %[di0], 0(%[in_pos]) \n\t"
+ "lwc1 %[di1], 4(%[in_pos]) \n\t"
+ "lwc1 %[di2], 8(%[in_pos]) \n\t"
+ "lwc1 %[di3], 12(%[in_pos]) \n\t"
+ "abs.s %[di0], %[di0] \n\t"
+ "abs.s %[di1], %[di1] \n\t"
+ "abs.s %[di2], %[di2] \n\t"
+ "abs.s %[di3], %[di3] \n\t"
+ "lwc1 $f0, 0(%[vec]) \n\t"
+ "lwc1 $f1, 4(%[vec]) \n\t"
+ "lwc1 $f2, 8(%[vec]) \n\t"
+ "lwc1 $f3, 12(%[vec]) \n\t"
+ "nmsub.s %[di0], %[di0], $f0, %[IQ] \n\t"
+ "nmsub.s %[di1], %[di1], $f1, %[IQ] \n\t"
+ "nmsub.s %[di2], %[di2], $f2, %[IQ] \n\t"
+ "nmsub.s %[di3], %[di3], $f3, %[IQ] \n\t"
+
+ ".set pop \n\t"
+
+ : [di0]"=&f"(di0), [di1]"=&f"(di1),
+ [di2]"=&f"(di2), [di3]"=&f"(di3)
+ : [in_pos]"r"(in_pos), [vec]"r"(vec),
+ [IQ]"f"(IQ)
+ : "$f0", "$f1", "$f2", "$f3",
+ "memory"
+ );
+
+ cost += di0 * di0 + di1 * di1
+ + di2 * di2 + di3 * di3;
+ }
+
+ if (bits)
+ *bits = curbits;
+ return cost * lambda + curbits;
+}
+
+static float get_band_cost_SPAIR_mips(struct AACEncContext *s,
+ PutBitContext *pb, const float *in,
+ const float *scaled, int size, int scale_idx,
+ int cb, const float lambda, const float uplim,
+ int *bits)
+{
+ const float Q34 = ff_aac_pow34sf_tab[POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512];
+ const float IQ = ff_aac_pow2sf_tab [POW_SF2_ZERO + scale_idx - SCALE_ONE_POS + SCALE_DIV_512];
+ int i;
+ float cost = 0;
+ int qc1, qc2, qc3, qc4;
+ int curbits = 0;
+
+ uint8_t *p_bits = (uint8_t *)ff_aac_spectral_bits[cb-1];
+ float *p_codes = (float *)ff_aac_codebook_vectors[cb-1];
+
+ for (i = 0; i < size; i += 4) {
+ const float *vec, *vec2;
+ int curidx, curidx2;
+ int *in_int = (int *)&in[i];
+ float *in_pos = (float *)&in[i];
+ float di0, di1, di2, di3;
+
+ qc1 = scaled[i ] * Q34 + 0.4054f;
+ qc2 = scaled[i+1] * Q34 + 0.4054f;
+ qc3 = scaled[i+2] * Q34 + 0.4054f;
+ qc4 = scaled[i+3] * Q34 + 0.4054f;
+
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+
+ "ori $t4, $zero, 4 \n\t"
+ "slt $t0, $t4, %[qc1] \n\t"
+ "slt $t1, $t4, %[qc2] \n\t"
+ "slt $t2, $t4, %[qc3] \n\t"
+ "slt $t3, $t4, %[qc4] \n\t"
+ "movn %[qc1], $t4, $t0 \n\t"
+ "movn %[qc2], $t4, $t1 \n\t"
+ "movn %[qc3], $t4, $t2 \n\t"
+ "movn %[qc4], $t4, $t3 \n\t"
+ "lw $t0, 0(%[in_int]) \n\t"
+ "lw $t1, 4(%[in_int]) \n\t"
+ "lw $t2, 8(%[in_int]) \n\t"
+ "lw $t3, 12(%[in_int]) \n\t"
+ "srl $t0, $t0, 31 \n\t"
+ "srl $t1, $t1, 31 \n\t"
+ "srl $t2, $t2, 31 \n\t"
+ "srl $t3, $t3, 31 \n\t"
+ "subu $t4, $zero, %[qc1] \n\t"
+ "subu $t5, $zero, %[qc2] \n\t"
+ "subu $t6, $zero, %[qc3] \n\t"
+ "subu $t7, $zero, %[qc4] \n\t"
+ "movn %[qc1], $t4, $t0 \n\t"
+ "movn %[qc2], $t5, $t1 \n\t"
+ "movn %[qc3], $t6, $t2 \n\t"
+ "movn %[qc4], $t7, $t3 \n\t"
+
+ ".set pop \n\t"
+
+ : [qc1]"+r"(qc1), [qc2]"+r"(qc2),
+ [qc3]"+r"(qc3), [qc4]"+r"(qc4)
+ : [in_int]"r"(in_int)
+ : "t0", "t1", "t2", "t3",
+ "t4", "t5", "t6", "t7",
+ "memory"
+ );
+
+ curidx = 9 * qc1;
+ curidx += qc2 + 40;
+
+ curidx2 = 9 * qc3;
+ curidx2 += qc4 + 40;
+
+ curbits += p_bits[curidx];
+ curbits += p_bits[curidx2];
+
+ vec = &p_codes[curidx*2];
+ vec2 = &p_codes[curidx2*2];
+
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+
+ "lwc1 $f0, 0(%[in_pos]) \n\t"
+ "lwc1 $f1, 0(%[vec]) \n\t"
+ "lwc1 $f2, 4(%[in_pos]) \n\t"
+ "lwc1 $f3, 4(%[vec]) \n\t"
+ "lwc1 $f4, 8(%[in_pos]) \n\t"
+ "lwc1 $f5, 0(%[vec2]) \n\t"
+ "lwc1 $f6, 12(%[in_pos]) \n\t"
+ "lwc1 $f7, 4(%[vec2]) \n\t"
+ "nmsub.s %[di0], $f0, $f1, %[IQ] \n\t"
+ "nmsub.s %[di1], $f2, $f3, %[IQ] \n\t"
+ "nmsub.s %[di2], $f4, $f5, %[IQ] \n\t"
+ "nmsub.s %[di3], $f6, $f7, %[IQ] \n\t"
+
+ ".set pop \n\t"
+
+ : [di0]"=&f"(di0), [di1]"=&f"(di1),
+ [di2]"=&f"(di2), [di3]"=&f"(di3)
+ : [in_pos]"r"(in_pos), [vec]"r"(vec),
+ [vec2]"r"(vec2), [IQ]"f"(IQ)
+ : "$f0", "$f1", "$f2", "$f3",
+ "$f4", "$f5", "$f6", "$f7",
+ "memory"
+ );
+
+ cost += di0 * di0 + di1 * di1
+ + di2 * di2 + di3 * di3;
+ }
+
+ if (bits)
+ *bits = curbits;
+ return cost * lambda + curbits;
+}
+
+static float get_band_cost_UPAIR7_mips(struct AACEncContext *s,
+ PutBitContext *pb, const float *in,
+ const float *scaled, int size, int scale_idx,
+ int cb, const float lambda, const float uplim,
+ int *bits)
+{
+ const float Q34 = ff_aac_pow34sf_tab[POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512];
+ const float IQ = ff_aac_pow2sf_tab [POW_SF2_ZERO + scale_idx - SCALE_ONE_POS + SCALE_DIV_512];
+ int i;
+ float cost = 0;
+ int qc1, qc2, qc3, qc4;
+ int curbits = 0;
+
+ uint8_t *p_bits = (uint8_t *)ff_aac_spectral_bits[cb-1];
+ float *p_codes = (float *)ff_aac_codebook_vectors[cb-1];
+
+ for (i = 0; i < size; i += 4) {
+ const float *vec, *vec2;
+ int curidx, curidx2, sign1, count1, sign2, count2;
+ int *in_int = (int *)&in[i];
+ float *in_pos = (float *)&in[i];
+ float di0, di1, di2, di3;
+
+ qc1 = scaled[i ] * Q34 + 0.4054f;
+ qc2 = scaled[i+1] * Q34 + 0.4054f;
+ qc3 = scaled[i+2] * Q34 + 0.4054f;
+ qc4 = scaled[i+3] * Q34 + 0.4054f;
+
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+
+ "ori $t4, $zero, 7 \n\t"
+ "ori %[sign1], $zero, 0 \n\t"
+ "ori %[sign2], $zero, 0 \n\t"
+ "slt $t0, $t4, %[qc1] \n\t"
+ "slt $t1, $t4, %[qc2] \n\t"
+ "slt $t2, $t4, %[qc3] \n\t"
+ "slt $t3, $t4, %[qc4] \n\t"
+ "movn %[qc1], $t4, $t0 \n\t"
+ "movn %[qc2], $t4, $t1 \n\t"
+ "movn %[qc3], $t4, $t2 \n\t"
+ "movn %[qc4], $t4, $t3 \n\t"
+ "lw $t0, 0(%[in_int]) \n\t"
+ "lw $t1, 4(%[in_int]) \n\t"
+ "lw $t2, 8(%[in_int]) \n\t"
+ "lw $t3, 12(%[in_int]) \n\t"
+ "slt $t0, $t0, $zero \n\t"
+ "movn %[sign1], $t0, %[qc1] \n\t"
+ "slt $t2, $t2, $zero \n\t"
+ "movn %[sign2], $t2, %[qc3] \n\t"
+ "slt $t1, $t1, $zero \n\t"
+ "sll $t0, %[sign1], 1 \n\t"
+ "or $t0, $t0, $t1 \n\t"
+ "movn %[sign1], $t0, %[qc2] \n\t"
+ "slt $t3, $t3, $zero \n\t"
+ "sll $t0, %[sign2], 1 \n\t"
+ "or $t0, $t0, $t3 \n\t"
+ "movn %[sign2], $t0, %[qc4] \n\t"
+ "slt %[count1], $zero, %[qc1] \n\t"
+ "slt $t1, $zero, %[qc2] \n\t"
+ "slt %[count2], $zero, %[qc3] \n\t"
+ "slt $t2, $zero, %[qc4] \n\t"
+ "addu %[count1], %[count1], $t1 \n\t"
+ "addu %[count2], %[count2], $t2 \n\t"
+
+ ".set pop \n\t"
+
+ : [qc1]"+r"(qc1), [qc2]"+r"(qc2),
+ [qc3]"+r"(qc3), [qc4]"+r"(qc4),
+ [sign1]"=&r"(sign1), [count1]"=&r"(count1),
+ [sign2]"=&r"(sign2), [count2]"=&r"(count2)
+ : [in_int]"r"(in_int)
+ : "t0", "t1", "t2", "t3", "t4",
+ "memory"
+ );
+
+ curidx = 8 * qc1;
+ curidx += qc2;
+
+ curidx2 = 8 * qc3;
+ curidx2 += qc4;
+
+ curbits += p_bits[curidx];
+ curbits += upair7_sign_bits[curidx];
+ vec = &p_codes[curidx*2];
+
+ curbits += p_bits[curidx2];
+ curbits += upair7_sign_bits[curidx2];
+ vec2 = &p_codes[curidx2*2];
+
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+
+ "lwc1 %[di0], 0(%[in_pos]) \n\t"
+ "lwc1 %[di1], 4(%[in_pos]) \n\t"
+ "lwc1 %[di2], 8(%[in_pos]) \n\t"
+ "lwc1 %[di3], 12(%[in_pos]) \n\t"
+ "abs.s %[di0], %[di0] \n\t"
+ "abs.s %[di1], %[di1] \n\t"
+ "abs.s %[di2], %[di2] \n\t"
+ "abs.s %[di3], %[di3] \n\t"
+ "lwc1 $f0, 0(%[vec]) \n\t"
+ "lwc1 $f1, 4(%[vec]) \n\t"
+ "lwc1 $f2, 0(%[vec2]) \n\t"
+ "lwc1 $f3, 4(%[vec2]) \n\t"
+ "nmsub.s %[di0], %[di0], $f0, %[IQ] \n\t"
+ "nmsub.s %[di1], %[di1], $f1, %[IQ] \n\t"
+ "nmsub.s %[di2], %[di2], $f2, %[IQ] \n\t"
+ "nmsub.s %[di3], %[di3], $f3, %[IQ] \n\t"
+
+ ".set pop \n\t"
+
+ : [di0]"=&f"(di0), [di1]"=&f"(di1),
+ [di2]"=&f"(di2), [di3]"=&f"(di3)
+ : [in_pos]"r"(in_pos), [vec]"r"(vec),
+ [vec2]"r"(vec2), [IQ]"f"(IQ)
+ : "$f0", "$f1", "$f2", "$f3",
+ "memory"
+ );
+
+ cost += di0 * di0 + di1 * di1
+ + di2 * di2 + di3 * di3;
+ }
+
+ if (bits)
+ *bits = curbits;
+ return cost * lambda + curbits;
+}
+
+static float get_band_cost_UPAIR12_mips(struct AACEncContext *s,
+ PutBitContext *pb, const float *in,
+ const float *scaled, int size, int scale_idx,
+ int cb, const float lambda, const float uplim,
+ int *bits)
+{
+ const float Q34 = ff_aac_pow34sf_tab[POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512];
+ const float IQ = ff_aac_pow2sf_tab [POW_SF2_ZERO + scale_idx - SCALE_ONE_POS + SCALE_DIV_512];
+ int i;
+ float cost = 0;
+ int qc1, qc2, qc3, qc4;
+ int curbits = 0;
+
+ uint8_t *p_bits = (uint8_t *)ff_aac_spectral_bits[cb-1];
+ float *p_codes = (float *)ff_aac_codebook_vectors[cb-1];
+
+ for (i = 0; i < size; i += 4) {
+ const float *vec, *vec2;
+ int curidx, curidx2;
+ int sign1, count1, sign2, count2;
+ int *in_int = (int *)&in[i];
+ float *in_pos = (float *)&in[i];
+ float di0, di1, di2, di3;
+
+ qc1 = scaled[i ] * Q34 + 0.4054f;
+ qc2 = scaled[i+1] * Q34 + 0.4054f;
+ qc3 = scaled[i+2] * Q34 + 0.4054f;
+ qc4 = scaled[i+3] * Q34 + 0.4054f;
+
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+
+ "ori $t4, $zero, 12 \n\t"
+ "ori %[sign1], $zero, 0 \n\t"
+ "ori %[sign2], $zero, 0 \n\t"
+ "slt $t0, $t4, %[qc1] \n\t"
+ "slt $t1, $t4, %[qc2] \n\t"
+ "slt $t2, $t4, %[qc3] \n\t"
+ "slt $t3, $t4, %[qc4] \n\t"
+ "movn %[qc1], $t4, $t0 \n\t"
+ "movn %[qc2], $t4, $t1 \n\t"
+ "movn %[qc3], $t4, $t2 \n\t"
+ "movn %[qc4], $t4, $t3 \n\t"
+ "lw $t0, 0(%[in_int]) \n\t"
+ "lw $t1, 4(%[in_int]) \n\t"
+ "lw $t2, 8(%[in_int]) \n\t"
+ "lw $t3, 12(%[in_int]) \n\t"
+ "slt $t0, $t0, $zero \n\t"
+ "movn %[sign1], $t0, %[qc1] \n\t"
+ "slt $t2, $t2, $zero \n\t"
+ "movn %[sign2], $t2, %[qc3] \n\t"
+ "slt $t1, $t1, $zero \n\t"
+ "sll $t0, %[sign1], 1 \n\t"
+ "or $t0, $t0, $t1 \n\t"
+ "movn %[sign1], $t0, %[qc2] \n\t"
+ "slt $t3, $t3, $zero \n\t"
+ "sll $t0, %[sign2], 1 \n\t"
+ "or $t0, $t0, $t3 \n\t"
+ "movn %[sign2], $t0, %[qc4] \n\t"
+ "slt %[count1], $zero, %[qc1] \n\t"
+ "slt $t1, $zero, %[qc2] \n\t"
+ "slt %[count2], $zero, %[qc3] \n\t"
+ "slt $t2, $zero, %[qc4] \n\t"
+ "addu %[count1], %[count1], $t1 \n\t"
+ "addu %[count2], %[count2], $t2 \n\t"
+
+ ".set pop \n\t"
+
+ : [qc1]"+r"(qc1), [qc2]"+r"(qc2),
+ [qc3]"+r"(qc3), [qc4]"+r"(qc4),
+ [sign1]"=&r"(sign1), [count1]"=&r"(count1),
+ [sign2]"=&r"(sign2), [count2]"=&r"(count2)
+ : [in_int]"r"(in_int)
+ : "t0", "t1", "t2", "t3", "t4",
+ "memory"
+ );
+
+ curidx = 13 * qc1;
+ curidx += qc2;
+
+ curidx2 = 13 * qc3;
+ curidx2 += qc4;
+
+ curbits += p_bits[curidx];
+ curbits += p_bits[curidx2];
+ curbits += upair12_sign_bits[curidx];
+ curbits += upair12_sign_bits[curidx2];
+ vec = &p_codes[curidx*2];
+ vec2 = &p_codes[curidx2*2];
+
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+
+ "lwc1 %[di0], 0(%[in_pos]) \n\t"
+ "lwc1 %[di1], 4(%[in_pos]) \n\t"
+ "lwc1 %[di2], 8(%[in_pos]) \n\t"
+ "lwc1 %[di3], 12(%[in_pos]) \n\t"
+ "abs.s %[di0], %[di0] \n\t"
+ "abs.s %[di1], %[di1] \n\t"
+ "abs.s %[di2], %[di2] \n\t"
+ "abs.s %[di3], %[di3] \n\t"
+ "lwc1 $f0, 0(%[vec]) \n\t"
+ "lwc1 $f1, 4(%[vec]) \n\t"
+ "lwc1 $f2, 0(%[vec2]) \n\t"
+ "lwc1 $f3, 4(%[vec2]) \n\t"
+ "nmsub.s %[di0], %[di0], $f0, %[IQ] \n\t"
+ "nmsub.s %[di1], %[di1], $f1, %[IQ] \n\t"
+ "nmsub.s %[di2], %[di2], $f2, %[IQ] \n\t"
+ "nmsub.s %[di3], %[di3], $f3, %[IQ] \n\t"
+
+ ".set pop \n\t"
+
+ : [di0]"=&f"(di0), [di1]"=&f"(di1),
+ [di2]"=&f"(di2), [di3]"=&f"(di3)
+ : [in_pos]"r"(in_pos), [vec]"r"(vec),
+ [vec2]"r"(vec2), [IQ]"f"(IQ)
+ : "$f0", "$f1", "$f2", "$f3",
+ "memory"
+ );
+
+ cost += di0 * di0 + di1 * di1
+ + di2 * di2 + di3 * di3;
+ }
+
+ if (bits)
+ *bits = curbits;
+ return cost * lambda + curbits;
+}
+
+static float get_band_cost_ESC_mips(struct AACEncContext *s,
+ PutBitContext *pb, const float *in,
+ const float *scaled, int size, int scale_idx,
+ int cb, const float lambda, const float uplim,
+ int *bits)
+{
+ const float Q34 = ff_aac_pow34sf_tab[POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512];
+ const float IQ = ff_aac_pow2sf_tab [POW_SF2_ZERO + scale_idx - SCALE_ONE_POS + SCALE_DIV_512];
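+ /* 165140.0f is approximately 8192^(4/3), the ceiling for dequantized escape magnitudes. */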
+ const float CLIPPED_ESCAPE = 165140.0f * IQ;
+ int i;
+ float cost = 0;
+ int qc1, qc2, qc3, qc4;
+ int curbits = 0;
+
+ uint8_t *p_bits = (uint8_t*)ff_aac_spectral_bits[cb-1];
+ float *p_codes = (float* )ff_aac_codebook_vectors[cb-1];
+
+ for (i = 0; i < size; i += 4) {
+ const float *vec, *vec2;
+ int curidx, curidx2;
+ float t1, t2, t3, t4;
+ float di1, di2, di3, di4;
+ int cond0, cond1, cond2, cond3;
+ int c1, c2, c3, c4;
+
+ qc1 = scaled[i ] * Q34 + 0.4054f;
+ qc2 = scaled[i+1] * Q34 + 0.4054f;
+ qc3 = scaled[i+2] * Q34 + 0.4054f;
+ qc4 = scaled[i+3] * Q34 + 0.4054f;
+
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+
+ "ori $t4, $zero, 15 \n\t"
+ "ori $t5, $zero, 16 \n\t"
+ "shll_s.w %[c1], %[qc1], 18 \n\t"
+ "shll_s.w %[c2], %[qc2], 18 \n\t"
+ "shll_s.w %[c3], %[qc3], 18 \n\t"
+ "shll_s.w %[c4], %[qc4], 18 \n\t"
+ "srl %[c1], %[c1], 18 \n\t"
+ "srl %[c2], %[c2], 18 \n\t"
+ "srl %[c3], %[c3], 18 \n\t"
+ "srl %[c4], %[c4], 18 \n\t"
+ "slt %[cond0], $t4, %[qc1] \n\t"
+ "slt %[cond1], $t4, %[qc2] \n\t"
+ "slt %[cond2], $t4, %[qc3] \n\t"
+ "slt %[cond3], $t4, %[qc4] \n\t"
+ "movn %[qc1], $t5, %[cond0] \n\t"
+ "movn %[qc2], $t5, %[cond1] \n\t"
+ "movn %[qc3], $t5, %[cond2] \n\t"
+ "movn %[qc4], $t5, %[cond3] \n\t"
+
+ ".set pop \n\t"
+
+ : [qc1]"+r"(qc1), [qc2]"+r"(qc2),
+ [qc3]"+r"(qc3), [qc4]"+r"(qc4),
+ [cond0]"=&r"(cond0), [cond1]"=&r"(cond1),
+ [cond2]"=&r"(cond2), [cond3]"=&r"(cond3),
+ [c1]"=&r"(c1), [c2]"=&r"(c2),
+ [c3]"=&r"(c3), [c4]"=&r"(c4)
+ :
+ : "t4", "t5"
+ );
+
+ curidx = 17 * qc1;
+ curidx += qc2;
+
+ curidx2 = 17 * qc3;
+ curidx2 += qc4;
+
+ curbits += p_bits[curidx];
+ curbits += esc_sign_bits[curidx];
+ vec = &p_codes[curidx*2];
+
+ curbits += p_bits[curidx2];
+ curbits += esc_sign_bits[curidx2];
+ vec2 = &p_codes[curidx2*2];
+
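+ /* AAC escape sequences are (N-4) one bits, a zero, then the N low bits
+ * of the magnitude, where N = floor(log2(c)), i.e. 2*N - 3 bits in
+ * total; -condX is all-ones only for escapes, masking the term in. */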
+ curbits += (av_log2(c1) * 2 - 3) & (-cond0);
+ curbits += (av_log2(c2) * 2 - 3) & (-cond1);
+ curbits += (av_log2(c3) * 2 - 3) & (-cond2);
+ curbits += (av_log2(c4) * 2 - 3) & (-cond3);
+
+ t1 = fabsf(in[i ]);
+ t2 = fabsf(in[i+1]);
+ t3 = fabsf(in[i+2]);
+ t4 = fabsf(in[i+3]);
+
+ if (cond0) {
+ if (t1 >= CLIPPED_ESCAPE) {
+ di1 = t1 - CLIPPED_ESCAPE;
+ } else {
+ di1 = t1 - c1 * cbrtf(c1) * IQ;
+ }
+ } else
+ di1 = t1 - vec[0] * IQ;
+
+ if (cond1) {
+ if (t2 >= CLIPPED_ESCAPE) {
+ di2 = t2 - CLIPPED_ESCAPE;
+ } else {
+ di2 = t2 - c2 * cbrtf(c2) * IQ;
+ }
+ } else
+ di2 = t2 - vec[1] * IQ;
+
+ if (cond2) {
+ if (t3 >= CLIPPED_ESCAPE) {
+ di3 = t3 - CLIPPED_ESCAPE;
+ } else {
+ di3 = t3 - c3 * cbrtf(c3) * IQ;
+ }
+ } else
+ di3 = t3 - vec2[0] * IQ;
+
+ if (cond3) {
+ if (t4 >= CLIPPED_ESCAPE) {
+ di4 = t4 - CLIPPED_ESCAPE;
+ } else {
+ di4 = t4 - c4 * cbrtf(c4) * IQ;
+ }
+ } else
+ di4 = t4 - vec2[1]*IQ;
+
+ cost += di1 * di1 + di2 * di2
+ + di3 * di3 + di4 * di4;
+ }
+
+ if (bits)
+ *bits = curbits;
+ return cost * lambda + curbits;
+}
+
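+/* Cost-function dispatch indexed by codebook: 0 is the zero codebook, 1-2
+ * signed quads, 3-4 unsigned quads, 5-6 signed pairs, 7-8 unsigned pairs
+ * with range 7, 9-10 unsigned pairs with range 12, and 11 the escape
+ * codebook. */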
+static float (*const get_band_cost_arr[])(struct AACEncContext *s,
+ PutBitContext *pb, const float *in,
+ const float *scaled, int size, int scale_idx,
+ int cb, const float lambda, const float uplim,
+ int *bits) = {
+ get_band_cost_ZERO_mips,
+ get_band_cost_SQUAD_mips,
+ get_band_cost_SQUAD_mips,
+ get_band_cost_UQUAD_mips,
+ get_band_cost_UQUAD_mips,
+ get_band_cost_SPAIR_mips,
+ get_band_cost_SPAIR_mips,
+ get_band_cost_UPAIR7_mips,
+ get_band_cost_UPAIR7_mips,
+ get_band_cost_UPAIR12_mips,
+ get_band_cost_UPAIR12_mips,
+ get_band_cost_ESC_mips,
+};
+
+#define get_band_cost( \
+ s, pb, in, scaled, size, scale_idx, cb, \
+ lambda, uplim, bits) \
+ get_band_cost_arr[cb]( \
+ s, pb, in, scaled, size, scale_idx, cb, \
+ lambda, uplim, bits)
+
+static float quantize_band_cost(struct AACEncContext *s, const float *in,
+ const float *scaled, int size, int scale_idx,
+ int cb, const float lambda, const float uplim,
+ int *bits)
+{
+ return get_band_cost(s, NULL, in, scaled, size, scale_idx, cb, lambda, uplim, bits);
+}
+
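+/* Two-loop scalefactor search: the inner loop shifts all scalefactors by a
+ * shrinking step until the frame fits the bit budget, then the outer loop
+ * lowers the scalefactor of any band whose distortion exceeds its
+ * psychoacoustic threshold; this repeats until stable or 10 iterations. */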
+static void search_for_quantizers_twoloop_mips(AVCodecContext *avctx,
+ AACEncContext *s,
+ SingleChannelElement *sce,
+ const float lambda)
+{
+ int start = 0, i, w, w2, g;
+ int destbits = avctx->bit_rate * 1024.0 / avctx->sample_rate / avctx->channels;
+ float dists[128] = { 0 }, uplims[128];
+ float maxvals[128];
+ int fflag, minscaler;
+ int its = 0;
+ int allz = 0;
+ float minthr = INFINITY;
+
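+ /* cap the per-channel bit budget; this stays below the 6144-bit
+ * per-channel buffer limit of AAC, presumably leaving headroom for
+ * side information */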
+ destbits = FFMIN(destbits, 5800);
+ for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
+ for (g = 0; g < sce->ics.num_swb; g++) {
+ int nz = 0;
+ float uplim = 0.0f;
+ for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
+ FFPsyBand *band = &s->psy.ch[s->cur_channel].psy_bands[(w+w2)*16+g];
+ uplim += band->threshold;
+ if (band->energy <= band->threshold || band->threshold == 0.0f) {
+ sce->zeroes[(w+w2)*16+g] = 1;
+ continue;
+ }
+ nz = 1;
+ }
+ uplims[w*16+g] = uplim * 512;
+ sce->zeroes[w*16+g] = !nz;
+ if (nz)
+ minthr = FFMIN(minthr, uplim);
+ allz |= nz;
+ }
+ }
+ for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
+ for (g = 0; g < sce->ics.num_swb; g++) {
+ if (sce->zeroes[w*16+g]) {
+ sce->sf_idx[w*16+g] = SCALE_ONE_POS;
+ continue;
+ }
+ sce->sf_idx[w*16+g] = SCALE_ONE_POS + FFMIN(log2f(uplims[w*16+g]/minthr)*4,59);
+ }
+ }
+
+ if (!allz)
+ return;
+ abs_pow34_v(s->scoefs, sce->coeffs, 1024);
+
+ for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
+ start = w*128;
+ for (g = 0; g < sce->ics.num_swb; g++) {
+ const float *scaled = s->scoefs + start;
+ maxvals[w*16+g] = find_max_val(sce->ics.group_len[w], sce->ics.swb_sizes[g], scaled);
+ start += sce->ics.swb_sizes[g];
+ }
+ }
+
+ do {
+ int tbits, qstep;
+ minscaler = sce->sf_idx[0];
+ qstep = its ? 1 : 32;
+ do {
+ int prev = -1;
+ tbits = 0;
+ fflag = 0;
+
+ if (qstep > 1) {
+ for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
+ start = w*128;
+ for (g = 0; g < sce->ics.num_swb; g++) {
+ const float *coefs = sce->coeffs + start;
+ const float *scaled = s->scoefs + start;
+ int bits = 0;
+ int cb;
+
+ if (sce->zeroes[w*16+g] || sce->sf_idx[w*16+g] >= 218) {
+ start += sce->ics.swb_sizes[g];
+ continue;
+ }
+ minscaler = FFMIN(minscaler, sce->sf_idx[w*16+g]);
+ cb = find_min_book(maxvals[w*16+g], sce->sf_idx[w*16+g]);
+ for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
+ int b;
+ bits += quantize_band_cost_bits(s, coefs + w2*128,
+ scaled + w2*128,
+ sce->ics.swb_sizes[g],
+ sce->sf_idx[w*16+g],
+ cb,
+ 1.0f,
+ INFINITY,
+ &b);
+ }
+ if (prev != -1) {
+ bits += ff_aac_scalefactor_bits[sce->sf_idx[w*16+g] - prev + SCALE_DIFF_ZERO];
+ }
+ tbits += bits;
+ start += sce->ics.swb_sizes[g];
+ prev = sce->sf_idx[w*16+g];
+ }
+ }
+ }
+ else {
+ for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
+ start = w*128;
+ for (g = 0; g < sce->ics.num_swb; g++) {
+ const float *coefs = sce->coeffs + start;
+ const float *scaled = s->scoefs + start;
+ int bits = 0;
+ int cb;
+ float dist = 0.0f;
+
+ if (sce->zeroes[w*16+g] || sce->sf_idx[w*16+g] >= 218) {
+ start += sce->ics.swb_sizes[g];
+ continue;
+ }
+ minscaler = FFMIN(minscaler, sce->sf_idx[w*16+g]);
+ cb = find_min_book(maxvals[w*16+g], sce->sf_idx[w*16+g]);
+ for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
+ int b;
+ dist += quantize_band_cost(s, coefs + w2*128,
+ scaled + w2*128,
+ sce->ics.swb_sizes[g],
+ sce->sf_idx[w*16+g],
+ cb,
+ 1.0f,
+ INFINITY,
+ &b);
+ bits += b;
+ }
+ dists[w*16+g] = dist - bits;
+ if (prev != -1) {
+ bits += ff_aac_scalefactor_bits[sce->sf_idx[w*16+g] - prev + SCALE_DIFF_ZERO];
+ }
+ tbits += bits;
+ start += sce->ics.swb_sizes[g];
+ prev = sce->sf_idx[w*16+g];
+ }
+ }
+ }
+ if (tbits > destbits) {
+ for (i = 0; i < 128; i++)
+ if (sce->sf_idx[i] < 218 - qstep)
+ sce->sf_idx[i] += qstep;
+ } else {
+ for (i = 0; i < 128; i++)
+ if (sce->sf_idx[i] > 60 - qstep)
+ sce->sf_idx[i] -= qstep;
+ }
+ qstep >>= 1;
+ if (!qstep && tbits > destbits*1.02 && sce->sf_idx[0] < 217)
+ qstep = 1;
+ } while (qstep);
+
+ fflag = 0;
+ minscaler = av_clip(minscaler, 60, 255 - SCALE_MAX_DIFF);
+ for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
+ for (g = 0; g < sce->ics.num_swb; g++) {
+ int prevsc = sce->sf_idx[w*16+g];
+ if (dists[w*16+g] > uplims[w*16+g] && sce->sf_idx[w*16+g] > 60) {
+ if (find_min_book(maxvals[w*16+g], sce->sf_idx[w*16+g]-1))
+ sce->sf_idx[w*16+g]--;
+ else
+ sce->sf_idx[w*16+g] -= 2;
+ }
+ sce->sf_idx[w*16+g] = av_clip(sce->sf_idx[w*16+g], minscaler, minscaler + SCALE_MAX_DIFF);
+ sce->sf_idx[w*16+g] = FFMIN(sce->sf_idx[w*16+g], 219);
+ if (sce->sf_idx[w*16+g] != prevsc)
+ fflag = 1;
+ sce->band_type[w*16+g] = find_min_book(maxvals[w*16+g], sce->sf_idx[w*16+g]);
+ }
+ }
+ its++;
+ } while (fflag && its < 10);
+}
+
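+/* Mid/side stereo decision: per band, compare the cost of coding left/right
+ * against coding mid = (L + R) / 2 and side = (L - R) / 2, and set ms_mask
+ * wherever mid/side is cheaper. */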
+static void search_for_ms_mips(AACEncContext *s, ChannelElement *cpe,
+ const float lambda)
+{
+ int start = 0, i, w, w2, g;
+ float M[128], S[128];
+ float *L34 = s->scoefs, *R34 = s->scoefs + 128, *M34 = s->scoefs + 128*2, *S34 = s->scoefs + 128*3;
+ SingleChannelElement *sce0 = &cpe->ch[0];
+ SingleChannelElement *sce1 = &cpe->ch[1];
+ if (!cpe->common_window)
+ return;
+ for (w = 0; w < sce0->ics.num_windows; w += sce0->ics.group_len[w]) {
+ for (g = 0; g < sce0->ics.num_swb; g++) {
+ if (!cpe->ch[0].zeroes[w*16+g] && !cpe->ch[1].zeroes[w*16+g]) {
+ float dist1 = 0.0f, dist2 = 0.0f;
+ for (w2 = 0; w2 < sce0->ics.group_len[w]; w2++) {
+ FFPsyBand *band0 = &s->psy.ch[s->cur_channel+0].psy_bands[(w+w2)*16+g];
+ FFPsyBand *band1 = &s->psy.ch[s->cur_channel+1].psy_bands[(w+w2)*16+g];
+ float minthr = FFMIN(band0->threshold, band1->threshold);
+ float maxthr = FFMAX(band0->threshold, band1->threshold);
+ for (i = 0; i < sce0->ics.swb_sizes[g]; i+=4) {
+ M[i ] = (sce0->coeffs[start+w2*128+i ]
+ + sce1->coeffs[start+w2*128+i ]) * 0.5;
+ M[i+1] = (sce0->coeffs[start+w2*128+i+1]
+ + sce1->coeffs[start+w2*128+i+1]) * 0.5;
+ M[i+2] = (sce0->coeffs[start+w2*128+i+2]
+ + sce1->coeffs[start+w2*128+i+2]) * 0.5;
+ M[i+3] = (sce0->coeffs[start+w2*128+i+3]
+ + sce1->coeffs[start+w2*128+i+3]) * 0.5;
+
+ S[i ] = M[i ]
+ - sce1->coeffs[start+w2*128+i ];
+ S[i+1] = M[i+1]
+ - sce1->coeffs[start+w2*128+i+1];
+ S[i+2] = M[i+2]
+ - sce1->coeffs[start+w2*128+i+2];
+ S[i+3] = M[i+3]
+ - sce1->coeffs[start+w2*128+i+3];
+ }
+ abs_pow34_v(L34, sce0->coeffs+start+w2*128, sce0->ics.swb_sizes[g]);
+ abs_pow34_v(R34, sce1->coeffs+start+w2*128, sce0->ics.swb_sizes[g]);
+ abs_pow34_v(M34, M, sce0->ics.swb_sizes[g]);
+ abs_pow34_v(S34, S, sce0->ics.swb_sizes[g]);
+ dist1 += quantize_band_cost(s, sce0->coeffs + start + w2*128,
+ L34,
+ sce0->ics.swb_sizes[g],
+ sce0->sf_idx[(w+w2)*16+g],
+ sce0->band_type[(w+w2)*16+g],
+ lambda / band0->threshold, INFINITY, NULL);
+ dist1 += quantize_band_cost(s, sce1->coeffs + start + w2*128,
+ R34,
+ sce1->ics.swb_sizes[g],
+ sce1->sf_idx[(w+w2)*16+g],
+ sce1->band_type[(w+w2)*16+g],
+ lambda / band1->threshold, INFINITY, NULL);
+ dist2 += quantize_band_cost(s, M,
+ M34,
+ sce0->ics.swb_sizes[g],
+ sce0->sf_idx[(w+w2)*16+g],
+ sce0->band_type[(w+w2)*16+g],
+ lambda / maxthr, INFINITY, NULL);
+ dist2 += quantize_band_cost(s, S,
+ S34,
+ sce1->ics.swb_sizes[g],
+ sce1->sf_idx[(w+w2)*16+g],
+ sce1->band_type[(w+w2)*16+g],
+ lambda / minthr, INFINITY, NULL);
+ }
+ cpe->ms_mask[w*16+g] = dist2 < dist1;
+ }
+ start += sce0->ics.swb_sizes[g];
+ }
+ }
+}
+#endif /* HAVE_MIPSFPU */
+
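+/* Viterbi-style search over (scalefactor band, codebook) states:
+ * path[swb][cb] holds the cheapest way to reach band swb using codebook cb,
+ * either by extending the current section's run or by opening a new section
+ * (run_bits + 4 bits of overhead); the best path is traced back and the
+ * section codebooks and run lengths are written out. */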
+static void codebook_trellis_rate_mips(AACEncContext *s, SingleChannelElement *sce,
+ int win, int group_len, const float lambda)
+{
+ BandCodingPath path[120][12];
+ int w, swb, cb, start, size;
+ int i, j;
+ const int max_sfb = sce->ics.max_sfb;
+ const int run_bits = sce->ics.num_windows == 1 ? 5 : 3;
+ const int run_esc = (1 << run_bits) - 1;
+ int idx, ppos, count;
+ int stackrun[120], stackcb[120], stack_len;
+ float next_minbits = INFINITY;
+ int next_mincb = 0;
+
+ abs_pow34_v(s->scoefs, sce->coeffs, 1024);
+ start = win*128;
+ for (cb = 0; cb < 12; cb++) {
+ path[0][cb].cost = run_bits+4;
+ path[0][cb].prev_idx = -1;
+ path[0][cb].run = 0;
+ }
+ for (swb = 0; swb < max_sfb; swb++) {
+ size = sce->ics.swb_sizes[swb];
+ if (sce->zeroes[win*16 + swb]) {
+ float cost_stay_here = path[swb][0].cost;
+ float cost_get_here = next_minbits + run_bits + 4;
+ if ( run_value_bits[sce->ics.num_windows == 8][path[swb][0].run]
+ != run_value_bits[sce->ics.num_windows == 8][path[swb][0].run+1])
+ cost_stay_here += run_bits;
+ if (cost_get_here < cost_stay_here) {
+ path[swb+1][0].prev_idx = next_mincb;
+ path[swb+1][0].cost = cost_get_here;
+ path[swb+1][0].run = 1;
+ } else {
+ path[swb+1][0].prev_idx = 0;
+ path[swb+1][0].cost = cost_stay_here;
+ path[swb+1][0].run = path[swb][0].run + 1;
+ }
+ next_minbits = path[swb+1][0].cost;
+ next_mincb = 0;
+ for (cb = 1; cb < 12; cb++) {
+ path[swb+1][cb].cost = 61450;
+ path[swb+1][cb].prev_idx = -1;
+ path[swb+1][cb].run = 0;
+ }
+ } else {
+ float minbits = next_minbits;
+ int mincb = next_mincb;
+ int startcb = sce->band_type[win*16+swb];
+ next_minbits = INFINITY;
+ next_mincb = 0;
+ for (cb = 0; cb < startcb; cb++) {
+ path[swb+1][cb].cost = 61450;
+ path[swb+1][cb].prev_idx = -1;
+ path[swb+1][cb].run = 0;
+ }
+ for (cb = startcb; cb < 12; cb++) {
+ float cost_stay_here, cost_get_here;
+ float bits = 0.0f;
+ for (w = 0; w < group_len; w++) {
+ bits += quantize_band_cost_bits(s, sce->coeffs + start + w*128,
+ s->scoefs + start + w*128, size,
+ sce->sf_idx[(win+w)*16+swb], cb,
+ 0, INFINITY, NULL);
+ }
+ cost_stay_here = path[swb][cb].cost + bits;
+ cost_get_here = minbits + bits + run_bits + 4;
+ if ( run_value_bits[sce->ics.num_windows == 8][path[swb][cb].run]
+ != run_value_bits[sce->ics.num_windows == 8][path[swb][cb].run+1])
+ cost_stay_here += run_bits;
+ if (cost_get_here < cost_stay_here) {
+ path[swb+1][cb].prev_idx = mincb;
+ path[swb+1][cb].cost = cost_get_here;
+ path[swb+1][cb].run = 1;
+ } else {
+ path[swb+1][cb].prev_idx = cb;
+ path[swb+1][cb].cost = cost_stay_here;
+ path[swb+1][cb].run = path[swb][cb].run + 1;
+ }
+ if (path[swb+1][cb].cost < next_minbits) {
+ next_minbits = path[swb+1][cb].cost;
+ next_mincb = cb;
+ }
+ }
+ }
+ start += sce->ics.swb_sizes[swb];
+ }
+
+ stack_len = 0;
+ idx = 0;
+ for (cb = 1; cb < 12; cb++)
+ if (path[max_sfb][cb].cost < path[max_sfb][idx].cost)
+ idx = cb;
+ ppos = max_sfb;
+ while (ppos > 0) {
+ av_assert1(idx >= 0);
+ cb = idx;
+ stackrun[stack_len] = path[ppos][cb].run;
+ stackcb [stack_len] = cb;
+ idx = path[ppos-path[ppos][cb].run+1][cb].prev_idx;
+ ppos -= path[ppos][cb].run;
+ stack_len++;
+ }
+
+ start = 0;
+ for (i = stack_len - 1; i >= 0; i--) {
+ put_bits(&s->pb, 4, stackcb[i]);
+ count = stackrun[i];
+ memset(sce->zeroes + win*16 + start, !stackcb[i], count);
+ for (j = 0; j < count; j++) {
+ sce->band_type[win*16 + start] = stackcb[i];
+ start++;
+ }
+ while (count >= run_esc) {
+ put_bits(&s->pb, run_bits, run_esc);
+ count -= run_esc;
+ }
+ put_bits(&s->pb, run_bits, count);
+ }
+}
+#endif /* HAVE_INLINE_ASM */
+
+void ff_aac_coder_init_mips(AACEncContext *c)
+{
+#if HAVE_INLINE_ASM
+ AACCoefficientsEncoder *e = c->coder;
+ int option = c->options.aac_coder;
+
+ if (option == 2) {
+ e->quantize_and_encode_band = quantize_and_encode_band_mips;
+ e->encode_window_bands_info = codebook_trellis_rate_mips;
+#if HAVE_MIPSFPU
+ e->search_for_quantizers = search_for_quantizers_twoloop_mips;
+ e->search_for_ms = search_for_ms_mips;
+#endif /* HAVE_MIPSFPU */
+ }
+#endif /* HAVE_INLINE_ASM */
+}
diff --git a/libavcodec/mips/aacdec_mips.c b/libavcodec/mips/aacdec_mips.c
new file mode 100644
index 0000000000..e4033668da
--- /dev/null
+++ b/libavcodec/mips/aacdec_mips.c
@@ -0,0 +1,831 @@
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Authors: Darko Laus (darko@mips.com)
+ * Djordje Pesut (djordje@mips.com)
+ * Mirjana Vulin (mvulin@mips.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Reference: libavcodec/aacdec.c
+ */
+
+#include "libavcodec/aac.h"
+#include "aacdec_mips.h"
+#include "libavcodec/aactab.h"
+#include "libavcodec/sinewin.h"
+
+#if HAVE_INLINE_ASM
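+/* Linear congruential generator with the Numerical Recipes constants
+ * (1664525, 1013904223); mirrors lcg_random() in libavcodec/aacdec.c. */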
+static av_always_inline int lcg_random(unsigned previous_val)
+{
+ union { unsigned u; int s; } v = { previous_val * 1664525u + 1013904223 };
+ return v.s;
+}
+
+static void imdct_and_windowing_mips(AACContext *ac, SingleChannelElement *sce)
+{
+ IndividualChannelStream *ics = &sce->ics;
+ float *in = sce->coeffs;
+ float *out = sce->ret;
+ float *saved = sce->saved;
+ const float *swindow = ics->use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
+ const float *lwindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024;
+ const float *swindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;
+ float *buf = ac->buf_mdct;
+ int i;
+
+ if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
+ for (i = 0; i < 1024; i += 128)
+ ac->mdct_small.imdct_half(&ac->mdct_small, buf + i, in + i);
+ } else
+ ac->mdct.imdct_half(&ac->mdct, buf, in);
+
+ /* window overlapping
+ * NOTE: To simplify the overlapping code, all 'meaningless' short to long
+ * and long to short transitions are considered to be short to short
+ * transitions. This leaves just two cases (long to long and short to short)
+ * with a little special sauce for EIGHT_SHORT_SEQUENCE.
+ */
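+ /* For reference, a scalar sketch of the vector_fmul_window() semantics
+ * that the hand-unrolled MIPS loops below reproduce:
+ *
+ * for (i = 0, j = len - 1; i < len; i++, j--) {
+ * s0 = src0[i]; s1 = src1[j];
+ * dst[i] = s0 * win[len + j] - s1 * win[i];
+ * dst[len + j] = s0 * win[i] + s1 * win[len + j];
+ * }
+ */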
+ if ((ics->window_sequence[1] == ONLY_LONG_SEQUENCE || ics->window_sequence[1] == LONG_STOP_SEQUENCE) &&
+ (ics->window_sequence[0] == ONLY_LONG_SEQUENCE || ics->window_sequence[0] == LONG_START_SEQUENCE)) {
+ ac->fdsp.vector_fmul_window(out, saved, buf, lwindow_prev, 512);
+ } else {
+ {
+ float *buf1 = saved;
+ float *buf2 = out;
+ int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
+ int loop_end;
+
+ /* loop unrolled 8 times */
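+ /* block copy of 448 floats (1792 bytes) from 'saved' to 'out' using
+ * integer loads/stores; equivalent to a memcpy */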
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[loop_end], %[src], 1792 \n\t"
+ "1: \n\t"
+ "lw %[temp0], 0(%[src]) \n\t"
+ "lw %[temp1], 4(%[src]) \n\t"
+ "lw %[temp2], 8(%[src]) \n\t"
+ "lw %[temp3], 12(%[src]) \n\t"
+ "lw %[temp4], 16(%[src]) \n\t"
+ "lw %[temp5], 20(%[src]) \n\t"
+ "lw %[temp6], 24(%[src]) \n\t"
+ "lw %[temp7], 28(%[src]) \n\t"
+ "addiu %[src], %[src], 32 \n\t"
+ "sw %[temp0], 0(%[dst]) \n\t"
+ "sw %[temp1], 4(%[dst]) \n\t"
+ "sw %[temp2], 8(%[dst]) \n\t"
+ "sw %[temp3], 12(%[dst]) \n\t"
+ "sw %[temp4], 16(%[dst]) \n\t"
+ "sw %[temp5], 20(%[dst]) \n\t"
+ "sw %[temp6], 24(%[dst]) \n\t"
+ "sw %[temp7], 28(%[dst]) \n\t"
+ "bne %[src], %[loop_end], 1b \n\t"
+ " addiu %[dst], %[dst], 32 \n\t"
+ ".set pop \n\t"
+
+ : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+ [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+ [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+ [temp6]"=&r"(temp6), [temp7]"=&r"(temp7),
+ [loop_end]"=&r"(loop_end), [src]"+r"(buf1),
+ [dst]"+r"(buf2)
+ :
+ : "memory"
+ );
+ }
+
+ if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
+ {
+ float wi;
+ float wj;
+ int i;
+ float temp0, temp1, temp2, temp3;
+ float *dst0 = out + 448 + 0*128;
+ float *dst1 = dst0 + 64 + 63;
+ float *dst2 = saved + 63;
+ float *win0 = (float*)swindow;
+ float *win1 = win0 + 64 + 63;
+ float *win0_prev = (float*)swindow_prev;
+ float *win1_prev = win0_prev + 64 + 63;
+ float *src0_prev = saved + 448;
+ float *src1_prev = buf + 0*128 + 63;
+ float *src0 = buf + 0*128 + 64;
+ float *src1 = buf + 1*128 + 63;
+
+ for (i = 0; i < 64; i++) {
+ temp0 = src0_prev[0];
+ temp1 = src1_prev[0];
+ wi = *win0_prev;
+ wj = *win1_prev;
+ temp2 = src0[0];
+ temp3 = src1[0];
+ dst0[0] = temp0 * wj - temp1 * wi;
+ dst1[0] = temp0 * wi + temp1 * wj;
+
+ wi = *win0;
+ wj = *win1;
+
+ temp0 = src0[128];
+ temp1 = src1[128];
+ dst0[128] = temp2 * wj - temp3 * wi;
+ dst1[128] = temp2 * wi + temp3 * wj;
+
+ temp2 = src0[256];
+ temp3 = src1[256];
+ dst0[256] = temp0 * wj - temp1 * wi;
+ dst1[256] = temp0 * wi + temp1 * wj;
+ dst0[384] = temp2 * wj - temp3 * wi;
+ dst1[384] = temp2 * wi + temp3 * wj;
+
+ temp0 = src0[384];
+ temp1 = src1[384];
+ dst0[512] = temp0 * wj - temp1 * wi;
+ dst2[0] = temp0 * wi + temp1 * wj;
+
+ src0++;
+ src1--;
+ src0_prev++;
+ src1_prev--;
+ win0++;
+ win1--;
+ win0_prev++;
+ win1_prev--;
+ dst0++;
+ dst1--;
+ dst2--;
+ }
+ }
+ } else {
+ ac->fdsp.vector_fmul_window(out + 448, saved + 448, buf, swindow_prev, 64);
+ {
+ float *buf1 = buf + 64;
+ float *buf2 = out + 576;
+ int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
+ int loop_end;
+
+ /* loop unrolled 8 times */
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[loop_end], %[src], 1792 \n\t"
+ "1: \n\t"
+ "lw %[temp0], 0(%[src]) \n\t"
+ "lw %[temp1], 4(%[src]) \n\t"
+ "lw %[temp2], 8(%[src]) \n\t"
+ "lw %[temp3], 12(%[src]) \n\t"
+ "lw %[temp4], 16(%[src]) \n\t"
+ "lw %[temp5], 20(%[src]) \n\t"
+ "lw %[temp6], 24(%[src]) \n\t"
+ "lw %[temp7], 28(%[src]) \n\t"
+ "addiu %[src], %[src], 32 \n\t"
+ "sw %[temp0], 0(%[dst]) \n\t"
+ "sw %[temp1], 4(%[dst]) \n\t"
+ "sw %[temp2], 8(%[dst]) \n\t"
+ "sw %[temp3], 12(%[dst]) \n\t"
+ "sw %[temp4], 16(%[dst]) \n\t"
+ "sw %[temp5], 20(%[dst]) \n\t"
+ "sw %[temp6], 24(%[dst]) \n\t"
+ "sw %[temp7], 28(%[dst]) \n\t"
+ "bne %[src], %[loop_end], 1b \n\t"
+ " addiu %[dst], %[dst], 32 \n\t"
+ ".set pop \n\t"
+
+ : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+ [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+ [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+ [temp6]"=&r"(temp6), [temp7]"=&r"(temp7),
+ [loop_end]"=&r"(loop_end), [src]"+r"(buf1),
+ [dst]"+r"(buf2)
+ :
+ : "memory"
+ );
+ }
+ }
+ }
+
+ // buffer update
+ if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
+ ac->fdsp.vector_fmul_window(saved + 64, buf + 4*128 + 64, buf + 5*128, swindow, 64);
+ ac->fdsp.vector_fmul_window(saved + 192, buf + 5*128 + 64, buf + 6*128, swindow, 64);
+ ac->fdsp.vector_fmul_window(saved + 320, buf + 6*128 + 64, buf + 7*128, swindow, 64);
+ {
+ float *buf1 = buf + 7*128 + 64;
+ float *buf2 = saved + 448;
+ int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
+ int loop_end;
+
+ /* loop unrolled 8 times */
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[loop_end], %[src], 256 \n\t"
+ "1: \n\t"
+ "lw %[temp0], 0(%[src]) \n\t"
+ "lw %[temp1], 4(%[src]) \n\t"
+ "lw %[temp2], 8(%[src]) \n\t"
+ "lw %[temp3], 12(%[src]) \n\t"
+ "lw %[temp4], 16(%[src]) \n\t"
+ "lw %[temp5], 20(%[src]) \n\t"
+ "lw %[temp6], 24(%[src]) \n\t"
+ "lw %[temp7], 28(%[src]) \n\t"
+ "addiu %[src], %[src], 32 \n\t"
+ "sw %[temp0], 0(%[dst]) \n\t"
+ "sw %[temp1], 4(%[dst]) \n\t"
+ "sw %[temp2], 8(%[dst]) \n\t"
+ "sw %[temp3], 12(%[dst]) \n\t"
+ "sw %[temp4], 16(%[dst]) \n\t"
+ "sw %[temp5], 20(%[dst]) \n\t"
+ "sw %[temp6], 24(%[dst]) \n\t"
+ "sw %[temp7], 28(%[dst]) \n\t"
+ "bne %[src], %[loop_end], 1b \n\t"
+ " addiu %[dst], %[dst], 32 \n\t"
+ ".set pop \n\t"
+
+ : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+ [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+ [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+ [temp6]"=&r"(temp6), [temp7]"=&r"(temp7),
+ [loop_end]"=&r"(loop_end), [src]"+r"(buf1),
+ [dst]"+r"(buf2)
+ :
+ : "memory"
+ );
+ }
+ } else if (ics->window_sequence[0] == LONG_START_SEQUENCE) {
+ float *buf1 = buf + 512;
+ float *buf2 = saved;
+ int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
+ int loop_end;
+
+ /* loop unrolled 8 times */
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[loop_end], %[src], 1792 \n\t"
+ "1: \n\t"
+ "lw %[temp0], 0(%[src]) \n\t"
+ "lw %[temp1], 4(%[src]) \n\t"
+ "lw %[temp2], 8(%[src]) \n\t"
+ "lw %[temp3], 12(%[src]) \n\t"
+ "lw %[temp4], 16(%[src]) \n\t"
+ "lw %[temp5], 20(%[src]) \n\t"
+ "lw %[temp6], 24(%[src]) \n\t"
+ "lw %[temp7], 28(%[src]) \n\t"
+ "addiu %[src], %[src], 32 \n\t"
+ "sw %[temp0], 0(%[dst]) \n\t"
+ "sw %[temp1], 4(%[dst]) \n\t"
+ "sw %[temp2], 8(%[dst]) \n\t"
+ "sw %[temp3], 12(%[dst]) \n\t"
+ "sw %[temp4], 16(%[dst]) \n\t"
+ "sw %[temp5], 20(%[dst]) \n\t"
+ "sw %[temp6], 24(%[dst]) \n\t"
+ "sw %[temp7], 28(%[dst]) \n\t"
+ "bne %[src], %[loop_end], 1b \n\t"
+ " addiu %[dst], %[dst], 32 \n\t"
+ ".set pop \n\t"
+
+ : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+ [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+ [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+ [temp6]"=&r"(temp6), [temp7]"=&r"(temp7),
+ [loop_end]"=&r"(loop_end), [src]"+r"(buf1),
+ [dst]"+r"(buf2)
+ :
+ : "memory"
+ );
+ {
+ float *buf1 = buf + 7*128 + 64;
+ float *buf2 = saved + 448;
+ int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
+ int loop_end;
+
+ /* loop unrolled 8 times */
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[loop_end], %[src], 256 \n\t"
+ "1: \n\t"
+ "lw %[temp0], 0(%[src]) \n\t"
+ "lw %[temp1], 4(%[src]) \n\t"
+ "lw %[temp2], 8(%[src]) \n\t"
+ "lw %[temp3], 12(%[src]) \n\t"
+ "lw %[temp4], 16(%[src]) \n\t"
+ "lw %[temp5], 20(%[src]) \n\t"
+ "lw %[temp6], 24(%[src]) \n\t"
+ "lw %[temp7], 28(%[src]) \n\t"
+ "addiu %[src], %[src], 32 \n\t"
+ "sw %[temp0], 0(%[dst]) \n\t"
+ "sw %[temp1], 4(%[dst]) \n\t"
+ "sw %[temp2], 8(%[dst]) \n\t"
+ "sw %[temp3], 12(%[dst]) \n\t"
+ "sw %[temp4], 16(%[dst]) \n\t"
+ "sw %[temp5], 20(%[dst]) \n\t"
+ "sw %[temp6], 24(%[dst]) \n\t"
+ "sw %[temp7], 28(%[dst]) \n\t"
+ "bne %[src], %[loop_end], 1b \n\t"
+ " addiu %[dst], %[dst], 32 \n\t"
+ ".set pop \n\t"
+
+ : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+ [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+ [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+ [temp6]"=&r"(temp6), [temp7]"=&r"(temp7),
+ [loop_end]"=&r"(loop_end), [src]"+r"(buf1),
+ [dst]"+r"(buf2)
+ :
+ : "memory"
+ );
+ }
+ } else { // LONG_STOP or ONLY_LONG
+ float *buf1 = buf + 512;
+ float *buf2 = saved;
+ int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
+ int loop_end;
+
+ /* loop unrolled 8 times */
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[loop_end], %[src], 2048 \n\t"
+ "1: \n\t"
+ "lw %[temp0], 0(%[src]) \n\t"
+ "lw %[temp1], 4(%[src]) \n\t"
+ "lw %[temp2], 8(%[src]) \n\t"
+ "lw %[temp3], 12(%[src]) \n\t"
+ "lw %[temp4], 16(%[src]) \n\t"
+ "lw %[temp5], 20(%[src]) \n\t"
+ "lw %[temp6], 24(%[src]) \n\t"
+ "lw %[temp7], 28(%[src]) \n\t"
+ "addiu %[src], %[src], 32 \n\t"
+ "sw %[temp0], 0(%[dst]) \n\t"
+ "sw %[temp1], 4(%[dst]) \n\t"
+ "sw %[temp2], 8(%[dst]) \n\t"
+ "sw %[temp3], 12(%[dst]) \n\t"
+ "sw %[temp4], 16(%[dst]) \n\t"
+ "sw %[temp5], 20(%[dst]) \n\t"
+ "sw %[temp6], 24(%[dst]) \n\t"
+ "sw %[temp7], 28(%[dst]) \n\t"
+ "bne %[src], %[loop_end], 1b \n\t"
+ " addiu %[dst], %[dst], 32 \n\t"
+ ".set pop \n\t"
+
+ : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+ [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+ [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+ [temp6]"=&r"(temp6), [temp7]"=&r"(temp7),
+ [loop_end]"=&r"(loop_end), [src]"+r"(buf1),
+ [dst]"+r"(buf2)
+ :
+ : "memory"
+ );
+ }
+}
+
+static void apply_ltp_mips(AACContext *ac, SingleChannelElement *sce)
+{
+ const LongTermPrediction *ltp = &sce->ics.ltp;
+ const uint16_t *offsets = sce->ics.swb_offset;
+ int i, sfb;
+ int j, k;
+
+ if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) {
+ float *predTime = sce->ret;
+ float *predFreq = ac->buf_mdct;
+ float *p_predTime;
+ int16_t num_samples = 2048;
+
+ if (ltp->lag < 1024)
+ num_samples = ltp->lag + 1024;
+ j = (2048 - num_samples) >> 2;
+ k = (2048 - num_samples) & 3;
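+
+ /* zero the tail of predTime: j groups of four samples, then k
+ * leftover samples */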
+ p_predTime = &predTime[num_samples];
+
+ for (i = 0; i < num_samples; i++)
+ predTime[i] = sce->ltp_state[i + 2048 - ltp->lag] * ltp->coef;
+ for (i = 0; i < j; i++) {
+
+ /* loop unrolled 4 times */
+ __asm__ volatile (
+ "sw $0, 0(%[p_predTime]) \n\t"
+ "sw $0, 4(%[p_predTime]) \n\t"
+ "sw $0, 8(%[p_predTime]) \n\t"
+ "sw $0, 12(%[p_predTime]) \n\t"
+ "addiu %[p_predTime], %[p_predTime], 16 \n\t"
+
+ : [p_predTime]"+r"(p_predTime)
+ :
+ : "memory"
+ );
+ }
+ for (i = 0; i < k; i++) {
+
+ __asm__ volatile (
+ "sw $0, 0(%[p_predTime]) \n\t"
+ "addiu %[p_predTime], %[p_predTime], 4 \n\t"
+
+ : [p_predTime]"+r"(p_predTime)
+ :
+ : "memory"
+ );
+ }
+
+ ac->windowing_and_mdct_ltp(ac, predFreq, predTime, &sce->ics);
+
+ if (sce->tns.present)
+ ac->apply_tns(predFreq, &sce->tns, &sce->ics, 0);
+
+ for (sfb = 0; sfb < FFMIN(sce->ics.max_sfb, MAX_LTP_LONG_SFB); sfb++)
+ if (ltp->used[sfb])
+ for (i = offsets[sfb]; i < offsets[sfb + 1]; i++)
+ sce->coeffs[i] += predFreq[i];
+ }
+}
+
+#if HAVE_MIPSFPU
+static void update_ltp_mips(AACContext *ac, SingleChannelElement *sce)
+{
+ IndividualChannelStream *ics = &sce->ics;
+ float *saved = sce->saved;
+ float *saved_ltp = sce->coeffs;
+ const float *lwindow = ics->use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024;
+ const float *swindow = ics->use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
+ int i;
+ int loop_end, loop_end1, loop_end2;
+ float temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9, temp10, temp11;
+
+ if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
+ float *buf = saved;
+ float *buf0 = saved_ltp;
+ float *p_saved_ltp = saved_ltp + 576;
+ float *ptr1 = &saved_ltp[512];
+ float *ptr2 = &ac->buf_mdct[1023];
+ float *ptr3 = (float*)&swindow[63];
+ loop_end1 = (int)(p_saved_ltp + 448);
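+
+ /* Storing end addresses through int, here and below, assumes 32-bit
+ * pointers; this holds on the MIPS32 targets this file is written
+ * for but would need changes for a 64-bit build. */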
+
+ /* loop unrolled 8 times */
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[loop_end], %[src], 2048 \n\t"
+ "1: \n\t"
+ "lw %[temp0], 0(%[src]) \n\t"
+ "lw %[temp1], 4(%[src]) \n\t"
+ "lw %[temp2], 8(%[src]) \n\t"
+ "lw %[temp3], 12(%[src]) \n\t"
+ "lw %[temp4], 16(%[src]) \n\t"
+ "lw %[temp5], 20(%[src]) \n\t"
+ "lw %[temp6], 24(%[src]) \n\t"
+ "lw %[temp7], 28(%[src]) \n\t"
+ "addiu %[src], %[src], 32 \n\t"
+ "sw %[temp0], 0(%[dst]) \n\t"
+ "sw %[temp1], 4(%[dst]) \n\t"
+ "sw %[temp2], 8(%[dst]) \n\t"
+ "sw %[temp3], 12(%[dst]) \n\t"
+ "sw %[temp4], 16(%[dst]) \n\t"
+ "sw %[temp5], 20(%[dst]) \n\t"
+ "sw %[temp6], 24(%[dst]) \n\t"
+ "sw %[temp7], 28(%[dst]) \n\t"
+ "bne %[src], %[loop_end], 1b \n\t"
+ " addiu %[dst], %[dst], 32 \n\t"
+ ".set pop \n\t"
+
+ : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+ [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+ [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+ [temp6]"=&r"(temp6), [temp7]"=&r"(temp7),
+ [loop_end]"=&r"(loop_end), [src]"+r"(buf),
+ [dst]"+r"(buf0)
+ :
+ : "memory"
+ );
+
+ /* loop unrolled 8 times */
+ __asm__ volatile (
+ "1: \n\t"
+ "sw $0, 0(%[p_saved_ltp]) \n\t"
+ "sw $0, 4(%[p_saved_ltp]) \n\t"
+ "sw $0, 8(%[p_saved_ltp]) \n\t"
+ "sw $0, 12(%[p_saved_ltp]) \n\t"
+ "sw $0, 16(%[p_saved_ltp]) \n\t"
+ "sw $0, 20(%[p_saved_ltp]) \n\t"
+ "sw $0, 24(%[p_saved_ltp]) \n\t"
+ "sw $0, 28(%[p_saved_ltp]) \n\t"
+ "addiu %[p_saved_ltp], %[p_saved_ltp], 32 \n\t"
+ "bne %[p_saved_ltp], %[loop_end1], 1b \n\t"
+
+ : [p_saved_ltp]"+r"(p_saved_ltp)
+ : [loop_end1]"r"(loop_end1)
+ : "memory"
+ );
+
+ ac->fdsp.vector_fmul_reverse(saved_ltp + 448, ac->buf_mdct + 960, &swindow[64], 64);
+ for (i = 0; i < 16; i++) {
+ /* loop unrolled 4 times */
+ __asm__ volatile (
+ "lwc1 %[temp0], 0(%[ptr2]) \n\t"
+ "lwc1 %[temp1], -4(%[ptr2]) \n\t"
+ "lwc1 %[temp2], -8(%[ptr2]) \n\t"
+ "lwc1 %[temp3], -12(%[ptr2]) \n\t"
+ "lwc1 %[temp4], 0(%[ptr3]) \n\t"
+ "lwc1 %[temp5], -4(%[ptr3]) \n\t"
+ "lwc1 %[temp6], -8(%[ptr3]) \n\t"
+ "lwc1 %[temp7], -12(%[ptr3]) \n\t"
+ "mul.s %[temp8], %[temp0], %[temp4] \n\t"
+ "mul.s %[temp9], %[temp1], %[temp5] \n\t"
+ "mul.s %[temp10], %[temp2], %[temp6] \n\t"
+ "mul.s %[temp11], %[temp3], %[temp7] \n\t"
+ "swc1 %[temp8], 0(%[ptr1]) \n\t"
+ "swc1 %[temp9], 4(%[ptr1]) \n\t"
+ "swc1 %[temp10], 8(%[ptr1]) \n\t"
+ "swc1 %[temp11], 12(%[ptr1]) \n\t"
+ "addiu %[ptr1], %[ptr1], 16 \n\t"
+ "addiu %[ptr2], %[ptr2], -16 \n\t"
+ "addiu %[ptr3], %[ptr3], -16 \n\t"
+
+ : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1),
+ [temp2]"=&f"(temp2), [temp3]"=&f"(temp3),
+ [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
+ [temp6]"=&f"(temp6), [temp7]"=&f"(temp7),
+ [temp8]"=&f"(temp8), [temp9]"=&f"(temp9),
+ [temp10]"=&f"(temp10), [temp11]"=&f"(temp11),
+ [ptr1]"+r"(ptr1), [ptr2]"+r"(ptr2), [ptr3]"+r"(ptr3)
+ :
+ : "memory"
+ );
+ }
+ } else if (ics->window_sequence[0] == LONG_START_SEQUENCE) {
+ float *buff0 = saved;
+ float *buff1 = saved_ltp;
+ float *ptr1 = &saved_ltp[512];
+ float *ptr2 = &ac->buf_mdct[1023];
+ float *ptr3 = (float*)&swindow[63];
+ loop_end = (int)(saved + 448);
+
+ /* loop unrolled 8 times */
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "1: \n\t"
+ "lw %[temp0], 0(%[src]) \n\t"
+ "lw %[temp1], 4(%[src]) \n\t"
+ "lw %[temp2], 8(%[src]) \n\t"
+ "lw %[temp3], 12(%[src]) \n\t"
+ "lw %[temp4], 16(%[src]) \n\t"
+ "lw %[temp5], 20(%[src]) \n\t"
+ "lw %[temp6], 24(%[src]) \n\t"
+ "lw %[temp7], 28(%[src]) \n\t"
+ "addiu %[src], %[src], 32 \n\t"
+ "sw %[temp0], 0(%[dst]) \n\t"
+ "sw %[temp1], 4(%[dst]) \n\t"
+ "sw %[temp2], 8(%[dst]) \n\t"
+ "sw %[temp3], 12(%[dst]) \n\t"
+ "sw %[temp4], 16(%[dst]) \n\t"
+ "sw %[temp5], 20(%[dst]) \n\t"
+ "sw %[temp6], 24(%[dst]) \n\t"
+ "sw %[temp7], 28(%[dst]) \n\t"
+ "sw $0, 2304(%[dst]) \n\t"
+ "sw $0, 2308(%[dst]) \n\t"
+ "sw $0, 2312(%[dst]) \n\t"
+ "sw $0, 2316(%[dst]) \n\t"
+ "sw $0, 2320(%[dst]) \n\t"
+ "sw $0, 2324(%[dst]) \n\t"
+ "sw $0, 2328(%[dst]) \n\t"
+ "sw $0, 2332(%[dst]) \n\t"
+ "bne %[src], %[loop_end], 1b \n\t"
+ " addiu %[dst], %[dst], 32 \n\t"
+ ".set pop \n\t"
+
+ : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+ [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+ [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+ [temp6]"=&r"(temp6), [temp7]"=&r"(temp7),
+ [src]"+r"(buff0), [dst]"+r"(buff1)
+ : [loop_end]"r"(loop_end)
+ : "memory"
+ );
+ ac->fdsp.vector_fmul_reverse(saved_ltp + 448, ac->buf_mdct + 960, &swindow[64], 64);
+ for (i = 0; i < 16; i++) {
+ /* loop unrolled 4 times */
+ __asm__ volatile (
+ "lwc1 %[temp0], 0(%[ptr2]) \n\t"
+ "lwc1 %[temp1], -4(%[ptr2]) \n\t"
+ "lwc1 %[temp2], -8(%[ptr2]) \n\t"
+ "lwc1 %[temp3], -12(%[ptr2]) \n\t"
+ "lwc1 %[temp4], 0(%[ptr3]) \n\t"
+ "lwc1 %[temp5], -4(%[ptr3]) \n\t"
+ "lwc1 %[temp6], -8(%[ptr3]) \n\t"
+ "lwc1 %[temp7], -12(%[ptr3]) \n\t"
+ "mul.s %[temp8], %[temp0], %[temp4] \n\t"
+ "mul.s %[temp9], %[temp1], %[temp5] \n\t"
+ "mul.s %[temp10], %[temp2], %[temp6] \n\t"
+ "mul.s %[temp11], %[temp3], %[temp7] \n\t"
+ "swc1 %[temp8], 0(%[ptr1]) \n\t"
+ "swc1 %[temp9], 4(%[ptr1]) \n\t"
+ "swc1 %[temp10], 8(%[ptr1]) \n\t"
+ "swc1 %[temp11], 12(%[ptr1]) \n\t"
+ "addiu %[ptr1], %[ptr1], 16 \n\t"
+ "addiu %[ptr2], %[ptr2], -16 \n\t"
+ "addiu %[ptr3], %[ptr3], -16 \n\t"
+
+ : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1),
+ [temp2]"=&f"(temp2), [temp3]"=&f"(temp3),
+ [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
+ [temp6]"=&f"(temp6), [temp7]"=&f"(temp7),
+ [temp8]"=&f"(temp8), [temp9]"=&f"(temp9),
+ [temp10]"=&f"(temp10), [temp11]"=&f"(temp11),
+ [ptr1]"+r"(ptr1), [ptr2]"+r"(ptr2), [ptr3]"+r"(ptr3)
+ :
+ : "memory"
+ );
+ }
+ } else { // LONG_STOP or ONLY_LONG
+ float *ptr1, *ptr2, *ptr3;
+ ac->fdsp.vector_fmul_reverse(saved_ltp, ac->buf_mdct + 512, &lwindow[512], 512);
+
+ ptr1 = &saved_ltp[512];
+ ptr2 = &ac->buf_mdct[1023];
+ ptr3 = (float*)&lwindow[511];
+
+ for (i = 0; i < 512; i += 4) {
+ /* loop unrolled 4 times */
+ __asm__ volatile (
+ "lwc1 %[temp0], 0(%[ptr2]) \n\t"
+ "lwc1 %[temp1], -4(%[ptr2]) \n\t"
+ "lwc1 %[temp2], -8(%[ptr2]) \n\t"
+ "lwc1 %[temp3], -12(%[ptr2]) \n\t"
+ "lwc1 %[temp4], 0(%[ptr3]) \n\t"
+ "lwc1 %[temp5], -4(%[ptr3]) \n\t"
+ "lwc1 %[temp6], -8(%[ptr3]) \n\t"
+ "lwc1 %[temp7], -12(%[ptr3]) \n\t"
+ "mul.s %[temp8], %[temp0], %[temp4] \n\t"
+ "mul.s %[temp9], %[temp1], %[temp5] \n\t"
+ "mul.s %[temp10], %[temp2], %[temp6] \n\t"
+ "mul.s %[temp11], %[temp3], %[temp7] \n\t"
+ "swc1 %[temp8], 0(%[ptr1]) \n\t"
+ "swc1 %[temp9], 4(%[ptr1]) \n\t"
+ "swc1 %[temp10], 8(%[ptr1]) \n\t"
+ "swc1 %[temp11], 12(%[ptr1]) \n\t"
+ "addiu %[ptr1], %[ptr1], 16 \n\t"
+ "addiu %[ptr2], %[ptr2], -16 \n\t"
+ "addiu %[ptr3], %[ptr3], -16 \n\t"
+
+ : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1),
+ [temp2]"=&f"(temp2), [temp3]"=&f"(temp3),
+ [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
+ [temp6]"=&f"(temp6), [temp7]"=&f"(temp7),
+ [temp8]"=&f"(temp8), [temp9]"=&f"(temp9),
+ [temp10]"=&f"(temp10), [temp11]"=&f"(temp11),
+ [ptr1]"+r"(ptr1), [ptr2]"+r"(ptr2),
+ [ptr3]"+r"(ptr3)
+ :
+ : "memory"
+ );
+ }
+ }
+
+ {
+ float *buf1 = sce->ltp_state+1024;
+ float *buf2 = sce->ltp_state;
+ float *buf3 = sce->ret;
+ float *buf4 = sce->ltp_state+1024;
+ float *buf5 = saved_ltp;
+ float *buf6 = sce->ltp_state+2048;
+
+ /* loops unrolled 8 times */
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[loop_end], %[src], 4096 \n\t"
+ "addiu %[loop_end1], %[src1], 4096 \n\t"
+ "addiu %[loop_end2], %[src2], 4096 \n\t"
+ "1: \n\t"
+ "lw %[temp0], 0(%[src]) \n\t"
+ "lw %[temp1], 4(%[src]) \n\t"
+ "lw %[temp2], 8(%[src]) \n\t"
+ "lw %[temp3], 12(%[src]) \n\t"
+ "lw %[temp4], 16(%[src]) \n\t"
+ "lw %[temp5], 20(%[src]) \n\t"
+ "lw %[temp6], 24(%[src]) \n\t"
+ "lw %[temp7], 28(%[src]) \n\t"
+ "addiu %[src], %[src], 32 \n\t"
+ "sw %[temp0], 0(%[dst]) \n\t"
+ "sw %[temp1], 4(%[dst]) \n\t"
+ "sw %[temp2], 8(%[dst]) \n\t"
+ "sw %[temp3], 12(%[dst]) \n\t"
+ "sw %[temp4], 16(%[dst]) \n\t"
+ "sw %[temp5], 20(%[dst]) \n\t"
+ "sw %[temp6], 24(%[dst]) \n\t"
+ "sw %[temp7], 28(%[dst]) \n\t"
+ "bne %[src], %[loop_end], 1b \n\t"
+ " addiu %[dst], %[dst], 32 \n\t"
+ "2: \n\t"
+ "lw %[temp0], 0(%[src1]) \n\t"
+ "lw %[temp1], 4(%[src1]) \n\t"
+ "lw %[temp2], 8(%[src1]) \n\t"
+ "lw %[temp3], 12(%[src1]) \n\t"
+ "lw %[temp4], 16(%[src1]) \n\t"
+ "lw %[temp5], 20(%[src1]) \n\t"
+ "lw %[temp6], 24(%[src1]) \n\t"
+ "lw %[temp7], 28(%[src1]) \n\t"
+ "addiu %[src1], %[src1], 32 \n\t"
+ "sw %[temp0], 0(%[dst1]) \n\t"
+ "sw %[temp1], 4(%[dst1]) \n\t"
+ "sw %[temp2], 8(%[dst1]) \n\t"
+ "sw %[temp3], 12(%[dst1]) \n\t"
+ "sw %[temp4], 16(%[dst1]) \n\t"
+ "sw %[temp5], 20(%[dst1]) \n\t"
+ "sw %[temp6], 24(%[dst1]) \n\t"
+ "sw %[temp7], 28(%[dst1]) \n\t"
+ "bne %[src1], %[loop_end1], 2b \n\t"
+ " addiu %[dst1], %[dst1], 32 \n\t"
+ "3: \n\t"
+ "lw %[temp0], 0(%[src2]) \n\t"
+ "lw %[temp1], 4(%[src2]) \n\t"
+ "lw %[temp2], 8(%[src2]) \n\t"
+ "lw %[temp3], 12(%[src2]) \n\t"
+ "lw %[temp4], 16(%[src2]) \n\t"
+ "lw %[temp5], 20(%[src2]) \n\t"
+ "lw %[temp6], 24(%[src2]) \n\t"
+ "lw %[temp7], 28(%[src2]) \n\t"
+ "addiu %[src2], %[src2], 32 \n\t"
+ "sw %[temp0], 0(%[dst2]) \n\t"
+ "sw %[temp1], 4(%[dst2]) \n\t"
+ "sw %[temp2], 8(%[dst2]) \n\t"
+ "sw %[temp3], 12(%[dst2]) \n\t"
+ "sw %[temp4], 16(%[dst2]) \n\t"
+ "sw %[temp5], 20(%[dst2]) \n\t"
+ "sw %[temp6], 24(%[dst2]) \n\t"
+ "sw %[temp7], 28(%[dst2]) \n\t"
+ "bne %[src2], %[loop_end2], 3b \n\t"
+ " addiu %[dst2], %[dst2], 32 \n\t"
+ ".set pop \n\t"
+
+ : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+ [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+ [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+ [temp6]"=&r"(temp6), [temp7]"=&r"(temp7),
+ [loop_end]"=&r"(loop_end), [loop_end1]"=&r"(loop_end1),
+ [loop_end2]"=&r"(loop_end2), [src]"+r"(buf1),
+ [dst]"+r"(buf2), [src1]"+r"(buf3), [dst1]"+r"(buf4),
+ [src2]"+r"(buf5), [dst2]"+r"(buf6)
+ :
+ : "memory"
+ );
+ }
+}
+#endif /* HAVE_MIPSFPU */
+#endif /* HAVE_INLINE_ASM */
+
+void ff_aacdec_init_mips(AACContext *c)
+{
+#if HAVE_INLINE_ASM
+ c->imdct_and_windowing = imdct_and_windowing_mips;
+ c->apply_ltp = apply_ltp_mips;
+#if HAVE_MIPSFPU
+ c->update_ltp = update_ltp_mips;
+#endif /* HAVE_MIPSFPU */
+#endif /* HAVE_INLINE_ASM */
+}
diff --git a/libavcodec/mips/aacdec_mips.h b/libavcodec/mips/aacdec_mips.h
new file mode 100644
index 0000000000..9ba307962f
--- /dev/null
+++ b/libavcodec/mips/aacdec_mips.h
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Authors: Darko Laus (darko@mips.com)
+ * Djordje Pesut (djordje@mips.com)
+ * Mirjana Vulin (mvulin@mips.com)
+ *
+ * AAC decoding functions optimized for MIPS
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Reference: libavcodec/aacdec.c
+ */
+
+#ifndef AVCODEC_MIPS_AACDEC_FLOAT_H
+#define AVCODEC_MIPS_AACDEC_FLOAT_H
+
+#include "libavcodec/aac.h"
+
+#if HAVE_INLINE_ASM && HAVE_MIPSFPU
+static inline float *VMUL2_mips(float *dst, const float *v, unsigned idx,
+ const float *scale)
+{
+ float temp0, temp1, temp2;
+ int temp3, temp4;
+ float *ret;
+
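+ /* idx packs two 4-bit codebook indices: bits 0-3 select the first
+ * vector element and bits 4-7 the second (the andi/ext below); each is
+ * turned into a byte offset and the loaded value scaled by *scale. */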
+ __asm__ volatile(
+ "andi %[temp3], %[idx], 15 \n\t"
+ "ext %[temp4], %[idx], 4, 4 \n\t"
+ "sll %[temp3], %[temp3], 2 \n\t"
+ "sll %[temp4], %[temp4], 2 \n\t"
+ "lwc1 %[temp2], 0(%[scale]) \n\t"
+ "lwxc1 %[temp0], %[temp3](%[v]) \n\t"
+ "lwxc1 %[temp1], %[temp4](%[v]) \n\t"
+ "mul.s %[temp0], %[temp0], %[temp2] \n\t"
+ "mul.s %[temp1], %[temp1], %[temp2] \n\t"
+ "addiu %[ret], %[dst], 8 \n\t"
+ "swc1 %[temp0], 0(%[dst]) \n\t"
+ "swc1 %[temp1], 4(%[dst]) \n\t"
+
+ : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1),
+ [temp2]"=&f"(temp2), [temp3]"=&r"(temp3),
+ [temp4]"=&r"(temp4), [ret]"=&r"(ret)
+ : [idx]"r"(idx), [scale]"r"(scale), [v]"r"(v),
+ [dst]"r"(dst)
+ : "memory"
+ );
+ return ret;
+}
+
+static inline float *VMUL4_mips(float *dst, const float *v, unsigned idx,
+ const float *scale)
+{
+ int temp0, temp1, temp2, temp3;
+ float temp4, temp5, temp6, temp7, temp8;
+ float *ret;
+
+ __asm__ volatile(
+ "andi %[temp0], %[idx], 3 \n\t"
+ "ext %[temp1], %[idx], 2, 2 \n\t"
+ "ext %[temp2], %[idx], 4, 2 \n\t"
+ "ext %[temp3], %[idx], 6, 2 \n\t"
+ "sll %[temp0], %[temp0], 2 \n\t"
+ "sll %[temp1], %[temp1], 2 \n\t"
+ "sll %[temp2], %[temp2], 2 \n\t"
+ "sll %[temp3], %[temp3], 2 \n\t"
+ "lwc1 %[temp4], 0(%[scale]) \n\t"
+ "lwxc1 %[temp5], %[temp0](%[v]) \n\t"
+ "lwxc1 %[temp6], %[temp1](%[v]) \n\t"
+ "lwxc1 %[temp7], %[temp2](%[v]) \n\t"
+ "lwxc1 %[temp8], %[temp3](%[v]) \n\t"
+ "mul.s %[temp5], %[temp5], %[temp4] \n\t"
+ "mul.s %[temp6], %[temp6], %[temp4] \n\t"
+ "mul.s %[temp7], %[temp7], %[temp4] \n\t"
+ "mul.s %[temp8], %[temp8], %[temp4] \n\t"
+ "addiu %[ret], %[dst], 16 \n\t"
+ "swc1 %[temp5], 0(%[dst]) \n\t"
+ "swc1 %[temp6], 4(%[dst]) \n\t"
+ "swc1 %[temp7], 8(%[dst]) \n\t"
+ "swc1 %[temp8], 12(%[dst]) \n\t"
+
+ : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+ [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+ [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
+ [temp6]"=&f"(temp6), [temp7]"=&f"(temp7),
+ [temp8]"=&f"(temp8), [ret]"=&r"(ret)
+ : [idx]"r"(idx), [scale]"r"(scale), [v]"r"(v),
+ [dst]"r"(dst)
+ : "memory"
+ );
+ return ret;
+}
+
+static inline float *VMUL2S_mips(float *dst, const float *v, unsigned idx,
+ unsigned sign, const float *scale)
+{
+ int temp0, temp1, temp2, temp3, temp4, temp5;
+ float temp6, temp7, temp8, temp9;
+ float *ret;
+
+ __asm__ volatile(
+ "andi %[temp0], %[idx], 15 \n\t"
+ "ext %[temp1], %[idx], 4, 4 \n\t"
+ "lw %[temp4], 0(%[scale]) \n\t"
+ "srl %[temp2], %[sign], 1 \n\t"
+ "sll %[temp3], %[sign], 31 \n\t"
+ "sll %[temp2], %[temp2], 31 \n\t"
+ "sll %[temp0], %[temp0], 2 \n\t"
+ "sll %[temp1], %[temp1], 2 \n\t"
+ "lwxc1 %[temp8], %[temp0](%[v]) \n\t"
+ "lwxc1 %[temp9], %[temp1](%[v]) \n\t"
+ "xor %[temp5], %[temp4], %[temp2] \n\t"
+ "xor %[temp4], %[temp4], %[temp3] \n\t"
+ "mtc1 %[temp5], %[temp6] \n\t"
+ "mtc1 %[temp4], %[temp7] \n\t"
+ "mul.s %[temp8], %[temp8], %[temp6] \n\t"
+ "mul.s %[temp9], %[temp9], %[temp7] \n\t"
+ "addiu %[ret], %[dst], 8 \n\t"
+ "swc1 %[temp8], 0(%[dst]) \n\t"
+ "swc1 %[temp9], 4(%[dst]) \n\t"
+
+ : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+ [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+ [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+ [temp6]"=&f"(temp6), [temp7]"=&f"(temp7),
+ [temp8]"=&f"(temp8), [temp9]"=&f"(temp9),
+ [ret]"=&r"(ret)
+ : [idx]"r"(idx), [scale]"r"(scale), [v]"r"(v),
+ [dst]"r"(dst), [sign]"r"(sign)
+ : "memory"
+ );
+ return ret;
+}
+
+static inline float *VMUL4S_mips(float *dst, const float *v, unsigned idx,
+ unsigned sign, const float *scale)
+{
+ int temp0, temp1, temp2, temp3, temp4;
+ float temp10, temp11, temp12, temp13, temp14, temp15, temp16, temp17;
+ float *ret;
+ unsigned int mask = 1U << 31;
+
+ __asm__ volatile(
+ "lw %[temp0], 0(%[scale]) \n\t"
+ "and %[temp1], %[idx], 3 \n\t"
+ "ext %[temp2], %[idx], 2, 2 \n\t"
+ "ext %[temp3], %[idx], 4, 2 \n\t"
+ "ext %[temp4], %[idx], 6, 2 \n\t"
+ "sll %[temp1], %[temp1], 2 \n\t"
+ "sll %[temp2], %[temp2], 2 \n\t"
+ "sll %[temp3], %[temp3], 2 \n\t"
+ "sll %[temp4], %[temp4], 2 \n\t"
+ "lwxc1 %[temp10], %[temp1](%[v]) \n\t"
+ "lwxc1 %[temp11], %[temp2](%[v]) \n\t"
+ "lwxc1 %[temp12], %[temp3](%[v]) \n\t"
+ "lwxc1 %[temp13], %[temp4](%[v]) \n\t"
+ "and %[temp1], %[sign], %[mask] \n\t"
+ "ext %[temp2], %[idx], 12, 1 \n\t"
+ "ext %[temp3], %[idx], 13, 1 \n\t"
+ "ext %[temp4], %[idx], 14, 1 \n\t"
+ "sllv %[sign], %[sign], %[temp2] \n\t"
+ "xor %[temp1], %[temp0], %[temp1] \n\t"
+ "and %[temp2], %[sign], %[mask] \n\t"
+ "mtc1 %[temp1], %[temp14] \n\t"
+ "xor %[temp2], %[temp0], %[temp2] \n\t"
+ "sllv %[sign], %[sign], %[temp3] \n\t"
+ "mtc1 %[temp2], %[temp15] \n\t"
+ "and %[temp3], %[sign], %[mask] \n\t"
+ "sllv %[sign], %[sign], %[temp4] \n\t"
+ "xor %[temp3], %[temp0], %[temp3] \n\t"
+ "and %[temp4], %[sign], %[mask] \n\t"
+ "mtc1 %[temp3], %[temp16] \n\t"
+ "xor %[temp4], %[temp0], %[temp4] \n\t"
+ "mtc1 %[temp4], %[temp17] \n\t"
+ "mul.s %[temp10], %[temp10], %[temp14] \n\t"
+ "mul.s %[temp11], %[temp11], %[temp15] \n\t"
+ "mul.s %[temp12], %[temp12], %[temp16] \n\t"
+ "mul.s %[temp13], %[temp13], %[temp17] \n\t"
+ "addiu %[ret], %[dst], 16 \n\t"
+ "swc1 %[temp10], 0(%[dst]) \n\t"
+ "swc1 %[temp11], 4(%[dst]) \n\t"
+ "swc1 %[temp12], 8(%[dst]) \n\t"
+ "swc1 %[temp13], 12(%[dst]) \n\t"
+
+ : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+ [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+ [temp4]"=&r"(temp4), [temp10]"=&f"(temp10),
+ [temp11]"=&f"(temp11), [temp12]"=&f"(temp12),
+ [temp13]"=&f"(temp13), [temp14]"=&f"(temp14),
+ [temp15]"=&f"(temp15), [temp16]"=&f"(temp16),
+ [temp17]"=&f"(temp17), [ret]"=&r"(ret),
+ [sign]"+r"(sign)
+ : [idx]"r"(idx), [scale]"r"(scale), [v]"r"(v),
+ [dst]"r"(dst), [mask]"r"(mask)
+ : "memory"
+ );
+ return ret;
+}
+
+#define VMUL2 VMUL2_mips
+#define VMUL4 VMUL4_mips
+#define VMUL2S VMUL2S_mips
+#define VMUL4S VMUL4S_mips
+#endif /* HAVE_INLINE_ASM && HAVE_MIPSFPU */
+
+#endif /* AVCODEC_MIPS_AACDEC_FLOAT_H */
diff --git a/libavcodec/mips/aacpsdsp_mips.c b/libavcodec/mips/aacpsdsp_mips.c
new file mode 100644
index 0000000000..4730a7f714
--- /dev/null
+++ b/libavcodec/mips/aacpsdsp_mips.c
@@ -0,0 +1,459 @@
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Authors: Darko Laus (darko@mips.com)
+ * Djordje Pesut (djordje@mips.com)
+ * Mirjana Vulin (mvulin@mips.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Reference: libavcodec/aacpsdsp.c
+ */
+
+#include "config.h"
+#include "libavcodec/aacpsdsp.h"
+
+#if HAVE_INLINE_ASM
+static void ps_hybrid_analysis_ileave_mips(float (*out)[32][2], float L[2][38][64],
+ int i, int len)
+{
+ int temp0, temp1, temp2, temp3;
+ int temp4, temp5, temp6, temp7;
+ float *out1=&out[i][0][0];
+ float *L1=&L[0][0][i];
+ float *j=out1+ len*2;
+
+ for (; i < 64; i++) {
+
+ /* loop unrolled 8 times */
+ __asm__ volatile (
+ "1: \n\t"
+ "lw %[temp0], 0(%[L1]) \n\t"
+ "lw %[temp1], 9728(%[L1]) \n\t"
+ "lw %[temp2], 256(%[L1]) \n\t"
+ "lw %[temp3], 9984(%[L1]) \n\t"
+ "lw %[temp4], 512(%[L1]) \n\t"
+ "lw %[temp5], 10240(%[L1]) \n\t"
+ "lw %[temp6], 768(%[L1]) \n\t"
+ "lw %[temp7], 10496(%[L1]) \n\t"
+ "sw %[temp0], 0(%[out1]) \n\t"
+ "sw %[temp1], 4(%[out1]) \n\t"
+ "sw %[temp2], 8(%[out1]) \n\t"
+ "sw %[temp3], 12(%[out1]) \n\t"
+ "sw %[temp4], 16(%[out1]) \n\t"
+ "sw %[temp5], 20(%[out1]) \n\t"
+ "sw %[temp6], 24(%[out1]) \n\t"
+ "sw %[temp7], 28(%[out1]) \n\t"
+ "addiu %[out1], %[out1], 32 \n\t"
+ "addiu %[L1], %[L1], 1024 \n\t"
+ "bne %[out1], %[j], 1b \n\t"
+
+ : [out1]"+r"(out1), [L1]"+r"(L1), [j]"+r"(j),
+ [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+ [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+ [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+ [temp6]"=&r"(temp6), [temp7]"=&r"(temp7)
+ : [len]"r"(len)
+ : "memory"
+ );
+ out1 -= (len << 1) - 64;
+ L1 -= (len << 6) - 1;
+ j += len * 2;
+ }
+}
+
+static void ps_hybrid_synthesis_deint_mips(float out[2][38][64],
+ float (*in)[32][2],
+ int i, int len)
+{
+ int n;
+ int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
+ float *out1 = (float*)out + i;
+ float *out2 = (float*)out + 2432 + i;
+ float *in1 = (float*)in + 64 * i;
+ float *in2 = (float*)in + 64 * i + 1;
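+
+ /* inverse of the interleave above: out is float[2][38][64], so the
+ * real and imaginary planes start 2432 floats (38*64) apart */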
+
+ for (; i < 64; i++) {
+ for (n = 0; n < 7; n++) {
+
+ /* loop unrolled 8 times */
+ __asm__ volatile (
+ "lw %[temp0], 0(%[in1]) \n\t"
+ "lw %[temp1], 0(%[in2]) \n\t"
+ "lw %[temp2], 8(%[in1]) \n\t"
+ "lw %[temp3], 8(%[in2]) \n\t"
+ "lw %[temp4], 16(%[in1]) \n\t"
+ "lw %[temp5], 16(%[in2]) \n\t"
+ "lw %[temp6], 24(%[in1]) \n\t"
+ "lw %[temp7], 24(%[in2]) \n\t"
+ "addiu %[out1], %[out1], 1024 \n\t"
+ "addiu %[out2], %[out2], 1024 \n\t"
+ "addiu %[in1], %[in1], 32 \n\t"
+ "addiu %[in2], %[in2], 32 \n\t"
+ "sw %[temp0], -1024(%[out1]) \n\t"
+ "sw %[temp1], -1024(%[out2]) \n\t"
+ "sw %[temp2], -768(%[out1]) \n\t"
+ "sw %[temp3], -768(%[out2]) \n\t"
+ "sw %[temp4], -512(%[out1]) \n\t"
+ "sw %[temp5], -512(%[out2]) \n\t"
+ "sw %[temp6], -256(%[out1]) \n\t"
+ "sw %[temp7], -256(%[out2]) \n\t"
+
+ : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+ [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+ [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+ [temp6]"=&r"(temp6), [temp7]"=&r"(temp7),
+ [out1]"+r"(out1), [out2]"+r"(out2),
+ [in1]"+r"(in1), [in2]"+r"(in2)
+ :
+ : "memory"
+ );
+ }
+ /* loop unrolled 8 times */
+ __asm__ volatile (
+ "lw %[temp0], 0(%[in1]) \n\t"
+ "lw %[temp1], 0(%[in2]) \n\t"
+ "lw %[temp2], 8(%[in1]) \n\t"
+ "lw %[temp3], 8(%[in2]) \n\t"
+ "lw %[temp4], 16(%[in1]) \n\t"
+ "lw %[temp5], 16(%[in2]) \n\t"
+ "lw %[temp6], 24(%[in1]) \n\t"
+ "lw %[temp7], 24(%[in2]) \n\t"
+ "addiu %[out1], %[out1], -7164 \n\t"
+ "addiu %[out2], %[out2], -7164 \n\t"
+ "addiu %[in1], %[in1], 32 \n\t"
+ "addiu %[in2], %[in2], 32 \n\t"
+ "sw %[temp0], 7164(%[out1]) \n\t"
+ "sw %[temp1], 7164(%[out2]) \n\t"
+ "sw %[temp2], 7420(%[out1]) \n\t"
+ "sw %[temp3], 7420(%[out2]) \n\t"
+ "sw %[temp4], 7676(%[out1]) \n\t"
+ "sw %[temp5], 7676(%[out2]) \n\t"
+ "sw %[temp6], 7932(%[out1]) \n\t"
+ "sw %[temp7], 7932(%[out2]) \n\t"
+
+ : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+ [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+ [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+ [temp6]"=&r"(temp6), [temp7]"=&r"(temp7),
+ [out1]"+r"(out1), [out2]"+r"(out2),
+ [in1]"+r"(in1), [in2]"+r"(in2)
+ :
+ : "memory"
+ );
+ }
+}
+
+#if HAVE_MIPSFPU
+static void ps_add_squares_mips(float *dst, const float (*src)[2], int n)
+{
+ int i;
+ float temp0, temp1, temp2, temp3, temp4, temp5;
+ float temp6, temp7, temp8, temp9, temp10, temp11;
+ float *src0 = (float*)&src[0][0];
+ float *dst0 = &dst[0];
+
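+ /* accumulates dst[i] += re*re + im*im per complex sample; the fixed
+ * 8-iteration loop unrolled 4x effectively assumes n == 32 */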
+ for (i = 0; i < 8; i++) {
+ /* loop unrolled 4 times */
+ __asm__ volatile (
+ "lwc1 %[temp0], 0(%[src0]) \n\t"
+ "lwc1 %[temp1], 4(%[src0]) \n\t"
+ "lwc1 %[temp2], 8(%[src0]) \n\t"
+ "lwc1 %[temp3], 12(%[src0]) \n\t"
+ "lwc1 %[temp4], 16(%[src0]) \n\t"
+ "lwc1 %[temp5], 20(%[src0]) \n\t"
+ "lwc1 %[temp6], 24(%[src0]) \n\t"
+ "lwc1 %[temp7], 28(%[src0]) \n\t"
+ "lwc1 %[temp8], 0(%[dst0]) \n\t"
+ "lwc1 %[temp9], 4(%[dst0]) \n\t"
+ "lwc1 %[temp10], 8(%[dst0]) \n\t"
+ "lwc1 %[temp11], 12(%[dst0]) \n\t"
+ "mul.s %[temp1], %[temp1], %[temp1] \n\t"
+ "mul.s %[temp3], %[temp3], %[temp3] \n\t"
+ "mul.s %[temp5], %[temp5], %[temp5] \n\t"
+ "mul.s %[temp7], %[temp7], %[temp7] \n\t"
+ "madd.s %[temp0], %[temp1], %[temp0], %[temp0] \n\t"
+ "madd.s %[temp2], %[temp3], %[temp2], %[temp2] \n\t"
+ "madd.s %[temp4], %[temp5], %[temp4], %[temp4] \n\t"
+ "madd.s %[temp6], %[temp7], %[temp6], %[temp6] \n\t"
+ "add.s %[temp0], %[temp8], %[temp0] \n\t"
+ "add.s %[temp2], %[temp9], %[temp2] \n\t"
+ "add.s %[temp4], %[temp10], %[temp4] \n\t"
+ "add.s %[temp6], %[temp11], %[temp6] \n\t"
+ "swc1 %[temp0], 0(%[dst0]) \n\t"
+ "swc1 %[temp2], 4(%[dst0]) \n\t"
+ "swc1 %[temp4], 8(%[dst0]) \n\t"
+ "swc1 %[temp6], 12(%[dst0]) \n\t"
+ "addiu %[dst0], %[dst0], 16 \n\t"
+ "addiu %[src0], %[src0], 32 \n\t"
+
+ : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
+ [temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
+ [temp6]"=&f"(temp6), [temp7]"=&f"(temp7), [temp8]"=&f"(temp8),
+ [temp9]"=&f"(temp9), [dst0]"+r"(dst0), [src0]"+r"(src0),
+ [temp10]"=&f"(temp10), [temp11]"=&f"(temp11)
+ :
+ : "memory"
+ );
+ }
+}
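+
+/* Scalar equivalent of the unrolled FPU loop above (cf. ps_add_squares_c
+ * in libavcodec/aacpsdsp.c); n is effectively fixed at 32 here, eight
+ * iterations of four samples each:
+ *
+ * for (i = 0; i < n; i++)
+ * dst[i] += src[i][0] * src[i][0] + src[i][1] * src[i][1];
+ */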
+
+static void ps_mul_pair_single_mips(float (*dst)[2], float (*src0)[2], float *src1,
+ int n)
+{
+ float temp0, temp1, temp2;
+ float *p_d, *p_s0, *p_s1, *end;
+ p_d = &dst[0][0];
+ p_s0 = &src0[0][0];
+ p_s1 = &src1[0];
+ end = p_s1 + n;
+
+ __asm__ volatile(
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "1: \n\t"
+ "lwc1 %[temp2], 0(%[p_s1]) \n\t"
+ "lwc1 %[temp0], 0(%[p_s0]) \n\t"
+ "lwc1 %[temp1], 4(%[p_s0]) \n\t"
+ "addiu %[p_d], %[p_d], 8 \n\t"
+ "mul.s %[temp0], %[temp0], %[temp2] \n\t"
+ "mul.s %[temp1], %[temp1], %[temp2] \n\t"
+ "addiu %[p_s0], %[p_s0], 8 \n\t"
+ "swc1 %[temp0], -8(%[p_d]) \n\t"
+ "swc1 %[temp1], -4(%[p_d]) \n\t"
+ "bne %[p_s1], %[end], 1b \n\t"
+ " addiu %[p_s1], %[p_s1], 4 \n\t"
+ ".set pop \n\t"
+
+ : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1),
+ [temp2]"=&f"(temp2), [p_d]"+r"(p_d),
+ [p_s0]"+r"(p_s0), [p_s1]"+r"(p_s1)
+ : [end]"r"(end)
+ : "memory"
+ );
+}
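+
+/* Scalar equivalent of the loop above: each complex sample of src0 is
+ * scaled by the matching real gain in src1 (reference sketch only):
+ *
+ * for (i = 0; i < n; i++) {
+ * dst[i][0] = src0[i][0] * src1[i];
+ * dst[i][1] = src0[i][1] * src1[i];
+ * }
+ */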
+
+static void ps_decorrelate_mips(float (*out)[2], float (*delay)[2],
+ float (*ap_delay)[PS_QMF_TIME_SLOTS + PS_MAX_AP_DELAY][2],
+ const float phi_fract[2], float (*Q_fract)[2],
+ const float *transient_gain,
+ float g_decay_slope,
+ int len)
+{
+ float *p_delay = &delay[0][0];
+ float *p_out = &out[0][0];
+ float *p_ap_delay = &ap_delay[0][0][0];
+ float *p_t_gain = (float*)transient_gain;
+ float *p_Q_fract = &Q_fract[0][0];
+ float ag0, ag1, ag2;
+ float phi_fract0 = phi_fract[0];
+ float phi_fract1 = phi_fract[1];
+ float temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9;
+
+ len = (int)((int*)p_delay + (len << 1));
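+ /* len is reused as the end address of the delay buffer; this assumes
+ pointers fit in an int, which holds on the 32-bit MIPS targets this
+ file is built for */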
+
+ /* merged 2 loops */
+ __asm__ volatile(
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "li.s %[ag0], 0.65143905753106 \n\t"
+ "li.s %[ag1], 0.56471812200776 \n\t"
+ "li.s %[ag2], 0.48954165955695 \n\t"
+ "mul.s %[ag0], %[ag0], %[g_decay_slope] \n\t"
+ "mul.s %[ag1], %[ag1], %[g_decay_slope] \n\t"
+ "mul.s %[ag2], %[ag2], %[g_decay_slope] \n\t"
+ "1: \n\t"
+ "lwc1 %[temp0], 0(%[p_delay]) \n\t"
+ "lwc1 %[temp1], 4(%[p_delay]) \n\t"
+ "lwc1 %[temp4], 16(%[p_ap_delay]) \n\t"
+ "lwc1 %[temp5], 20(%[p_ap_delay]) \n\t"
+ "mul.s %[temp3], %[temp0], %[phi_fract1] \n\t"
+ "lwc1 %[temp6], 0(%[p_Q_fract]) \n\t"
+ "mul.s %[temp2], %[temp1], %[phi_fract1] \n\t"
+ "lwc1 %[temp7], 4(%[p_Q_fract]) \n\t"
+ "madd.s %[temp3], %[temp3], %[temp1], %[phi_fract0] \n\t"
+ "msub.s %[temp2], %[temp2], %[temp0], %[phi_fract0] \n\t"
+ "mul.s %[temp8], %[temp5], %[temp7] \n\t"
+ "mul.s %[temp9], %[temp4], %[temp7] \n\t"
+ "lwc1 %[temp7], 12(%[p_Q_fract]) \n\t"
+ "mul.s %[temp0], %[ag0], %[temp2] \n\t"
+ "mul.s %[temp1], %[ag0], %[temp3] \n\t"
+ "msub.s %[temp8], %[temp8], %[temp4], %[temp6] \n\t"
+ "lwc1 %[temp4], 304(%[p_ap_delay]) \n\t"
+ "madd.s %[temp9], %[temp9], %[temp5], %[temp6] \n\t"
+ "lwc1 %[temp5], 308(%[p_ap_delay]) \n\t"
+ "sub.s %[temp0], %[temp8], %[temp0] \n\t"
+ "sub.s %[temp1], %[temp9], %[temp1] \n\t"
+ "madd.s %[temp2], %[temp2], %[ag0], %[temp0] \n\t"
+ "lwc1 %[temp6], 8(%[p_Q_fract]) \n\t"
+ "madd.s %[temp3], %[temp3], %[ag0], %[temp1] \n\t"
+ "mul.s %[temp8], %[temp5], %[temp7] \n\t"
+ "mul.s %[temp9], %[temp4], %[temp7] \n\t"
+ "lwc1 %[temp7], 20(%[p_Q_fract]) \n\t"
+ "msub.s %[temp8], %[temp8], %[temp4], %[temp6] \n\t"
+ "swc1 %[temp2], 40(%[p_ap_delay]) \n\t"
+ "mul.s %[temp2], %[ag1], %[temp0] \n\t"
+ "swc1 %[temp3], 44(%[p_ap_delay]) \n\t"
+ "mul.s %[temp3], %[ag1], %[temp1] \n\t"
+ "lwc1 %[temp4], 592(%[p_ap_delay]) \n\t"
+ "madd.s %[temp9], %[temp9], %[temp5], %[temp6] \n\t"
+ "lwc1 %[temp5], 596(%[p_ap_delay]) \n\t"
+ "sub.s %[temp2], %[temp8], %[temp2] \n\t"
+ "sub.s %[temp3], %[temp9], %[temp3] \n\t"
+ "lwc1 %[temp6], 16(%[p_Q_fract]) \n\t"
+ "madd.s %[temp0], %[temp0], %[ag1], %[temp2] \n\t"
+ "madd.s %[temp1], %[temp1], %[ag1], %[temp3] \n\t"
+ "mul.s %[temp8], %[temp5], %[temp7] \n\t"
+ "mul.s %[temp9], %[temp4], %[temp7] \n\t"
+ "msub.s %[temp8], %[temp8], %[temp4], %[temp6] \n\t"
+ "madd.s %[temp9], %[temp9], %[temp5], %[temp6] \n\t"
+ "swc1 %[temp0], 336(%[p_ap_delay]) \n\t"
+ "mul.s %[temp0], %[ag2], %[temp2] \n\t"
+ "swc1 %[temp1], 340(%[p_ap_delay]) \n\t"
+ "mul.s %[temp1], %[ag2], %[temp3] \n\t"
+ "lwc1 %[temp4], 0(%[p_t_gain]) \n\t"
+ "sub.s %[temp0], %[temp8], %[temp0] \n\t"
+ "addiu %[p_ap_delay], %[p_ap_delay], 8 \n\t"
+ "sub.s %[temp1], %[temp9], %[temp1] \n\t"
+ "addiu %[p_t_gain], %[p_t_gain], 4 \n\t"
+ "madd.s %[temp2], %[temp2], %[ag2], %[temp0] \n\t"
+ "addiu %[p_delay], %[p_delay], 8 \n\t"
+ "madd.s %[temp3], %[temp3], %[ag2], %[temp1] \n\t"
+ "addiu %[p_out], %[p_out], 8 \n\t"
+ "mul.s %[temp5], %[temp4], %[temp0] \n\t"
+ "mul.s %[temp6], %[temp4], %[temp1] \n\t"
+ "swc1 %[temp2], 624(%[p_ap_delay]) \n\t"
+ "swc1 %[temp3], 628(%[p_ap_delay]) \n\t"
+ "swc1 %[temp5], -8(%[p_out]) \n\t"
+ "swc1 %[temp6], -4(%[p_out]) \n\t"
+ "bne %[p_delay], %[len], 1b \n\t"
+ " swc1 %[temp6], -4(%[p_out]) \n\t"
+ ".set pop \n\t"
+
+ : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
+ [temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
+ [temp6]"=&f"(temp6), [temp7]"=&f"(temp7), [temp8]"=&f"(temp8),
+ [temp9]"=&f"(temp9), [p_delay]"+r"(p_delay), [p_ap_delay]"+r"(p_ap_delay),
+ [p_Q_fract]"+r"(p_Q_fract), [p_t_gain]"+r"(p_t_gain), [p_out]"+r"(p_out),
+ [ag0]"=&f"(ag0), [ag1]"=&f"(ag1), [ag2]"=&f"(ag2)
+ : [phi_fract0]"f"(phi_fract0), [phi_fract1]"f"(phi_fract1),
+ [len]"r"(len), [g_decay_slope]"f"(g_decay_slope)
+ : "memory"
+ );
+}
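+
+/* What the merged loop computes per complex sample, informally (cf. the
+ * generic ps_decorrelate in libavcodec/aacpsdsp.c; d_re/d_im denote the
+ * per-link delay taps and ag[m] the decay-scaled link coefficients
+ * loaded via li.s above):
+ *
+ * re = delay[n][0] * phi_fract[0] - delay[n][1] * phi_fract[1];
+ * im = delay[n][0] * phi_fract[1] + delay[n][1] * phi_fract[0];
+ * for (m = 0; m < 3; m++) {
+ * new_re = d_re * Q_fract[m][0] - d_im * Q_fract[m][1] - ag[m] * re;
+ * new_im = d_re * Q_fract[m][1] + d_im * Q_fract[m][0] - ag[m] * im;
+ * ap_delay[m][n + 5][0] = re + ag[m] * new_re;
+ * ap_delay[m][n + 5][1] = im + ag[m] * new_im;
+ * re = new_re;
+ * im = new_im;
+ * }
+ * out[n][0] = transient_gain[n] * re;
+ * out[n][1] = transient_gain[n] * im;
+ */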
+
+static void ps_stereo_interpolate_mips(float (*l)[2], float (*r)[2],
+ float h[2][4], float h_step[2][4],
+ int len)
+{
+ float h0 = h[0][0];
+ float h1 = h[0][1];
+ float h2 = h[0][2];
+ float h3 = h[0][3];
+ float hs0 = h_step[0][0];
+ float hs1 = h_step[0][1];
+ float hs2 = h_step[0][2];
+ float hs3 = h_step[0][3];
+ float temp0, temp1, temp2, temp3;
+ float l_re, l_im, r_re, r_im;
+
+ len = (int)((int*)l + (len << 1));
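+ /* as above, len is reused as the end address of the l[] buffer
+ (32-bit MIPS assumption) */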
+
+ __asm__ volatile(
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "1: \n\t"
+ "add.s %[h0], %[h0], %[hs0] \n\t"
+ "lwc1 %[l_re], 0(%[l]) \n\t"
+ "add.s %[h1], %[h1], %[hs1] \n\t"
+ "lwc1 %[r_re], 0(%[r]) \n\t"
+ "add.s %[h2], %[h2], %[hs2] \n\t"
+ "lwc1 %[l_im], 4(%[l]) \n\t"
+ "add.s %[h3], %[h3], %[hs3] \n\t"
+ "lwc1 %[r_im], 4(%[r]) \n\t"
+ "mul.s %[temp0], %[h0], %[l_re] \n\t"
+ "addiu %[l], %[l], 8 \n\t"
+ "mul.s %[temp2], %[h1], %[l_re] \n\t"
+ "addiu %[r], %[r], 8 \n\t"
+ "madd.s %[temp0], %[temp0], %[h2], %[r_re] \n\t"
+ "madd.s %[temp2], %[temp2], %[h3], %[r_re] \n\t"
+ "mul.s %[temp1], %[h0], %[l_im] \n\t"
+ "mul.s %[temp3], %[h1], %[l_im] \n\t"
+ "madd.s %[temp1], %[temp1], %[h2], %[r_im] \n\t"
+ "madd.s %[temp3], %[temp3], %[h3], %[r_im] \n\t"
+ "swc1 %[temp0], -8(%[l]) \n\t"
+ "swc1 %[temp2], -8(%[r]) \n\t"
+ "swc1 %[temp1], -4(%[l]) \n\t"
+ "bne %[l], %[len], 1b \n\t"
+ " swc1 %[temp3], -4(%[r]) \n\t"
+ ".set pop \n\t"
+
+ : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1),
+ [temp2]"=&f"(temp2), [temp3]"=&f"(temp3),
+ [h0]"+f"(h0), [h1]"+f"(h1), [h2]"+f"(h2),
+ [h3]"+f"(h3), [l]"+r"(l), [r]"+r"(r),
+ [l_re]"=&f"(l_re), [l_im]"=&f"(l_im),
+ [r_re]"=&f"(r_re), [r_im]"=&f"(r_im)
+ : [hs0]"f"(hs0), [hs1]"f"(hs1), [hs2]"f"(hs2),
+ [hs3]"f"(hs3), [len]"r"(len)
+ : "memory"
+ );
+}
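+
+/* Per-sample C sketch of the loop above (cf. ps_stereo_interpolate in
+ * libavcodec/aacpsdsp.c): the 2x2 mixing matrix h is stepped by h_step
+ * once per sample and applied to the L/R pair in place:
+ *
+ * h0 += hs0; h1 += hs1; h2 += hs2; h3 += hs3;
+ * l_re = l[n][0]; l_im = l[n][1];
+ * r_re = r[n][0]; r_im = r[n][1];
+ * l[n][0] = h0 * l_re + h2 * r_re;
+ * l[n][1] = h0 * l_im + h2 * r_im;
+ * r[n][0] = h1 * l_re + h3 * r_re;
+ * r[n][1] = h1 * l_im + h3 * r_im;
+ */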
+#endif /* HAVE_MIPSFPU */
+#endif /* HAVE_INLINE_ASM */
+
+void ff_psdsp_init_mips(PSDSPContext *s)
+{
+#if HAVE_INLINE_ASM
+ s->hybrid_analysis_ileave = ps_hybrid_analysis_ileave_mips;
+ s->hybrid_synthesis_deint = ps_hybrid_synthesis_deint_mips;
+#if HAVE_MIPSFPU
+ s->add_squares = ps_add_squares_mips;
+ s->mul_pair_single = ps_mul_pair_single_mips;
+ s->decorrelate = ps_decorrelate_mips;
+ s->stereo_interpolate[0] = ps_stereo_interpolate_mips;
+#endif /* HAVE_MIPSFPU */
+#endif /* HAVE_INLINE_ASM */
+}
diff --git a/libavcodec/mips/aacpsy_mips.h b/libavcodec/mips/aacpsy_mips.h
new file mode 100644
index 0000000000..d1353c43b3
--- /dev/null
+++ b/libavcodec/mips/aacpsy_mips.h
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Author: Bojan Zivkovic (bojan@mips.com)
+ *
+ * AAC encoder psychoacoustic model routines optimized
+ * for MIPS floating-point architecture
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Reference: libavcodec/aacpsy.c
+ */
+
+#ifndef AVCODEC_MIPS_AACPSY_MIPS_H
+#define AVCODEC_MIPS_AACPSY_MIPS_H
+
+#if HAVE_INLINE_ASM && HAVE_MIPSFPU && ( PSY_LAME_FIR_LEN == 21 )
+static void calc_thr_3gpp_mips(const FFPsyWindowInfo *wi, const int num_bands,
+ AacPsyChannel *pch, const uint8_t *band_sizes,
+ const float *coefs)
+{
+ int i, w, g;
+ int start = 0;
+ for (w = 0; w < wi->num_windows*16; w += 16) {
+ for (g = 0; g < num_bands; g++) {
+ AacPsyBand *band = &pch->band[w+g];
+
+ float form_factor = 0.0f;
+ float Temp;
+ band->energy = 0.0f;
+ for (i = 0; i < band_sizes[g]; i+=4) {
+ float a, b, c, d;
+ float ax, bx, cx, dx;
+ float *cf = (float *)&coefs[start+i];
+
+ __asm__ volatile (
+ "lwc1 %[a], 0(%[cf]) \n\t"
+ "lwc1 %[b], 4(%[cf]) \n\t"
+ "lwc1 %[c], 8(%[cf]) \n\t"
+ "lwc1 %[d], 12(%[cf]) \n\t"
+ "abs.s %[a], %[a] \n\t"
+ "abs.s %[b], %[b] \n\t"
+ "abs.s %[c], %[c] \n\t"
+ "abs.s %[d], %[d] \n\t"
+ "sqrt.s %[ax], %[a] \n\t"
+ "sqrt.s %[bx], %[b] \n\t"
+ "sqrt.s %[cx], %[c] \n\t"
+ "sqrt.s %[dx], %[d] \n\t"
+ "madd.s %[e], %[e], %[a], %[a] \n\t"
+ "madd.s %[e], %[e], %[b], %[b] \n\t"
+ "madd.s %[e], %[e], %[c], %[c] \n\t"
+ "madd.s %[e], %[e], %[d], %[d] \n\t"
+ "add.s %[f], %[f], %[ax] \n\t"
+ "add.s %[f], %[f], %[bx] \n\t"
+ "add.s %[f], %[f], %[cx] \n\t"
+ "add.s %[f], %[f], %[dx] \n\t"
+
+ : [a]"=&f"(a), [b]"=&f"(b),
+ [c]"=&f"(c), [d]"=&f"(d),
+ [e]"+f"(band->energy), [f]"+f"(form_factor),
+ [ax]"=&f"(ax), [bx]"=&f"(bx),
+ [cx]"=&f"(cx), [dx]"=&f"(dx)
+ : [cf]"r"(cf)
+ : "memory"
+ );
+ }
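+
+ /* the unrolled block accumulates, per coefficient,
+ band->energy += coefs[start+i] * coefs[start+i];
+ form_factor += sqrtf(fabsf(coefs[start+i]));
+ i.e. the band energy and the form factor used below to
+ estimate the number of nonzero lines */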
+
+ Temp = sqrtf((float)band_sizes[g] / band->energy);
+ band->thr = band->energy * 0.001258925f;
+ band->nz_lines = form_factor * sqrtf(Temp);
+ start += band_sizes[g];
+ }
+ }
+}
+
+static void psy_hp_filter_mips(const float *firbuf, float *hpfsmpl, const float * psy_fir_coeffs)
+{
+ float sum1, sum2, sum3, sum4;
+ float *fb = (float*)firbuf;
+ float *fb_end = fb + AAC_BLOCK_SIZE_LONG;
+ float *hp = hpfsmpl;
+
+ float coeff0 = psy_fir_coeffs[1];
+ float coeff1 = psy_fir_coeffs[3];
+ float coeff2 = psy_fir_coeffs[5];
+ float coeff3 = psy_fir_coeffs[7];
+ float coeff4 = psy_fir_coeffs[9];
+
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+
+ "li.s $f12, 32768 \n\t"
+ "1: \n\t"
+ "lwc1 $f0, 40(%[fb]) \n\t"
+ "lwc1 $f1, 4(%[fb]) \n\t"
+ "lwc1 $f2, 80(%[fb]) \n\t"
+ "lwc1 $f3, 44(%[fb]) \n\t"
+ "lwc1 $f4, 8(%[fb]) \n\t"
+ "madd.s %[sum1], $f0, $f1, %[coeff0] \n\t"
+ "lwc1 $f5, 84(%[fb]) \n\t"
+ "lwc1 $f6, 48(%[fb]) \n\t"
+ "madd.s %[sum2], $f3, $f4, %[coeff0] \n\t"
+ "lwc1 $f7, 12(%[fb]) \n\t"
+ "madd.s %[sum1], %[sum1], $f2, %[coeff0] \n\t"
+ "lwc1 $f8, 88(%[fb]) \n\t"
+ "lwc1 $f9, 52(%[fb]) \n\t"
+ "madd.s %[sum2], %[sum2], $f5, %[coeff0] \n\t"
+ "madd.s %[sum3], $f6, $f7, %[coeff0] \n\t"
+ "lwc1 $f10, 16(%[fb]) \n\t"
+ "lwc1 $f11, 92(%[fb]) \n\t"
+ "madd.s %[sum1], %[sum1], $f7, %[coeff1] \n\t"
+ "lwc1 $f1, 72(%[fb]) \n\t"
+ "madd.s %[sum3], %[sum3], $f8, %[coeff0] \n\t"
+ "madd.s %[sum4], $f9, $f10, %[coeff0] \n\t"
+ "madd.s %[sum2], %[sum2], $f10, %[coeff1] \n\t"
+ "madd.s %[sum1], %[sum1], $f1, %[coeff1] \n\t"
+ "lwc1 $f4, 76(%[fb]) \n\t"
+ "lwc1 $f8, 20(%[fb]) \n\t"
+ "madd.s %[sum4], %[sum4], $f11, %[coeff0] \n\t"
+ "lwc1 $f11, 24(%[fb]) \n\t"
+ "madd.s %[sum2], %[sum2], $f4, %[coeff1] \n\t"
+ "madd.s %[sum1], %[sum1], $f8, %[coeff2] \n\t"
+ "madd.s %[sum3], %[sum3], $f8, %[coeff1] \n\t"
+ "madd.s %[sum4], %[sum4], $f11, %[coeff1] \n\t"
+ "lwc1 $f7, 64(%[fb]) \n\t"
+ "madd.s %[sum2], %[sum2], $f11, %[coeff2] \n\t"
+ "lwc1 $f10, 68(%[fb]) \n\t"
+ "madd.s %[sum3], %[sum3], $f2, %[coeff1] \n\t"
+ "madd.s %[sum4], %[sum4], $f5, %[coeff1] \n\t"
+ "madd.s %[sum1], %[sum1], $f7, %[coeff2] \n\t"
+ "madd.s %[sum2], %[sum2], $f10, %[coeff2] \n\t"
+ "lwc1 $f2, 28(%[fb]) \n\t"
+ "lwc1 $f5, 32(%[fb]) \n\t"
+ "lwc1 $f8, 56(%[fb]) \n\t"
+ "lwc1 $f11, 60(%[fb]) \n\t"
+ "madd.s %[sum3], %[sum3], $f2, %[coeff2] \n\t"
+ "madd.s %[sum4], %[sum4], $f5, %[coeff2] \n\t"
+ "madd.s %[sum1], %[sum1], $f2, %[coeff3] \n\t"
+ "madd.s %[sum2], %[sum2], $f5, %[coeff3] \n\t"
+ "madd.s %[sum3], %[sum3], $f1, %[coeff2] \n\t"
+ "madd.s %[sum4], %[sum4], $f4, %[coeff2] \n\t"
+ "madd.s %[sum1], %[sum1], $f8, %[coeff3] \n\t"
+ "madd.s %[sum2], %[sum2], $f11, %[coeff3] \n\t"
+ "lwc1 $f1, 36(%[fb]) \n\t"
+ "addiu %[fb], %[fb], 16 \n\t"
+ "madd.s %[sum4], %[sum4], $f0, %[coeff3] \n\t"
+ "madd.s %[sum3], %[sum3], $f1, %[coeff3] \n\t"
+ "madd.s %[sum1], %[sum1], $f1, %[coeff4] \n\t"
+ "madd.s %[sum2], %[sum2], $f0, %[coeff4] \n\t"
+ "madd.s %[sum4], %[sum4], $f10, %[coeff3] \n\t"
+ "madd.s %[sum3], %[sum3], $f7, %[coeff3] \n\t"
+ "madd.s %[sum1], %[sum1], $f6, %[coeff4] \n\t"
+ "madd.s %[sum2], %[sum2], $f9, %[coeff4] \n\t"
+ "madd.s %[sum4], %[sum4], $f6, %[coeff4] \n\t"
+ "madd.s %[sum3], %[sum3], $f3, %[coeff4] \n\t"
+ "mul.s %[sum1], %[sum1], $f12 \n\t"
+ "mul.s %[sum2], %[sum2], $f12 \n\t"
+ "madd.s %[sum4], %[sum4], $f11, %[coeff4] \n\t"
+ "madd.s %[sum3], %[sum3], $f8, %[coeff4] \n\t"
+ "swc1 %[sum1], 0(%[hp]) \n\t"
+ "swc1 %[sum2], 4(%[hp]) \n\t"
+ "mul.s %[sum4], %[sum4], $f12 \n\t"
+ "mul.s %[sum3], %[sum3], $f12 \n\t"
+ "swc1 %[sum4], 12(%[hp]) \n\t"
+ "swc1 %[sum3], 8(%[hp]) \n\t"
+ "bne %[fb], %[fb_end], 1b \n\t"
+ " addiu %[hp], %[hp], 16 \n\t"
+
+ ".set pop \n\t"
+
+ : [sum1]"=&f"(sum1), [sum2]"=&f"(sum2),
+ [sum3]"=&f"(sum3), [sum4]"=&f"(sum4),
+ [fb]"+r"(fb), [hp]"+r"(hp)
+ : [coeff0]"f"(coeff0), [coeff1]"f"(coeff1),
+ [coeff2]"f"(coeff2), [coeff3]"f"(coeff3),
+ [coeff4]"f"(coeff4), [fb_end]"r"(fb_end)
+ : "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6",
+ "$f7", "$f8", "$f9", "$f10", "$f11", "$f12",
+ "memory"
+ );
+}
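+
+/* Functionally the generic psy_hp_filter from libavcodec/aacpsy.c: a
+ * 21-tap symmetric FIR high-pass, with the effectively-zero even-index
+ * taps dropped (only psy_fir_coeffs[1,3,5,7,9] are loaded) and four
+ * output samples produced per iteration. Per output sample, roughly:
+ *
+ * sum = firbuf[i + 10];
+ * for (j = 1; j < 10; j += 2)
+ * sum += psy_fir_coeffs[j] * (firbuf[i + j] + firbuf[i + 21 - j]);
+ * hpfsmpl[i] = sum * 32768.0f; // LAME psymodel works in a +/-32768 range
+ */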
+
+#define calc_thr_3gpp calc_thr_3gpp_mips
+#define psy_hp_filter psy_hp_filter_mips
+
+#endif /* HAVE_INLINE_ASM && HAVE_MIPSFPU && PSY_LAME_FIR_LEN == 21 */
+#endif /* AVCODEC_MIPS_AACPSY_MIPS_H */
diff --git a/libavcodec/mips/aacsbr_mips.c b/libavcodec/mips/aacsbr_mips.c
new file mode 100644
index 0000000000..53a5fd06b7
--- /dev/null
+++ b/libavcodec/mips/aacsbr_mips.c
@@ -0,0 +1,618 @@
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Authors: Djordje Pesut (djordje@mips.com)
+ * Mirjana Vulin (mvulin@mips.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Reference: libavcodec/aacsbr.c
+ */
+
+#include "libavcodec/aac.h"
+#include "libavcodec/aacsbr.h"
+
+#define ENVELOPE_ADJUSTMENT_OFFSET 2
+
+#if HAVE_INLINE_ASM
+static int sbr_lf_gen_mips(AACContext *ac, SpectralBandReplication *sbr,
+ float X_low[32][40][2], const float W[2][32][32][2],
+ int buf_idx)
+{
+ int i, k;
+ int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
+ float *p_x_low = &X_low[0][8][0];
+ float *p_w = (float*)&W[buf_idx][0][0][0];
+ float *p_x1_low = &X_low[0][0][0];
+ float *p_w1 = (float*)&W[1-buf_idx][24][0][0];
+
+ float *loop_end = p_x1_low + 2560;
+
+ /* loop unrolled 8 times */
+ __asm__ volatile (
+ "1: \n\t"
+ "sw $0, 0(%[p_x1_low]) \n\t"
+ "sw $0, 4(%[p_x1_low]) \n\t"
+ "sw $0, 8(%[p_x1_low]) \n\t"
+ "sw $0, 12(%[p_x1_low]) \n\t"
+ "sw $0, 16(%[p_x1_low]) \n\t"
+ "sw $0, 20(%[p_x1_low]) \n\t"
+ "sw $0, 24(%[p_x1_low]) \n\t"
+ "sw $0, 28(%[p_x1_low]) \n\t"
+ "addiu %[p_x1_low], %[p_x1_low], 32 \n\t"
+ "bne %[p_x1_low], %[loop_end], 1b \n\t"
+ "addiu %[p_x1_low], %[p_x1_low], -10240 \n\t"
+
+ : [p_x1_low]"+r"(p_x1_low)
+ : [loop_end]"r"(loop_end)
+ : "memory"
+ );
+
+ for (k = 0; k < sbr->kx[1]; k++) {
+ for (i = 0; i < 32; i+=4) {
+ /* loop unrolled 4 times */
+ __asm__ volatile (
+ "lw %[temp0], 0(%[p_w]) \n\t"
+ "lw %[temp1], 4(%[p_w]) \n\t"
+ "lw %[temp2], 256(%[p_w]) \n\t"
+ "lw %[temp3], 260(%[p_w]) \n\t"
+ "lw %[temp4], 512(%[p_w]) \n\t"
+ "lw %[temp5], 516(%[p_w]) \n\t"
+ "lw %[temp6], 768(%[p_w]) \n\t"
+ "lw %[temp7], 772(%[p_w]) \n\t"
+ "sw %[temp0], 0(%[p_x_low]) \n\t"
+ "sw %[temp1], 4(%[p_x_low]) \n\t"
+ "sw %[temp2], 8(%[p_x_low]) \n\t"
+ "sw %[temp3], 12(%[p_x_low]) \n\t"
+ "sw %[temp4], 16(%[p_x_low]) \n\t"
+ "sw %[temp5], 20(%[p_x_low]) \n\t"
+ "sw %[temp6], 24(%[p_x_low]) \n\t"
+ "sw %[temp7], 28(%[p_x_low]) \n\t"
+ "addiu %[p_x_low], %[p_x_low], 32 \n\t"
+ "addiu %[p_w], %[p_w], 1024 \n\t"
+
+ : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+ [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+ [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+ [temp6]"=&r"(temp6), [temp7]"=&r"(temp7),
+ [p_w]"+r"(p_w), [p_x_low]"+r"(p_x_low)
+ :
+ : "memory"
+ );
+ }
+ p_x_low += 16;
+ p_w -= 2046;
+ }
+
+ for (k = 0; k < sbr->kx[0]; k++) {
+ for (i = 0; i < 2; i++) {
+
+ /* loop unrolled 4 times */
+ __asm__ volatile (
+ "lw %[temp0], 0(%[p_w1]) \n\t"
+ "lw %[temp1], 4(%[p_w1]) \n\t"
+ "lw %[temp2], 256(%[p_w1]) \n\t"
+ "lw %[temp3], 260(%[p_w1]) \n\t"
+ "lw %[temp4], 512(%[p_w1]) \n\t"
+ "lw %[temp5], 516(%[p_w1]) \n\t"
+ "lw %[temp6], 768(%[p_w1]) \n\t"
+ "lw %[temp7], 772(%[p_w1]) \n\t"
+ "sw %[temp0], 0(%[p_x1_low]) \n\t"
+ "sw %[temp1], 4(%[p_x1_low]) \n\t"
+ "sw %[temp2], 8(%[p_x1_low]) \n\t"
+ "sw %[temp3], 12(%[p_x1_low]) \n\t"
+ "sw %[temp4], 16(%[p_x1_low]) \n\t"
+ "sw %[temp5], 20(%[p_x1_low]) \n\t"
+ "sw %[temp6], 24(%[p_x1_low]) \n\t"
+ "sw %[temp7], 28(%[p_x1_low]) \n\t"
+ "addiu %[p_x1_low], %[p_x1_low], 32 \n\t"
+ "addiu %[p_w1], %[p_w1], 1024 \n\t"
+
+ : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+ [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+ [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+ [temp6]"=&r"(temp6), [temp7]"=&r"(temp7),
+ [p_w1]"+r"(p_w1), [p_x1_low]"+r"(p_x1_low)
+ :
+ : "memory"
+ );
+ }
+ p_x1_low += 64;
+ p_w1 -= 510;
+ }
+ return 0;
+}
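+
+/* Equivalent C, cf. sbr_lf_gen in libavcodec/aacsbr.c: X_low is cleared,
+ * slots 8..39 are filled from the current QMF buffer and slots 0..7 from
+ * the tail (slots 24..31) of the previous one; re/im shown as [0]/[1]:
+ *
+ * memset(X_low, 0, 32 * sizeof(*X_low));
+ * for (k = 0; k < sbr->kx[1]; k++)
+ * for (i = 8; i < 40; i++)
+ * X_low[k][i][0..1] = W[buf_idx][i - 8][k][0..1];
+ * for (k = 0; k < sbr->kx[0]; k++)
+ * for (i = 0; i < 8; i++)
+ * X_low[k][i][0..1] = W[1 - buf_idx][i + 24][k][0..1];
+ */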
+
+static int sbr_x_gen_mips(SpectralBandReplication *sbr, float X[2][38][64],
+ const float Y0[38][64][2], const float Y1[38][64][2],
+ const float X_low[32][40][2], int ch)
+{
+ int k, i;
+ const int i_f = 32;
+ int temp0, temp1, temp2, temp3;
+ const float *X_low1, *Y01, *Y11;
+ float *x1 = &X[0][0][0];
+ float *j = x1 + 4864;
+ const int i_Temp = FFMAX(2*sbr->data[ch].t_env_num_env_old - i_f, 0);
+
+ /* loop unrolled 8 times */
+ __asm__ volatile (
+ "1: \n\t"
+ "sw $0, 0(%[x1]) \n\t"
+ "sw $0, 4(%[x1]) \n\t"
+ "sw $0, 8(%[x1]) \n\t"
+ "sw $0, 12(%[x1]) \n\t"
+ "sw $0, 16(%[x1]) \n\t"
+ "sw $0, 20(%[x1]) \n\t"
+ "sw $0, 24(%[x1]) \n\t"
+ "sw $0, 28(%[x1]) \n\t"
+ "addiu %[x1], %[x1], 32 \n\t"
+ "bne %[x1], %[j], 1b \n\t"
+ "addiu %[x1], %[x1], -19456 \n\t"
+
+ : [x1]"+r"(x1)
+ : [j]"r"(j)
+ : "memory"
+ );
+
+ if (i_Temp != 0) {
+
+ X_low1 = &X_low[0][2][0];
+
+ for (k = 0; k < sbr->kx[0]; k++) {
+
+ __asm__ volatile (
+ "move %[i], $zero \n\t"
+ "2: \n\t"
+ "lw %[temp0], 0(%[X_low1]) \n\t"
+ "lw %[temp1], 4(%[X_low1]) \n\t"
+ "sw %[temp0], 0(%[x1]) \n\t"
+ "sw %[temp1], 9728(%[x1]) \n\t"
+ "addiu %[x1], %[x1], 256 \n\t"
+ "addiu %[X_low1], %[X_low1], 8 \n\t"
+ "addiu %[i], %[i], 1 \n\t"
+ "bne %[i], %[i_Temp], 2b \n\t"
+
+ : [x1]"+r"(x1), [X_low1]"+r"(X_low1), [i]"=&r"(i),
+ [temp0]"=&r"(temp0), [temp1]"=&r"(temp1)
+ : [i_Temp]"r"(i_Temp)
+ : "memory"
+ );
+ x1 -= (i_Temp << 6) - 1;
+ X_low1 -= (i_Temp << 1) - 80;
+ }
+
+ x1 = &X[0][0][k];
+ Y01 = (float*)&Y0[32][k][0];
+
+ for (; k < sbr->kx[0] + sbr->m[0]; k++) {
+ __asm__ volatile (
+ "move %[i], $zero \n\t"
+ "3: \n\t"
+ "lw %[temp0], 0(%[Y01]) \n\t"
+ "lw %[temp1], 4(%[Y01]) \n\t"
+ "sw %[temp0], 0(%[x1]) \n\t"
+ "sw %[temp1], 9728(%[x1]) \n\t"
+ "addiu %[x1], %[x1], 256 \n\t"
+ "addiu %[Y01], %[Y01], 512 \n\t"
+ "addiu %[i], %[i], 1 \n\t"
+ "bne %[i], %[i_Temp], 3b \n\t"
+
+ : [x1]"+r"(x1), [Y01]"+r"(Y01), [i]"=&r"(i),
+ [temp0]"=&r"(temp0), [temp1]"=&r"(temp1)
+ : [i_Temp]"r"(i_Temp)
+ : "memory"
+ );
+ x1 -= (i_Temp << 6) - 1;
+ Y01 -= (i_Temp << 7) - 2;
+ }
+ }
+
+ x1 = &X[0][i_Temp][0];
+ X_low1 = &X_low[0][i_Temp + 2][0];
+ temp3 = 38;
+
+ for (k = 0; k < sbr->kx[1]; k++) {
+
+ __asm__ volatile (
+ "move %[i], %[i_Temp] \n\t"
+ "4: \n\t"
+ "lw %[temp0], 0(%[X_low1]) \n\t"
+ "lw %[temp1], 4(%[X_low1]) \n\t"
+ "sw %[temp0], 0(%[x1]) \n\t"
+ "sw %[temp1], 9728(%[x1]) \n\t"
+ "addiu %[x1], %[x1], 256 \n\t"
+ "addiu %[X_low1], %[X_low1], 8 \n\t"
+ "addiu %[i], %[i], 1 \n\t"
+ "bne %[i], %[temp3], 4b \n\t"
+
+ : [x1]"+r"(x1), [X_low1]"+r"(X_low1), [i]"=&r"(i),
+ [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+ [temp2]"=&r"(temp2)
+ : [i_Temp]"r"(i_Temp), [temp3]"r"(temp3)
+ : "memory"
+ );
+ x1 -= ((38 - i_Temp) << 6) - 1;
+ X_low1 -= ((38 - i_Temp) << 1) - 80;
+ }
+
+ x1 = &X[0][i_Temp][k];
+ Y11 = &Y1[i_Temp][k][0];
+ temp2 = 32;
+
+ for (; k < sbr->kx[1] + sbr->m[1]; k++) {
+
+ __asm__ volatile (
+ "move %[i], %[i_Temp] \n\t"
+ "5: \n\t"
+ "lw %[temp0], 0(%[Y11]) \n\t"
+ "lw %[temp1], 4(%[Y11]) \n\t"
+ "sw %[temp0], 0(%[x1]) \n\t"
+ "sw %[temp1], 9728(%[x1]) \n\t"
+ "addiu %[x1], %[x1], 256 \n\t"
+ "addiu %[Y11], %[Y11], 512 \n\t"
+ "addiu %[i], %[i], 1 \n\t"
+ "bne %[i], %[temp2], 5b \n\t"
+
+ : [x1]"+r"(x1), [Y11]"+r"(Y11), [i]"=&r"(i),
+ [temp0]"=&r"(temp0), [temp1]"=&r"(temp1)
+ : [i_Temp]"r"(i_Temp), [temp3]"r"(temp3),
+ [temp2]"r"(temp2)
+ : "memory"
+ );
+
+ x1 -= ((32 - i_Temp) << 6) - 1;
+ Y11 -= ((32 - i_Temp) << 7) - 2;
+ }
+ return 0;
+}
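+
+/* Equivalent C, cf. sbr_x_gen in libavcodec/aacsbr.c: after zeroing X,
+ * time slots below i_Temp take the previous frame (X_low columns below
+ * kx[0], Y0 above them), and slots from i_Temp on take the current frame
+ * (X_low below kx[1], Y1 above); the 9728-byte store offset is 38 * 64
+ * floats, i.e. the imaginary plane X[1].
+ */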
+
+#if HAVE_MIPSFPU
+static void sbr_hf_assemble_mips(float Y1[38][64][2],
+ const float X_high[64][40][2],
+ SpectralBandReplication *sbr, SBRData *ch_data,
+ const int e_a[2])
+{
+ int e, i, j, m;
+ const int h_SL = 4 * !sbr->bs_smoothing_mode;
+ const int kx = sbr->kx[1];
+ const int m_max = sbr->m[1];
+ static const float h_smooth[5] = {
+ 0.33333333333333,
+ 0.30150283239582,
+ 0.21816949906249,
+ 0.11516383427084,
+ 0.03183050093751,
+ };
+
+ float (*g_temp)[48] = ch_data->g_temp, (*q_temp)[48] = ch_data->q_temp;
+ int indexnoise = ch_data->f_indexnoise;
+ int indexsine = ch_data->f_indexsine;
+ float *g_temp1, *q_temp1, *pok, *pok1;
+ float temp1, temp2, temp3, temp4;
+ int size = m_max;
+
+ if (sbr->reset) {
+ for (i = 0; i < h_SL; i++) {
+ memcpy(g_temp[i + 2*ch_data->t_env[0]], sbr->gain[0], m_max * sizeof(sbr->gain[0][0]));
+ memcpy(q_temp[i + 2*ch_data->t_env[0]], sbr->q_m[0], m_max * sizeof(sbr->q_m[0][0]));
+ }
+ } else if (h_SL) {
+ memcpy(g_temp[2*ch_data->t_env[0]], g_temp[2*ch_data->t_env_num_env_old], 4*sizeof(g_temp[0]));
+ memcpy(q_temp[2*ch_data->t_env[0]], q_temp[2*ch_data->t_env_num_env_old], 4*sizeof(q_temp[0]));
+ }
+
+ for (e = 0; e < ch_data->bs_num_env; e++) {
+ for (i = 2 * ch_data->t_env[e]; i < 2 * ch_data->t_env[e + 1]; i++) {
+ g_temp1 = g_temp[h_SL + i];
+ pok = sbr->gain[e];
+ q_temp1 = q_temp[h_SL + i];
+ pok1 = sbr->q_m[e];
+
+ /* loop unrolled 4 times */
+ for (j=0; j<(size>>2); j++) {
+ __asm__ volatile (
+ "lw %[temp1], 0(%[pok]) \n\t"
+ "lw %[temp2], 4(%[pok]) \n\t"
+ "lw %[temp3], 8(%[pok]) \n\t"
+ "lw %[temp4], 12(%[pok]) \n\t"
+ "sw %[temp1], 0(%[g_temp1]) \n\t"
+ "sw %[temp2], 4(%[g_temp1]) \n\t"
+ "sw %[temp3], 8(%[g_temp1]) \n\t"
+ "sw %[temp4], 12(%[g_temp1]) \n\t"
+ "lw %[temp1], 0(%[pok1]) \n\t"
+ "lw %[temp2], 4(%[pok1]) \n\t"
+ "lw %[temp3], 8(%[pok1]) \n\t"
+ "lw %[temp4], 12(%[pok1]) \n\t"
+ "sw %[temp1], 0(%[q_temp1]) \n\t"
+ "sw %[temp2], 4(%[q_temp1]) \n\t"
+ "sw %[temp3], 8(%[q_temp1]) \n\t"
+ "sw %[temp4], 12(%[q_temp1]) \n\t"
+ "addiu %[pok], %[pok], 16 \n\t"
+ "addiu %[g_temp1], %[g_temp1], 16 \n\t"
+ "addiu %[pok1], %[pok1], 16 \n\t"
+ "addiu %[q_temp1], %[q_temp1], 16 \n\t"
+
+ : [temp1]"=&r"(temp1), [temp2]"=&r"(temp2),
+ [temp3]"=&r"(temp3), [temp4]"=&r"(temp4),
+ [pok]"+r"(pok), [g_temp1]"+r"(g_temp1),
+ [pok1]"+r"(pok1), [q_temp1]"+r"(q_temp1)
+ :
+ : "memory"
+ );
+ }
+
+ for (j=0; j<(size&3); j++) {
+ __asm__ volatile (
+ "lw %[temp1], 0(%[pok]) \n\t"
+ "lw %[temp2], 0(%[pok1]) \n\t"
+ "sw %[temp1], 0(%[g_temp1]) \n\t"
+ "sw %[temp2], 0(%[q_temp1]) \n\t"
+ "addiu %[pok], %[pok], 4 \n\t"
+ "addiu %[g_temp1], %[g_temp1], 4 \n\t"
+ "addiu %[pok1], %[pok1], 4 \n\t"
+ "addiu %[q_temp1], %[q_temp1], 4 \n\t"
+
+ : [temp1]"=&r"(temp1), [temp2]"=&r"(temp2),
+ [temp3]"=&r"(temp3), [temp4]"=&r"(temp4),
+ [pok]"+r"(pok), [g_temp1]"+r"(g_temp1),
+ [pok1]"+r"(pok1), [q_temp1]"+r"(q_temp1)
+ :
+ : "memory"
+ );
+ }
+ }
+ }
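+
+ /* per time slot, the two unrolled loops above amount to
+ memcpy(g_temp[h_SL + i], sbr->gain[e], m_max * sizeof(float));
+ memcpy(q_temp[h_SL + i], sbr->q_m[e], m_max * sizeof(float)); */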
+
+ for (e = 0; e < ch_data->bs_num_env; e++) {
+ for (i = 2 * ch_data->t_env[e]; i < 2 * ch_data->t_env[e + 1]; i++) {
+ LOCAL_ALIGNED_16(float, g_filt_tab, [48]);
+ LOCAL_ALIGNED_16(float, q_filt_tab, [48]);
+ float *g_filt, *q_filt;
+
+ if (h_SL && e != e_a[0] && e != e_a[1]) {
+ g_filt = g_filt_tab;
+ q_filt = q_filt_tab;
+
+ for (m = 0; m < m_max; m++) {
+ const int idx1 = i + h_SL;
+ g_filt[m] = 0.0f;
+ q_filt[m] = 0.0f;
+
+ for (j = 0; j <= h_SL; j++) {
+ g_filt[m] += g_temp[idx1 - j][m] * h_smooth[j];
+ q_filt[m] += q_temp[idx1 - j][m] * h_smooth[j];
+ }
+ }
+ } else {
+ g_filt = g_temp[i + h_SL];
+ q_filt = q_temp[i];
+ }
+
+ sbr->dsp.hf_g_filt(Y1[i] + kx, X_high + kx, g_filt, m_max,
+ i + ENVELOPE_ADJUSTMENT_OFFSET);
+
+ if (e != e_a[0] && e != e_a[1]) {
+ sbr->dsp.hf_apply_noise[indexsine](Y1[i] + kx, sbr->s_m[e],
+ q_filt, indexnoise,
+ kx, m_max);
+ } else {
+ int idx = indexsine&1;
+ int A = (1-((indexsine+(kx & 1))&2));
+ int B = (A^(-idx)) + idx;
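+ /* with no phase adjustment the sine pattern is (1, 0, -1, 0): idx
+ selects the real or imaginary output, A is the sign applied for
+ even m and B (equal to A for idx == 0, -A for idx == 1) for odd m */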
+ float *out = &Y1[i][kx][idx];
+ float *in = sbr->s_m[e];
+ float temp0, temp1, temp2, temp3, temp4, temp5;
+ float A_f = (float)A;
+ float B_f = (float)B;
+
+ for (m = 0; m+1 < m_max; m+=2) {
+
+ temp2 = out[0];
+ temp3 = out[2];
+
+ __asm__ volatile(
+ "lwc1 %[temp0], 0(%[in]) \n\t"
+ "lwc1 %[temp1], 4(%[in]) \n\t"
+ "madd.s %[temp4], %[temp2], %[temp0], %[A_f] \n\t"
+ "madd.s %[temp5], %[temp3], %[temp1], %[B_f] \n\t"
+ "swc1 %[temp4], 0(%[out]) \n\t"
+ "swc1 %[temp5], 8(%[out]) \n\t"
+ "addiu %[in], %[in], 8 \n\t"
+ "addiu %[out], %[out], 16 \n\t"
+
+ : [temp0]"=&f" (temp0), [temp1]"=&f"(temp1),
+ [temp4]"=&f" (temp4), [temp5]"=&f"(temp5),
+ [in]"+r"(in), [out]"+r"(out)
+ : [A_f]"f"(A_f), [B_f]"f"(B_f), [temp2]"f"(temp2),
+ [temp3]"f"(temp3)
+ : "memory"
+ );
+ }
+ if (m_max & 1)
+ out[2*m] += in[m] * A;
+ }
+ indexnoise = (indexnoise + m_max) & 0x1ff;
+ indexsine = (indexsine + 1) & 3;
+ }
+ }
+ ch_data->f_indexnoise = indexnoise;
+ ch_data->f_indexsine = indexsine;
+}
+
+static void sbr_hf_inverse_filter_mips(SBRDSPContext *dsp,
+ float (*alpha0)[2], float (*alpha1)[2],
+ const float X_low[32][40][2], int k0)
+{
+ int k;
+ float temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, c;
+ float *phi1, *alpha_1, *alpha_0, res1, res2, temp_real, temp_im;
+
+ c = 1.000001f;
+
+ for (k = 0; k < k0; k++) {
+ LOCAL_ALIGNED_16(float, phi, [3], [2][2]);
+ float dk;
+ phi1 = &phi[0][0][0];
+ alpha_1 = &alpha1[k][0];
+ alpha_0 = &alpha0[k][0];
+ dsp->autocorrelate(X_low[k], phi);
+
+ __asm__ volatile (
+ "lwc1 %[temp0], 40(%[phi1]) \n\t"
+ "lwc1 %[temp1], 16(%[phi1]) \n\t"
+ "lwc1 %[temp2], 24(%[phi1]) \n\t"
+ "lwc1 %[temp3], 28(%[phi1]) \n\t"
+ "mul.s %[dk], %[temp0], %[temp1] \n\t"
+ "lwc1 %[temp4], 0(%[phi1]) \n\t"
+ "mul.s %[res2], %[temp2], %[temp2] \n\t"
+ "lwc1 %[temp5], 4(%[phi1]) \n\t"
+ "madd.s %[res2], %[res2], %[temp3], %[temp3] \n\t"
+ "lwc1 %[temp6], 8(%[phi1]) \n\t"
+ "div.s %[res2], %[res2], %[c] \n\t"
+ "lwc1 %[temp0], 12(%[phi1]) \n\t"
+ "sub.s %[dk], %[dk], %[res2] \n\t"
+
+ : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
+ [temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
+ [temp6]"=&f"(temp6), [res2]"=&f"(res2), [dk]"=&f"(dk)
+ : [phi1]"r"(phi1), [c]"f"(c)
+ : "memory"
+ );
+
+ if (!dk) {
+ alpha_1[0] = 0;
+ alpha_1[1] = 0;
+ } else {
+ __asm__ volatile (
+ "mul.s %[temp_real], %[temp4], %[temp2] \n\t"
+ "nmsub.s %[temp_real], %[temp_real], %[temp5], %[temp3] \n\t"
+ "nmsub.s %[temp_real], %[temp_real], %[temp6], %[temp1] \n\t"
+ "mul.s %[temp_im], %[temp4], %[temp3] \n\t"
+ "madd.s %[temp_im], %[temp_im], %[temp5], %[temp2] \n\t"
+ "nmsub.s %[temp_im], %[temp_im], %[temp0], %[temp1] \n\t"
+ "div.s %[temp_real], %[temp_real], %[dk] \n\t"
+ "div.s %[temp_im], %[temp_im], %[dk] \n\t"
+ "swc1 %[temp_real], 0(%[alpha_1]) \n\t"
+ "swc1 %[temp_im], 4(%[alpha_1]) \n\t"
+
+ : [temp_real]"=&f" (temp_real), [temp_im]"=&f"(temp_im)
+ : [phi1]"r"(phi1), [temp0]"f"(temp0), [temp1]"f"(temp1),
+ [temp2]"f"(temp2), [temp3]"f"(temp3), [temp4]"f"(temp4),
+ [temp5]"f"(temp5), [temp6]"f"(temp6),
+ [alpha_1]"r"(alpha_1), [dk]"f"(dk)
+ : "memory"
+ );
+ }
+
+ if (!phi1[4]) {
+ alpha_0[0] = 0;
+ alpha_0[1] = 0;
+ } else {
+ __asm__ volatile (
+ "lwc1 %[temp6], 0(%[alpha_1]) \n\t"
+ "lwc1 %[temp7], 4(%[alpha_1]) \n\t"
+ "mul.s %[temp_real], %[temp6], %[temp2] \n\t"
+ "add.s %[temp_real], %[temp_real], %[temp4] \n\t"
+ "madd.s %[temp_real], %[temp_real], %[temp7], %[temp3] \n\t"
+ "mul.s %[temp_im], %[temp7], %[temp2] \n\t"
+ "add.s %[temp_im], %[temp_im], %[temp5] \n\t"
+ "nmsub.s %[temp_im], %[temp_im], %[temp6], %[temp3] \n\t"
+ "div.s %[temp_real], %[temp_real], %[temp1] \n\t"
+ "div.s %[temp_im], %[temp_im], %[temp1] \n\t"
+ "neg.s %[temp_real], %[temp_real] \n\t"
+ "neg.s %[temp_im], %[temp_im] \n\t"
+ "swc1 %[temp_real], 0(%[alpha_0]) \n\t"
+ "swc1 %[temp_im], 4(%[alpha_0]) \n\t"
+
+ : [temp_real]"=&f"(temp_real), [temp_im]"=&f"(temp_im),
+ [temp6]"=&f"(temp6), [temp7]"=&f"(temp7),
+ [res1]"=&f"(res1), [res2]"=&f"(res2)
+ : [alpha_1]"r"(alpha_1), [alpha_0]"r"(alpha_0),
+ [temp0]"f"(temp0), [temp1]"f"(temp1), [temp2]"f"(temp2),
+ [temp3]"f"(temp3), [temp4]"f"(temp4), [temp5]"f"(temp5)
+ : "memory"
+ );
+ }
+
+ __asm__ volatile (
+ "lwc1 %[temp1], 0(%[alpha_1]) \n\t"
+ "lwc1 %[temp2], 4(%[alpha_1]) \n\t"
+ "lwc1 %[temp_real], 0(%[alpha_0]) \n\t"
+ "lwc1 %[temp_im], 4(%[alpha_0]) \n\t"
+ "mul.s %[res1], %[temp1], %[temp1] \n\t"
+ "madd.s %[res1], %[res1], %[temp2], %[temp2] \n\t"
+ "mul.s %[res2], %[temp_real], %[temp_real] \n\t"
+ "madd.s %[res2], %[res2], %[temp_im], %[temp_im] \n\t"
+
+ : [temp_real]"=&f"(temp_real), [temp_im]"=&f"(temp_im),
+ [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
+ [res1]"=&f"(res1), [res2]"=&f"(res2)
+ : [alpha_1]"r"(alpha_1), [alpha_0]"r"(alpha_0)
+ : "memory"
+ );
+
+ if (res1 >= 16.0f || res2 >= 16.0f) {
+ alpha_1[0] = 0;
+ alpha_1[1] = 0;
+ alpha_0[0] = 0;
+ alpha_0[1] = 0;
+ }
+ }
+}
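+
+/* The asm blocks above solve the second-order covariance equations of the
+ * generic sbr_hf_inverse_filter in libavcodec/aacsbr.c; with phi1 =
+ * &phi[0][0][0] and phi[1][0] real, informally:
+ *
+ * dk = phi[2][1][0] * phi[1][0][0]
+ * - (phi[1][1][0]^2 + phi[1][1][1]^2) / 1.000001f;
+ * alpha1 = (phi[0][0] * phi[1][1] - phi[0][1] * phi[1][0][0]) / dk;
+ * alpha0 = -(phi[0][0] + alpha1 * conj(phi[1][1])) / phi[1][0][0];
+ *
+ * with both predictors zeroed when |alpha0|^2 or |alpha1|^2 >= 16.
+ */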
+#endif /* HAVE_MIPSFPU */
+#endif /* HAVE_INLINE_ASM */
+
+void ff_aacsbr_func_ptr_init_mips(AACSBRContext *c)
+{
+#if HAVE_INLINE_ASM
+ c->sbr_lf_gen = sbr_lf_gen_mips;
+ c->sbr_x_gen = sbr_x_gen_mips;
+#if HAVE_MIPSFPU
+ c->sbr_hf_inverse_filter = sbr_hf_inverse_filter_mips;
+ c->sbr_hf_assemble = sbr_hf_assemble_mips;
+#endif /* HAVE_MIPSFPU */
+#endif /* HAVE_INLINE_ASM */
+}
diff --git a/libavcodec/mips/aacsbr_mips.h b/libavcodec/mips/aacsbr_mips.h
new file mode 100644
index 0000000000..8e6ad7d839
--- /dev/null
+++ b/libavcodec/mips/aacsbr_mips.h
@@ -0,0 +1,493 @@
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Authors: Djordje Pesut (djordje@mips.com)
+ * Mirjana Vulin (mvulin@mips.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Reference: libavcodec/aacsbr.c
+ */
+
+#ifndef AVCODEC_MIPS_AACSBR_FLOAT_H
+#define AVCODEC_MIPS_AACSBR_FLOAT_H
+
+#include "libavcodec/aac.h"
+#include "libavcodec/sbr.h"
+
+#if HAVE_INLINE_ASM
+static void sbr_qmf_analysis_mips(AVFloatDSPContext *fdsp, FFTContext *mdct,
+ SBRDSPContext *sbrdsp, const float *in, float *x,
+ float z[320], float W[2][32][32][2], int buf_idx)
+{
+ int i;
+ float *w0;
+ float *w1;
+ int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
+
+ w0 = x;
+ w1 = x + 1024;
+ for(i = 0; i < 36; i++)
+ {
+ /* loop unrolled 8 times */
+ __asm__ volatile(
+ "lw %[temp0], 0(%[w1]) \n\t"
+ "lw %[temp1], 4(%[w1]) \n\t"
+ "lw %[temp2], 8(%[w1]) \n\t"
+ "lw %[temp3], 12(%[w1]) \n\t"
+ "lw %[temp4], 16(%[w1]) \n\t"
+ "lw %[temp5], 20(%[w1]) \n\t"
+ "lw %[temp6], 24(%[w1]) \n\t"
+ "lw %[temp7], 28(%[w1]) \n\t"
+ "sw %[temp0], 0(%[w0]) \n\t"
+ "sw %[temp1], 4(%[w0]) \n\t"
+ "sw %[temp2], 8(%[w0]) \n\t"
+ "sw %[temp3], 12(%[w0]) \n\t"
+ "sw %[temp4], 16(%[w0]) \n\t"
+ "sw %[temp5], 20(%[w0]) \n\t"
+ "sw %[temp6], 24(%[w0]) \n\t"
+ "sw %[temp7], 28(%[w0]) \n\t"
+ "addiu %[w0], %[w0], 32 \n\t"
+ "addiu %[w1], %[w1], 32 \n\t"
+
+ : [w0]"+r"(w0), [w1]"+r"(w1),
+ [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+ [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+ [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+ [temp6]"=&r"(temp6), [temp7]"=&r"(temp7)
+ :
+ : "memory"
+ );
+ }
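+
+ /* equivalent to memcpy(x, x + 1024, 288 * sizeof(float)): shift the
+ analysis-window overlap down (36 iterations x 8 floats) */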
+
+ w0 = x + 288;
+ w1 = (float*)in;
+ for(i = 0; i < 128; i++)
+ {
+ /* loop unrolled 8 times */
+ __asm__ volatile(
+ "lw %[temp0], 0(%[w1]) \n\t"
+ "lw %[temp1], 4(%[w1]) \n\t"
+ "lw %[temp2], 8(%[w1]) \n\t"
+ "lw %[temp3], 12(%[w1]) \n\t"
+ "lw %[temp4], 16(%[w1]) \n\t"
+ "lw %[temp5], 20(%[w1]) \n\t"
+ "lw %[temp6], 24(%[w1]) \n\t"
+ "lw %[temp7], 28(%[w1]) \n\t"
+ "sw %[temp0], 0(%[w0]) \n\t"
+ "sw %[temp1], 4(%[w0]) \n\t"
+ "sw %[temp2], 8(%[w0]) \n\t"
+ "sw %[temp3], 12(%[w0]) \n\t"
+ "sw %[temp4], 16(%[w0]) \n\t"
+ "sw %[temp5], 20(%[w0]) \n\t"
+ "sw %[temp6], 24(%[w0]) \n\t"
+ "sw %[temp7], 28(%[w0]) \n\t"
+ "addiu %[w0], %[w0], 32 \n\t"
+ "addiu %[w1], %[w1], 32 \n\t"
+
+ : [w0]"+r"(w0), [w1]"+r"(w1),
+ [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+ [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+ [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+ [temp6]"=&r"(temp6), [temp7]"=&r"(temp7)
+ :
+ : "memory"
+ );
+ }
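+
+ /* equivalent to memcpy(x + 288, in, 1024 * sizeof(float)): append the
+ new input block (128 iterations x 8 floats) */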
+
+ for (i = 0; i < 32; i++) { // numTimeSlots*RATE = 16*2 as 960 sample frames
+ // are not supported
+ fdsp->vector_fmul_reverse(z, sbr_qmf_window_ds, x, 320);
+ sbrdsp->sum64x5(z);
+ sbrdsp->qmf_pre_shuffle(z);
+ mdct->imdct_half(mdct, z, z+64);
+ sbrdsp->qmf_post_shuffle(W[buf_idx][i], z);
+ x += 32;
+ }
+}
+
+#if HAVE_MIPSFPU
+static void sbr_qmf_synthesis_mips(FFTContext *mdct,
+ SBRDSPContext *sbrdsp, AVFloatDSPContext *fdsp,
+ float *out, float X[2][38][64],
+ float mdct_buf[2][64],
+ float *v0, int *v_off, const unsigned int div)
+{
+ int i, n;
+ const float *sbr_qmf_window = div ? sbr_qmf_window_ds : sbr_qmf_window_us;
+ const int step = 128 >> div;
+ float *v;
+ float temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9, temp10, temp11, temp12, temp13;
+ float temp14, temp15, temp16, temp17, temp18, temp19;
+ float *vv0, *s0, *dst;
+ dst = out;
+
+ for (i = 0; i < 32; i++) {
+ if (*v_off < step) {
+ int saved_samples = (1280 - 128) >> div;
+ memcpy(&v0[SBR_SYNTHESIS_BUF_SIZE - saved_samples], v0, saved_samples * sizeof(float));
+ *v_off = SBR_SYNTHESIS_BUF_SIZE - saved_samples - step;
+ } else {
+ *v_off -= step;
+ }
+ v = v0 + *v_off;
+ if (div) {
+ for (n = 0; n < 32; n++) {
+ X[0][i][ n] = -X[0][i][n];
+ X[0][i][32+n] = X[1][i][31-n];
+ }
+ mdct->imdct_half(mdct, mdct_buf[0], X[0][i]);
+ sbrdsp->qmf_deint_neg(v, mdct_buf[0]);
+ } else {
+ sbrdsp->neg_odd_64(X[1][i]);
+ mdct->imdct_half(mdct, mdct_buf[0], X[0][i]);
+ mdct->imdct_half(mdct, mdct_buf[1], X[1][i]);
+ sbrdsp->qmf_deint_bfly(v, mdct_buf[1], mdct_buf[0]);
+ }
+
+ if(div == 0)
+ {
+ float *v0_end;
+ vv0 = v;
+ v0_end = v + 60;
+ s0 = (float*)sbr_qmf_window;
+
+ /* ten vector_fmul_add calls merged into one loop,
+ unrolled four times */
+ __asm__ volatile(
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "lwc1 %[temp4], 0(%[v0]) \n\t"
+ "lwc1 %[temp5], 0(%[s0]) \n\t"
+ "lwc1 %[temp6], 4(%[v0]) \n\t"
+ "lwc1 %[temp7], 4(%[s0]) \n\t"
+ "lwc1 %[temp8], 8(%[v0]) \n\t"
+ "lwc1 %[temp9], 8(%[s0]) \n\t"
+ "lwc1 %[temp10], 12(%[v0]) \n\t"
+ "lwc1 %[temp11], 12(%[s0]) \n\t"
+ "lwc1 %[temp12], 768(%[v0]) \n\t"
+ "lwc1 %[temp13], 256(%[s0]) \n\t"
+ "lwc1 %[temp14], 772(%[v0]) \n\t"
+ "lwc1 %[temp15], 260(%[s0]) \n\t"
+ "lwc1 %[temp16], 776(%[v0]) \n\t"
+ "lwc1 %[temp17], 264(%[s0]) \n\t"
+ "lwc1 %[temp18], 780(%[v0]) \n\t"
+ "lwc1 %[temp19], 268(%[s0]) \n\t"
+ "1: \n\t"
+ "mul.s %[temp0], %[temp4], %[temp5] \n\t"
+ "lwc1 %[temp4], 1024(%[v0]) \n\t"
+ "mul.s %[temp1], %[temp6], %[temp7] \n\t"
+ "lwc1 %[temp5], 512(%[s0]) \n\t"
+ "mul.s %[temp2], %[temp8], %[temp9] \n\t"
+ "lwc1 %[temp6], 1028(%[v0]) \n\t"
+ "mul.s %[temp3], %[temp10], %[temp11] \n\t"
+ "lwc1 %[temp7], 516(%[s0]) \n\t"
+ "madd.s %[temp0], %[temp0], %[temp12], %[temp13] \n\t"
+ "lwc1 %[temp8], 1032(%[v0]) \n\t"
+ "madd.s %[temp1], %[temp1], %[temp14], %[temp15] \n\t"
+ "lwc1 %[temp9], 520(%[s0]) \n\t"
+ "madd.s %[temp2], %[temp2], %[temp16], %[temp17] \n\t"
+ "lwc1 %[temp10], 1036(%[v0]) \n\t"
+ "madd.s %[temp3], %[temp3], %[temp18], %[temp19] \n\t"
+ "lwc1 %[temp11], 524(%[s0]) \n\t"
+ "lwc1 %[temp12], 1792(%[v0]) \n\t"
+ "lwc1 %[temp13], 768(%[s0]) \n\t"
+ "lwc1 %[temp14], 1796(%[v0]) \n\t"
+ "lwc1 %[temp15], 772(%[s0]) \n\t"
+ "lwc1 %[temp16], 1800(%[v0]) \n\t"
+ "lwc1 %[temp17], 776(%[s0]) \n\t"
+ "lwc1 %[temp18], 1804(%[v0]) \n\t"
+ "lwc1 %[temp19], 780(%[s0]) \n\t"
+ "madd.s %[temp0], %[temp0], %[temp4], %[temp5] \n\t"
+ "lwc1 %[temp4], 2048(%[v0]) \n\t"
+ "madd.s %[temp1], %[temp1], %[temp6], %[temp7] \n\t"
+ "lwc1 %[temp5], 1024(%[s0]) \n\t"
+ "madd.s %[temp2], %[temp2], %[temp8], %[temp9] \n\t"
+ "lwc1 %[temp6], 2052(%[v0]) \n\t"
+ "madd.s %[temp3], %[temp3], %[temp10], %[temp11] \n\t"
+ "lwc1 %[temp7], 1028(%[s0]) \n\t"
+ "madd.s %[temp0], %[temp0], %[temp12], %[temp13] \n\t"
+ "lwc1 %[temp8], 2056(%[v0]) \n\t"
+ "madd.s %[temp1], %[temp1], %[temp14], %[temp15] \n\t"
+ "lwc1 %[temp9], 1032(%[s0]) \n\t"
+ "madd.s %[temp2], %[temp2], %[temp16], %[temp17] \n\t"
+ "lwc1 %[temp10], 2060(%[v0]) \n\t"
+ "madd.s %[temp3], %[temp3], %[temp18], %[temp19] \n\t"
+ "lwc1 %[temp11], 1036(%[s0]) \n\t"
+ "lwc1 %[temp12], 2816(%[v0]) \n\t"
+ "lwc1 %[temp13], 1280(%[s0]) \n\t"
+ "lwc1 %[temp14], 2820(%[v0]) \n\t"
+ "lwc1 %[temp15], 1284(%[s0]) \n\t"
+ "lwc1 %[temp16], 2824(%[v0]) \n\t"
+ "lwc1 %[temp17], 1288(%[s0]) \n\t"
+ "lwc1 %[temp18], 2828(%[v0]) \n\t"
+ "lwc1 %[temp19], 1292(%[s0]) \n\t"
+ "madd.s %[temp0], %[temp0], %[temp4], %[temp5] \n\t"
+ "lwc1 %[temp4], 3072(%[v0]) \n\t"
+ "madd.s %[temp1], %[temp1], %[temp6], %[temp7] \n\t"
+ "lwc1 %[temp5], 1536(%[s0]) \n\t"
+ "madd.s %[temp2], %[temp2], %[temp8], %[temp9] \n\t"
+ "lwc1 %[temp6], 3076(%[v0]) \n\t"
+ "madd.s %[temp3], %[temp3], %[temp10], %[temp11] \n\t"
+ "lwc1 %[temp7], 1540(%[s0]) \n\t"
+ "madd.s %[temp0], %[temp0], %[temp12], %[temp13] \n\t"
+ "lwc1 %[temp8], 3080(%[v0]) \n\t"
+ "madd.s %[temp1], %[temp1], %[temp14], %[temp15] \n\t"
+ "lwc1 %[temp9], 1544(%[s0]) \n\t"
+ "madd.s %[temp2], %[temp2], %[temp16], %[temp17] \n\t"
+ "lwc1 %[temp10], 3084(%[v0]) \n\t"
+ "madd.s %[temp3], %[temp3], %[temp18], %[temp19] \n\t"
+ "lwc1 %[temp11], 1548(%[s0]) \n\t"
+ "lwc1 %[temp12], 3840(%[v0]) \n\t"
+ "lwc1 %[temp13], 1792(%[s0]) \n\t"
+ "lwc1 %[temp14], 3844(%[v0]) \n\t"
+ "lwc1 %[temp15], 1796(%[s0]) \n\t"
+ "lwc1 %[temp16], 3848(%[v0]) \n\t"
+ "lwc1 %[temp17], 1800(%[s0]) \n\t"
+ "lwc1 %[temp18], 3852(%[v0]) \n\t"
+ "lwc1 %[temp19], 1804(%[s0]) \n\t"
+ "madd.s %[temp0], %[temp0], %[temp4], %[temp5] \n\t"
+ "lwc1 %[temp4], 4096(%[v0]) \n\t"
+ "madd.s %[temp1], %[temp1], %[temp6], %[temp7] \n\t"
+ "lwc1 %[temp5], 2048(%[s0]) \n\t"
+ "madd.s %[temp2], %[temp2], %[temp8], %[temp9] \n\t"
+ "lwc1 %[temp6], 4100(%[v0]) \n\t"
+ "madd.s %[temp3], %[temp3], %[temp10], %[temp11] \n\t"
+ "lwc1 %[temp7], 2052(%[s0]) \n\t"
+ "madd.s %[temp0], %[temp0], %[temp12], %[temp13] \n\t"
+ "lwc1 %[temp8], 4104(%[v0]) \n\t"
+ "addiu %[dst], %[dst], 16 \n\t"
+ "madd.s %[temp1], %[temp1], %[temp14], %[temp15] \n\t"
+ "lwc1 %[temp9], 2056(%[s0]) \n\t"
+ "addiu %[s0], %[s0], 16 \n\t"
+ "madd.s %[temp2], %[temp2], %[temp16], %[temp17] \n\t"
+ "lwc1 %[temp10], 4108(%[v0]) \n\t"
+ "addiu %[v0], %[v0], 16 \n\t"
+ "madd.s %[temp3], %[temp3], %[temp18], %[temp19] \n\t"
+ "lwc1 %[temp11], 2044(%[s0]) \n\t"
+ "lwc1 %[temp12], 4848(%[v0]) \n\t"
+ "lwc1 %[temp13], 2288(%[s0]) \n\t"
+ "lwc1 %[temp14], 4852(%[v0]) \n\t"
+ "lwc1 %[temp15], 2292(%[s0]) \n\t"
+ "lwc1 %[temp16], 4856(%[v0]) \n\t"
+ "lwc1 %[temp17], 2296(%[s0]) \n\t"
+ "lwc1 %[temp18], 4860(%[v0]) \n\t"
+ "lwc1 %[temp19], 2300(%[s0]) \n\t"
+ "madd.s %[temp0], %[temp0], %[temp4], %[temp5] \n\t"
+ "lwc1 %[temp4], 0(%[v0]) \n\t"
+ "madd.s %[temp1], %[temp1], %[temp6], %[temp7] \n\t"
+ "lwc1 %[temp5], 0(%[s0]) \n\t"
+ "madd.s %[temp2], %[temp2], %[temp8], %[temp9] \n\t"
+ "lwc1 %[temp6], 4(%[v0]) \n\t"
+ "madd.s %[temp3], %[temp3], %[temp10], %[temp11] \n\t"
+ "lwc1 %[temp7], 4(%[s0]) \n\t"
+ "madd.s %[temp0], %[temp0], %[temp12], %[temp13] \n\t"
+ "lwc1 %[temp8], 8(%[v0]) \n\t"
+ "madd.s %[temp1], %[temp1], %[temp14], %[temp15] \n\t"
+ "lwc1 %[temp9], 8(%[s0]) \n\t"
+ "madd.s %[temp2], %[temp2], %[temp16], %[temp17] \n\t"
+ "lwc1 %[temp10], 12(%[v0]) \n\t"
+ "madd.s %[temp3], %[temp3], %[temp18], %[temp19] \n\t"
+ "lwc1 %[temp11], 12(%[s0]) \n\t"
+ "lwc1 %[temp12], 768(%[v0]) \n\t"
+ "lwc1 %[temp13], 256(%[s0]) \n\t"
+ "lwc1 %[temp14], 772(%[v0]) \n\t"
+ "lwc1 %[temp15], 260(%[s0]) \n\t"
+ "lwc1 %[temp16], 776(%[v0]) \n\t"
+ "lwc1 %[temp17], 264(%[s0]) \n\t"
+ "lwc1 %[temp18], 780(%[v0]) \n\t"
+ "lwc1 %[temp19], 268(%[s0]) \n\t"
+ "swc1 %[temp0], -16(%[dst]) \n\t"
+ "swc1 %[temp1], -12(%[dst]) \n\t"
+ "swc1 %[temp2], -8(%[dst]) \n\t"
+ "bne %[v0], %[v0_end], 1b \n\t"
+ " swc1 %[temp3], -4(%[dst]) \n\t"
+ "mul.s %[temp0], %[temp4], %[temp5] \n\t"
+ "lwc1 %[temp4], 1024(%[v0]) \n\t"
+ "mul.s %[temp1], %[temp6], %[temp7] \n\t"
+ "lwc1 %[temp5], 512(%[s0]) \n\t"
+ "mul.s %[temp2], %[temp8], %[temp9] \n\t"
+ "lwc1 %[temp6], 1028(%[v0]) \n\t"
+ "mul.s %[temp3], %[temp10], %[temp11] \n\t"
+ "lwc1 %[temp7], 516(%[s0]) \n\t"
+ "madd.s %[temp0], %[temp0], %[temp12], %[temp13] \n\t"
+ "lwc1 %[temp8], 1032(%[v0]) \n\t"
+ "madd.s %[temp1], %[temp1], %[temp14], %[temp15] \n\t"
+ "lwc1 %[temp9], 520(%[s0]) \n\t"
+ "madd.s %[temp2], %[temp2], %[temp16], %[temp17] \n\t"
+ "lwc1 %[temp10], 1036(%[v0]) \n\t"
+ "madd.s %[temp3], %[temp3], %[temp18], %[temp19] \n\t"
+ "lwc1 %[temp11], 524(%[s0]) \n\t"
+ "lwc1 %[temp12], 1792(%[v0]) \n\t"
+ "lwc1 %[temp13], 768(%[s0]) \n\t"
+ "lwc1 %[temp14], 1796(%[v0]) \n\t"
+ "lwc1 %[temp15], 772(%[s0]) \n\t"
+ "lwc1 %[temp16], 1800(%[v0]) \n\t"
+ "lwc1 %[temp17], 776(%[s0]) \n\t"
+ "lwc1 %[temp18], 1804(%[v0]) \n\t"
+ "lwc1 %[temp19], 780(%[s0]) \n\t"
+ "madd.s %[temp0], %[temp0], %[temp4], %[temp5] \n\t"
+ "lwc1 %[temp4], 2048(%[v0]) \n\t"
+ "madd.s %[temp1], %[temp1], %[temp6], %[temp7] \n\t"
+ "lwc1 %[temp5], 1024(%[s0]) \n\t"
+ "madd.s %[temp2], %[temp2], %[temp8], %[temp9] \n\t"
+ "lwc1 %[temp6], 2052(%[v0]) \n\t"
+ "madd.s %[temp3], %[temp3], %[temp10], %[temp11] \n\t"
+ "lwc1 %[temp7], 1028(%[s0]) \n\t"
+ "madd.s %[temp0], %[temp0], %[temp12], %[temp13] \n\t"
+ "lwc1 %[temp8], 2056(%[v0]) \n\t"
+ "madd.s %[temp1], %[temp1], %[temp14], %[temp15] \n\t"
+ "lwc1 %[temp9], 1032(%[s0]) \n\t"
+ "madd.s %[temp2], %[temp2], %[temp16], %[temp17] \n\t"
+ "lwc1 %[temp10], 2060(%[v0]) \n\t"
+ "madd.s %[temp3], %[temp3], %[temp18], %[temp19] \n\t"
+ "lwc1 %[temp11], 1036(%[s0]) \n\t"
+ "lwc1 %[temp12], 2816(%[v0]) \n\t"
+ "lwc1 %[temp13], 1280(%[s0]) \n\t"
+ "lwc1 %[temp14], 2820(%[v0]) \n\t"
+ "lwc1 %[temp15], 1284(%[s0]) \n\t"
+ "lwc1 %[temp16], 2824(%[v0]) \n\t"
+ "lwc1 %[temp17], 1288(%[s0]) \n\t"
+ "lwc1 %[temp18], 2828(%[v0]) \n\t"
+ "lwc1 %[temp19], 1292(%[s0]) \n\t"
+ "madd.s %[temp0], %[temp0], %[temp4], %[temp5] \n\t"
+ "lwc1 %[temp4], 3072(%[v0]) \n\t"
+ "madd.s %[temp1], %[temp1], %[temp6], %[temp7] \n\t"
+ "lwc1 %[temp5], 1536(%[s0]) \n\t"
+ "madd.s %[temp2], %[temp2], %[temp8], %[temp9] \n\t"
+ "lwc1 %[temp6], 3076(%[v0]) \n\t"
+ "madd.s %[temp3], %[temp3], %[temp10], %[temp11] \n\t"
+ "lwc1 %[temp7], 1540(%[s0]) \n\t"
+ "madd.s %[temp0], %[temp0], %[temp12], %[temp13] \n\t"
+ "lwc1 %[temp8], 3080(%[v0]) \n\t"
+ "madd.s %[temp1], %[temp1], %[temp14], %[temp15] \n\t"
+ "lwc1 %[temp9], 1544(%[s0]) \n\t"
+ "madd.s %[temp2], %[temp2], %[temp16], %[temp17] \n\t"
+ "lwc1 %[temp10], 3084(%[v0]) \n\t"
+ "madd.s %[temp3], %[temp3], %[temp18], %[temp19] \n\t"
+ "lwc1 %[temp11], 1548(%[s0]) \n\t"
+ "lwc1 %[temp12], 3840(%[v0]) \n\t"
+ "lwc1 %[temp13], 1792(%[s0]) \n\t"
+ "lwc1 %[temp14], 3844(%[v0]) \n\t"
+ "lwc1 %[temp15], 1796(%[s0]) \n\t"
+ "lwc1 %[temp16], 3848(%[v0]) \n\t"
+ "lwc1 %[temp17], 1800(%[s0]) \n\t"
+ "lwc1 %[temp18], 3852(%[v0]) \n\t"
+ "lwc1 %[temp19], 1804(%[s0]) \n\t"
+ "madd.s %[temp0], %[temp0], %[temp4], %[temp5] \n\t"
+ "lwc1 %[temp4], 4096(%[v0]) \n\t"
+ "madd.s %[temp1], %[temp1], %[temp6], %[temp7] \n\t"
+ "lwc1 %[temp5], 2048(%[s0]) \n\t"
+ "madd.s %[temp2], %[temp2], %[temp8], %[temp9] \n\t"
+ "lwc1 %[temp6], 4100(%[v0]) \n\t"
+ "madd.s %[temp3], %[temp3], %[temp10], %[temp11] \n\t"
+ "lwc1 %[temp7], 2052(%[s0]) \n\t"
+ "madd.s %[temp0], %[temp0], %[temp12], %[temp13] \n\t"
+ "lwc1 %[temp8], 4104(%[v0]) \n\t"
+ "madd.s %[temp1], %[temp1], %[temp14], %[temp15] \n\t"
+ "lwc1 %[temp9], 2056(%[s0]) \n\t"
+ "madd.s %[temp2], %[temp2], %[temp16], %[temp17] \n\t"
+ "lwc1 %[temp10], 4108(%[v0]) \n\t"
+ "madd.s %[temp3], %[temp3], %[temp18], %[temp19] \n\t"
+ "lwc1 %[temp11], 2060(%[s0]) \n\t"
+ "lwc1 %[temp12], 4864(%[v0]) \n\t"
+ "lwc1 %[temp13], 2304(%[s0]) \n\t"
+ "lwc1 %[temp14], 4868(%[v0]) \n\t"
+ "lwc1 %[temp15], 2308(%[s0]) \n\t"
+ "madd.s %[temp0], %[temp0], %[temp4], %[temp5] \n\t"
+ "lwc1 %[temp16], 4872(%[v0]) \n\t"
+ "madd.s %[temp1], %[temp1], %[temp6], %[temp7] \n\t"
+ "lwc1 %[temp17], 2312(%[s0]) \n\t"
+ "madd.s %[temp2], %[temp2], %[temp8], %[temp9] \n\t"
+ "lwc1 %[temp18], 4876(%[v0]) \n\t"
+ "madd.s %[temp3], %[temp3], %[temp10], %[temp11] \n\t"
+ "lwc1 %[temp19], 2316(%[s0]) \n\t"
+ "madd.s %[temp0], %[temp0], %[temp12], %[temp13] \n\t"
+ "addiu %[dst], %[dst], 16 \n\t"
+ "madd.s %[temp1], %[temp1], %[temp14], %[temp15] \n\t"
+ "madd.s %[temp2], %[temp2], %[temp16], %[temp17] \n\t"
+ "madd.s %[temp3], %[temp3], %[temp18], %[temp19] \n\t"
+ "swc1 %[temp0], -16(%[dst]) \n\t"
+ "swc1 %[temp1], -12(%[dst]) \n\t"
+ "swc1 %[temp2], -8(%[dst]) \n\t"
+ "swc1 %[temp3], -4(%[dst]) \n\t"
+ ".set pop \n\t"
+
+ : [dst]"+r"(dst), [v0]"+r"(vv0), [s0]"+r"(s0),
+ [temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
+ [temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
+ [temp6]"=&f"(temp6), [temp7]"=&f"(temp7), [temp8]"=&f"(temp8),
+ [temp9]"=&f"(temp9), [temp10]"=&f"(temp10), [temp11]"=&f"(temp11),
+ [temp12]"=&f"(temp12), [temp13]"=&f"(temp13), [temp14]"=&f"(temp14),
+ [temp15]"=&f"(temp15), [temp16]"=&f"(temp16), [temp17]"=&f"(temp17),
+ [temp18]"=&f"(temp18), [temp19]"=&f"(temp19)
+ : [v0_end]"r"(v0_end)
+ : "memory"
+ );
+ }
+ else
+ {
+ fdsp->vector_fmul (out, v , sbr_qmf_window , 64 >> div);
+ fdsp->vector_fmul_add(out, v + ( 192 >> div), sbr_qmf_window + ( 64 >> div), out , 64 >> div);
+ fdsp->vector_fmul_add(out, v + ( 256 >> div), sbr_qmf_window + (128 >> div), out , 64 >> div);
+ fdsp->vector_fmul_add(out, v + ( 448 >> div), sbr_qmf_window + (192 >> div), out , 64 >> div);
+ fdsp->vector_fmul_add(out, v + ( 512 >> div), sbr_qmf_window + (256 >> div), out , 64 >> div);
+ fdsp->vector_fmul_add(out, v + ( 704 >> div), sbr_qmf_window + (320 >> div), out , 64 >> div);
+ fdsp->vector_fmul_add(out, v + ( 768 >> div), sbr_qmf_window + (384 >> div), out , 64 >> div);
+ fdsp->vector_fmul_add(out, v + ( 960 >> div), sbr_qmf_window + (448 >> div), out , 64 >> div);
+ fdsp->vector_fmul_add(out, v + (1024 >> div), sbr_qmf_window + (512 >> div), out , 64 >> div);
+ fdsp->vector_fmul_add(out, v + (1216 >> div), sbr_qmf_window + (576 >> div), out , 64 >> div);
+ out += 64 >> div;
+ }
+ }
+}
+
+#define sbr_qmf_analysis sbr_qmf_analysis_mips
+#define sbr_qmf_synthesis sbr_qmf_synthesis_mips
+
+#endif /* HAVE_MIPSFPU */
+#endif /* HAVE_INLINE_ASM */
+
+#endif /* AVCODEC_MIPS_AACSBR_FLOAT_H */
diff --git a/libavcodec/mips/ac3dsp_mips.c b/libavcodec/mips/ac3dsp_mips.c
new file mode 100644
index 0000000000..f33c6f1809
--- /dev/null
+++ b/libavcodec/mips/ac3dsp_mips.c
@@ -0,0 +1,412 @@
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Authors: Branimir Vasic (bvasic@mips.com)
+ * Nedeljko Babic (nbabic@mips.com)
+ *
+ * Various AC-3 DSP Utils optimized for MIPS
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Reference: libavcodec/ac3dsp.c
+ */
+
+#include "config.h"
+#include "libavcodec/ac3dsp.h"
+#include "libavcodec/ac3.h"
+
+#if HAVE_INLINE_ASM
+#if HAVE_MIPSDSPR1
+static void ac3_bit_alloc_calc_bap_mips(int16_t *mask, int16_t *psd,
+ int start, int end,
+ int snr_offset, int floor,
+ const uint8_t *bap_tab, uint8_t *bap)
+{
+ int band, band_end, cond;
+ int m, address1, address2;
+ int16_t *psd1, *psd_end;
+ uint8_t *bap1;
+
+ if (snr_offset == -960) {
+ memset(bap, 0, AC3_MAX_COEFS);
+ return;
+ }
+
+ psd1 = &psd[start];
+ bap1 = &bap[start];
+ band = ff_ac3_bin_to_band_tab[start];
+
+ do {
+ m = (FFMAX(mask[band] - snr_offset - floor, 0) & 0x1FE0) + floor;
+ band_end = ff_ac3_band_start_tab[++band];
+ band_end = FFMIN(band_end, end);
+ psd_end = psd + band_end - 1;
+
+ __asm__ volatile (
+ "slt %[cond], %[psd1], %[psd_end] \n\t"
+ "beqz %[cond], 1f \n\t"
+ "2: \n\t"
+ "lh %[address1], 0(%[psd1]) \n\t"
+ "lh %[address2], 2(%[psd1]) \n\t"
+ "addiu %[psd1], %[psd1], 4 \n\t"
+ "subu %[address1], %[address1], %[m] \n\t"
+ "sra %[address1], %[address1], 5 \n\t"
+ "addiu %[address1], %[address1], -32 \n\t"
+ "shll_s.w %[address1], %[address1], 26 \n\t"
+ "subu %[address2], %[address2], %[m] \n\t"
+ "sra %[address2], %[address2], 5 \n\t"
+ "sra %[address1], %[address1], 26 \n\t"
+ "addiu %[address1], %[address1], 32 \n\t"
+ "lbux %[address1], %[address1](%[bap_tab]) \n\t"
+ "addiu %[address2], %[address2], -32 \n\t"
+ "shll_s.w %[address2], %[address2], 26 \n\t"
+ "sb %[address1], 0(%[bap1]) \n\t"
+ "slt %[cond], %[psd1], %[psd_end] \n\t"
+ "sra %[address2], %[address2], 26 \n\t"
+ "addiu %[address2], %[address2], 32 \n\t"
+ "lbux %[address2], %[address2](%[bap_tab]) \n\t"
+ "sb %[address2], 1(%[bap1]) \n\t"
+ "addiu %[bap1], %[bap1], 2 \n\t"
+ "bnez %[cond], 2b \n\t"
+ "addiu %[psd_end], %[psd_end], 2 \n\t"
+ "slt %[cond], %[psd1], %[psd_end] \n\t"
+ "beqz %[cond], 3f \n\t"
+ "1: \n\t"
+ "lh %[address1], 0(%[psd1]) \n\t"
+ "addiu %[psd1], %[psd1], 2 \n\t"
+ "subu %[address1], %[address1], %[m] \n\t"
+ "sra %[address1], %[address1], 5 \n\t"
+ "addiu %[address1], %[address1], -32 \n\t"
+ "shll_s.w %[address1], %[address1], 26 \n\t"
+ "sra %[address1], %[address1], 26 \n\t"
+ "addiu %[address1], %[address1], 32 \n\t"
+ "lbux %[address1], %[address1](%[bap_tab]) \n\t"
+ "sb %[address1], 0(%[bap1]) \n\t"
+ "addiu %[bap1], %[bap1], 1 \n\t"
+ "3: \n\t"
+
+ : [address1]"=&r"(address1), [address2]"=&r"(address2),
+ [cond]"=&r"(cond), [bap1]"+r"(bap1),
+ [psd1]"+r"(psd1), [psd_end]"+r"(psd_end)
+ : [m]"r"(m), [bap_tab]"r"(bap_tab)
+ : "memory"
+ );
+ } while (end > band_end);
+}
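The saturating shift pair above (shll_s.w by 26, then sra by 26) clamps (psd - m) >> 5 to the signed 6-bit range before the +32 bias, i.e. a clip to [0, 63]. A scalar C sketch of the inner loop over one band, in the spirit of the generic ac3dsp.c code this file cites (av_clip() is the standard libavutil helper):

    /* one band: clip the shifted PSD excess to [0, 63], then table lookup */
    for (; psd1 < psd + band_end; psd1++, bap1++) {
        int address = av_clip((*psd1 - m) >> 5, 0, 63);
        *bap1 = bap_tab[address];
    }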
+
+static void ac3_update_bap_counts_mips(uint16_t mant_cnt[16], uint8_t *bap,
+ int len)
+{
+ int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
+
+ __asm__ volatile (
+ "andi %[temp3], %[len], 3 \n\t"
+ "addu %[temp2], %[bap], %[len] \n\t"
+ "addu %[temp4], %[bap], %[temp3] \n\t"
+ "beq %[temp2], %[temp4], 4f \n\t"
+ "1: \n\t"
+ "lbu %[temp0], -1(%[temp2]) \n\t"
+ "lbu %[temp5], -2(%[temp2]) \n\t"
+ "lbu %[temp6], -3(%[temp2]) \n\t"
+ "sll %[temp0], %[temp0], 1 \n\t"
+ "addu %[temp0], %[mant_cnt], %[temp0] \n\t"
+ "sll %[temp5], %[temp5], 1 \n\t"
+ "addu %[temp5], %[mant_cnt], %[temp5] \n\t"
+ "lhu %[temp1], 0(%[temp0]) \n\t"
+ "sll %[temp6], %[temp6], 1 \n\t"
+ "addu %[temp6], %[mant_cnt], %[temp6] \n\t"
+ "addiu %[temp1], %[temp1], 1 \n\t"
+ "sh %[temp1], 0(%[temp0]) \n\t"
+ "lhu %[temp1], 0(%[temp5]) \n\t"
+ "lbu %[temp7], -4(%[temp2]) \n\t"
+ "addiu %[temp2], %[temp2], -4 \n\t"
+ "addiu %[temp1], %[temp1], 1 \n\t"
+ "sh %[temp1], 0(%[temp5]) \n\t"
+ "lhu %[temp1], 0(%[temp6]) \n\t"
+ "sll %[temp7], %[temp7], 1 \n\t"
+ "addu %[temp7], %[mant_cnt], %[temp7] \n\t"
+ "addiu %[temp1], %[temp1],1 \n\t"
+ "sh %[temp1], 0(%[temp6]) \n\t"
+ "lhu %[temp1], 0(%[temp7]) \n\t"
+ "addiu %[temp1], %[temp1], 1 \n\t"
+ "sh %[temp1], 0(%[temp7]) \n\t"
+ "bne %[temp2], %[temp4], 1b \n\t"
+ "4: \n\t"
+ "beqz %[temp3], 2f \n\t"
+ "3: \n\t"
+ "addiu %[temp3], %[temp3], -1 \n\t"
+ "lbu %[temp0], -1(%[temp2]) \n\t"
+ "addiu %[temp2], %[temp2], -1 \n\t"
+ "sll %[temp0], %[temp0], 1 \n\t"
+ "addu %[temp0], %[mant_cnt], %[temp0] \n\t"
+ "lhu %[temp1], 0(%[temp0]) \n\t"
+ "addiu %[temp1], %[temp1], 1 \n\t"
+ "sh %[temp1], 0(%[temp0]) \n\t"
+ "bgtz %[temp3], 3b \n\t"
+ "2: \n\t"
+
+ : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
+ [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
+ [temp4] "=&r" (temp4), [temp5] "=&r" (temp5),
+ [temp6] "=&r" (temp6), [temp7] "=&r" (temp7)
+ : [len] "r" (len), [bap] "r" (bap),
+ [mant_cnt] "r" (mant_cnt)
+ : "memory"
+ );
+}
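The assembly above updates four bap counters per iteration plus a one-at-a-time tail; functionally it is the plain histogram update of the generic code:

    /* scalar equivalent: count how many mantissas use each bap value */
    while (len-- > 0)
        mant_cnt[bap[len]]++;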
+#endif
+
+#if HAVE_MIPSFPU && HAVE_MIPS32R2
+static void float_to_fixed24_mips(int32_t *dst, const float *src, unsigned int len)
+{
+ const float scale = 1 << 24;
+ float src0, src1, src2, src3, src4, src5, src6, src7;
+ int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
+
+ do {
+ __asm__ volatile (
+ "lwc1 %[src0], 0(%[src]) \n\t"
+ "lwc1 %[src1], 4(%[src]) \n\t"
+ "lwc1 %[src2], 8(%[src]) \n\t"
+ "lwc1 %[src3], 12(%[src]) \n\t"
+ "lwc1 %[src4], 16(%[src]) \n\t"
+ "lwc1 %[src5], 20(%[src]) \n\t"
+ "lwc1 %[src6], 24(%[src]) \n\t"
+ "lwc1 %[src7], 28(%[src]) \n\t"
+ "mul.s %[src0], %[src0], %[scale] \n\t"
+ "mul.s %[src1], %[src1], %[scale] \n\t"
+ "mul.s %[src2], %[src2], %[scale] \n\t"
+ "mul.s %[src3], %[src3], %[scale] \n\t"
+ "mul.s %[src4], %[src4], %[scale] \n\t"
+ "mul.s %[src5], %[src5], %[scale] \n\t"
+ "mul.s %[src6], %[src6], %[scale] \n\t"
+ "mul.s %[src7], %[src7], %[scale] \n\t"
+ "cvt.w.s %[src0], %[src0] \n\t"
+ "cvt.w.s %[src1], %[src1] \n\t"
+ "cvt.w.s %[src2], %[src2] \n\t"
+ "cvt.w.s %[src3], %[src3] \n\t"
+ "cvt.w.s %[src4], %[src4] \n\t"
+ "cvt.w.s %[src5], %[src5] \n\t"
+ "cvt.w.s %[src6], %[src6] \n\t"
+ "cvt.w.s %[src7], %[src7] \n\t"
+ "mfc1 %[temp0], %[src0] \n\t"
+ "mfc1 %[temp1], %[src1] \n\t"
+ "mfc1 %[temp2], %[src2] \n\t"
+ "mfc1 %[temp3], %[src3] \n\t"
+ "mfc1 %[temp4], %[src4] \n\t"
+ "mfc1 %[temp5], %[src5] \n\t"
+ "mfc1 %[temp6], %[src6] \n\t"
+ "mfc1 %[temp7], %[src7] \n\t"
+ "sw %[temp0], 0(%[dst]) \n\t"
+ "sw %[temp1], 4(%[dst]) \n\t"
+ "sw %[temp2], 8(%[dst]) \n\t"
+ "sw %[temp3], 12(%[dst]) \n\t"
+ "sw %[temp4], 16(%[dst]) \n\t"
+ "sw %[temp5], 20(%[dst]) \n\t"
+ "sw %[temp6], 24(%[dst]) \n\t"
+ "sw %[temp7], 28(%[dst]) \n\t"
+
+ : [dst] "+r" (dst), [src] "+r" (src),
+ [src0] "=&f" (src0), [src1] "=&f" (src1),
+ [src2] "=&f" (src2), [src3] "=&f" (src3),
+ [src4] "=&f" (src4), [src5] "=&f" (src5),
+ [src6] "=&f" (src6), [src7] "=&f" (src7),
+ [temp0] "=r" (temp0), [temp1] "=r" (temp1),
+ [temp2] "=r" (temp2), [temp3] "=r" (temp3),
+ [temp4] "=r" (temp4), [temp5] "=r" (temp5),
+ [temp6] "=r" (temp6), [temp7] "=r" (temp7)
+ : [scale] "f" (scale)
+ : "memory"
+ );
+ src = src + 8;
+ dst = dst + 8;
+ len -= 8;
+ } while (len > 0);
+}
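Each pass converts eight floats, and cvt.w.s rounds with the FPU's current rounding mode (round-to-nearest by default), so a scalar sketch of the loop is (note the asm assumes len is a nonzero multiple of 8):

    /* scale by 2^24 and round to nearest; the asm does eight at a time */
    const float scale = 1 << 24;
    do {
        *dst++ = lrintf(*src++ * scale);
    } while (--len);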
+
+static void ac3_downmix_mips(float **samples, float (*matrix)[2],
+ int out_ch, int in_ch, int len)
+{
+ int i, j, i1, i2, i3;
+ float v0, v1, v2, v3;
+ float v4, v5, v6, v7;
+ float samples0, samples1, samples2, samples3, matrix_j, matrix_j2;
+ float *samples_p, *matrix_p, **samples_x, **samples_end, **samples_sw;
+
+ __asm__ volatile(
+ ".set push \n\t"
+ ".set noreorder \n\t"
+
+ "li %[i1], 2 \n\t"
+ "sll %[len], 2 \n\t"
+ "move %[i], $zero \n\t"
+ "sll %[j], %[in_ch], 2 \n\t"
+
+ "bne %[out_ch], %[i1], 3f \n\t" // if (out_ch == 2)
+ " li %[i2], 1 \n\t"
+
+ "2: \n\t" // start of the for loop (for (i = 0; i < len; i+=4))
+ "move %[matrix_p], %[matrix] \n\t"
+ "move %[samples_x], %[samples] \n\t"
+ "mtc1 $zero, %[v0] \n\t"
+ "mtc1 $zero, %[v1] \n\t"
+ "mtc1 $zero, %[v2] \n\t"
+ "mtc1 $zero, %[v3] \n\t"
+ "mtc1 $zero, %[v4] \n\t"
+ "mtc1 $zero, %[v5] \n\t"
+ "mtc1 $zero, %[v6] \n\t"
+ "mtc1 $zero, %[v7] \n\t"
+ "addiu %[i1], %[i], 4 \n\t"
+ "addiu %[i2], %[i], 8 \n\t"
+ "lw %[samples_p], 0(%[samples_x]) \n\t"
+ "addiu %[i3], %[i], 12 \n\t"
+ "addu %[samples_end], %[samples_x], %[j] \n\t"
+ "move %[samples_sw], %[samples_p] \n\t"
+
+ "1: \n\t" // start of the inner for loop (for (j = 0; j < in_ch; j++))
+ "lwc1 %[matrix_j], 0(%[matrix_p]) \n\t"
+ "lwc1 %[matrix_j2], 4(%[matrix_p]) \n\t"
+ "lwxc1 %[samples0], %[i](%[samples_p]) \n\t"
+ "lwxc1 %[samples1], %[i1](%[samples_p]) \n\t"
+ "lwxc1 %[samples2], %[i2](%[samples_p]) \n\t"
+ "lwxc1 %[samples3], %[i3](%[samples_p]) \n\t"
+ "addiu %[matrix_p], 8 \n\t"
+ "addiu %[samples_x], 4 \n\t"
+ "madd.s %[v0], %[v0], %[samples0], %[matrix_j] \n\t"
+ "madd.s %[v1], %[v1], %[samples1], %[matrix_j] \n\t"
+ "madd.s %[v2], %[v2], %[samples2], %[matrix_j] \n\t"
+ "madd.s %[v3], %[v3], %[samples3], %[matrix_j] \n\t"
+ "madd.s %[v4], %[v4], %[samples0], %[matrix_j2]\n\t"
+ "madd.s %[v5], %[v5], %[samples1], %[matrix_j2]\n\t"
+ "madd.s %[v6], %[v6], %[samples2], %[matrix_j2]\n\t"
+ "madd.s %[v7], %[v7], %[samples3], %[matrix_j2]\n\t"
+ "bne %[samples_x], %[samples_end], 1b \n\t"
+ " lw %[samples_p], 0(%[samples_x]) \n\t"
+
+ "lw %[samples_p], 4(%[samples]) \n\t"
+ "swxc1 %[v0], %[i](%[samples_sw]) \n\t"
+ "swxc1 %[v1], %[i1](%[samples_sw]) \n\t"
+ "swxc1 %[v2], %[i2](%[samples_sw]) \n\t"
+ "swxc1 %[v3], %[i3](%[samples_sw]) \n\t"
+ "swxc1 %[v4], %[i](%[samples_p]) \n\t"
+ "addiu %[i], 16 \n\t"
+ "swxc1 %[v5], %[i1](%[samples_p]) \n\t"
+ "swxc1 %[v6], %[i2](%[samples_p]) \n\t"
+ "bne %[i], %[len], 2b \n\t"
+ " swxc1 %[v7], %[i3](%[samples_p]) \n\t"
+
+ "3: \n\t"
+ "bne %[out_ch], %[i2], 6f \n\t" // if (out_ch == 1)
+ " nop \n\t"
+
+ "5: \n\t" // start of the outer for loop (for (i = 0; i < len; i+=4))
+ "move %[matrix_p], %[matrix] \n\t"
+ "move %[samples_x], %[samples] \n\t"
+ "mtc1 $zero, %[v0] \n\t"
+ "mtc1 $zero, %[v1] \n\t"
+ "mtc1 $zero, %[v2] \n\t"
+ "mtc1 $zero, %[v3] \n\t"
+ "addiu %[i1], %[i], 4 \n\t"
+ "addiu %[i2], %[i], 8 \n\t"
+ "lw %[samples_p], 0(%[samples_x]) \n\t"
+ "addiu %[i3], %[i], 12 \n\t"
+ "addu %[samples_end], %[samples_x], %[j] \n\t"
+ "move %[samples_sw], %[samples_p] \n\t"
+
+ "4: \n\t" // start of the inner for loop (for (j = 0; j < in_ch; j++))
+ "lwc1 %[matrix_j], 0(%[matrix_p]) \n\t"
+ "lwxc1 %[samples0], %[i](%[samples_p]) \n\t"
+ "lwxc1 %[samples1], %[i1](%[samples_p]) \n\t"
+ "lwxc1 %[samples2], %[i2](%[samples_p]) \n\t"
+ "lwxc1 %[samples3], %[i3](%[samples_p]) \n\t"
+ "addiu %[matrix_p], 8 \n\t"
+ "addiu %[samples_x], 4 \n\t"
+ "madd.s %[v0], %[v0], %[samples0], %[matrix_j] \n\t"
+ "madd.s %[v1], %[v1], %[samples1], %[matrix_j] \n\t"
+ "madd.s %[v2], %[v2], %[samples2], %[matrix_j] \n\t"
+ "madd.s %[v3], %[v3], %[samples3], %[matrix_j] \n\t"
+ "bne %[samples_x], %[samples_end], 4b \n\t"
+ " lw %[samples_p], 0(%[samples_x]) \n\t"
+
+ "swxc1 %[v0], %[i](%[samples_sw]) \n\t"
+ "addiu %[i], 16 \n\t"
+ "swxc1 %[v1], %[i1](%[samples_sw]) \n\t"
+ "swxc1 %[v2], %[i2](%[samples_sw]) \n\t"
+ "bne %[i], %[len], 5b \n\t"
+ " swxc1 %[v3], %[i3](%[samples_sw]) \n\t"
+ "6: \n\t"
+
+ ".set pop"
+ :[samples_p]"=&r"(samples_p), [matrix_j]"=&f"(matrix_j), [matrix_j2]"=&f"(matrix_j2),
+ [samples0]"=&f"(samples0), [samples1]"=&f"(samples1),
+ [samples2]"=&f"(samples2), [samples3]"=&f"(samples3),
+ [v0]"=&f"(v0), [v1]"=&f"(v1), [v2]"=&f"(v2), [v3]"=&f"(v3),
+ [v4]"=&f"(v4), [v5]"=&f"(v5), [v6]"=&f"(v6), [v7]"=&f"(v7),
+ [samples_x]"=&r"(samples_x), [matrix_p]"=&r"(matrix_p),
+ [samples_end]"=&r"(samples_end), [samples_sw]"=&r"(samples_sw),
+ [i1]"=&r"(i1), [i2]"=&r"(i2), [i3]"=&r"(i3), [i]"=&r"(i),
+ [j]"=&r"(j), [len]"+r"(len)
+ :[samples]"r"(samples), [matrix]"r"(matrix),
+ [in_ch]"r"(in_ch), [out_ch]"r"(out_ch)
+ :"memory"
+ );
+}
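Both assembly paths (out_ch == 2 and out_ch == 1) compute four output samples per outer iteration; the underlying operation is the usual matrix downmix, roughly:

    /* each output channel is a weighted sum of all input channels */
    for (i = 0; i < len; i++) {
        float v0 = 0, v1 = 0;
        for (j = 0; j < in_ch; j++) {
            v0 += samples[j][i] * matrix[j][0];
            v1 += samples[j][i] * matrix[j][1];
        }
        samples[0][i] = v0;
        if (out_ch == 2)
            samples[1][i] = v1;
    }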
+#endif
+#endif /* HAVE_INLINE_ASM */
+
+void ff_ac3dsp_init_mips(AC3DSPContext *c, int bit_exact)
+{
+#if HAVE_INLINE_ASM
+#if HAVE_MIPSDSPR1
+ c->bit_alloc_calc_bap = ac3_bit_alloc_calc_bap_mips;
+ c->update_bap_counts = ac3_update_bap_counts_mips;
+#endif
+#if HAVE_MIPSFPU && HAVE_MIPS32R2
+ c->float_to_fixed24 = float_to_fixed24_mips;
+ c->downmix = ac3_downmix_mips;
+#endif
+#endif
+}
diff --git a/libavcodec/mips/acelp_filters_mips.c b/libavcodec/mips/acelp_filters_mips.c
new file mode 100644
index 0000000000..c8d980aa00
--- /dev/null
+++ b/libavcodec/mips/acelp_filters_mips.c
@@ -0,0 +1,216 @@
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Author: Nedeljko Babic (nbabic@mips.com)
+ *
+ * various filters for ACELP-based codecs optimized for MIPS
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Reference: libavcodec/acelp_filters.c
+ */
+#include "config.h"
+#include "libavutil/attributes.h"
+#include "libavcodec/acelp_filters.h"
+
+#if HAVE_INLINE_ASM
+static void ff_acelp_interpolatef_mips(float *out, const float *in,
+ const float *filter_coeffs, int precision,
+ int frac_pos, int filter_length, int length)
+{
+ int n, i;
+ int prec = precision * 4;
+ int fc_offset = precision - frac_pos;
+ float in_val_p, in_val_m, fc_val_p, fc_val_m;
+
+ for (n = 0; n < length; n++) {
+ /**
+ * Four pointers are defined to minimize the number of
+ * computations done in the inner loop.
+ */
+ const float *p_in_p = &in[n];
+ const float *p_in_m = &in[n-1];
+ const float *p_filter_coeffs_p = &filter_coeffs[frac_pos];
+ const float *p_filter_coeffs_m = filter_coeffs + fc_offset;
+ float v = 0;
+
+ for (i = 0; i < filter_length;i++) {
+ __asm__ volatile (
+ "lwc1 %[in_val_p], 0(%[p_in_p]) \n\t"
+ "lwc1 %[fc_val_p], 0(%[p_filter_coeffs_p]) \n\t"
+ "lwc1 %[in_val_m], 0(%[p_in_m]) \n\t"
+ "lwc1 %[fc_val_m], 0(%[p_filter_coeffs_m]) \n\t"
+ "addiu %[p_in_p], %[p_in_p], 4 \n\t"
+ "madd.s %[v],%[v], %[in_val_p],%[fc_val_p] \n\t"
+ "addiu %[p_in_m], %[p_in_m], -4 \n\t"
+ "addu %[p_filter_coeffs_p], %[p_filter_coeffs_p], %[prec] \n\t"
+ "addu %[p_filter_coeffs_m], %[p_filter_coeffs_m], %[prec] \n\t"
+ "madd.s %[v],%[v],%[in_val_m], %[fc_val_m] \n\t"
+
+ : [v] "=&f" (v),[p_in_p] "+r" (p_in_p), [p_in_m] "+r" (p_in_m),
+ [p_filter_coeffs_p] "+r" (p_filter_coeffs_p),
+ [in_val_p] "=&f" (in_val_p), [in_val_m] "=&f" (in_val_m),
+ [fc_val_p] "=&f" (fc_val_p), [fc_val_m] "=&f" (fc_val_m),
+ [p_filter_coeffs_m] "+r" (p_filter_coeffs_m)
+ : [prec] "r" (prec)
+ : "memory"
+ );
+ }
+ out[n] = v;
+ }
+}
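The p/m pointer pairs walk forward through in[n], in[n+1], ... and backward through in[n-1], in[n-2], ..., with both coefficient pointers advancing by precision floats per tap. A scalar sketch of the same interpolation:

    for (n = 0; n < length; n++) {
        float v = 0;
        for (i = 0; i < filter_length; i++) {
            v += in[n + i]     * filter_coeffs[i * precision + frac_pos];
            v += in[n - i - 1] * filter_coeffs[i * precision + precision - frac_pos];
        }
        out[n] = v;
    }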
+
+static void ff_acelp_apply_order_2_transfer_function_mips(float *out, const float *in,
+ const float zero_coeffs[2],
+ const float pole_coeffs[2],
+ float gain, float mem[2], int n)
+{
+ /**
+ * The loop is unrolled eight times (eight samples per iteration).
+ */
+
+ __asm__ volatile (
+ "lwc1 $f0, 0(%[mem]) \n\t"
+ "blez %[n], ff_acelp_apply_order_2_transfer_function_end%= \n\t"
+ "lwc1 $f1, 4(%[mem]) \n\t"
+ "lwc1 $f2, 0(%[pole_coeffs]) \n\t"
+ "lwc1 $f3, 4(%[pole_coeffs]) \n\t"
+ "lwc1 $f4, 0(%[zero_coeffs]) \n\t"
+ "lwc1 $f5, 4(%[zero_coeffs]) \n\t"
+
+ "ff_acelp_apply_order_2_transfer_function_madd%=: \n\t"
+
+ "lwc1 $f6, 0(%[in]) \n\t"
+ "mul.s $f9, $f3, $f1 \n\t"
+ "mul.s $f7, $f2, $f0 \n\t"
+ "msub.s $f7, $f7, %[gain], $f6 \n\t"
+ "sub.s $f7, $f7, $f9 \n\t"
+ "madd.s $f8, $f7, $f4, $f0 \n\t"
+ "madd.s $f8, $f8, $f5, $f1 \n\t"
+ "lwc1 $f11, 4(%[in]) \n\t"
+ "mul.s $f12, $f3, $f0 \n\t"
+ "mul.s $f13, $f2, $f7 \n\t"
+ "msub.s $f13, $f13, %[gain], $f11 \n\t"
+ "sub.s $f13, $f13, $f12 \n\t"
+ "madd.s $f14, $f13, $f4, $f7 \n\t"
+ "madd.s $f14, $f14, $f5, $f0 \n\t"
+ "swc1 $f8, 0(%[out]) \n\t"
+ "lwc1 $f6, 8(%[in]) \n\t"
+ "mul.s $f9, $f3, $f7 \n\t"
+ "mul.s $f15, $f2, $f13 \n\t"
+ "msub.s $f15, $f15, %[gain], $f6 \n\t"
+ "sub.s $f15, $f15, $f9 \n\t"
+ "madd.s $f8, $f15, $f4, $f13 \n\t"
+ "madd.s $f8, $f8, $f5, $f7 \n\t"
+ "swc1 $f14, 4(%[out]) \n\t"
+ "lwc1 $f11, 12(%[in]) \n\t"
+ "mul.s $f12, $f3, $f13 \n\t"
+ "mul.s $f16, $f2, $f15 \n\t"
+ "msub.s $f16, $f16, %[gain], $f11 \n\t"
+ "sub.s $f16, $f16, $f12 \n\t"
+ "madd.s $f14, $f16, $f4, $f15 \n\t"
+ "madd.s $f14, $f14, $f5, $f13 \n\t"
+ "swc1 $f8, 8(%[out]) \n\t"
+ "lwc1 $f6, 16(%[in]) \n\t"
+ "mul.s $f9, $f3, $f15 \n\t"
+ "mul.s $f7, $f2, $f16 \n\t"
+ "msub.s $f7, $f7, %[gain], $f6 \n\t"
+ "sub.s $f7, $f7, $f9 \n\t"
+ "madd.s $f8, $f7, $f4, $f16 \n\t"
+ "madd.s $f8, $f8, $f5, $f15 \n\t"
+ "swc1 $f14, 12(%[out]) \n\t"
+ "lwc1 $f11, 20(%[in]) \n\t"
+ "mul.s $f12, $f3, $f16 \n\t"
+ "mul.s $f13, $f2, $f7 \n\t"
+ "msub.s $f13, $f13, %[gain], $f11 \n\t"
+ "sub.s $f13, $f13, $f12 \n\t"
+ "madd.s $f14, $f13, $f4, $f7 \n\t"
+ "madd.s $f14, $f14, $f5, $f16 \n\t"
+ "swc1 $f8, 16(%[out]) \n\t"
+ "lwc1 $f6, 24(%[in]) \n\t"
+ "mul.s $f9, $f3, $f7 \n\t"
+ "mul.s $f15, $f2, $f13 \n\t"
+ "msub.s $f15, $f15, %[gain], $f6 \n\t"
+ "sub.s $f1, $f15, $f9 \n\t"
+ "madd.s $f8, $f1, $f4, $f13 \n\t"
+ "madd.s $f8, $f8, $f5, $f7 \n\t"
+ "swc1 $f14, 20(%[out]) \n\t"
+ "lwc1 $f11, 28(%[in]) \n\t"
+ "mul.s $f12, $f3, $f13 \n\t"
+ "mul.s $f16, $f2, $f1 \n\t"
+ "msub.s $f16, $f16, %[gain], $f11 \n\t"
+ "sub.s $f0, $f16, $f12 \n\t"
+ "madd.s $f14, $f0, $f4, $f1 \n\t"
+ "madd.s $f14, $f14, $f5, $f13 \n\t"
+ "swc1 $f8, 24(%[out]) \n\t"
+ "addiu %[out], 32 \n\t"
+ "addiu %[in], 32 \n\t"
+ "addiu %[n], -8 \n\t"
+ "swc1 $f14, -4(%[out]) \n\t"
+ "bnez %[n], ff_acelp_apply_order_2_transfer_function_madd%= \n\t"
+ "swc1 $f1, 4(%[mem]) \n\t"
+ "swc1 $f0, 0(%[mem]) \n\t"
+
+ "ff_acelp_apply_order_2_transfer_function_end%=: \n\t"
+
+ : [out] "+r" (out),
+ [in] "+r" (in), [gain] "+f" (gain),
+ [n] "+r" (n), [mem] "+r" (mem)
+ : [zero_coeffs] "r" (zero_coeffs),
+ [pole_coeffs] "r" (pole_coeffs)
+ : "$f0", "$f1", "$f2", "$f3", "$f4", "$f5",
+ "$f6", "$f7", "$f8", "$f9", "$f10", "$f11",
+ "$f12", "$f13", "$f14", "$f15", "$f16", "memory"
+ );
+}
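Eight samples are processed per pass, with the two-tap filter state rotating through FPU registers so mem[] is written back only once per block. Per sample, the recurrence being computed is:

    /* one step of the order-2 (biquad) transfer function */
    float tmp = gain * in[i] - pole_coeffs[0] * mem[0] - pole_coeffs[1] * mem[1];
    out[i]    = tmp + zero_coeffs[0] * mem[0] + zero_coeffs[1] * mem[1];
    mem[1]    = mem[0];
    mem[0]    = tmp;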
+#endif /* HAVE_INLINE_ASM */
+
+void ff_acelp_filter_init_mips(ACELPFContext *c)
+{
+#if HAVE_INLINE_ASM
+ c->acelp_interpolatef = ff_acelp_interpolatef_mips;
+ c->acelp_apply_order_2_transfer_function = ff_acelp_apply_order_2_transfer_function_mips;
+#endif
+}
diff --git a/libavcodec/mips/acelp_vectors_mips.c b/libavcodec/mips/acelp_vectors_mips.c
new file mode 100644
index 0000000000..8770df8576
--- /dev/null
+++ b/libavcodec/mips/acelp_vectors_mips.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Author: Nedeljko Babic (nbabic@mips.com)
+ *
+ * adaptive and fixed codebook vector operations for ACELP-based codecs
+ * optimized for MIPS
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Reference: libavcodec/acelp_vectors.c
+ */
+#include "config.h"
+#include "libavcodec/acelp_vectors.h"
+
+#if HAVE_INLINE_ASM
+static void ff_weighted_vector_sumf_mips(
+ float *out, const float *in_a, const float *in_b,
+ float weight_coeff_a, float weight_coeff_b, int length)
+{
+ const float *a_end = in_a + length;
+
+ /* the loop is unrolled twice (two elements per iteration) */
+ __asm__ volatile (
+ "blez %[length], ff_weighted_vector_sumf_end%= \n\t"
+
+ "ff_weighted_vector_sumf_madd%=: \n\t"
+ "lwc1 $f0, 0(%[in_a]) \n\t"
+ "lwc1 $f3, 4(%[in_a]) \n\t"
+ "lwc1 $f1, 0(%[in_b]) \n\t"
+ "lwc1 $f4, 4(%[in_b]) \n\t"
+ "mul.s $f2, %[weight_coeff_a], $f0 \n\t"
+ "mul.s $f5, %[weight_coeff_a], $f3 \n\t"
+ "madd.s $f2, $f2, %[weight_coeff_b], $f1 \n\t"
+ "madd.s $f5, $f5, %[weight_coeff_b], $f4 \n\t"
+ "addiu %[in_a], 8 \n\t"
+ "addiu %[in_b], 8 \n\t"
+ "swc1 $f2, 0(%[out]) \n\t"
+ "swc1 $f5, 4(%[out]) \n\t"
+ "addiu %[out], 8 \n\t"
+ "bne %[in_a], %[a_end], ff_weighted_vector_sumf_madd%= \n\t"
+
+ "ff_weighted_vector_sumf_end%=: \n\t"
+
+ : [out] "+r" (out), [in_a] "+r" (in_a), [in_b] "+r" (in_b)
+ : [weight_coeff_a] "f" (weight_coeff_a),
+ [weight_coeff_b] "f" (weight_coeff_b),
+ [length] "r" (length), [a_end]"r"(a_end)
+ : "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "memory"
+ );
+}
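Functionally this is the plain weighted sum, computed two elements per iteration (the asm assumes length is a positive multiple of 2):

    for (i = 0; i < length; i++)
        out[i] = weight_coeff_a * in_a[i] + weight_coeff_b * in_b[i];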
+#endif /* HAVE_INLINE_ASM */
+
+void ff_acelp_vectors_init_mips(ACELPVContext *c)
+{
+#if HAVE_INLINE_ASM
+ c->weighted_vector_sumf = ff_weighted_vector_sumf_mips;
+#endif
+}
diff --git a/libavcodec/mips/amrwbdec_mips.c b/libavcodec/mips/amrwbdec_mips.c
new file mode 100644
index 0000000000..1d6ed2dfca
--- /dev/null
+++ b/libavcodec/mips/amrwbdec_mips.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Author: Nedeljko Babic (nbabic@mips.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Reference: libavcodec/amrwbdec.c
+ */
+#include "libavutil/avutil.h"
+#include "libavcodec/amrwbdata.h"
+#include "amrwbdec_mips.h"
+
+#if HAVE_INLINE_ASM
+void hb_fir_filter_mips(float *out, const float fir_coef[HB_FIR_SIZE + 1],
+ float mem[HB_FIR_SIZE], const float *in)
+{
+ int i;
+ float data[AMRWB_SFR_SIZE_16k + HB_FIR_SIZE]; // past and current samples
+
+ memcpy(data, mem, HB_FIR_SIZE * sizeof(float));
+ memcpy(data + HB_FIR_SIZE, in, AMRWB_SFR_SIZE_16k * sizeof(float));
+
+ for (i = 0; i < AMRWB_SFR_SIZE_16k; i++) {
+ float output;
+ float * p_data = (data+i);
+
+ /**
+ * The inner loop is fully unrolled and instructions are scheduled
+ * to minimize pipeline stalls.
+ */
+ __asm__ volatile(
+ "mtc1 $zero, %[output] \n\t"
+ "lwc1 $f0, 0(%[p_data]) \n\t"
+ "lwc1 $f1, 0(%[fir_coef]) \n\t"
+ "lwc1 $f2, 4(%[p_data]) \n\t"
+ "madd.s %[output], %[output], $f0, $f1 \n\t"
+ "lwc1 $f3, 4(%[fir_coef]) \n\t"
+ "lwc1 $f4, 8(%[p_data]) \n\t"
+ "madd.s %[output], %[output], $f2, $f3 \n\t"
+ "lwc1 $f5, 8(%[fir_coef]) \n\t"
+
+ "lwc1 $f0, 12(%[p_data]) \n\t"
+ "lwc1 $f1, 12(%[fir_coef]) \n\t"
+ "madd.s %[output], %[output], $f4, $f5 \n\t"
+ "lwc1 $f2, 16(%[p_data]) \n\t"
+ "madd.s %[output], %[output], $f0, $f1 \n\t"
+ "lwc1 $f3, 16(%[fir_coef]) \n\t"
+ "lwc1 $f4, 20(%[p_data]) \n\t"
+ "lwc1 $f5, 20(%[fir_coef]) \n\t"
+ "madd.s %[output], %[output], $f2, $f3 \n\t"
+
+ "lwc1 $f0, 24(%[p_data]) \n\t"
+ "lwc1 $f1, 24(%[fir_coef]) \n\t"
+ "lwc1 $f2, 28(%[p_data]) \n\t"
+ "madd.s %[output], %[output], $f4, $f5 \n\t"
+ "lwc1 $f3, 28(%[fir_coef]) \n\t"
+ "madd.s %[output], %[output], $f0, $f1 \n\t"
+ "lwc1 $f4, 32(%[p_data]) \n\t"
+ "madd.s %[output], %[output], $f2, $f3 \n\t"
+ "lwc1 $f5, 32(%[fir_coef]) \n\t"
+ "madd.s %[output], %[output], $f4, $f5 \n\t"
+
+ "lwc1 $f0, 36(%[p_data]) \n\t"
+ "lwc1 $f1, 36(%[fir_coef]) \n\t"
+ "lwc1 $f2, 40(%[p_data]) \n\t"
+ "lwc1 $f3, 40(%[fir_coef]) \n\t"
+ "madd.s %[output], %[output], $f0, $f1 \n\t"
+ "lwc1 $f4, 44(%[p_data]) \n\t"
+ "lwc1 $f5, 44(%[fir_coef]) \n\t"
+ "madd.s %[output], %[output], $f2, $f3 \n\t"
+
+ "lwc1 $f0, 48(%[p_data]) \n\t"
+ "lwc1 $f1, 48(%[fir_coef]) \n\t"
+ "lwc1 $f2, 52(%[p_data]) \n\t"
+ "madd.s %[output], %[output], $f4, $f5 \n\t"
+ "lwc1 $f3, 52(%[fir_coef]) \n\t"
+ "lwc1 $f4, 56(%[p_data]) \n\t"
+ "madd.s %[output], %[output], $f0, $f1 \n\t"
+ "lwc1 $f5, 56(%[fir_coef]) \n\t"
+ "madd.s %[output], %[output], $f2, $f3 \n\t"
+
+ "lwc1 $f0, 60(%[p_data]) \n\t"
+ "lwc1 $f1, 60(%[fir_coef]) \n\t"
+ "lwc1 $f2, 64(%[p_data]) \n\t"
+ "madd.s %[output], %[output], $f4, $f5 \n\t"
+ "lwc1 $f3, 64(%[fir_coef]) \n\t"
+ "madd.s %[output], %[output], $f0, $f1 \n\t"
+ "lwc1 $f4, 68(%[p_data]) \n\t"
+ "madd.s %[output], %[output], $f2, $f3 \n\t"
+ "lwc1 $f5, 68(%[fir_coef]) \n\t"
+ "madd.s %[output], %[output], $f4, $f5 \n\t"
+
+ "lwc1 $f0, 72(%[p_data]) \n\t"
+ "lwc1 $f1, 72(%[fir_coef]) \n\t"
+ "lwc1 $f2, 76(%[p_data]) \n\t"
+ "lwc1 $f3, 76(%[fir_coef]) \n\t"
+ "madd.s %[output], %[output], $f0, $f1 \n\t"
+ "lwc1 $f4, 80(%[p_data]) \n\t"
+ "lwc1 $f5, 80(%[fir_coef]) \n\t"
+ "madd.s %[output], %[output], $f2, $f3 \n\t"
+
+ "lwc1 $f0, 84(%[p_data]) \n\t"
+ "lwc1 $f1, 84(%[fir_coef]) \n\t"
+ "lwc1 $f2, 88(%[p_data]) \n\t"
+ "madd.s %[output], %[output], $f4, $f5 \n\t"
+ "lwc1 $f3, 88(%[fir_coef]) \n\t"
+ "lwc1 $f4, 92(%[p_data]) \n\t"
+ "madd.s %[output], %[output], $f0, $f1 \n\t"
+ "lwc1 $f5, 92(%[fir_coef]) \n\t"
+ "madd.s %[output], %[output], $f2, $f3 \n\t"
+
+ "lwc1 $f0, 96(%[p_data]) \n\t"
+ "lwc1 $f1, 96(%[fir_coef]) \n\t"
+ "lwc1 $f2, 100(%[p_data]) \n\t"
+ "madd.s %[output], %[output], $f4, $f5 \n\t"
+ "lwc1 $f3, 100(%[fir_coef]) \n\t"
+ "lwc1 $f4, 104(%[p_data]) \n\t"
+ "madd.s %[output], %[output], $f0, $f1 \n\t"
+ "lwc1 $f5, 104(%[fir_coef]) \n\t"
+ "madd.s %[output], %[output], $f2, $f3 \n\t"
+
+ "lwc1 $f0, 108(%[p_data]) \n\t"
+ "lwc1 $f1, 108(%[fir_coef]) \n\t"
+ "madd.s %[output], %[output], $f4, $f5 \n\t"
+ "lwc1 $f2, 112(%[p_data]) \n\t"
+ "lwc1 $f3, 112(%[fir_coef]) \n\t"
+ "madd.s %[output], %[output], $f0, $f1 \n\t"
+ "lwc1 $f4, 116(%[p_data]) \n\t"
+ "lwc1 $f5, 116(%[fir_coef]) \n\t"
+ "lwc1 $f0, 120(%[p_data]) \n\t"
+ "madd.s %[output], %[output], $f2, $f3 \n\t"
+ "lwc1 $f1, 120(%[fir_coef]) \n\t"
+ "madd.s %[output], %[output], $f4, $f5 \n\t"
+ "madd.s %[output], %[output], $f0, $f1 \n\t"
+
+ : [output]"=&f"(output)
+ : [fir_coef]"r"(fir_coef), [p_data]"r"(p_data)
+ : "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "memory"
+ );
+ out[i] = output;
+ }
+ memcpy(mem, data + AMRWB_SFR_SIZE_16k, HB_FIR_SIZE * sizeof(float));
+}
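The unrolled block computes all HB_FIR_SIZE + 1 = 31 taps of the high-band FIR; the inner loop it replaces is simply:

    /* scalar sketch of the fully unrolled 31-tap FIR */
    float output = 0.0f;
    for (j = 0; j <= HB_FIR_SIZE; j++)
        output += p_data[j] * fir_coef[j];
    out[i] = output;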
+#endif /* HAVE_INLINE_ASM */
diff --git a/libavcodec/mips/amrwbdec_mips.h b/libavcodec/mips/amrwbdec_mips.h
new file mode 100644
index 0000000000..a469918d2c
--- /dev/null
+++ b/libavcodec/mips/amrwbdec_mips.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Author: Nedeljko Babic (nbabic@mips.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Reference: libavcodec/amrwbdec.c
+ */
+#ifndef AVCODEC_AMRWBDEC_MIPS_H
+#define AVCODEC_AMRWBDEC_MIPS_H
+#include "config.h"
+
+#if HAVE_MIPSFPU && HAVE_INLINE_ASM
+void hb_fir_filter_mips(float *out, const float fir_coef[],
+ float mem[], const float *in);
+#define hb_fir_filter hb_fir_filter_mips
+#endif
+
+#endif /* AVCODEC_AMRWBDEC_MIPS_H */
diff --git a/libavcodec/mips/celp_filters_mips.c b/libavcodec/mips/celp_filters_mips.c
new file mode 100644
index 0000000000..ef5b07be3f
--- /dev/null
+++ b/libavcodec/mips/celp_filters_mips.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Author: Nedeljko Babic (nbabic@mips.com)
+ *
+ * various filters for CELP-based codecs optimized for MIPS
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Reference: libavcodec/celp_filters.c
+ */
+#include "config.h"
+#include "libavutil/attributes.h"
+#include "libavutil/common.h"
+#include "libavcodec/celp_filters.h"
+
+#if HAVE_INLINE_ASM
+static void ff_celp_lp_synthesis_filterf_mips(float *out,
+ const float *filter_coeffs,
+ const float* in, int buffer_length,
+ int filter_length)
+{
+ int i, n;
+
+ float out0, out1, out2, out3;
+ float old_out0, old_out1, old_out2, old_out3;
+ float a, b, c;
+ const float *p_filter_coeffs;
+ float *p_out;
+
+ a = filter_coeffs[0];
+ b = filter_coeffs[1];
+ c = filter_coeffs[2];
+ b -= filter_coeffs[0] * filter_coeffs[0];
+ c -= filter_coeffs[1] * filter_coeffs[0];
+ c -= filter_coeffs[0] * b;
+
+ old_out0 = out[-4];
+ old_out1 = out[-3];
+ old_out2 = out[-2];
+ old_out3 = out[-1];
+ for (n = 0; n <= buffer_length - 4; n+=4) {
+ p_filter_coeffs = filter_coeffs;
+ p_out = out;
+
+ out0 = in[0];
+ out1 = in[1];
+ out2 = in[2];
+ out3 = in[3];
+
+ __asm__ volatile(
+ "lwc1 $f2, 8(%[filter_coeffs]) \n\t"
+ "lwc1 $f1, 4(%[filter_coeffs]) \n\t"
+ "lwc1 $f0, 0(%[filter_coeffs]) \n\t"
+ "nmsub.s %[out0], %[out0], $f2, %[old_out1] \n\t"
+ "nmsub.s %[out1], %[out1], $f2, %[old_out2] \n\t"
+ "nmsub.s %[out2], %[out2], $f2, %[old_out3] \n\t"
+ "lwc1 $f3, 12(%[filter_coeffs]) \n\t"
+ "nmsub.s %[out0], %[out0], $f1, %[old_out2] \n\t"
+ "nmsub.s %[out1], %[out1], $f1, %[old_out3] \n\t"
+ "nmsub.s %[out2], %[out2], $f3, %[old_out2] \n\t"
+ "nmsub.s %[out0], %[out0], $f0, %[old_out3] \n\t"
+ "nmsub.s %[out3], %[out3], $f3, %[old_out3] \n\t"
+ "nmsub.s %[out1], %[out1], $f3, %[old_out1] \n\t"
+ "nmsub.s %[out0], %[out0], $f3, %[old_out0] \n\t"
+
+ : [out0]"+f"(out0), [out1]"+f"(out1),
+ [out2]"+f"(out2), [out3]"+f"(out3)
+ : [old_out0]"f"(old_out0), [old_out1]"f"(old_out1),
+ [old_out2]"f"(old_out2), [old_out3]"f"(old_out3),
+ [filter_coeffs]"r"(filter_coeffs)
+ : "$f0", "$f1", "$f2", "$f3", "$f4", "memory"
+ );
+
+ for (i = 5; i <= filter_length; i += 2) {
+ __asm__ volatile(
+ "lwc1 %[old_out3], -20(%[p_out]) \n\t"
+ "lwc1 $f5, 16(%[p_filter_coeffs]) \n\t"
+ "addiu %[p_out], -8 \n\t"
+ "addiu %[p_filter_coeffs], 8 \n\t"
+ "nmsub.s %[out1], %[out1], $f5, %[old_out0] \n\t"
+ "nmsub.s %[out3], %[out3], $f5, %[old_out2] \n\t"
+ "lwc1 $f4, 12(%[p_filter_coeffs]) \n\t"
+ "lwc1 %[old_out2], -16(%[p_out]) \n\t"
+ "nmsub.s %[out0], %[out0], $f5, %[old_out3] \n\t"
+ "nmsub.s %[out2], %[out2], $f5, %[old_out1] \n\t"
+ "nmsub.s %[out1], %[out1], $f4, %[old_out3] \n\t"
+ "nmsub.s %[out3], %[out3], $f4, %[old_out1] \n\t"
+ "mov.s %[old_out1], %[old_out3] \n\t"
+ "nmsub.s %[out0], %[out0], $f4, %[old_out2] \n\t"
+ "nmsub.s %[out2], %[out2], $f4, %[old_out0] \n\t"
+
+ : [out0]"+f"(out0), [out1]"+f"(out1),
+ [out2]"+f"(out2), [out3]"+f"(out3), [old_out0]"+f"(old_out0),
+ [old_out1]"+f"(old_out1), [old_out2]"+f"(old_out2),
+ [old_out3]"+f"(old_out3),[p_filter_coeffs]"+r"(p_filter_coeffs),
+ [p_out]"+r"(p_out)
+ :
+ : "$f4", "$f5", "memory"
+ );
+ FFSWAP(float, old_out0, old_out2);
+ }
+
+ __asm__ volatile(
+ "nmsub.s %[out3], %[out3], %[a], %[out2] \n\t"
+ "nmsub.s %[out2], %[out2], %[a], %[out1] \n\t"
+ "nmsub.s %[out3], %[out3], %[b], %[out1] \n\t"
+ "nmsub.s %[out1], %[out1], %[a], %[out0] \n\t"
+ "nmsub.s %[out2], %[out2], %[b], %[out0] \n\t"
+ "nmsub.s %[out3], %[out3], %[c], %[out0] \n\t"
+
+ : [out0]"+f"(out0), [out1]"+f"(out1),
+ [out2]"+f"(out2), [out3]"+f"(out3)
+ : [a]"f"(a), [b]"f"(b), [c]"f"(c)
+ );
+
+ out[0] = out0;
+ out[1] = out1;
+ out[2] = out2;
+ out[3] = out3;
+
+ old_out0 = out0;
+ old_out1 = out1;
+ old_out2 = out2;
+ old_out3 = out3;
+
+ out += 4;
+ in += 4;
+ }
+
+ out -= n;
+ in -= n;
+ for (; n < buffer_length; n++) {
+ float out_val, out_val_i, fc_val;
+ p_filter_coeffs = filter_coeffs;
+ p_out = &out[n];
+ out_val = in[n];
+ for (i = 1; i <= filter_length; i++) {
+ __asm__ volatile(
+ "lwc1 %[fc_val], 0(%[p_filter_coeffs]) \n\t"
+ "lwc1 %[out_val_i], -4(%[p_out]) \n\t"
+ "addiu %[p_filter_coeffs], 4 \n\t"
+ "addiu %[p_out], -4 \n\t"
+ "nmsub.s %[out_val], %[out_val], %[fc_val], %[out_val_i] \n\t"
+
+ : [fc_val]"=&f"(fc_val), [out_val]"+f"(out_val),
+ [out_val_i]"=&f"(out_val_i), [p_out]"+r"(p_out),
+ [p_filter_coeffs]"+r"(p_filter_coeffs)
+ :
+ : "memory"
+ );
+ }
+ out[n] = out_val;
+ }
+}
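The four-sample unrolling must resolve the feedback between neighbouring outputs: a, b and c are the first three filter coefficients with the recurrence pre-substituted (b = c2 - c1^2, etc.), so the final asm block can correct out1..out3 from out0 without waiting for stores. The underlying recurrence is:

    /* all-pole (IIR) synthesis filter */
    for (n = 0; n < buffer_length; n++) {
        out[n] = in[n];
        for (i = 1; i <= filter_length; i++)
            out[n] -= filter_coeffs[i - 1] * out[n - i];
    }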
+
+static void ff_celp_lp_zero_synthesis_filterf_mips(float *out,
+ const float *filter_coeffs,
+ const float *in, int buffer_length,
+ int filter_length)
+{
+ int i, n;
+ float sum_out8, sum_out7, sum_out6, sum_out5, sum_out4, fc_val;
+ float sum_out3, sum_out2, sum_out1;
+ const float *p_filter_coeffs, *p_in;
+
+ for (n = 0; n < buffer_length; n+=8) {
+ p_in = &in[n];
+ p_filter_coeffs = filter_coeffs;
+ sum_out8 = in[n+7];
+ sum_out7 = in[n+6];
+ sum_out6 = in[n+5];
+ sum_out5 = in[n+4];
+ sum_out4 = in[n+3];
+ sum_out3 = in[n+2];
+ sum_out2 = in[n+1];
+ sum_out1 = in[n];
+ i = filter_length;
+
+ /* i is always greater than 0.
+ * The outer loop is unrolled eight times to reduce memory accesses;
+ * the inner loop is unrolled twice.
+ */
+ __asm__ volatile(
+ "filt_lp_inner%=: \n\t"
+ "lwc1 %[fc_val], 0(%[p_filter_coeffs]) \n\t"
+ "lwc1 $f7, 6*4(%[p_in]) \n\t"
+ "lwc1 $f6, 5*4(%[p_in]) \n\t"
+ "lwc1 $f5, 4*4(%[p_in]) \n\t"
+ "lwc1 $f4, 3*4(%[p_in]) \n\t"
+ "lwc1 $f3, 2*4(%[p_in]) \n\t"
+ "lwc1 $f2, 4(%[p_in]) \n\t"
+ "lwc1 $f1, 0(%[p_in]) \n\t"
+ "lwc1 $f0, -4(%[p_in]) \n\t"
+ "addiu %[i], -2 \n\t"
+ "madd.s %[sum_out8], %[sum_out8], %[fc_val], $f7 \n\t"
+ "madd.s %[sum_out7], %[sum_out7], %[fc_val], $f6 \n\t"
+ "madd.s %[sum_out6], %[sum_out6], %[fc_val], $f5 \n\t"
+ "madd.s %[sum_out5], %[sum_out5], %[fc_val], $f4 \n\t"
+ "madd.s %[sum_out4], %[sum_out4], %[fc_val], $f3 \n\t"
+ "madd.s %[sum_out3], %[sum_out3], %[fc_val], $f2 \n\t"
+ "madd.s %[sum_out2], %[sum_out2], %[fc_val], $f1 \n\t"
+ "madd.s %[sum_out1], %[sum_out1], %[fc_val], $f0 \n\t"
+ "lwc1 %[fc_val], 4(%[p_filter_coeffs]) \n\t"
+ "lwc1 $f7, -8(%[p_in]) \n\t"
+ "addiu %[p_filter_coeffs], 8 \n\t"
+ "addiu %[p_in], -8 \n\t"
+ "madd.s %[sum_out8], %[sum_out8], %[fc_val], $f6 \n\t"
+ "madd.s %[sum_out7], %[sum_out7], %[fc_val], $f5 \n\t"
+ "madd.s %[sum_out6], %[sum_out6], %[fc_val], $f4 \n\t"
+ "madd.s %[sum_out5], %[sum_out5], %[fc_val], $f3 \n\t"
+ "madd.s %[sum_out4], %[sum_out4], %[fc_val], $f2 \n\t"
+ "madd.s %[sum_out3], %[sum_out3], %[fc_val], $f1 \n\t"
+ "madd.s %[sum_out2], %[sum_out2], %[fc_val], $f0 \n\t"
+ "madd.s %[sum_out1], %[sum_out1], %[fc_val], $f7 \n\t"
+ "bgtz %[i], filt_lp_inner%= \n\t"
+
+ : [sum_out8]"+f"(sum_out8), [sum_out7]"+f"(sum_out7),
+ [sum_out6]"+f"(sum_out6), [sum_out5]"+f"(sum_out5),
+ [sum_out4]"+f"(sum_out4), [sum_out3]"+f"(sum_out3),
+ [sum_out2]"+f"(sum_out2), [sum_out1]"+f"(sum_out1),
+ [fc_val]"=&f"(fc_val), [p_filter_coeffs]"+r"(p_filter_coeffs),
+ [p_in]"+r"(p_in), [i]"+r"(i)
+ :
+ : "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "memory"
+ );
+
+ out[n+7] = sum_out8;
+ out[n+6] = sum_out7;
+ out[n+5] = sum_out6;
+ out[n+4] = sum_out5;
+ out[n+3] = sum_out4;
+ out[n+2] = sum_out3;
+ out[n+1] = sum_out2;
+ out[n] = sum_out1;
+ }
+}
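This is the corresponding all-zero (FIR) filter, eight outputs per pass with two taps per inner iteration; the scalar form is:

    for (n = 0; n < buffer_length; n++) {
        out[n] = in[n];
        for (i = 1; i <= filter_length; i++)
            out[n] += filter_coeffs[i - 1] * in[n - i];
    }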
+#endif /* HAVE_INLINE_ASM */
+
+void ff_celp_filter_init_mips(CELPFContext *c)
+{
+#if HAVE_INLINE_ASM
+ c->celp_lp_synthesis_filterf = ff_celp_lp_synthesis_filterf_mips;
+ c->celp_lp_zero_synthesis_filterf = ff_celp_lp_zero_synthesis_filterf_mips;
+#endif
+}
diff --git a/libavcodec/mips/celp_math_mips.c b/libavcodec/mips/celp_math_mips.c
new file mode 100644
index 0000000000..d7ccc23fd9
--- /dev/null
+++ b/libavcodec/mips/celp_math_mips.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Author: Nedeljko Babic (nbabic@mips.com)
+ *
+ * Math operations optimized for MIPS
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Reference: libavcodec/celp_math.c
+ */
+#include "config.h"
+#include "libavcodec/celp_math.h"
+
+#if HAVE_INLINE_ASM
+static float ff_dot_productf_mips(const float* a, const float* b,
+ int length)
+{
+ float sum;
+ const float* a_end = a + length;
+
+ __asm__ volatile (
+ "mtc1 $zero, %[sum] \n\t"
+ "blez %[length], ff_dot_productf_end%= \n\t"
+ "ff_dot_productf_madd%=: \n\t"
+ "lwc1 $f2, 0(%[a]) \n\t"
+ "lwc1 $f1, 0(%[b]) \n\t"
+ "addiu %[a], %[a], 4 \n\t"
+ "addiu %[b], %[b], 4 \n\t"
+ "madd.s %[sum], %[sum], $f1, $f2 \n\t"
+ "bne %[a], %[a_end], ff_dot_productf_madd%= \n\t"
+ "ff_dot_productf_end%=: \n\t"
+
+ : [sum] "=&f" (sum), [a] "+r" (a), [b] "+r" (b)
+ : [a_end]"r"(a_end), [length] "r" (length)
+ : "$f1", "$f2", "memory"
+ );
+ return sum;
+}
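madd.s accumulates one product per iteration; the scalar equivalent is the textbook dot product:

    float sum = 0;
    for (i = 0; i < length; i++)
        sum += a[i] * b[i];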
+#endif /* HAVE_INLINE_ASM */
+
+void ff_celp_math_init_mips(CELPMContext *c)
+{
+#if HAVE_INLINE_ASM
+ c->dot_productf = ff_dot_productf_mips;
+#endif
+}
diff --git a/libavcodec/mips/compute_antialias_fixed.h b/libavcodec/mips/compute_antialias_fixed.h
new file mode 100644
index 0000000000..a967f67de7
--- /dev/null
+++ b/libavcodec/mips/compute_antialias_fixed.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Author: Bojan Zivkovic (bojan@mips.com)
+ *
+ * Compute antialias function optimized for MIPS fixed-point architecture
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Reference: libavcodec/mpegaudiodec.c
+ */
+
+#ifndef AVCODEC_MIPS_COMPUTE_ANTIALIAS_FIXED_H
+#define AVCODEC_MIPS_COMPUTE_ANTIALIAS_FIXED_H
+
+#if HAVE_INLINE_ASM
+static void compute_antialias_mips_fixed(MPADecodeContext *s,
+ GranuleDef *g)
+{
+ int32_t *ptr, *csa;
+ int n, i;
+ int MAX_lo = 0xffffffff;
+
+ /* we antialias only "long" bands */
+ if (g->block_type == 2) {
+ if (!g->switch_point)
+ return;
+ /* XXX: check this for 8000Hz case */
+ n = 1;
+ } else {
+ n = SBLIMIT - 1;
+ }
+
+ ptr = g->sb_hybrid + 18;
+
+ for (i = n; i > 0; i--) {
+ int tmp0, tmp1, tmp2, tmp00, tmp11;
+ int temp_reg1, temp_reg2, temp_reg3, temp_reg4, temp_reg5, temp_reg6;
+ csa = &csa_table[0][0];
+
+ /**
+ * Instructions are scheduled to minimize pipeline stalls.
+ */
+ __asm__ volatile (
+ "lw %[tmp0], -1*4(%[ptr]) \n\t"
+ "lw %[tmp1], 0*4(%[ptr]) \n\t"
+ "lw %[temp_reg1], 0*4(%[csa]) \n\t"
+ "lw %[temp_reg2], 2*4(%[csa]) \n\t"
+ "add %[tmp2], %[tmp0], %[tmp1] \n\t"
+ "lw %[temp_reg3], 3*4(%[csa]) \n\t"
+ "mult $ac0, %[tmp2], %[temp_reg1] \n\t"
+ "mult $ac1, %[tmp2], %[temp_reg1] \n\t"
+ "lw %[tmp00], -2*4(%[ptr]) \n\t"
+ "lw %[tmp11], 1*4(%[ptr]) \n\t"
+ "lw %[temp_reg4], 4*4(%[csa]) \n\t"
+ "mtlo %[MAX_lo], $ac0 \n\t"
+ "mtlo $zero, $ac1 \n\t"
+ "msub $ac0, %[tmp1], %[temp_reg2] \n\t"
+ "madd $ac1, %[tmp0], %[temp_reg3] \n\t"
+ "add %[tmp2], %[tmp00], %[tmp11] \n\t"
+ "lw %[temp_reg5], 6*4(%[csa]) \n\t"
+ "mult $ac2, %[tmp2], %[temp_reg4] \n\t"
+ "mult $ac3, %[tmp2], %[temp_reg4] \n\t"
+ "mfhi %[temp_reg1], $ac0 \n\t"
+ "mfhi %[temp_reg2], $ac1 \n\t"
+ "lw %[temp_reg6], 7*4(%[csa]) \n\t"
+ "mtlo %[MAX_lo], $ac2 \n\t"
+ "msub $ac2, %[tmp11], %[temp_reg5] \n\t"
+ "mtlo $zero, $ac3 \n\t"
+ "madd $ac3, %[tmp00], %[temp_reg6] \n\t"
+ "sll %[temp_reg1], %[temp_reg1], 2 \n\t"
+ "sw %[temp_reg1], -1*4(%[ptr]) \n\t"
+ "mfhi %[temp_reg4], $ac2 \n\t"
+ "sll %[temp_reg2], %[temp_reg2], 2 \n\t"
+ "mfhi %[temp_reg5], $ac3 \n\t"
+ "sw %[temp_reg2], 0*4(%[ptr]) \n\t"
+ "lw %[tmp0], -3*4(%[ptr]) \n\t"
+ "lw %[tmp1], 2*4(%[ptr]) \n\t"
+ "lw %[temp_reg1], 8*4(%[csa]) \n\t"
+ "sll %[temp_reg4], %[temp_reg4], 2 \n\t"
+ "add %[tmp2], %[tmp0], %[tmp1] \n\t"
+ "sll %[temp_reg5], %[temp_reg5], 2 \n\t"
+ "mult $ac0, %[tmp2], %[temp_reg1] \n\t"
+ "mult $ac1, %[tmp2], %[temp_reg1] \n\t"
+ "sw %[temp_reg4], -2*4(%[ptr]) \n\t"
+ "sw %[temp_reg5], 1*4(%[ptr]) \n\t"
+ "lw %[temp_reg2], 10*4(%[csa]) \n\t"
+ "mtlo %[MAX_lo], $ac0 \n\t"
+ "lw %[temp_reg3], 11*4(%[csa]) \n\t"
+ "msub $ac0, %[tmp1], %[temp_reg2] \n\t"
+ "mtlo $zero, $ac1 \n\t"
+ "madd $ac1, %[tmp0], %[temp_reg3] \n\t"
+ "lw %[tmp00], -4*4(%[ptr]) \n\t"
+ "lw %[tmp11], 3*4(%[ptr]) \n\t"
+ "mfhi %[temp_reg1], $ac0 \n\t"
+ "lw %[temp_reg4], 12*4(%[csa]) \n\t"
+ "mfhi %[temp_reg2], $ac1 \n\t"
+ "add %[tmp2], %[tmp00], %[tmp11] \n\t"
+ "mult $ac2, %[tmp2], %[temp_reg4] \n\t"
+ "mult $ac3, %[tmp2], %[temp_reg4] \n\t"
+ "lw %[temp_reg5], 14*4(%[csa]) \n\t"
+ "lw %[temp_reg6], 15*4(%[csa]) \n\t"
+ "sll %[temp_reg1], %[temp_reg1], 2 \n\t"
+ "mtlo %[MAX_lo], $ac2 \n\t"
+ "msub $ac2, %[tmp11], %[temp_reg5] \n\t"
+ "mtlo $zero, $ac3 \n\t"
+ "madd $ac3, %[tmp00], %[temp_reg6] \n\t"
+ "sll %[temp_reg2], %[temp_reg2], 2 \n\t"
+ "sw %[temp_reg1], -3*4(%[ptr]) \n\t"
+ "mfhi %[temp_reg4], $ac2 \n\t"
+ "sw %[temp_reg2], 2*4(%[ptr]) \n\t"
+ "mfhi %[temp_reg5], $ac3 \n\t"
+ "lw %[tmp0], -5*4(%[ptr]) \n\t"
+ "lw %[tmp1], 4*4(%[ptr]) \n\t"
+ "lw %[temp_reg1], 16*4(%[csa]) \n\t"
+ "lw %[temp_reg2], 18*4(%[csa]) \n\t"
+ "add %[tmp2], %[tmp0], %[tmp1] \n\t"
+ "lw %[temp_reg3], 19*4(%[csa]) \n\t"
+ "mult $ac0, %[tmp2], %[temp_reg1] \n\t"
+ "mult $ac1, %[tmp2], %[temp_reg1] \n\t"
+ "sll %[temp_reg4], %[temp_reg4], 2 \n\t"
+ "sll %[temp_reg5], %[temp_reg5], 2 \n\t"
+ "sw %[temp_reg4], -4*4(%[ptr]) \n\t"
+ "mtlo %[MAX_lo], $ac0 \n\t"
+ "msub $ac0, %[tmp1], %[temp_reg2] \n\t"
+ "mtlo $zero, $ac1 \n\t"
+ "madd $ac1, %[tmp0], %[temp_reg3] \n\t"
+ "sw %[temp_reg5], 3*4(%[ptr]) \n\t"
+ "lw %[tmp00], -6*4(%[ptr]) \n\t"
+ "mfhi %[temp_reg1], $ac0 \n\t"
+ "lw %[tmp11], 5*4(%[ptr]) \n\t"
+ "mfhi %[temp_reg2], $ac1 \n\t"
+ "lw %[temp_reg4], 20*4(%[csa]) \n\t"
+ "add %[tmp2], %[tmp00], %[tmp11] \n\t"
+ "lw %[temp_reg5], 22*4(%[csa]) \n\t"
+ "mult $ac2, %[tmp2], %[temp_reg4] \n\t"
+ "mult $ac3, %[tmp2], %[temp_reg4] \n\t"
+ "lw %[temp_reg6], 23*4(%[csa]) \n\t"
+ "sll %[temp_reg1], %[temp_reg1], 2 \n\t"
+ "sll %[temp_reg2], %[temp_reg2], 2 \n\t"
+ "mtlo %[MAX_lo], $ac2 \n\t"
+ "msub $ac2, %[tmp11], %[temp_reg5] \n\t"
+ "mtlo $zero, $ac3 \n\t"
+ "madd $ac3, %[tmp00], %[temp_reg6] \n\t"
+ "sw %[temp_reg1], -5*4(%[ptr]) \n\t"
+ "sw %[temp_reg2], 4*4(%[ptr]) \n\t"
+ "mfhi %[temp_reg4], $ac2 \n\t"
+ "lw %[tmp0], -7*4(%[ptr]) \n\t"
+ "mfhi %[temp_reg5], $ac3 \n\t"
+ "lw %[tmp1], 6*4(%[ptr]) \n\t"
+ "lw %[temp_reg1], 24*4(%[csa]) \n\t"
+ "lw %[temp_reg2], 26*4(%[csa]) \n\t"
+ "add %[tmp2], %[tmp0], %[tmp1] \n\t"
+ "lw %[temp_reg3], 27*4(%[csa]) \n\t"
+ "mult $ac0, %[tmp2], %[temp_reg1] \n\t"
+ "mult $ac1, %[tmp2], %[temp_reg1] \n\t"
+ "sll %[temp_reg4], %[temp_reg4], 2 \n\t"
+ "sll %[temp_reg5], %[temp_reg5], 2 \n\t"
+ "sw %[temp_reg4], -6*4(%[ptr]) \n\t"
+ "mtlo %[MAX_lo], $ac0 \n\t"
+ "msub $ac0, %[tmp1], %[temp_reg2] \n\t"
+ "mtlo $zero, $ac1 \n\t"
+ "madd $ac1, %[tmp0], %[temp_reg3] \n\t"
+ "sw %[temp_reg5], 5*4(%[ptr]) \n\t"
+ "lw %[tmp00], -8*4(%[ptr]) \n\t"
+ "mfhi %[temp_reg1], $ac0 \n\t"
+ "lw %[tmp11], 7*4(%[ptr]) \n\t"
+ "mfhi %[temp_reg2], $ac1 \n\t"
+ "lw %[temp_reg4], 28*4(%[csa]) \n\t"
+ "add %[tmp2], %[tmp00], %[tmp11] \n\t"
+ "lw %[temp_reg5], 30*4(%[csa]) \n\t"
+ "mult $ac2, %[tmp2], %[temp_reg4] \n\t"
+ "mult $ac3, %[tmp2], %[temp_reg4] \n\t"
+ "lw %[temp_reg6], 31*4(%[csa]) \n\t"
+ "sll %[temp_reg1], %[temp_reg1], 2 \n\t"
+ "sll %[temp_reg2], %[temp_reg2], 2 \n\t"
+ "mtlo %[MAX_lo], $ac2 \n\t"
+ "msub $ac2, %[tmp11], %[temp_reg5] \n\t"
+ "mtlo $zero, $ac3 \n\t"
+ "madd $ac3, %[tmp00], %[temp_reg6] \n\t"
+ "sw %[temp_reg1], -7*4(%[ptr]) \n\t"
+ "sw %[temp_reg2], 6*4(%[ptr]) \n\t"
+ "mfhi %[temp_reg4], $ac2 \n\t"
+ "mfhi %[temp_reg5], $ac3 \n\t"
+ "sll %[temp_reg4], %[temp_reg4], 2 \n\t"
+ "sll %[temp_reg5], %[temp_reg5], 2 \n\t"
+ "sw %[temp_reg4], -8*4(%[ptr]) \n\t"
+ "sw %[temp_reg5], 7*4(%[ptr]) \n\t"
+
+ : [tmp0] "=&r" (tmp0), [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2),
+ [tmp00] "=&r" (tmp00), [tmp11] "=&r" (tmp11),
+ [temp_reg1] "=&r" (temp_reg1), [temp_reg2] "=&r" (temp_reg2),
+ [temp_reg3] "=&r" (temp_reg3), [temp_reg4] "=&r" (temp_reg4),
+ [temp_reg5] "=&r" (temp_reg5), [temp_reg6] "=&r" (temp_reg6)
+ : [csa] "r" (csa), [ptr] "r" (ptr),
+ [MAX_lo] "r" (MAX_lo)
+ : "memory", "hi", "lo", "$ac1hi", "$ac1lo", "$ac2hi", "$ac2lo",
+ "$ac3hi", "$ac3lo"
+ );
+
+        ptr += 18; /* advance to the next subband boundary (18 samples per subband) */
+ }
+}
+#define compute_antialias compute_antialias_mips_fixed
+#endif /* HAVE_INLINE_ASM */
+
+#endif /* AVCODEC_MIPS_COMPUTE_ANTIALIAS_FIXED_H */
diff --git a/libavcodec/mips/compute_antialias_float.h b/libavcodec/mips/compute_antialias_float.h
new file mode 100644
index 0000000000..0f6f03f587
--- /dev/null
+++ b/libavcodec/mips/compute_antialias_float.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Author: Bojan Zivkovic (bojan@mips.com)
+ *
+ * Compute antialias function optimized for MIPS floating-point architecture
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Reference: libavcodec/mpegaudiodec.c
+ */
+
+#ifndef AVCODEC_MIPS_COMPUTE_ANTIALIAS_FLOAT_H
+#define AVCODEC_MIPS_COMPUTE_ANTIALIAS_FLOAT_H
+
+#if HAVE_INLINE_ASM
+static void compute_antialias_mips_float(MPADecodeContext *s,
+ GranuleDef *g)
+{
+ float *ptr, *ptr_end;
+ float *csa = &csa_table[0][0];
+ int n;
+ /* temporary variables */
+ float in1, in2, in3, in4, in5, in6, in7, in8;
+ float out1, out2, out3, out4;
+
+ ptr = g->sb_hybrid + 18;
+ /* we antialias only "long" bands */
+ if (g->block_type == 2) {
+ if (!g->switch_point)
+ return;
+ /* XXX: check this for 8000Hz case */
+ n = 1;
+ ptr_end = ptr + 18;
+ } else {
+ n = 31;
+        ptr_end = ptr + 558; /* 31 subband boundaries * 18 samples */
+ }
+
+    /**
+     * Instructions are scheduled to minimize pipeline stalls.
+     */
+
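+    /*
+     * Each loop iteration applies the eight antialiasing butterflies across
+     * one subband boundary (csa_table holds four floats per butterfly):
+     *   ptr[-1-j] = ptr[-1-j] * csa[4*j]   - ptr[j] * csa[4*j+1];
+     *   ptr[j]    = ptr[-1-j] * csa[4*j+1] + ptr[j] * csa[4*j];
+     */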
+ __asm__ volatile (
+ "compute_antialias_float_loop%=: \t\n"
+ "lwc1 %[in1], -1*4(%[ptr]) \t\n"
+ "lwc1 %[in2], 0(%[csa]) \t\n"
+ "lwc1 %[in3], 1*4(%[csa]) \t\n"
+ "lwc1 %[in4], 0(%[ptr]) \t\n"
+ "lwc1 %[in5], -2*4(%[ptr]) \t\n"
+ "lwc1 %[in6], 4*4(%[csa]) \t\n"
+ "mul.s %[out1], %[in1], %[in2] \t\n"
+ "mul.s %[out2], %[in1], %[in3] \t\n"
+ "lwc1 %[in7], 5*4(%[csa]) \t\n"
+ "lwc1 %[in8], 1*4(%[ptr]) \t\n"
+ "nmsub.s %[out1], %[out1], %[in3], %[in4] \t\n"
+ "madd.s %[out2], %[out2], %[in2], %[in4] \t\n"
+ "mul.s %[out3], %[in5], %[in6] \t\n"
+ "mul.s %[out4], %[in5], %[in7] \t\n"
+ "lwc1 %[in1], -3*4(%[ptr]) \t\n"
+ "swc1 %[out1], -1*4(%[ptr]) \t\n"
+ "swc1 %[out2], 0(%[ptr]) \t\n"
+ "nmsub.s %[out3], %[out3], %[in7], %[in8] \t\n"
+ "madd.s %[out4], %[out4], %[in6], %[in8] \t\n"
+ "lwc1 %[in2], 8*4(%[csa]) \t\n"
+ "swc1 %[out3], -2*4(%[ptr]) \t\n"
+ "swc1 %[out4], 1*4(%[ptr]) \t\n"
+ "lwc1 %[in3], 9*4(%[csa]) \t\n"
+ "lwc1 %[in4], 2*4(%[ptr]) \t\n"
+ "mul.s %[out1], %[in1], %[in2] \t\n"
+ "lwc1 %[in5], -4*4(%[ptr]) \t\n"
+ "lwc1 %[in6], 12*4(%[csa]) \t\n"
+ "mul.s %[out2], %[in1], %[in3] \t\n"
+ "lwc1 %[in7], 13*4(%[csa]) \t\n"
+ "nmsub.s %[out1], %[out1], %[in3], %[in4] \t\n"
+ "lwc1 %[in8], 3*4(%[ptr]) \t\n"
+ "mul.s %[out3], %[in5], %[in6] \t\n"
+ "madd.s %[out2], %[out2], %[in2], %[in4] \t\n"
+ "mul.s %[out4], %[in5], %[in7] \t\n"
+ "swc1 %[out1], -3*4(%[ptr]) \t\n"
+ "lwc1 %[in1], -5*4(%[ptr]) \t\n"
+ "nmsub.s %[out3], %[out3], %[in7], %[in8] \t\n"
+ "swc1 %[out2], 2*4(%[ptr]) \t\n"
+ "madd.s %[out4], %[out4], %[in6], %[in8] \t\n"
+ "lwc1 %[in2], 16*4(%[csa]) \t\n"
+ "lwc1 %[in3], 17*4(%[csa]) \t\n"
+ "swc1 %[out3], -4*4(%[ptr]) \t\n"
+ "lwc1 %[in4], 4*4(%[ptr]) \t\n"
+ "swc1 %[out4], 3*4(%[ptr]) \t\n"
+ "mul.s %[out1], %[in1], %[in2] \t\n"
+ "mul.s %[out2], %[in1], %[in3] \t\n"
+ "lwc1 %[in5], -6*4(%[ptr]) \t\n"
+ "lwc1 %[in6], 20*4(%[csa]) \t\n"
+ "lwc1 %[in7], 21*4(%[csa]) \t\n"
+ "nmsub.s %[out1], %[out1], %[in3], %[in4] \t\n"
+ "madd.s %[out2], %[out2], %[in2], %[in4] \t\n"
+ "lwc1 %[in8], 5*4(%[ptr]) \t\n"
+ "mul.s %[out3], %[in5], %[in6] \t\n"
+ "mul.s %[out4], %[in5], %[in7] \t\n"
+ "swc1 %[out1], -5*4(%[ptr]) \t\n"
+ "swc1 %[out2], 4*4(%[ptr]) \t\n"
+ "lwc1 %[in1], -7*4(%[ptr]) \t\n"
+ "nmsub.s %[out3], %[out3], %[in7], %[in8] \t\n"
+ "madd.s %[out4], %[out4], %[in6], %[in8] \t\n"
+ "lwc1 %[in2], 24*4(%[csa]) \t\n"
+ "lwc1 %[in3], 25*4(%[csa]) \t\n"
+ "lwc1 %[in4], 6*4(%[ptr]) \t\n"
+ "swc1 %[out3], -6*4(%[ptr]) \t\n"
+ "swc1 %[out4], 5*4(%[ptr]) \t\n"
+ "mul.s %[out1], %[in1], %[in2] \t\n"
+ "lwc1 %[in5], -8*4(%[ptr]) \t\n"
+ "mul.s %[out2], %[in1], %[in3] \t\n"
+ "lwc1 %[in6], 28*4(%[csa]) \t\n"
+ "lwc1 %[in7], 29*4(%[csa]) \t\n"
+ "nmsub.s %[out1], %[out1], %[in3], %[in4] \t\n"
+ "lwc1 %[in8], 7*4(%[ptr]) \t\n"
+ "madd.s %[out2], %[out2], %[in2], %[in4] \t\n"
+ "mul.s %[out3], %[in5], %[in6] \t\n"
+ "mul.s %[out4], %[in5], %[in7] \t\n"
+ "swc1 %[out1], -7*4(%[ptr]) \t\n"
+ "swc1 %[out2], 6*4(%[ptr]) \t\n"
+ "addiu %[ptr], %[ptr], 72 \t\n"
+ "nmsub.s %[out3], %[out3], %[in7], %[in8] \t\n"
+ "madd.s %[out4], %[out4], %[in6], %[in8] \t\n"
+ "swc1 %[out3], -26*4(%[ptr]) \t\n"
+ "swc1 %[out4], -11*4(%[ptr]) \t\n"
+ "bne %[ptr], %[ptr_end], compute_antialias_float_loop%= \t\n"
+
+ : [ptr] "+r" (ptr),
+ [in1] "=&f" (in1), [in2] "=&f" (in2),
+ [in3] "=&f" (in3), [in4] "=&f" (in4),
+ [in5] "=&f" (in5), [in6] "=&f" (in6),
+ [in7] "=&f" (in7), [in8] "=&f" (in8),
+ [out1] "=&f" (out1), [out2] "=&f" (out2),
+ [out3] "=&f" (out3), [out4] "=&f" (out4)
+ : [csa] "r" (csa), [ptr_end] "r" (ptr_end)
+ : "memory"
+ );
+}
+#define compute_antialias compute_antialias_mips_float
+#endif /* HAVE_INLINE_ASM */
+
+#endif /* AVCODEC_MIPS_COMPUTE_ANTIALIAS_FLOAT_H */
diff --git a/libavcodec/mips/fft_init_table.c b/libavcodec/mips/fft_init_table.c
new file mode 100644
index 0000000000..9c2e998e9c
--- /dev/null
+++ b/libavcodec/mips/fft_init_table.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Author: Stanislav Ocovaj (socovaj@mips.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * definitions and initialization of the offset LUT for the MIPS FFT
+ */
+#include "fft_table.h"
+
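+/*
+ * 0x2aab (10923) is the number of leaf sub-transforms (size 4 or 8) in the
+ * split-radix decomposition of a 2^16-point FFT, per the recurrence
+ * count(N) = count(N/2) + 2*count(N/4) with count(N) = 1 for N < 16.
+ */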
+uint16_t fft_offsets_lut[0x2aab];
+
+void ff_fft_lut_init(uint16_t *table, int off, int size, int *index)
+{
+    if (size < 16) {
+        /* leaf sub-transform (size 4 or 8): record its offset in units of 4 */
+        table[*index] = off >> 2;
+        (*index)++;
+    } else {
+        /* split-radix step: one half-size and two quarter-size sub-transforms */
+        ff_fft_lut_init(table, off, size >> 1, index);
+        ff_fft_lut_init(table, off + (size >> 1), size >> 2, index);
+        ff_fft_lut_init(table, off + 3 * (size >> 2), size >> 2, index);
+    }
+}
diff --git a/libavcodec/mips/fft_mips.c b/libavcodec/mips/fft_mips.c
new file mode 100644
index 0000000000..ae4ed30347
--- /dev/null
+++ b/libavcodec/mips/fft_mips.c
@@ -0,0 +1,534 @@
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Author: Stanislav Ocovaj (socovaj@mips.com)
+ * Author: Zoran Lukic (zoranl@mips.com)
+ *
+ * Optimized MDCT/IMDCT and FFT transforms
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "config.h"
+#include "libavcodec/fft.h"
+#include "fft_table.h"
+
+/**
+ * FFT transform
+ */
+
+#if HAVE_INLINE_ASM
+static void ff_fft_calc_mips(FFTContext *s, FFTComplex *z)
+{
+ int nbits, i, n, num_transforms, offset, step;
+ int n4, n2, n34;
+ FFTSample tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
+ FFTComplex *tmpz;
+ float w_re, w_im;
+ float *w_re_ptr, *w_im_ptr;
+ const int fft_size = (1 << s->nbits);
+ int s_n = s->nbits;
+ int tem1, tem2;
+ float pom, pom1, pom2, pom3;
+ float temp, temp1, temp3, temp4;
+    FFTComplex *tmpz_n2, *tmpz_n34, *tmpz_n4;
+    FFTComplex *tmpz_n2_i, *tmpz_n34_i, *tmpz_n4_i, *tmpz_i;
+
+    /**
+     * num_transforms = (0x2aab >> (16 - s->nbits)) | 1;
+     * (scales the 2^16-point leaf count down to the actual transform size)
+     */
+ __asm__ volatile (
+ "li %[tem1], 16 \n\t"
+ "sub %[s_n], %[tem1], %[s_n] \n\t"
+ "li %[tem2], 10923 \n\t"
+ "srav %[tem2], %[tem2], %[s_n] \n\t"
+ "ori %[num_t],%[tem2], 1 \n\t"
+ : [num_t]"=r"(num_transforms), [s_n]"+r"(s_n),
+ [tem1]"=&r"(tem1), [tem2]"=&r"(tem2)
+ );
+
+
+ for (n=0; n<num_transforms; n++) {
+ offset = fft_offsets_lut[n] << 2;
+ tmpz = z + offset;
+
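+        /* radix-4 base case: a 4-point DFT computed directly */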
+ tmp1 = tmpz[0].re + tmpz[1].re;
+ tmp5 = tmpz[2].re + tmpz[3].re;
+ tmp2 = tmpz[0].im + tmpz[1].im;
+ tmp6 = tmpz[2].im + tmpz[3].im;
+ tmp3 = tmpz[0].re - tmpz[1].re;
+ tmp8 = tmpz[2].im - tmpz[3].im;
+ tmp4 = tmpz[0].im - tmpz[1].im;
+ tmp7 = tmpz[2].re - tmpz[3].re;
+
+ tmpz[0].re = tmp1 + tmp5;
+ tmpz[2].re = tmp1 - tmp5;
+ tmpz[0].im = tmp2 + tmp6;
+ tmpz[2].im = tmp2 - tmp6;
+ tmpz[1].re = tmp3 + tmp8;
+ tmpz[3].re = tmp3 - tmp8;
+ tmpz[1].im = tmp4 - tmp7;
+ tmpz[3].im = tmp4 + tmp7;
+
+ }
+
+ if (fft_size < 8)
+ return;
+
+ num_transforms = (num_transforms >> 1) | 1;
+
+ for (n=0; n<num_transforms; n++) {
+ offset = fft_offsets_lut[n] << 3;
+ tmpz = z + offset;
+
+ __asm__ volatile (
+ "lwc1 %[tmp1], 32(%[tmpz]) \n\t"
+ "lwc1 %[pom], 40(%[tmpz]) \n\t"
+ "lwc1 %[tmp3], 48(%[tmpz]) \n\t"
+ "lwc1 %[pom1], 56(%[tmpz]) \n\t"
+ "lwc1 %[tmp2], 36(%[tmpz]) \n\t"
+ "lwc1 %[pom2], 44(%[tmpz]) \n\t"
+ "lwc1 %[pom3], 60(%[tmpz]) \n\t"
+ "lwc1 %[tmp4], 52(%[tmpz]) \n\t"
+ "add.s %[tmp1], %[tmp1], %[pom] \n\t" // tmp1 = tmpz[4].re + tmpz[5].re;
+ "add.s %[tmp3], %[tmp3], %[pom1] \n\t" // tmp3 = tmpz[6].re + tmpz[7].re;
+ "add.s %[tmp2], %[tmp2], %[pom2] \n\t" // tmp2 = tmpz[4].im + tmpz[5].im;
+ "lwc1 %[pom], 40(%[tmpz]) \n\t"
+ "add.s %[tmp4], %[tmp4], %[pom3] \n\t" // tmp4 = tmpz[6].im + tmpz[7].im;
+ "add.s %[tmp5], %[tmp1], %[tmp3] \n\t" // tmp5 = tmp1 + tmp3;
+ "sub.s %[tmp7], %[tmp1], %[tmp3] \n\t" // tmp7 = tmp1 - tmp3;
+ "lwc1 %[tmp1], 32(%[tmpz]) \n\t"
+ "lwc1 %[pom1], 44(%[tmpz]) \n\t"
+ "add.s %[tmp6], %[tmp2], %[tmp4] \n\t" // tmp6 = tmp2 + tmp4;
+ "sub.s %[tmp8], %[tmp2], %[tmp4] \n\t" // tmp8 = tmp2 - tmp4;
+ "lwc1 %[tmp2], 36(%[tmpz]) \n\t"
+ "lwc1 %[pom2], 56(%[tmpz]) \n\t"
+ "lwc1 %[pom3], 60(%[tmpz]) \n\t"
+ "lwc1 %[tmp3], 48(%[tmpz]) \n\t"
+ "lwc1 %[tmp4], 52(%[tmpz]) \n\t"
+ "sub.s %[tmp1], %[tmp1], %[pom] \n\t" // tmp1 = tmpz[4].re - tmpz[5].re;
+ "lwc1 %[pom], 0(%[tmpz]) \n\t"
+ "sub.s %[tmp2], %[tmp2], %[pom1] \n\t" // tmp2 = tmpz[4].im - tmpz[5].im;
+ "sub.s %[tmp3], %[tmp3], %[pom2] \n\t" // tmp3 = tmpz[6].re - tmpz[7].re;
+ "lwc1 %[pom2], 4(%[tmpz]) \n\t"
+ "sub.s %[pom1], %[pom], %[tmp5] \n\t"
+ "sub.s %[tmp4], %[tmp4], %[pom3] \n\t" // tmp4 = tmpz[6].im - tmpz[7].im;
+ "add.s %[pom3], %[pom], %[tmp5] \n\t"
+ "sub.s %[pom], %[pom2], %[tmp6] \n\t"
+ "add.s %[pom2], %[pom2], %[tmp6] \n\t"
+ "swc1 %[pom1], 32(%[tmpz]) \n\t" // tmpz[4].re = tmpz[0].re - tmp5;
+ "swc1 %[pom3], 0(%[tmpz]) \n\t" // tmpz[0].re = tmpz[0].re + tmp5;
+ "swc1 %[pom], 36(%[tmpz]) \n\t" // tmpz[4].im = tmpz[0].im - tmp6;
+ "swc1 %[pom2], 4(%[tmpz]) \n\t" // tmpz[0].im = tmpz[0].im + tmp6;
+ "lwc1 %[pom1], 16(%[tmpz]) \n\t"
+ "lwc1 %[pom3], 20(%[tmpz]) \n\t"
+ "li.s %[pom], 0.7071067812 \n\t" // float pom = 0.7071067812f;
+ "add.s %[temp1],%[tmp1], %[tmp2] \n\t"
+ "sub.s %[temp], %[pom1], %[tmp8] \n\t"
+ "add.s %[pom2], %[pom3], %[tmp7] \n\t"
+ "sub.s %[temp3],%[tmp3], %[tmp4] \n\t"
+ "sub.s %[temp4],%[tmp2], %[tmp1] \n\t"
+ "swc1 %[temp], 48(%[tmpz]) \n\t" // tmpz[6].re = tmpz[2].re - tmp8;
+ "swc1 %[pom2], 52(%[tmpz]) \n\t" // tmpz[6].im = tmpz[2].im + tmp7;
+ "add.s %[pom1], %[pom1], %[tmp8] \n\t"
+ "sub.s %[pom3], %[pom3], %[tmp7] \n\t"
+ "add.s %[tmp3], %[tmp3], %[tmp4] \n\t"
+ "mul.s %[tmp5], %[pom], %[temp1] \n\t" // tmp5 = pom * (tmp1 + tmp2);
+ "mul.s %[tmp7], %[pom], %[temp3] \n\t" // tmp7 = pom * (tmp3 - tmp4);
+ "mul.s %[tmp6], %[pom], %[temp4] \n\t" // tmp6 = pom * (tmp2 - tmp1);
+ "mul.s %[tmp8], %[pom], %[tmp3] \n\t" // tmp8 = pom * (tmp3 + tmp4);
+ "swc1 %[pom1], 16(%[tmpz]) \n\t" // tmpz[2].re = tmpz[2].re + tmp8;
+ "swc1 %[pom3], 20(%[tmpz]) \n\t" // tmpz[2].im = tmpz[2].im - tmp7;
+ "add.s %[tmp1], %[tmp5], %[tmp7] \n\t" // tmp1 = tmp5 + tmp7;
+ "sub.s %[tmp3], %[tmp5], %[tmp7] \n\t" // tmp3 = tmp5 - tmp7;
+ "add.s %[tmp2], %[tmp6], %[tmp8] \n\t" // tmp2 = tmp6 + tmp8;
+ "sub.s %[tmp4], %[tmp6], %[tmp8] \n\t" // tmp4 = tmp6 - tmp8;
+ "lwc1 %[temp], 8(%[tmpz]) \n\t"
+ "lwc1 %[temp1],12(%[tmpz]) \n\t"
+ "lwc1 %[pom], 24(%[tmpz]) \n\t"
+ "lwc1 %[pom2], 28(%[tmpz]) \n\t"
+ "sub.s %[temp4],%[temp], %[tmp1] \n\t"
+ "sub.s %[temp3],%[temp1], %[tmp2] \n\t"
+ "add.s %[temp], %[temp], %[tmp1] \n\t"
+ "add.s %[temp1],%[temp1], %[tmp2] \n\t"
+ "sub.s %[pom1], %[pom], %[tmp4] \n\t"
+ "add.s %[pom3], %[pom2], %[tmp3] \n\t"
+ "add.s %[pom], %[pom], %[tmp4] \n\t"
+ "sub.s %[pom2], %[pom2], %[tmp3] \n\t"
+ "swc1 %[temp4],40(%[tmpz]) \n\t" // tmpz[5].re = tmpz[1].re - tmp1;
+ "swc1 %[temp3],44(%[tmpz]) \n\t" // tmpz[5].im = tmpz[1].im - tmp2;
+ "swc1 %[temp], 8(%[tmpz]) \n\t" // tmpz[1].re = tmpz[1].re + tmp1;
+ "swc1 %[temp1],12(%[tmpz]) \n\t" // tmpz[1].im = tmpz[1].im + tmp2;
+ "swc1 %[pom1], 56(%[tmpz]) \n\t" // tmpz[7].re = tmpz[3].re - tmp4;
+ "swc1 %[pom3], 60(%[tmpz]) \n\t" // tmpz[7].im = tmpz[3].im + tmp3;
+ "swc1 %[pom], 24(%[tmpz]) \n\t" // tmpz[3].re = tmpz[3].re + tmp4;
+ "swc1 %[pom2], 28(%[tmpz]) \n\t" // tmpz[3].im = tmpz[3].im - tmp3;
+ : [tmp1]"=&f"(tmp1), [pom]"=&f"(pom), [pom1]"=&f"(pom1), [pom2]"=&f"(pom2),
+ [tmp3]"=&f"(tmp3), [tmp2]"=&f"(tmp2), [tmp4]"=&f"(tmp4), [tmp5]"=&f"(tmp5), [tmp7]"=&f"(tmp7),
+ [tmp6]"=&f"(tmp6), [tmp8]"=&f"(tmp8), [pom3]"=&f"(pom3),[temp]"=&f"(temp), [temp1]"=&f"(temp1),
+ [temp3]"=&f"(temp3), [temp4]"=&f"(temp4)
+ : [tmpz]"r"(tmpz)
+ : "memory"
+ );
+ }
+
+ step = 1 << (MAX_LOG2_NFFT - 4);
+ n4 = 4;
+
+ for (nbits=4; nbits<=s->nbits; nbits++) {
+ /*
+ * num_transforms = (num_transforms >> 1) | 1;
+ */
+ __asm__ volatile (
+ "sra %[num_t], %[num_t], 1 \n\t"
+ "ori %[num_t], %[num_t], 1 \n\t"
+
+ : [num_t] "+r" (num_transforms)
+ );
+ n2 = 2 * n4;
+ n34 = 3 * n4;
+
+ for (n=0; n<num_transforms; n++) {
+ offset = fft_offsets_lut[n] << nbits;
+ tmpz = z + offset;
+
+ tmpz_n2 = tmpz + n2;
+ tmpz_n4 = tmpz + n4;
+ tmpz_n34 = tmpz + n34;
+
+ __asm__ volatile (
+ "lwc1 %[pom1], 0(%[tmpz_n2]) \n\t"
+ "lwc1 %[pom], 0(%[tmpz_n34]) \n\t"
+ "lwc1 %[pom2], 4(%[tmpz_n2]) \n\t"
+ "lwc1 %[pom3], 4(%[tmpz_n34]) \n\t"
+ "lwc1 %[temp1],0(%[tmpz]) \n\t"
+ "lwc1 %[temp3],4(%[tmpz]) \n\t"
+ "add.s %[tmp5], %[pom1], %[pom] \n\t" // tmp5 = tmpz[ n2].re + tmpz[n34].re;
+ "sub.s %[tmp1], %[pom1], %[pom] \n\t" // tmp1 = tmpz[ n2].re - tmpz[n34].re;
+ "add.s %[tmp6], %[pom2], %[pom3] \n\t" // tmp6 = tmpz[ n2].im + tmpz[n34].im;
+ "sub.s %[tmp2], %[pom2], %[pom3] \n\t" // tmp2 = tmpz[ n2].im - tmpz[n34].im;
+ "sub.s %[temp], %[temp1], %[tmp5] \n\t"
+ "add.s %[temp1],%[temp1], %[tmp5] \n\t"
+ "sub.s %[temp4],%[temp3], %[tmp6] \n\t"
+ "add.s %[temp3],%[temp3], %[tmp6] \n\t"
+ "swc1 %[temp], 0(%[tmpz_n2]) \n\t" // tmpz[ n2].re = tmpz[ 0].re - tmp5;
+ "swc1 %[temp1],0(%[tmpz]) \n\t" // tmpz[ 0].re = tmpz[ 0].re + tmp5;
+ "lwc1 %[pom1], 0(%[tmpz_n4]) \n\t"
+ "swc1 %[temp4],4(%[tmpz_n2]) \n\t" // tmpz[ n2].im = tmpz[ 0].im - tmp6;
+ "lwc1 %[temp], 4(%[tmpz_n4]) \n\t"
+ "swc1 %[temp3],4(%[tmpz]) \n\t" // tmpz[ 0].im = tmpz[ 0].im + tmp6;
+ "sub.s %[pom], %[pom1], %[tmp2] \n\t"
+ "add.s %[pom1], %[pom1], %[tmp2] \n\t"
+ "add.s %[temp1],%[temp], %[tmp1] \n\t"
+ "sub.s %[temp], %[temp], %[tmp1] \n\t"
+ "swc1 %[pom], 0(%[tmpz_n34]) \n\t" // tmpz[n34].re = tmpz[n4].re - tmp2;
+ "swc1 %[pom1], 0(%[tmpz_n4]) \n\t" // tmpz[ n4].re = tmpz[n4].re + tmp2;
+ "swc1 %[temp1],4(%[tmpz_n34]) \n\t" // tmpz[n34].im = tmpz[n4].im + tmp1;
+ "swc1 %[temp], 4(%[tmpz_n4]) \n\t" // tmpz[ n4].im = tmpz[n4].im - tmp1;
+ : [tmp5]"=&f"(tmp5),
+ [tmp1]"=&f"(tmp1), [pom]"=&f"(pom), [pom1]"=&f"(pom1), [pom2]"=&f"(pom2),
+ [tmp2]"=&f"(tmp2), [tmp6]"=&f"(tmp6), [pom3]"=&f"(pom3),
+ [temp]"=&f"(temp), [temp1]"=&f"(temp1), [temp3]"=&f"(temp3), [temp4]"=&f"(temp4)
+ : [tmpz]"r"(tmpz), [tmpz_n2]"r"(tmpz_n2), [tmpz_n34]"r"(tmpz_n34), [tmpz_n4]"r"(tmpz_n4)
+ : "memory"
+ );
+
+ w_re_ptr = (float*)(ff_cos_65536 + step);
+ w_im_ptr = (float*)(ff_cos_65536 + MAX_FFT_SIZE/4 - step);
+
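+            /*
+             * Twiddle factors w_re = cos(2*pi*i / 2^nbits) and
+             * w_im = sin(2*pi*i / 2^nbits), both taken from ff_cos_65536:
+             * the sine is the cosine table read backwards from the
+             * quarter-period point.
+             */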
+ for (i=1; i<n4; i++) {
+ w_re = w_re_ptr[0];
+ w_im = w_im_ptr[0];
+ tmpz_n2_i = tmpz_n2 + i;
+ tmpz_n4_i = tmpz_n4 + i;
+ tmpz_n34_i= tmpz_n34 + i;
+ tmpz_i = tmpz + i;
+
+ __asm__ volatile (
+ "lwc1 %[temp], 0(%[tmpz_n2_i]) \n\t"
+ "lwc1 %[temp1], 4(%[tmpz_n2_i]) \n\t"
+ "lwc1 %[pom], 0(%[tmpz_n34_i]) \n\t"
+ "lwc1 %[pom1], 4(%[tmpz_n34_i]) \n\t"
+ "mul.s %[temp3], %[w_im], %[temp] \n\t"
+ "mul.s %[temp4], %[w_im], %[temp1] \n\t"
+ "mul.s %[pom2], %[w_im], %[pom1] \n\t"
+ "mul.s %[pom3], %[w_im], %[pom] \n\t"
+ "msub.s %[tmp2], %[temp3], %[w_re], %[temp1] \n\t" // tmp2 = w_re * tmpz[ n2+i].im - w_im * tmpz[ n2+i].re;
+ "madd.s %[tmp1], %[temp4], %[w_re], %[temp] \n\t" // tmp1 = w_re * tmpz[ n2+i].re + w_im * tmpz[ n2+i].im;
+ "msub.s %[tmp3], %[pom2], %[w_re], %[pom] \n\t" // tmp3 = w_re * tmpz[n34+i].re - w_im * tmpz[n34+i].im;
+ "madd.s %[tmp4], %[pom3], %[w_re], %[pom1] \n\t" // tmp4 = w_re * tmpz[n34+i].im + w_im * tmpz[n34+i].re;
+ "lwc1 %[temp], 0(%[tmpz_i]) \n\t"
+ "lwc1 %[pom], 4(%[tmpz_i]) \n\t"
+ "add.s %[tmp5], %[tmp1], %[tmp3] \n\t" // tmp5 = tmp1 + tmp3;
+ "sub.s %[tmp1], %[tmp1], %[tmp3] \n\t" // tmp1 = tmp1 - tmp3;
+ "add.s %[tmp6], %[tmp2], %[tmp4] \n\t" // tmp6 = tmp2 + tmp4;
+ "sub.s %[tmp2], %[tmp2], %[tmp4] \n\t" // tmp2 = tmp2 - tmp4;
+ "sub.s %[temp1], %[temp], %[tmp5] \n\t"
+ "add.s %[temp], %[temp], %[tmp5] \n\t"
+ "sub.s %[pom1], %[pom], %[tmp6] \n\t"
+ "add.s %[pom], %[pom], %[tmp6] \n\t"
+ "lwc1 %[temp3], 0(%[tmpz_n4_i]) \n\t"
+ "lwc1 %[pom2], 4(%[tmpz_n4_i]) \n\t"
+ "swc1 %[temp1], 0(%[tmpz_n2_i]) \n\t" // tmpz[ n2+i].re = tmpz[ i].re - tmp5;
+ "swc1 %[temp], 0(%[tmpz_i]) \n\t" // tmpz[ i].re = tmpz[ i].re + tmp5;
+ "swc1 %[pom1], 4(%[tmpz_n2_i]) \n\t" // tmpz[ n2+i].im = tmpz[ i].im - tmp6;
+ "swc1 %[pom] , 4(%[tmpz_i]) \n\t" // tmpz[ i].im = tmpz[ i].im + tmp6;
+ "sub.s %[temp4], %[temp3], %[tmp2] \n\t"
+ "add.s %[pom3], %[pom2], %[tmp1] \n\t"
+ "add.s %[temp3], %[temp3], %[tmp2] \n\t"
+ "sub.s %[pom2], %[pom2], %[tmp1] \n\t"
+ "swc1 %[temp4], 0(%[tmpz_n34_i]) \n\t" // tmpz[n34+i].re = tmpz[n4+i].re - tmp2;
+ "swc1 %[pom3], 4(%[tmpz_n34_i]) \n\t" // tmpz[n34+i].im = tmpz[n4+i].im + tmp1;
+ "swc1 %[temp3], 0(%[tmpz_n4_i]) \n\t" // tmpz[ n4+i].re = tmpz[n4+i].re + tmp2;
+ "swc1 %[pom2], 4(%[tmpz_n4_i]) \n\t" // tmpz[ n4+i].im = tmpz[n4+i].im - tmp1;
+ : [tmp1]"=&f"(tmp1), [tmp2]"=&f" (tmp2), [temp]"=&f"(temp), [tmp3]"=&f"(tmp3),
+ [tmp4]"=&f"(tmp4), [tmp5]"=&f"(tmp5), [tmp6]"=&f"(tmp6),
+ [temp1]"=&f"(temp1), [temp3]"=&f"(temp3), [temp4]"=&f"(temp4),
+ [pom]"=&f"(pom), [pom1]"=&f"(pom1), [pom2]"=&f"(pom2), [pom3]"=&f"(pom3)
+ : [w_re]"f"(w_re), [w_im]"f"(w_im),
+ [tmpz_i]"r"(tmpz_i),[tmpz_n2_i]"r"(tmpz_n2_i),
+ [tmpz_n34_i]"r"(tmpz_n34_i), [tmpz_n4_i]"r"(tmpz_n4_i)
+ : "memory"
+ );
+ w_re_ptr += step;
+ w_im_ptr -= step;
+ }
+ }
+ step >>= 1;
+ n4 <<= 1;
+ }
+}
+
+/**
+ * MDCT/IMDCT transforms.
+ */
+
+static void ff_imdct_half_mips(FFTContext *s, FFTSample *output, const FFTSample *input)
+{
+ int k, n8, n4, n2, n, j;
+ const uint16_t *revtab = s->revtab;
+ const FFTSample *tcos = s->tcos;
+ const FFTSample *tsin = s->tsin;
+ const FFTSample *in1, *in2, *in3, *in4;
+ FFTComplex *z = (FFTComplex *)output;
+
+ int j1;
+ const float *tcos1, *tsin1, *tcos2, *tsin2;
+ float temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8,
+ temp9, temp10, temp11, temp12, temp13, temp14, temp15, temp16;
+ FFTComplex *z1, *z2;
+
+ n = 1 << s->mdct_bits;
+ n2 = n >> 1;
+ n4 = n >> 2;
+ n8 = n >> 3;
+
+ /* pre rotation */
+ in1 = input;
+ in2 = input + n2 - 1;
+ in3 = input + 2;
+ in4 = input + n2 - 3;
+
+ tcos1 = tcos;
+ tsin1 = tsin;
+
+ /* n4 = 64 or 128 */
+ for(k = 0; k < n4; k += 2) {
+ j = revtab[k ];
+ j1 = revtab[k + 1];
+
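+        /*
+         * Two pre-rotation butterflies per iteration:
+         *   z[j].re = in2*cos - in1*sin;  z[j].im = in2*sin + in1*cos
+         * with the results stored in bit-reversed order via revtab.
+         */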
+ __asm__ volatile (
+ "lwc1 %[temp1], 0(%[in2]) \t\n"
+ "lwc1 %[temp2], 0(%[tcos1]) \t\n"
+ "lwc1 %[temp3], 0(%[tsin1]) \t\n"
+ "lwc1 %[temp4], 0(%[in1]) \t\n"
+ "lwc1 %[temp5], 0(%[in4]) \t\n"
+ "mul.s %[temp9], %[temp1], %[temp2] \t\n"
+ "mul.s %[temp10], %[temp1], %[temp3] \t\n"
+ "lwc1 %[temp6], 4(%[tcos1]) \t\n"
+ "lwc1 %[temp7], 4(%[tsin1]) \t\n"
+ "nmsub.s %[temp9], %[temp9], %[temp4], %[temp3] \t\n"
+ "madd.s %[temp10], %[temp10], %[temp4], %[temp2] \t\n"
+ "mul.s %[temp11], %[temp5], %[temp6] \t\n"
+ "mul.s %[temp12], %[temp5], %[temp7] \t\n"
+ "lwc1 %[temp8], 0(%[in3]) \t\n"
+ "addiu %[tcos1], %[tcos1], 8 \t\n"
+ "addiu %[tsin1], %[tsin1], 8 \t\n"
+ "addiu %[in1], %[in1], 16 \t\n"
+ "nmsub.s %[temp11], %[temp11], %[temp8], %[temp7] \t\n"
+ "madd.s %[temp12], %[temp12], %[temp8], %[temp6] \t\n"
+ "addiu %[in2], %[in2], -16 \t\n"
+ "addiu %[in3], %[in3], 16 \t\n"
+ "addiu %[in4], %[in4], -16 \t\n"
+
+ : [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
+ [temp3]"=&f"(temp3), [temp4]"=&f"(temp4),
+ [temp5]"=&f"(temp5), [temp6]"=&f"(temp6),
+ [temp7]"=&f"(temp7), [temp8]"=&f"(temp8),
+ [temp9]"=&f"(temp9), [temp10]"=&f"(temp10),
+ [temp11]"=&f"(temp11), [temp12]"=&f"(temp12),
+ [tsin1]"+r"(tsin1), [tcos1]"+r"(tcos1),
+ [in1]"+r"(in1), [in2]"+r"(in2),
+ [in3]"+r"(in3), [in4]"+r"(in4)
+ :
+ : "memory"
+ );
+
+ z[j ].re = temp9;
+ z[j ].im = temp10;
+ z[j1].re = temp11;
+ z[j1].im = temp12;
+ }
+
+ s->fft_calc(s, z);
+
+ /* post rotation + reordering */
+ /* n8 = 32 or 64 */
+ for(k = 0; k < n8; k += 2) {
+ tcos1 = &tcos[n8 - k - 2];
+ tsin1 = &tsin[n8 - k - 2];
+ tcos2 = &tcos[n8 + k];
+ tsin2 = &tsin[n8 + k];
+ z1 = &z[n8 - k - 2];
+ z2 = &z[n8 + k ];
+
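+        /*
+         * Post-rotation handles two complex pairs per iteration, one on each
+         * side of z[n8]; the re/im results are stored crosswise below.
+         */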
+ __asm__ volatile (
+ "lwc1 %[temp1], 12(%[z1]) \t\n"
+ "lwc1 %[temp2], 4(%[tsin1]) \t\n"
+ "lwc1 %[temp3], 4(%[tcos1]) \t\n"
+ "lwc1 %[temp4], 8(%[z1]) \t\n"
+ "lwc1 %[temp5], 4(%[z1]) \t\n"
+ "mul.s %[temp9], %[temp1], %[temp2] \t\n"
+ "mul.s %[temp10], %[temp1], %[temp3] \t\n"
+ "lwc1 %[temp6], 0(%[tsin1]) \t\n"
+ "lwc1 %[temp7], 0(%[tcos1]) \t\n"
+ "nmsub.s %[temp9], %[temp9], %[temp4], %[temp3] \t\n"
+ "madd.s %[temp10], %[temp10], %[temp4], %[temp2] \t\n"
+ "mul.s %[temp11], %[temp5], %[temp6] \t\n"
+ "mul.s %[temp12], %[temp5], %[temp7] \t\n"
+ "lwc1 %[temp8], 0(%[z1]) \t\n"
+ "lwc1 %[temp1], 4(%[z2]) \t\n"
+ "lwc1 %[temp2], 0(%[tsin2]) \t\n"
+ "lwc1 %[temp3], 0(%[tcos2]) \t\n"
+ "nmsub.s %[temp11], %[temp11], %[temp8], %[temp7] \t\n"
+ "madd.s %[temp12], %[temp12], %[temp8], %[temp6] \t\n"
+ "mul.s %[temp13], %[temp1], %[temp2] \t\n"
+ "mul.s %[temp14], %[temp1], %[temp3] \t\n"
+ "lwc1 %[temp4], 0(%[z2]) \t\n"
+ "lwc1 %[temp5], 12(%[z2]) \t\n"
+ "lwc1 %[temp6], 4(%[tsin2]) \t\n"
+ "lwc1 %[temp7], 4(%[tcos2]) \t\n"
+ "nmsub.s %[temp13], %[temp13], %[temp4], %[temp3] \t\n"
+ "madd.s %[temp14], %[temp14], %[temp4], %[temp2] \t\n"
+ "mul.s %[temp15], %[temp5], %[temp6] \t\n"
+ "mul.s %[temp16], %[temp5], %[temp7] \t\n"
+ "lwc1 %[temp8], 8(%[z2]) \t\n"
+ "nmsub.s %[temp15], %[temp15], %[temp8], %[temp7] \t\n"
+ "madd.s %[temp16], %[temp16], %[temp8], %[temp6] \t\n"
+ : [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
+ [temp3]"=&f"(temp3), [temp4]"=&f"(temp4),
+ [temp5]"=&f"(temp5), [temp6]"=&f"(temp6),
+ [temp7]"=&f"(temp7), [temp8]"=&f"(temp8),
+ [temp9]"=&f"(temp9), [temp10]"=&f"(temp10),
+ [temp11]"=&f"(temp11), [temp12]"=&f"(temp12),
+ [temp13]"=&f"(temp13), [temp14]"=&f"(temp14),
+ [temp15]"=&f"(temp15), [temp16]"=&f"(temp16)
+ : [z1]"r"(z1), [z2]"r"(z2),
+ [tsin1]"r"(tsin1), [tcos1]"r"(tcos1),
+ [tsin2]"r"(tsin2), [tcos2]"r"(tcos2)
+ : "memory"
+ );
+
+ z1[1].re = temp9;
+ z1[1].im = temp14;
+ z2[0].re = temp13;
+ z2[0].im = temp10;
+
+ z1[0].re = temp11;
+ z1[0].im = temp16;
+ z2[1].re = temp15;
+ z2[1].im = temp12;
+ }
+}
+
+/**
+ * Compute inverse MDCT of size N = 2^nbits
+ * @param output N samples
+ * @param input N/2 samples
+ */
+static void ff_imdct_calc_mips(FFTContext *s, FFTSample *output, const FFTSample *input)
+{
+ int k;
+ int n = 1 << s->mdct_bits;
+ int n2 = n >> 1;
+ int n4 = n >> 2;
+
+ ff_imdct_half_mips(s, output+n4, input);
+
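+    /*
+     * Expand the half-size IMDCT output to the full window: the first
+     * quarter is the mirrored negation of the second, and the last
+     * quarter is the mirror image of the third.
+     */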
+ for(k = 0; k < n4; k+=4) {
+ output[k] = -output[n2-k-1];
+ output[k+1] = -output[n2-k-2];
+ output[k+2] = -output[n2-k-3];
+ output[k+3] = -output[n2-k-4];
+
+ output[n-k-1] = output[n2+k];
+ output[n-k-2] = output[n2+k+1];
+ output[n-k-3] = output[n2+k+2];
+ output[n-k-4] = output[n2+k+3];
+ }
+}
+#endif /* HAVE_INLINE_ASM */
+
+av_cold void ff_fft_init_mips(FFTContext *s)
+{
+ int n=0;
+
+ ff_fft_lut_init(fft_offsets_lut, 0, 1 << 16, &n);
+ ff_init_ff_cos_tabs(16);
+
+#if HAVE_INLINE_ASM
+ s->fft_calc = ff_fft_calc_mips;
+#if CONFIG_MDCT
+ s->imdct_calc = ff_imdct_calc_mips;
+ s->imdct_half = ff_imdct_half_mips;
+#endif
+#endif
+}
diff --git a/libavcodec/mips/fft_table.h b/libavcodec/mips/fft_table.h
new file mode 100644
index 0000000000..dd52eaf8c8
--- /dev/null
+++ b/libavcodec/mips/fft_table.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Author: Stanislav Ocovaj (socovaj@mips.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * definitions and offset LUT for the MIPS FFT
+ */
+#ifndef AVCODEC_MIPS_FFT_TABLE_H
+#define AVCODEC_MIPS_FFT_TABLE_H
+
+#include "libavcodec/fft.h"
+
+#define MAX_LOG2_NFFT 16 //!< log2 of the maximum allowed FFT size
+#define MAX_FFT_SIZE (1 << MAX_LOG2_NFFT)
+
+extern uint16_t fft_offsets_lut[];
+void ff_fft_lut_init(uint16_t *table, int off, int size, int *index);
+
+#endif /* AVCODEC_MIPS_FFT_TABLE_H */
diff --git a/libavcodec/mips/fmtconvert_mips.c b/libavcodec/mips/fmtconvert_mips.c
new file mode 100644
index 0000000000..8a0265f070
--- /dev/null
+++ b/libavcodec/mips/fmtconvert_mips.c
@@ -0,0 +1,342 @@
+/*
+ * Format Conversion Utils for MIPS
+ *
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Author: Zoran Lukic (zoranl@mips.com)
+ * Author: Nedeljko Babic (nbabic@mips.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "config.h"
+#include "libavcodec/avcodec.h"
+#include "libavcodec/fmtconvert.h"
+
+#if HAVE_INLINE_ASM
+#if HAVE_MIPSDSPR1
+static void float_to_int16_mips(int16_t *dst, const float *src, long len)
+{
+ const float *src_end = src + len;
+ int ret0, ret1, ret2, ret3, ret4, ret5, ret6, ret7;
+ float src0, src1, src2, src3, src4, src5, src6, src7;
+
+    /*
+     * The loop is unrolled 8 times in assembly for better performance;
+     * cvt.w.s converts each float to an integer, and the saturating
+     * shll_s.w / srl pair clamps the result to the int16 range.
+     */
+ __asm__ volatile(
+ "beq %[len], $zero, fti16_end%= \n\t"
+ "fti16_lp%=: \n\t"
+ "lwc1 %[src0], 0(%[src]) \n\t"
+ "lwc1 %[src1], 4(%[src]) \n\t"
+ "lwc1 %[src2], 8(%[src]) \n\t"
+ "lwc1 %[src3], 12(%[src]) \n\t"
+ "cvt.w.s %[src0], %[src0] \n\t"
+ "cvt.w.s %[src1], %[src1] \n\t"
+ "cvt.w.s %[src2], %[src2] \n\t"
+ "cvt.w.s %[src3], %[src3] \n\t"
+ "mfc1 %[ret0], %[src0] \n\t"
+ "mfc1 %[ret1], %[src1] \n\t"
+ "mfc1 %[ret2], %[src2] \n\t"
+ "mfc1 %[ret3], %[src3] \n\t"
+ "lwc1 %[src4], 16(%[src]) \n\t"
+ "lwc1 %[src5], 20(%[src]) \n\t"
+ "lwc1 %[src6], 24(%[src]) \n\t"
+ "lwc1 %[src7], 28(%[src]) \n\t"
+ "cvt.w.s %[src4], %[src4] \n\t"
+ "cvt.w.s %[src5], %[src5] \n\t"
+ "cvt.w.s %[src6], %[src6] \n\t"
+ "cvt.w.s %[src7], %[src7] \n\t"
+ "addiu %[src], 32 \n\t"
+ "shll_s.w %[ret0], %[ret0], 16 \n\t"
+ "shll_s.w %[ret1], %[ret1], 16 \n\t"
+ "shll_s.w %[ret2], %[ret2], 16 \n\t"
+ "shll_s.w %[ret3], %[ret3], 16 \n\t"
+ "srl %[ret0], %[ret0], 16 \n\t"
+ "srl %[ret1], %[ret1], 16 \n\t"
+ "srl %[ret2], %[ret2], 16 \n\t"
+ "srl %[ret3], %[ret3], 16 \n\t"
+ "sh %[ret0], 0(%[dst]) \n\t"
+ "sh %[ret1], 2(%[dst]) \n\t"
+ "sh %[ret2], 4(%[dst]) \n\t"
+ "sh %[ret3], 6(%[dst]) \n\t"
+ "mfc1 %[ret4], %[src4] \n\t"
+ "mfc1 %[ret5], %[src5] \n\t"
+ "mfc1 %[ret6], %[src6] \n\t"
+ "mfc1 %[ret7], %[src7] \n\t"
+ "shll_s.w %[ret4], %[ret4], 16 \n\t"
+ "shll_s.w %[ret5], %[ret5], 16 \n\t"
+ "shll_s.w %[ret6], %[ret6], 16 \n\t"
+ "shll_s.w %[ret7], %[ret7], 16 \n\t"
+ "srl %[ret4], %[ret4], 16 \n\t"
+ "srl %[ret5], %[ret5], 16 \n\t"
+ "srl %[ret6], %[ret6], 16 \n\t"
+ "srl %[ret7], %[ret7], 16 \n\t"
+ "sh %[ret4], 8(%[dst]) \n\t"
+ "sh %[ret5], 10(%[dst]) \n\t"
+ "sh %[ret6], 12(%[dst]) \n\t"
+ "sh %[ret7], 14(%[dst]) \n\t"
+ "addiu %[dst], 16 \n\t"
+ "bne %[src], %[src_end], fti16_lp%= \n\t"
+ "fti16_end%=: \n\t"
+ : [ret0]"=&r"(ret0), [ret1]"=&r"(ret1), [ret2]"=&r"(ret2), [ret3]"=&r"(ret3),
+ [ret4]"=&r"(ret4), [ret5]"=&r"(ret5), [ret6]"=&r"(ret6), [ret7]"=&r"(ret7),
+ [src0]"=&f"(src0), [src1]"=&f"(src1), [src2]"=&f"(src2), [src3]"=&f"(src3),
+ [src4]"=&f"(src4), [src5]"=&f"(src5), [src6]"=&f"(src6), [src7]"=&f"(src7),
+ [src]"+r"(src), [dst]"+r"(dst)
+ : [src_end]"r"(src_end), [len]"r"(len)
+ : "memory"
+ );
+}
+
+static void float_to_int16_interleave_mips(int16_t *dst, const float **src, long len,
+ int channels)
+{
+    int c, ch2 = channels << 1; /* byte stride between successive samples of one channel */
+ int ret0, ret1, ret2, ret3, ret4, ret5, ret6, ret7;
+ float src0, src1, src2, src3, src4, src5, src6, src7;
+ int16_t *dst_ptr0, *dst_ptr1, *dst_ptr2, *dst_ptr3;
+ int16_t *dst_ptr4, *dst_ptr5, *dst_ptr6, *dst_ptr7;
+ const float *src_ptr, *src_ptr2, *src_end;
+
+ if (channels == 2) {
+ src_ptr = &src[0][0];
+ src_ptr2 = &src[1][0];
+ src_end = src_ptr + len;
+
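+        /* stereo fast path: convert and interleave one sample per channel
+           per iteration */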
+ __asm__ volatile (
+ "fti16i2_lp%=: \n\t"
+ "lwc1 %[src0], 0(%[src_ptr]) \n\t"
+ "lwc1 %[src1], 0(%[src_ptr2]) \n\t"
+ "addiu %[src_ptr], 4 \n\t"
+ "cvt.w.s $f9, %[src0] \n\t"
+ "cvt.w.s $f10, %[src1] \n\t"
+ "mfc1 %[ret0], $f9 \n\t"
+ "mfc1 %[ret1], $f10 \n\t"
+ "shll_s.w %[ret0], %[ret0], 16 \n\t"
+ "shll_s.w %[ret1], %[ret1], 16 \n\t"
+ "addiu %[src_ptr2], 4 \n\t"
+ "srl %[ret0], %[ret0], 16 \n\t"
+ "srl %[ret1], %[ret1], 16 \n\t"
+ "sh %[ret0], 0(%[dst]) \n\t"
+ "sh %[ret1], 2(%[dst]) \n\t"
+ "addiu %[dst], 4 \n\t"
+ "bne %[src_ptr], %[src_end], fti16i2_lp%= \n\t"
+ : [ret0]"=&r"(ret0), [ret1]"=&r"(ret1),
+ [src0]"=&f"(src0), [src1]"=&f"(src1),
+ [src_ptr]"+r"(src_ptr), [src_ptr2]"+r"(src_ptr2),
+ [dst]"+r"(dst)
+ : [src_end]"r"(src_end)
+ : "memory"
+ );
+ } else {
+ for (c = 0; c < channels; c++) {
+ src_ptr = &src[c][0];
+ dst_ptr0 = &dst[c];
+ src_end = src_ptr + len;
+            /*
+             * The loop is unrolled 8 times in assembly for better
+             * performance; eight destination pointers, each ch2 bytes
+             * apart, stride through the interleaved output.
+             */
+ __asm__ volatile(
+ "fti16i_lp%=: \n\t"
+ "lwc1 %[src0], 0(%[src_ptr]) \n\t"
+ "lwc1 %[src1], 4(%[src_ptr]) \n\t"
+ "lwc1 %[src2], 8(%[src_ptr]) \n\t"
+ "lwc1 %[src3], 12(%[src_ptr]) \n\t"
+ "cvt.w.s %[src0], %[src0] \n\t"
+ "cvt.w.s %[src1], %[src1] \n\t"
+ "cvt.w.s %[src2], %[src2] \n\t"
+ "cvt.w.s %[src3], %[src3] \n\t"
+ "mfc1 %[ret0], %[src0] \n\t"
+ "mfc1 %[ret1], %[src1] \n\t"
+ "mfc1 %[ret2], %[src2] \n\t"
+ "mfc1 %[ret3], %[src3] \n\t"
+ "lwc1 %[src4], 16(%[src_ptr]) \n\t"
+ "lwc1 %[src5], 20(%[src_ptr]) \n\t"
+ "lwc1 %[src6], 24(%[src_ptr]) \n\t"
+ "lwc1 %[src7], 28(%[src_ptr]) \n\t"
+ "addu %[dst_ptr1], %[dst_ptr0], %[ch2] \n\t"
+ "addu %[dst_ptr2], %[dst_ptr1], %[ch2] \n\t"
+ "addu %[dst_ptr3], %[dst_ptr2], %[ch2] \n\t"
+ "addu %[dst_ptr4], %[dst_ptr3], %[ch2] \n\t"
+ "addu %[dst_ptr5], %[dst_ptr4], %[ch2] \n\t"
+ "addu %[dst_ptr6], %[dst_ptr5], %[ch2] \n\t"
+ "addu %[dst_ptr7], %[dst_ptr6], %[ch2] \n\t"
+ "addiu %[src_ptr], 32 \n\t"
+ "cvt.w.s %[src4], %[src4] \n\t"
+ "cvt.w.s %[src5], %[src5] \n\t"
+ "cvt.w.s %[src6], %[src6] \n\t"
+ "cvt.w.s %[src7], %[src7] \n\t"
+ "shll_s.w %[ret0], %[ret0], 16 \n\t"
+ "shll_s.w %[ret1], %[ret1], 16 \n\t"
+ "shll_s.w %[ret2], %[ret2], 16 \n\t"
+ "shll_s.w %[ret3], %[ret3], 16 \n\t"
+ "srl %[ret0], %[ret0], 16 \n\t"
+ "srl %[ret1], %[ret1], 16 \n\t"
+ "srl %[ret2], %[ret2], 16 \n\t"
+ "srl %[ret3], %[ret3], 16 \n\t"
+ "sh %[ret0], 0(%[dst_ptr0]) \n\t"
+ "sh %[ret1], 0(%[dst_ptr1]) \n\t"
+ "sh %[ret2], 0(%[dst_ptr2]) \n\t"
+ "sh %[ret3], 0(%[dst_ptr3]) \n\t"
+ "mfc1 %[ret4], %[src4] \n\t"
+ "mfc1 %[ret5], %[src5] \n\t"
+ "mfc1 %[ret6], %[src6] \n\t"
+ "mfc1 %[ret7], %[src7] \n\t"
+ "shll_s.w %[ret4], %[ret4], 16 \n\t"
+ "shll_s.w %[ret5], %[ret5], 16 \n\t"
+ "shll_s.w %[ret6], %[ret6], 16 \n\t"
+ "shll_s.w %[ret7], %[ret7], 16 \n\t"
+ "srl %[ret4], %[ret4], 16 \n\t"
+ "srl %[ret5], %[ret5], 16 \n\t"
+ "srl %[ret6], %[ret6], 16 \n\t"
+ "srl %[ret7], %[ret7], 16 \n\t"
+ "sh %[ret4], 0(%[dst_ptr4]) \n\t"
+ "sh %[ret5], 0(%[dst_ptr5]) \n\t"
+ "sh %[ret6], 0(%[dst_ptr6]) \n\t"
+ "sh %[ret7], 0(%[dst_ptr7]) \n\t"
+ "addu %[dst_ptr0], %[dst_ptr7], %[ch2] \n\t"
+ "bne %[src_ptr], %[src_end], fti16i_lp%= \n\t"
+ : [ret0]"=&r"(ret0), [ret1]"=&r"(ret1), [ret2]"=&r"(ret2), [ret3]"=&r"(ret3),
+ [ret4]"=&r"(ret4), [ret5]"=&r"(ret5), [ret6]"=&r"(ret6), [ret7]"=&r"(ret7),
+ [src0]"=&f"(src0), [src1]"=&f"(src1), [src2]"=&f"(src2), [src3]"=&f"(src3),
+ [src4]"=&f"(src4), [src5]"=&f"(src5), [src6]"=&f"(src6), [src7]"=&f"(src7),
+ [dst_ptr1]"=&r"(dst_ptr1), [dst_ptr2]"=&r"(dst_ptr2), [dst_ptr3]"=&r"(dst_ptr3),
+ [dst_ptr4]"=&r"(dst_ptr4), [dst_ptr5]"=&r"(dst_ptr5), [dst_ptr6]"=&r"(dst_ptr6),
+ [dst_ptr7]"=&r"(dst_ptr7), [dst_ptr0]"+r"(dst_ptr0), [src_ptr]"+r"(src_ptr)
+ : [ch2]"r"(ch2), [src_end]"r"(src_end)
+ : "memory"
+ );
+ }
+ }
+}
+#endif /* HAVE_MIPSDSPR1 */
+
+static void int32_to_float_fmul_scalar_mips(float *dst, const int *src,
+ float mul, int len)
+{
+ /*
+ * variables used in inline assembler
+ */
+ float temp1, temp3, temp5, temp7, temp9, temp11, temp13, temp15;
+
+ int rpom1, rpom2, rpom11, rpom21, rpom12, rpom22, rpom13, rpom23;
+ const int *src_end = src + len;
+    /*
+     * The loop is unrolled 8 times in assembly for better performance.
+     */
+ __asm__ volatile (
+ "i32tf_lp%=: \n\t"
+ "lw %[rpom11], 0(%[src]) \n\t"
+ "lw %[rpom21], 4(%[src]) \n\t"
+ "lw %[rpom1], 8(%[src]) \n\t"
+ "lw %[rpom2], 12(%[src]) \n\t"
+ "mtc1 %[rpom11], %[temp1] \n\t"
+ "mtc1 %[rpom21], %[temp3] \n\t"
+ "mtc1 %[rpom1], %[temp5] \n\t"
+ "mtc1 %[rpom2], %[temp7] \n\t"
+
+ "lw %[rpom13], 16(%[src]) \n\t"
+ "lw %[rpom23], 20(%[src]) \n\t"
+ "lw %[rpom12], 24(%[src]) \n\t"
+ "lw %[rpom22], 28(%[src]) \n\t"
+ "mtc1 %[rpom13], %[temp9] \n\t"
+ "mtc1 %[rpom23], %[temp11] \n\t"
+ "mtc1 %[rpom12], %[temp13] \n\t"
+ "mtc1 %[rpom22], %[temp15] \n\t"
+
+ "addiu %[src], 32 \n\t"
+ "cvt.s.w %[temp1], %[temp1] \n\t"
+ "cvt.s.w %[temp3], %[temp3] \n\t"
+ "cvt.s.w %[temp5], %[temp5] \n\t"
+ "cvt.s.w %[temp7], %[temp7] \n\t"
+
+ "cvt.s.w %[temp9], %[temp9] \n\t"
+ "cvt.s.w %[temp11], %[temp11] \n\t"
+ "cvt.s.w %[temp13], %[temp13] \n\t"
+ "cvt.s.w %[temp15], %[temp15] \n\t"
+
+ "mul.s %[temp1], %[temp1], %[mul] \n\t"
+ "mul.s %[temp3], %[temp3], %[mul] \n\t"
+ "mul.s %[temp5], %[temp5], %[mul] \n\t"
+ "mul.s %[temp7], %[temp7], %[mul] \n\t"
+
+ "mul.s %[temp9], %[temp9], %[mul] \n\t"
+ "mul.s %[temp11], %[temp11], %[mul] \n\t"
+ "mul.s %[temp13], %[temp13], %[mul] \n\t"
+ "mul.s %[temp15], %[temp15], %[mul] \n\t"
+
+ "swc1 %[temp1], 0(%[dst]) \n\t" /*dst[i] = src[i] * mul; */
+ "swc1 %[temp3], 4(%[dst]) \n\t" /*dst[i+1] = src[i+1] * mul;*/
+ "swc1 %[temp5], 8(%[dst]) \n\t" /*dst[i+2] = src[i+2] * mul;*/
+ "swc1 %[temp7], 12(%[dst]) \n\t" /*dst[i+3] = src[i+3] * mul;*/
+
+ "swc1 %[temp9], 16(%[dst]) \n\t" /*dst[i+4] = src[i+4] * mul;*/
+ "swc1 %[temp11], 20(%[dst]) \n\t" /*dst[i+5] = src[i+5] * mul;*/
+ "swc1 %[temp13], 24(%[dst]) \n\t" /*dst[i+6] = src[i+6] * mul;*/
+ "swc1 %[temp15], 28(%[dst]) \n\t" /*dst[i+7] = src[i+7] * mul;*/
+ "addiu %[dst], 32 \n\t"
+ "bne %[src], %[src_end], i32tf_lp%= \n\t"
+ : [temp1]"=&f"(temp1), [temp11]"=&f"(temp11),
+ [temp13]"=&f"(temp13), [temp15]"=&f"(temp15),
+ [temp3]"=&f"(temp3), [temp5]"=&f"(temp5),
+ [temp7]"=&f"(temp7), [temp9]"=&f"(temp9),
+ [rpom1]"=&r"(rpom1), [rpom2]"=&r"(rpom2),
+ [rpom11]"=&r"(rpom11), [rpom21]"=&r"(rpom21),
+ [rpom12]"=&r"(rpom12), [rpom22]"=&r"(rpom22),
+ [rpom13]"=&r"(rpom13), [rpom23]"=&r"(rpom23),
+ [dst]"+r"(dst), [src]"+r"(src)
+ : [mul]"f"(mul), [src_end]"r"(src_end)
+ : "memory"
+ );
+}
+#endif /* HAVE_INLINE_ASM */
+
+av_cold void ff_fmt_convert_init_mips(FmtConvertContext *c)
+{
+#if HAVE_INLINE_ASM
+#if HAVE_MIPSDSPR1
+ c->float_to_int16_interleave = float_to_int16_interleave_mips;
+ c->float_to_int16 = float_to_int16_mips;
+#endif
+ c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_mips;
+#endif
+}
diff --git a/libavcodec/mips/iirfilter_mips.c b/libavcodec/mips/iirfilter_mips.c
new file mode 100644
index 0000000000..a5646cde8b
--- /dev/null
+++ b/libavcodec/mips/iirfilter_mips.c
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Author: Bojan Zivkovic (bojan@mips.com)
+ *
+ * IIR filter optimized for MIPS floating-point architecture
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Reference: libavcodec/iirfilter.c
+ */
+
+#include "libavcodec/iirfilter.h"
+
+#if HAVE_INLINE_ASM
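+/*
+ * Local mirrors of the private coefficient/state structs defined in
+ * libavcodec/iirfilter.c; the layouts must stay in sync.
+ */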
+typedef struct FFIIRFilterCoeffs {
+ int order;
+ float gain;
+ int *cx;
+ float *cy;
+} FFIIRFilterCoeffs;
+
+typedef struct FFIIRFilterState {
+ float x[1];
+} FFIIRFilterState;
+
+static void ff_iir_filter_flt_mips(const struct FFIIRFilterCoeffs *c,
+ struct FFIIRFilterState *s, int size,
+ const float *src, int sstep, float *dst, int dstep)
+{
+ if (c->order == 2) {
+ int i;
+ const float *src0 = src;
+ float *dst0 = dst;
+ for (i = 0; i < size; i++) {
+ float in = *src0 * c->gain + s->x[0] * c->cy[0] + s->x[1] * c->cy[1];
+ *dst0 = s->x[0] + in + s->x[1] * c->cx[1];
+ s->x[0] = s->x[1];
+ s->x[1] = in;
+ src0 += sstep;
+ dst0 += dstep;
+ }
+ } else if (c->order == 4) {
+ int i;
+ const float *src0 = src;
+ float *dst0 = dst;
+ float four = 4.0;
+ float six = 6.0;
+ for (i = 0; i < size; i += 4) {
+ float in1, in2, in3, in4;
+ float res1, res2, res3, res4;
+ float *x = s->x;
+ float *cy = c->cy;
+ float gain = c->gain;
+ float src0_0 = src0[0 ];
+ float src0_1 = src0[sstep ];
+ float src0_2 = src0[2*sstep];
+ float src0_3 = src0[3*sstep];
+
+ __asm__ volatile (
+ "lwc1 $f0, 0(%[cy]) \n\t"
+ "lwc1 $f4, 0(%[x]) \n\t"
+ "lwc1 $f5, 4(%[x]) \n\t"
+ "lwc1 $f6, 8(%[x]) \n\t"
+ "lwc1 $f7, 12(%[x]) \n\t"
+ "mul.s %[in1], %[src0_0], %[gain] \n\t"
+ "mul.s %[in2], %[src0_1], %[gain] \n\t"
+ "mul.s %[in3], %[src0_2], %[gain] \n\t"
+ "mul.s %[in4], %[src0_3], %[gain] \n\t"
+ "lwc1 $f1, 4(%[cy]) \n\t"
+ "madd.s %[in1], %[in1], $f0, $f4 \n\t"
+ "madd.s %[in2], %[in2], $f0, $f5 \n\t"
+ "madd.s %[in3], %[in3], $f0, $f6 \n\t"
+ "madd.s %[in4], %[in4], $f0, $f7 \n\t"
+ "lwc1 $f2, 8(%[cy]) \n\t"
+ "madd.s %[in1], %[in1], $f1, $f5 \n\t"
+ "madd.s %[in2], %[in2], $f1, $f6 \n\t"
+ "madd.s %[in3], %[in3], $f1, $f7 \n\t"
+ "lwc1 $f3, 12(%[cy]) \n\t"
+ "add.s $f8, $f5, $f7 \n\t"
+ "madd.s %[in1], %[in1], $f2, $f6 \n\t"
+ "madd.s %[in2], %[in2], $f2, $f7 \n\t"
+ "mul.s $f9, $f6, %[six] \n\t"
+ "mul.s $f10, $f7, %[six] \n\t"
+ "madd.s %[in1], %[in1], $f3, $f7 \n\t"
+ "madd.s %[in2], %[in2], $f3, %[in1] \n\t"
+ "madd.s %[in3], %[in3], $f2, %[in1] \n\t"
+ "madd.s %[in4], %[in4], $f1, %[in1] \n\t"
+ "add.s %[res1], $f4, %[in1] \n\t"
+ "swc1 %[in1], 0(%[x]) \n\t"
+ "add.s $f0, $f6, %[in1] \n\t"
+ "madd.s %[in3], %[in3], $f3, %[in2] \n\t"
+ "madd.s %[in4], %[in4], $f2, %[in2] \n\t"
+ "add.s %[res2], $f5, %[in2] \n\t"
+ "madd.s %[res1], %[res1], $f8, %[four] \n\t"
+ "add.s $f8, $f7, %[in2] \n\t"
+ "swc1 %[in2], 4(%[x]) \n\t"
+ "madd.s %[in4], %[in4], $f3, %[in3] \n\t"
+ "add.s %[res3], $f6, %[in3] \n\t"
+ "add.s %[res1], %[res1], $f9 \n\t"
+ "madd.s %[res2], %[res2], $f0, %[four] \n\t"
+ "swc1 %[in3], 8(%[x]) \n\t"
+ "add.s %[res4], $f7, %[in4] \n\t"
+ "madd.s %[res3], %[res3], $f8, %[four] \n\t"
+ "swc1 %[in4], 12(%[x]) \n\t"
+ "add.s %[res2], %[res2], $f10 \n\t"
+ "add.s $f8, %[in1], %[in3] \n\t"
+ "madd.s %[res3], %[res3], %[in1], %[six] \n\t"
+ "madd.s %[res4], %[res4], $f8, %[four] \n\t"
+ "madd.s %[res4], %[res4], %[in2], %[six] \n\t"
+
+ : [in1]"=&f"(in1), [in2]"=&f"(in2),
+ [in3]"=&f"(in3), [in4]"=&f"(in4),
+ [res1]"=&f"(res1), [res2]"=&f"(res2),
+ [res3]"=&f"(res3), [res4]"=&f"(res4)
+ : [src0_0]"f"(src0_0), [src0_1]"f"(src0_1),
+ [src0_2]"f"(src0_2), [src0_3]"f"(src0_3),
+ [gain]"f"(gain), [x]"r"(x), [cy]"r"(cy),
+ [four]"f"(four), [six]"f"(six)
+ : "$f0", "$f1", "$f2", "$f3",
+ "$f4", "$f5", "$f6", "$f7",
+ "$f8", "$f9", "$f10",
+ "memory"
+ );
+
+ dst0[0 ] = res1;
+ dst0[sstep ] = res2;
+ dst0[2*sstep] = res3;
+ dst0[3*sstep] = res4;
+
+ src0 += 4*sstep;
+ dst0 += 4*dstep;
+ }
+ } else {
+ int i;
+ const float *src0 = src;
+ float *dst0 = dst;
+ for (i = 0; i < size; i++) {
+ int j;
+ float in, res;
+ in = *src0 * c->gain;
+ for(j = 0; j < c->order; j++)
+ in += c->cy[j] * s->x[j];
+ res = s->x[0] + in + s->x[c->order >> 1] * c->cx[c->order >> 1];
+ for(j = 1; j < c->order >> 1; j++)
+ res += (s->x[j] + s->x[c->order - j]) * c->cx[j];
+ for(j = 0; j < c->order - 1; j++)
+ s->x[j] = s->x[j + 1];
+ *dst0 = res;
+ s->x[c->order - 1] = in;
+ src0 += sstep;
+ dst0 += dstep;
+ }
+ }
+}
+#endif /* HAVE_INLINE_ASM */
+
+void ff_iir_filter_init_mips(FFIIRFilterContext *f)
+{
+#if HAVE_INLINE_ASM
+ f->filter_flt = ff_iir_filter_flt_mips;
+#endif /* HAVE_INLINE_ASM */
+}
diff --git a/libavcodec/mips/lsp_mips.h b/libavcodec/mips/lsp_mips.h
new file mode 100644
index 0000000000..c55ef03ad4
--- /dev/null
+++ b/libavcodec/mips/lsp_mips.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Author: Nedeljko Babic (nbabic@mips.com)
+ *
+ * LSP routines for ACELP-based codecs optimized for MIPS
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Reference: libavcodec/lsp.c
+ */
+#ifndef AVCODEC_LSP_MIPS_H
+#define AVCODEC_LSP_MIPS_H
+
+#if HAVE_MIPSFPU && HAVE_INLINE_ASM
+static av_always_inline void ff_lsp2polyf_mips(const double *lsp, double *f, int lp_half_order)
+{
+ int i, j = 0;
+ double * p_fi = f;
+ double * p_f = 0;
+
+ f[0] = 1.0;
+ f[1] = -2 * lsp[0];
+ lsp -= 2;
+
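+    /* A sketch of the scalar recurrence that the assembly below unrolls
+     * (cf. ff_lsp2polyf() in libavcodec/lsp.c):
+     *
+     *     double val = -2 * lsp[2*i];
+     *     f[i] = val * f[i-1] + 2 * f[i-2];
+     *     for (j = i - 1; j > 1; j--)
+     *         f[j] += f[j-1] * val + f[j-2];
+     *     f[1] += val;
+     */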
+ for(i=2; i<=lp_half_order; i++)
+ {
+ double tmp, f_j_2, f_j_1, f_j;
+ double val = lsp[2*i];
+
+ __asm__ volatile(
+ "move %[p_f], %[p_fi] \n\t"
+ "add.d %[val], %[val], %[val] \n\t"
+ "addiu %[p_fi], 8 \n\t"
+ "ldc1 %[f_j_1], 0(%[p_f]) \n\t"
+ "ldc1 %[f_j], 8(%[p_f]) \n\t"
+ "neg.d %[val], %[val] \n\t"
+ "add.d %[tmp], %[f_j_1], %[f_j_1] \n\t"
+ "madd.d %[tmp], %[tmp], %[f_j], %[val] \n\t"
+ "addiu %[j], %[i], -2 \n\t"
+ "ldc1 %[f_j_2], -8(%[p_f]) \n\t"
+ "sdc1 %[tmp], 16(%[p_f]) \n\t"
+ "beqz %[j], ff_lsp2polyf_lp_j_end%= \n\t"
+ "ff_lsp2polyf_lp_j%=: \n\t"
+ "add.d %[tmp], %[f_j], %[f_j_2] \n\t"
+ "madd.d %[tmp], %[tmp], %[f_j_1], %[val] \n\t"
+ "mov.d %[f_j], %[f_j_1] \n\t"
+ "addiu %[j], -1 \n\t"
+ "mov.d %[f_j_1], %[f_j_2] \n\t"
+ "ldc1 %[f_j_2], -16(%[p_f]) \n\t"
+ "sdc1 %[tmp], 8(%[p_f]) \n\t"
+ "addiu %[p_f], -8 \n\t"
+ "bgtz %[j], ff_lsp2polyf_lp_j%= \n\t"
+ "ff_lsp2polyf_lp_j_end%=: \n\t"
+
+ : [f_j_2]"=&f"(f_j_2), [f_j_1]"=&f"(f_j_1), [val]"+f"(val),
+ [tmp]"=&f"(tmp), [f_j]"=&f"(f_j), [p_f]"+r"(p_f),
+ [j]"+r"(j), [p_fi]"+r"(p_fi)
+ : [i]"r"(i)
+ : "memory"
+ );
+ f[1] += val;
+ }
+}
+#define ff_lsp2polyf ff_lsp2polyf_mips
+#endif /* HAVE_MIPSFPU && HAVE_INLINE_ASM */
+#endif /* AVCODEC_LSP_MIPS_H */
diff --git a/libavcodec/mips/mathops.h b/libavcodec/mips/mathops.h
index dd80f68072..368290ac5b 100644
--- a/libavcodec/mips/mathops.h
+++ b/libavcodec/mips/mathops.h
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/mips/mpegaudiodsp_mips_fixed.c b/libavcodec/mips/mpegaudiodsp_mips_fixed.c
new file mode 100644
index 0000000000..047a833f14
--- /dev/null
+++ b/libavcodec/mips/mpegaudiodsp_mips_fixed.c
@@ -0,0 +1,907 @@
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Author: Bojan Zivkovic (bojan@mips.com)
+ *
+ * MPEG Audio decoder optimized for MIPS fixed-point architecture
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Reference: libavcodec/mpegaudiodsp_template.c
+ */
+
+#include <string.h>
+
+#include "libavcodec/mpegaudiodsp.h"
+
+static void ff_mpadsp_apply_window_mips_fixed(int32_t *synth_buf, int32_t *window,
+ int *dither_state, int16_t *samples, int incr)
+{
+ register const int32_t *w, *w2, *p;
+ int j;
+ int16_t *samples2;
+ int w_asm, p_asm, w_asm1, p_asm1, w_asm2, p_asm2;
+ int w2_asm, w2_asm1, *p_temp1, *p_temp2;
+ int sum1 = 0;
+ int const min_asm = -32768, max_asm = 32767;
+ int temp1, temp2 = 0, temp3 = 0;
+ int64_t sum;
+
+ /* copy to avoid wrap */
+ memcpy(synth_buf + 512, synth_buf, 32 * sizeof(*synth_buf));
+ samples2 = samples + 31 * incr;
+ w = window;
+ w2 = window + 31;
+ sum = *dither_state;
+ p = synth_buf + 16;
+ p_temp1 = synth_buf + 16;
+ p_temp2 = synth_buf + 48;
+ temp1 = sum;
+
+ /**
+     * use of the round_sample function from the original code is eliminated;
+     * it is replaced with equivalent assembly instructions.
+ */
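+    /* Rough C equivalent of the extr.w/and/slt/movn sequence below
+     * (a sketch; acc stands for the 64-bit $ac0 accumulator):
+     *
+     *     sum1     = (int)(acc >> 24);
+     *     temp1    = acc & 0x00ffffff;   // residue, becomes *dither_state
+     *     *samples = av_clip(sum1, -32768, 32767);
+     */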
+ __asm__ volatile (
+ "mthi $zero \n\t"
+ "mtlo %[temp1] \n\t"
+ "lw %[w_asm], 0(%[w]) \n\t"
+ "lw %[p_asm], 0(%[p]) \n\t"
+ "lw %[w_asm1], 64*4(%[w]) \n\t"
+ "lw %[p_asm1], 64*4(%[p]) \n\t"
+ "lw %[w_asm2], 128*4(%[w]) \n\t"
+ "lw %[p_asm2], 128*4(%[p]) \n\t"
+ "madd %[w_asm], %[p_asm] \n\t"
+ "madd %[w_asm1], %[p_asm1] \n\t"
+ "madd %[w_asm2], %[p_asm2] \n\t"
+ "lw %[w_asm], 192*4(%[w]) \n\t"
+ "lw %[p_asm], 192*4(%[p]) \n\t"
+ "lw %[w_asm1], 256*4(%[w]) \n\t"
+ "lw %[p_asm1], 256*4(%[p]) \n\t"
+ "lw %[w_asm2], 320*4(%[w]) \n\t"
+ "lw %[p_asm2], 320*4(%[p]) \n\t"
+ "madd %[w_asm], %[p_asm] \n\t"
+ "madd %[w_asm1], %[p_asm1] \n\t"
+ "madd %[w_asm2], %[p_asm2] \n\t"
+ "lw %[w_asm], 384*4(%[w]) \n\t"
+ "lw %[p_asm], 384*4(%[p]) \n\t"
+ "lw %[w_asm1], 448*4(%[w]) \n\t"
+ "lw %[p_asm1], 448*4(%[p]) \n\t"
+ "lw %[w_asm2], 32*4(%[w]) \n\t"
+ "lw %[p_asm2], 32*4(%[p]) \n\t"
+ "madd %[w_asm], %[p_asm] \n\t"
+ "madd %[w_asm1], %[p_asm1] \n\t"
+ "msub %[w_asm2], %[p_asm2] \n\t"
+ "lw %[w_asm], 96*4(%[w]) \n\t"
+ "lw %[p_asm], 96*4(%[p]) \n\t"
+ "lw %[w_asm1], 160*4(%[w]) \n\t"
+ "lw %[p_asm1], 160*4(%[p]) \n\t"
+ "lw %[w_asm2], 224*4(%[w]) \n\t"
+ "lw %[p_asm2], 224*4(%[p]) \n\t"
+ "msub %[w_asm], %[p_asm] \n\t"
+ "msub %[w_asm1], %[p_asm1] \n\t"
+ "msub %[w_asm2], %[p_asm2] \n\t"
+ "lw %[w_asm], 288*4(%[w]) \n\t"
+ "lw %[p_asm], 288*4(%[p]) \n\t"
+ "lw %[w_asm1], 352*4(%[w]) \n\t"
+ "lw %[p_asm1], 352*4(%[p]) \n\t"
+ "msub %[w_asm], %[p_asm] \n\t"
+ "lw %[w_asm], 480*4(%[w]) \n\t"
+ "lw %[p_asm], 480*4(%[p]) \n\t"
+ "lw %[w_asm2], 416*4(%[w]) \n\t"
+ "lw %[p_asm2], 416*4(%[p]) \n\t"
+ "msub %[w_asm], %[p_asm] \n\t"
+ "msub %[w_asm1], %[p_asm1] \n\t"
+ "msub %[w_asm2], %[p_asm2] \n\t"
+
+    /* the round_sample function from the original code is eliminated
+     * and replaced with an equivalent assembly sequence;
+     * code example:
+
+ "extr.w %[sum1],$ac0,24 \n\t"
+ "mflo %[temp3], $ac0 \n\t"
+ "and %[temp1], %[temp3], 0x00ffffff \n\t"
+ "slt %[temp2], %[sum1], %[min_asm] \n\t"
+ "movn %[sum1], %[min_asm],%[temp2] \n\t"
+ "slt %[temp2], %[max_asm],%[sum1] \n\t"
+ "movn %[sum1], %[max_asm],%[temp2] \n\t"
+ "sh %[sum1], 0(%[samples]) \n\t"
+ */
+
+ "extr.w %[sum1], $ac0, 24 \n\t"
+ "mflo %[temp3] \n\t"
+ "addi %[w], %[w], 4 \n\t"
+ "and %[temp1], %[temp3], 0x00ffffff \n\t"
+ "slt %[temp2], %[sum1], %[min_asm] \n\t"
+ "movn %[sum1], %[min_asm], %[temp2] \n\t"
+ "slt %[temp2], %[max_asm], %[sum1] \n\t"
+ "movn %[sum1], %[max_asm], %[temp2] \n\t"
+ "sh %[sum1], 0(%[samples]) \n\t"
+
+ : [w_asm] "=&r" (w_asm), [p_asm] "=&r" (p_asm), [w_asm1] "=&r" (w_asm1),
+ [p_asm1] "=&r" (p_asm1), [temp1] "+r" (temp1), [temp2] "+r" (temp2),
+ [w_asm2] "=&r" (w_asm2), [p_asm2] "=&r" (p_asm2),
+ [sum1] "+r" (sum1), [w] "+r" (w), [temp3] "+r" (temp3)
+ : [p] "r" (p), [samples] "r" (samples), [min_asm] "r" (min_asm),
+ [max_asm] "r" (max_asm)
+ : "memory", "hi","lo"
+ );
+
+ samples += incr;
+
+    /* two samples are calculated at the same time, saving one memory
+       access per pair of samples */
+
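+    /* Each loop iteration drives two accumulators: $ac0 collects the
+     * w[]*p[] products for the forward sample while $ac1 subtracts the
+     * w2[]*p[] products for the mirrored sample, so every loaded p value
+     * is used twice.  After the forward sample is rounded, the low 24 bits
+     * of $ac0 are added into $ac1 (the madd with temp3 == 1), which seems
+     * to match the original code's "sum += sum2" carry of the rounding
+     * residue.  In rough C:
+     *
+     *     sum  += w[k]  * p[k];
+     *     sum2 -= w2[k] * p[k];
+     */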
+ for(j = 1; j < 16; j++) {
+ __asm__ volatile (
+ "mthi $0, $ac1 \n\t"
+ "mtlo $0, $ac1 \n\t"
+ "mthi $0 \n\t"
+ "mtlo %[temp1] \n\t"
+ "addi %[p_temp1], %[p_temp1], 4 \n\t"
+ "lw %[w_asm], 0(%[w]) \n\t"
+ "lw %[p_asm], 0(%[p_temp1]) \n\t"
+ "lw %[w2_asm], 0(%[w2]) \n\t"
+ "lw %[w_asm1], 64*4(%[w]) \n\t"
+ "lw %[p_asm1], 64*4(%[p_temp1]) \n\t"
+ "lw %[w2_asm1], 64*4(%[w2]) \n\t"
+ "madd %[w_asm], %[p_asm] \n\t"
+ "msub $ac1, %[w2_asm], %[p_asm] \n\t"
+ "madd %[w_asm1], %[p_asm1] \n\t"
+ "msub $ac1, %[w2_asm1], %[p_asm1] \n\t"
+ "lw %[w_asm], 128*4(%[w]) \n\t"
+ "lw %[p_asm], 128*4(%[p_temp1]) \n\t"
+ "lw %[w2_asm], 128*4(%[w2]) \n\t"
+ "lw %[w_asm1], 192*4(%[w]) \n\t"
+ "lw %[p_asm1], 192*4(%[p_temp1]) \n\t"
+ "lw %[w2_asm1], 192*4(%[w2]) \n\t"
+ "madd %[w_asm], %[p_asm] \n\t"
+ "msub $ac1, %[w2_asm], %[p_asm] \n\t"
+ "madd %[w_asm1], %[p_asm1] \n\t"
+ "msub $ac1, %[w2_asm1], %[p_asm1] \n\t"
+ "lw %[w_asm], 256*4(%[w]) \n\t"
+ "lw %[p_asm], 256*4(%[p_temp1]) \n\t"
+ "lw %[w2_asm], 256*4(%[w2]) \n\t"
+ "lw %[w_asm1], 320*4(%[w]) \n\t"
+ "lw %[p_asm1], 320*4(%[p_temp1]) \n\t"
+ "lw %[w2_asm1], 320*4(%[w2]) \n\t"
+ "madd %[w_asm], %[p_asm] \n\t"
+ "msub $ac1, %[w2_asm], %[p_asm] \n\t"
+ "madd %[w_asm1], %[p_asm1] \n\t"
+ "msub $ac1, %[w2_asm1], %[p_asm1] \n\t"
+ "lw %[w_asm], 384*4(%[w]) \n\t"
+ "lw %[p_asm], 384*4(%[p_temp1]) \n\t"
+ "lw %[w2_asm], 384*4(%[w2]) \n\t"
+ "lw %[w_asm1], 448*4(%[w]) \n\t"
+ "lw %[p_asm1], 448*4(%[p_temp1]) \n\t"
+ "lw %[w2_asm1], 448*4(%[w2]) \n\t"
+ "madd %[w_asm], %[p_asm] \n\t"
+ "msub $ac1, %[w2_asm], %[p_asm] \n\t"
+ "madd %[w_asm1], %[p_asm1] \n\t"
+ "msub $ac1, %[w2_asm1], %[p_asm1] \n\t"
+ "addi %[p_temp2], %[p_temp2], -4 \n\t"
+ "lw %[w_asm], 32*4(%[w]) \n\t"
+ "lw %[p_asm], 0(%[p_temp2]) \n\t"
+ "lw %[w2_asm], 32*4(%[w2]) \n\t"
+ "lw %[w_asm1], 96*4(%[w]) \n\t"
+ "lw %[p_asm1], 64*4(%[p_temp2]) \n\t"
+ "lw %[w2_asm1], 96*4(%[w2]) \n\t"
+ "msub %[w_asm], %[p_asm] \n\t"
+ "msub $ac1, %[w2_asm], %[p_asm] \n\t"
+ "msub %[w_asm1], %[p_asm1] \n\t"
+ "msub $ac1, %[w2_asm1], %[p_asm1] \n\t"
+ "lw %[w_asm], 160*4(%[w]) \n\t"
+ "lw %[p_asm], 128*4(%[p_temp2]) \n\t"
+ "lw %[w2_asm], 160*4(%[w2]) \n\t"
+ "lw %[w_asm1], 224*4(%[w]) \n\t"
+ "lw %[p_asm1], 192*4(%[p_temp2]) \n\t"
+ "lw %[w2_asm1], 224*4(%[w2]) \n\t"
+ "msub %[w_asm], %[p_asm] \n\t"
+ "msub $ac1, %[w2_asm], %[p_asm] \n\t"
+ "msub %[w_asm1], %[p_asm1] \n\t"
+ "msub $ac1, %[w2_asm1], %[p_asm1] \n\t"
+ "lw %[w_asm], 288*4(%[w]) \n\t"
+ "lw %[p_asm], 256*4(%[p_temp2]) \n\t"
+ "lw %[w2_asm], 288*4(%[w2]) \n\t"
+ "lw %[w_asm1], 352*4(%[w]) \n\t"
+ "lw %[p_asm1], 320*4(%[p_temp2]) \n\t"
+ "lw %[w2_asm1], 352*4(%[w2]) \n\t"
+ "msub %[w_asm], %[p_asm] \n\t"
+ "msub $ac1, %[w2_asm], %[p_asm] \n\t"
+ "msub %[w_asm1], %[p_asm1] \n\t"
+ "msub $ac1, %[w2_asm1], %[p_asm1] \n\t"
+ "lw %[w_asm], 416*4(%[w]) \n\t"
+ "lw %[p_asm], 384*4(%[p_temp2]) \n\t"
+ "lw %[w2_asm], 416*4(%[w2]) \n\t"
+ "lw %[w_asm1], 480*4(%[w]) \n\t"
+ "lw %[p_asm1], 448*4(%[p_temp2]) \n\t"
+ "lw %[w2_asm1], 480*4(%[w2]) \n\t"
+ "msub %[w_asm], %[p_asm] \n\t"
+ "msub %[w_asm1], %[p_asm1] \n\t"
+ "msub $ac1, %[w2_asm], %[p_asm] \n\t"
+ "msub $ac1, %[w2_asm1], %[p_asm1] \n\t"
+ "addi %[w], %[w], 4 \n\t"
+ "addi %[w2], %[w2], -4 \n\t"
+ "mflo %[temp2] \n\t"
+ "extr.w %[sum1], $ac0, 24 \n\t"
+ "li %[temp3], 1 \n\t"
+ "and %[temp1], %[temp2], 0x00ffffff \n\t"
+ "madd $ac1, %[temp1], %[temp3] \n\t"
+ "slt %[temp2], %[sum1], %[min_asm] \n\t"
+ "movn %[sum1], %[min_asm], %[temp2] \n\t"
+ "slt %[temp2], %[max_asm], %[sum1] \n\t"
+ "movn %[sum1], %[max_asm], %[temp2] \n\t"
+ "sh %[sum1], 0(%[samples]) \n\t"
+ "mflo %[temp3], $ac1 \n\t"
+ "extr.w %[sum1], $ac1, 24 \n\t"
+ "and %[temp1], %[temp3], 0x00ffffff \n\t"
+ "slt %[temp2], %[sum1], %[min_asm] \n\t"
+ "movn %[sum1], %[min_asm], %[temp2] \n\t"
+ "slt %[temp2], %[max_asm], %[sum1] \n\t"
+ "movn %[sum1], %[max_asm], %[temp2] \n\t"
+ "sh %[sum1], 0(%[samples2]) \n\t"
+
+ : [w_asm] "=&r" (w_asm), [p_asm] "=&r" (p_asm), [w_asm1] "=&r" (w_asm1),
+ [p_asm1] "=&r" (p_asm1), [w2_asm1] "=&r" (w2_asm1),
+ [w2_asm] "=&r" (w2_asm), [temp1] "+r" (temp1), [temp2] "+r" (temp2),
+ [p_temp1] "+r" (p_temp1), [p_temp2] "+r" (p_temp2), [sum1] "+r" (sum1),
+ [w] "+r" (w), [w2] "+r" (w2), [samples] "+r" (samples),
+ [samples2] "+r" (samples2), [temp3] "+r" (temp3)
+ : [min_asm] "r" (min_asm), [max_asm] "r" (max_asm)
+ : "memory", "hi", "lo", "$ac1hi", "$ac1lo"
+ );
+
+ samples += incr;
+ samples2 -= incr;
+ }
+
+ p = synth_buf + 32;
+
+ __asm__ volatile (
+ "mthi $0 \n\t"
+ "mtlo %[temp1] \n\t"
+ "lw %[w_asm], 32*4(%[w]) \n\t"
+ "lw %[p_asm], 0(%[p]) \n\t"
+ "lw %[w_asm1], 96*4(%[w]) \n\t"
+ "lw %[p_asm1], 64*4(%[p]) \n\t"
+ "lw %[w_asm2], 160*4(%[w]) \n\t"
+ "lw %[p_asm2], 128*4(%[p]) \n\t"
+ "msub %[w_asm], %[p_asm] \n\t"
+ "msub %[w_asm1], %[p_asm1] \n\t"
+ "msub %[w_asm2], %[p_asm2] \n\t"
+ "lw %[w_asm], 224*4(%[w]) \n\t"
+ "lw %[p_asm], 192*4(%[p]) \n\t"
+ "lw %[w_asm1], 288*4(%[w]) \n\t"
+ "lw %[p_asm1], 256*4(%[p]) \n\t"
+ "lw %[w_asm2], 352*4(%[w]) \n\t"
+ "lw %[p_asm2], 320*4(%[p]) \n\t"
+ "msub %[w_asm], %[p_asm] \n\t"
+ "msub %[w_asm1], %[p_asm1] \n\t"
+ "msub %[w_asm2], %[p_asm2] \n\t"
+ "lw %[w_asm], 416*4(%[w]) \n\t"
+ "lw %[p_asm], 384*4(%[p]) \n\t"
+ "lw %[w_asm1], 480*4(%[w]) \n\t"
+ "lw %[p_asm1], 448*4(%[p]) \n\t"
+ "msub %[w_asm], %[p_asm] \n\t"
+ "msub %[w_asm1], %[p_asm1] \n\t"
+ "extr.w %[sum1], $ac0, 24 \n\t"
+ "mflo %[temp2] \n\t"
+ "and %[temp1], %[temp2], 0x00ffffff \n\t"
+ "slt %[temp2], %[sum1], %[min_asm] \n\t"
+ "movn %[sum1], %[min_asm], %[temp2] \n\t"
+ "slt %[temp2], %[max_asm], %[sum1] \n\t"
+ "movn %[sum1], %[max_asm], %[temp2] \n\t"
+ "sh %[sum1], 0(%[samples]) \n\t"
+
+ : [w_asm] "=&r" (w_asm), [p_asm] "=&r" (p_asm), [w_asm1] "=&r" (w_asm1),
+ [p_asm1] "=&r" (p_asm1), [temp1] "+r" (temp1), [temp2] "+r" (temp2),
+ [w_asm2] "=&r" (w_asm2), [p_asm2] "=&r" (p_asm2), [sum1] "+r" (sum1)
+ : [w] "r" (w), [p] "r" (p), [samples] "r" (samples), [min_asm] "r" (min_asm),
+ [max_asm] "r" (max_asm)
+ : "memory", "hi", "lo", "$ac1hi", "$ac1lo"
+ );
+
+    *dither_state = temp1;
+}
+
+static void imdct36_mips_fixed(int *out, int *buf, int *in, int *win)
+{
+ int j;
+ int t0, t1, t2, t3, s0, s1, s2, s3;
+ int tmp[18], *tmp1, *in1;
+ /* temporary variables */
+ int temp_reg1, temp_reg2, temp_reg3, temp_reg4, temp_reg5, temp_reg6;
+ int t4, t5, t6, t8, t7;
+
+    /* values defined in macros and tables are not used;
+     * they are loaded directly into the appropriate variables
+     */
+ int const C_1 = 4229717092; /* cos(pi*1/18)*2 */
+ int const C_2 = 4035949074; /* cos(pi*2/18)*2 */
+ int const C_3 = 575416510; /* -cos(pi*3/18)*2 */
+ int const C_3A = 3719550786; /* cos(pi*3/18)*2 */
+ int const C_4 = 1004831466; /* -cos(pi*4/18)*2 */
+ int const C_5 = 1534215534; /* -cos(pi*5/18)*2 */
+ int const C_7 = -1468965330; /* -cos(pi*7/18)*2 */
+ int const C_8 = -745813244; /* -cos(pi*8/18)*2 */
+
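+    /* The literals above appear to be Q31 encodings of +/-2*cos(k*pi/18)
+     * wrapped into 32 bits, e.g. C_1 ~= 2*cos(pi/18) * 2^31 and
+     * C_3 ~= -2*cos(3*pi/18) * 2^31 + 2^32.  multu treats them as
+     * unsigned, and the movn/sub sequences below correct the product
+     * when the signed operand is negative. */
+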
+ /*
+     * the instructions of the first two loops are reorganized and the loops
+     * are unrolled in order to eliminate unnecessary reads and writes to the array
+ */
+
+ __asm__ volatile (
+ "lw %[t1], 17*4(%[in]) \n\t"
+ "lw %[t2], 16*4(%[in]) \n\t"
+ "lw %[t3], 15*4(%[in]) \n\t"
+ "lw %[t4], 14*4(%[in]) \n\t"
+ "addu %[t1], %[t1], %[t2] \n\t"
+ "addu %[t2], %[t2], %[t3] \n\t"
+ "addu %[t3], %[t3], %[t4] \n\t"
+ "lw %[t5], 13*4(%[in]) \n\t"
+ "addu %[t1], %[t1], %[t3] \n\t"
+ "sw %[t2], 16*4(%[in]) \n\t"
+ "lw %[t6], 12*4(%[in]) \n\t"
+ "sw %[t1], 17*4(%[in]) \n\t"
+ "addu %[t4], %[t4], %[t5] \n\t"
+ "addu %[t5], %[t5], %[t6] \n\t"
+ "lw %[t7], 11*4(%[in]) \n\t"
+ "addu %[t3], %[t3], %[t5] \n\t"
+ "sw %[t4], 14*4(%[in]) \n\t"
+ "lw %[t8], 10*4(%[in]) \n\t"
+ "sw %[t3], 15*4(%[in]) \n\t"
+ "addu %[t6], %[t6], %[t7] \n\t"
+ "addu %[t7], %[t7], %[t8] \n\t"
+ "sw %[t6], 12*4(%[in]) \n\t"
+ "addu %[t5], %[t5], %[t7] \n\t"
+ "lw %[t1], 9*4(%[in]) \n\t"
+ "lw %[t2], 8*4(%[in]) \n\t"
+ "sw %[t5], 13*4(%[in]) \n\t"
+ "addu %[t8], %[t8], %[t1] \n\t"
+ "addu %[t1], %[t1], %[t2] \n\t"
+ "sw %[t8], 10*4(%[in]) \n\t"
+ "addu %[t7], %[t7], %[t1] \n\t"
+ "lw %[t3], 7*4(%[in]) \n\t"
+ "lw %[t4], 6*4(%[in]) \n\t"
+ "sw %[t7], 11*4(%[in]) \n\t"
+ "addu %[t2], %[t2], %[t3] \n\t"
+ "addu %[t3], %[t3], %[t4] \n\t"
+ "sw %[t2], 8*4(%[in]) \n\t"
+ "addu %[t1], %[t1], %[t3] \n\t"
+ "lw %[t5], 5*4(%[in]) \n\t"
+ "lw %[t6], 4*4(%[in]) \n\t"
+ "sw %[t1], 9*4(%[in]) \n\t"
+ "addu %[t4], %[t4], %[t5] \n\t"
+ "addu %[t5], %[t5], %[t6] \n\t"
+ "sw %[t4], 6*4(%[in]) \n\t"
+ "addu %[t3], %[t3], %[t5] \n\t"
+ "lw %[t7], 3*4(%[in]) \n\t"
+ "lw %[t8], 2*4(%[in]) \n\t"
+ "sw %[t3], 7*4(%[in]) \n\t"
+ "addu %[t6], %[t6], %[t7] \n\t"
+ "addu %[t7], %[t7], %[t8] \n\t"
+ "sw %[t6], 4*4(%[in]) \n\t"
+ "addu %[t5], %[t5], %[t7] \n\t"
+ "lw %[t1], 1*4(%[in]) \n\t"
+ "lw %[t2], 0*4(%[in]) \n\t"
+ "sw %[t5], 5*4(%[in]) \n\t"
+ "addu %[t8], %[t8], %[t1] \n\t"
+ "addu %[t1], %[t1], %[t2] \n\t"
+ "sw %[t8], 2*4(%[in]) \n\t"
+ "addu %[t7], %[t7], %[t1] \n\t"
+ "sw %[t7], 3*4(%[in]) \n\t"
+ "sw %[t1], 1*4(%[in]) \n\t"
+
+ : [in] "+r" (in), [t1] "=&r" (t1), [t2] "=&r" (t2), [t3] "=&r" (t3),
+ [t4] "=&r" (t4), [t5] "=&r" (t5), [t6] "=&r" (t6),
+ [t7] "=&r" (t7), [t8] "=&r" (t8)
+ :
+ : "memory"
+ );
+
+ for(j = 0; j < 2; j++) {
+
+ tmp1 = tmp + j;
+ in1 = in + j;
+
+ /**
+         * The original constants are multiplied by two in advance
+         * for assembly optimization (e.g. C_2 = 2 * C2).
+         * This can lead to overflow in the operations where they are used.
+ *
+ * Example of the solution:
+ *
+ * in original code:
+ * t0 = ((int64_t)(in1[2*2] + in1[2*4]) * (int64_t)(2*C2))>>32
+ *
+ * in assembly:
+ * C_2 = 2 * C2;
+ * .
+ * .
+ * "lw %[t7], 4*4(%[in1]) \n\t"
+ * "lw %[t8], 8*4(%[in1]) \n\t"
+ * "addu %[temp_reg2],%[t7], %[t8] \n\t"
+ * "multu %[C_2], %[temp_reg2] \n\t"
+ * "mfhi %[temp_reg1] \n\t"
+ * "sra %[temp_reg2],%[temp_reg2],31 \n\t"
+ * "move %[t0], $0 \n\t"
+ * "movn %[t0], %[C_2], %[temp_reg2] \n\t"
+ * "sub %[t0], %[temp_reg1],%[t0] \n\t"
+ */
+
+ __asm__ volatile (
+ "lw %[t7], 4*4(%[in1]) \n\t"
+ "lw %[t8], 8*4(%[in1]) \n\t"
+ "lw %[t6], 16*4(%[in1]) \n\t"
+ "lw %[t4], 0*4(%[in1]) \n\t"
+ "addu %[temp_reg2], %[t7], %[t8] \n\t"
+ "addu %[t2], %[t6], %[t8] \n\t"
+ "multu %[C_2], %[temp_reg2] \n\t"
+ "lw %[t5], 12*4(%[in1]) \n\t"
+ "sub %[t2], %[t2], %[t7] \n\t"
+ "sub %[t1], %[t4], %[t5] \n\t"
+ "sra %[t3], %[t5], 1 \n\t"
+ "sra %[temp_reg1], %[t2], 1 \n\t"
+ "addu %[t3], %[t3], %[t4] \n\t"
+ "sub %[temp_reg1], %[t1], %[temp_reg1] \n\t"
+ "sra %[temp_reg2], %[temp_reg2], 31 \n\t"
+ "sw %[temp_reg1], 6*4(%[tmp1]) \n\t"
+ "move %[t0], $0 \n\t"
+ "movn %[t0], %[C_2], %[temp_reg2] \n\t"
+ "mfhi %[temp_reg1] \n\t"
+ "addu %[t1], %[t1], %[t2] \n\t"
+ "sw %[t1], 16*4(%[tmp1]) \n\t"
+ "sub %[temp_reg4], %[t8], %[t6] \n\t"
+ "add %[temp_reg2], %[t7], %[t6] \n\t"
+ "mult $ac1, %[C_8], %[temp_reg4] \n\t"
+ "multu $ac2, %[C_4], %[temp_reg2] \n\t"
+ "sub %[t0], %[temp_reg1], %[t0] \n\t"
+ "sra %[temp_reg1], %[temp_reg2], 31 \n\t"
+ "move %[t2], $0 \n\t"
+ "movn %[t2], %[C_4], %[temp_reg1] \n\t"
+ "mfhi %[t1], $ac1 \n\t"
+ "mfhi %[temp_reg1], $ac2 \n\t"
+ "lw %[t6], 10*4(%[in1]) \n\t"
+ "lw %[t8], 14*4(%[in1]) \n\t"
+ "lw %[t7], 2*4(%[in1]) \n\t"
+ "lw %[t4], 6*4(%[in1]) \n\t"
+ "sub %[temp_reg3], %[t3], %[t0] \n\t"
+ "add %[temp_reg4], %[t3], %[t0] \n\t"
+ "sub %[temp_reg1], %[temp_reg1], %[temp_reg2] \n\t"
+ "add %[temp_reg4], %[temp_reg4], %[t1] \n\t"
+ "sub %[t2], %[temp_reg1], %[t2] \n\t"
+ "sw %[temp_reg4], 2*4(%[tmp1]) \n\t"
+ "sub %[temp_reg3], %[temp_reg3], %[t2] \n\t"
+ "add %[temp_reg1], %[t3], %[t2] \n\t"
+ "sw %[temp_reg3], 10*4(%[tmp1]) \n\t"
+ "sub %[temp_reg1], %[temp_reg1], %[t1] \n\t"
+ "addu %[temp_reg2], %[t6], %[t8] \n\t"
+ "sw %[temp_reg1], 14*4(%[tmp1]) \n\t"
+ "sub %[temp_reg2], %[temp_reg2], %[t7] \n\t"
+ "addu %[temp_reg3], %[t7], %[t6] \n\t"
+ "multu $ac3, %[C_3], %[temp_reg2] \n\t"
+ "multu %[C_1], %[temp_reg3] \n\t"
+ "sra %[temp_reg1], %[temp_reg2], 31 \n\t"
+ "move %[t1], $0 \n\t"
+ "sra %[temp_reg3], %[temp_reg3], 31 \n\t"
+ "movn %[t1], %[C_3], %[temp_reg1] \n\t"
+ "mfhi %[temp_reg1], $ac3 \n\t"
+ "mfhi %[temp_reg4] \n\t"
+ "move %[t2], $0 \n\t"
+ "movn %[t2], %[C_1], %[temp_reg3] \n\t"
+ "sub %[temp_reg3], %[t6], %[t8] \n\t"
+ "sub %[t2], %[temp_reg4], %[t2] \n\t"
+ "multu $ac1, %[C_7], %[temp_reg3] \n\t"
+ "sub %[temp_reg1], %[temp_reg1], %[temp_reg2] \n\t"
+ "sra %[temp_reg4], %[temp_reg3], 31 \n\t"
+ "sub %[t1], %[temp_reg1], %[t1] \n\t"
+ "move %[t3], $0 \n\t"
+ "sw %[t1], 4*4(%[tmp1]) \n\t"
+ "movn %[t3], %[C_7], %[temp_reg4] \n\t"
+ "multu $ac2, %[C_3A], %[t4] \n\t"
+ "add %[temp_reg2], %[t7], %[t8] \n\t"
+ "move %[t1], $0 \n\t"
+ "mfhi %[temp_reg4], $ac1 \n\t"
+ "multu $ac3,%[C_5], %[temp_reg2] \n\t"
+ "move %[t0], $0 \n\t"
+ "sra %[temp_reg1], %[temp_reg2], 31 \n\t"
+ "movn %[t1],%[C_5], %[temp_reg1] \n\t"
+ "sub %[temp_reg4], %[temp_reg4], %[temp_reg3] \n\t"
+ "mfhi %[temp_reg1], $ac3 \n\t"
+ "sra %[temp_reg3], %[t4], 31 \n\t"
+ "movn %[t0], %[C_3A], %[temp_reg3] \n\t"
+ "mfhi %[temp_reg3], $ac2 \n\t"
+ "sub %[t3], %[temp_reg4], %[t3] \n\t"
+ "add %[temp_reg4], %[t3], %[t2] \n\t"
+ "sub %[temp_reg1], %[temp_reg1], %[temp_reg2] \n\t"
+ "sub %[t1], %[temp_reg1], %[t1] \n\t"
+ "sub %[t0], %[temp_reg3], %[t0] \n\t"
+ "add %[temp_reg1], %[t2], %[t1] \n\t"
+ "add %[temp_reg4], %[temp_reg4], %[t0] \n\t"
+ "sub %[temp_reg2], %[t3], %[t1] \n\t"
+ "sw %[temp_reg4], 0*4(%[tmp1]) \n\t"
+ "sub %[temp_reg1], %[temp_reg1], %[t0] \n\t"
+ "sub %[temp_reg2], %[temp_reg2], %[t0] \n\t"
+ "sw %[temp_reg1], 12*4(%[tmp1]) \n\t"
+ "sw %[temp_reg2], 8*4(%[tmp1]) \n\t"
+
+ : [t7] "=&r" (t7), [temp_reg1] "=&r" (temp_reg1),
+ [temp_reg2] "=&r" (temp_reg2), [temp_reg4] "=&r" (temp_reg4),
+ [temp_reg3] "=&r" (temp_reg3), [t8] "=&r" (t8), [t0] "=&r" (t0),
+ [t4] "=&r" (t4), [t5] "=&r" (t5), [t6] "=&r"(t6), [t2] "=&r" (t2),
+ [t3] "=&r" (t3), [t1] "=&r" (t1)
+ : [C_2] "r" (C_2), [in1] "r" (in1), [tmp1] "r" (tmp1), [C_8] "r" (C_8),
+ [C_4] "r" (C_4), [C_3] "r" (C_3), [C_1] "r" (C_1), [C_7] "r" (C_7),
+ [C_3A] "r" (C_3A), [C_5] "r" (C_5)
+ : "memory", "hi", "lo", "$ac1hi", "$ac1lo", "$ac2hi", "$ac2lo",
+ "$ac3hi", "$ac3lo"
+ );
+ }
+
+ /**
+ * the loop is unrolled four times
+ *
+ * values defined in the tables (icos36[] and icos36h[]) are not loaded
+ * from those tables - they are loaded directly into the appropriate
+ * registers
+ *
+ */
+
+ __asm__ volatile (
+ "lw %[t2], 1*4(%[tmp]) \n\t"
+ "lw %[t3], 3*4(%[tmp]) \n\t"
+ "lw %[t0], 0*4(%[tmp]) \n\t"
+ "lw %[t1], 2*4(%[tmp]) \n\t"
+ "addu %[temp_reg1], %[t3], %[t2] \n\t"
+ "li %[temp_reg2], 0x807D2B1E \n\t"
+ "move %[s1], $0 \n\t"
+ "multu %[temp_reg2], %[temp_reg1] \n\t"
+ "sra %[temp_reg1], %[temp_reg1], 31 \n\t"
+ "movn %[s1], %[temp_reg2], %[temp_reg1] \n\t"
+ "sub %[temp_reg3], %[t3], %[t2] \n\t"
+ "li %[temp_reg4], 0x2de5151 \n\t"
+ "mfhi %[temp_reg2] \n\t"
+ "addu %[s0], %[t1], %[t0] \n\t"
+ "lw %[temp_reg5], 9*4(%[win]) \n\t"
+ "mult $ac1, %[temp_reg4], %[temp_reg3] \n\t"
+ "lw %[temp_reg6], 4*9*4(%[buf]) \n\t"
+ "sub %[s2], %[t1], %[t0] \n\t"
+ "lw %[temp_reg3], 29*4(%[win]) \n\t"
+ "subu %[s1], %[temp_reg2], %[s1] \n\t"
+ "lw %[temp_reg4], 28*4(%[win]) \n\t"
+ "add %[t0], %[s0], %[s1] \n\t"
+ "extr.w %[s3], $ac1,23 \n\t"
+ "mult $ac2, %[t0], %[temp_reg3] \n\t"
+ "sub %[t1], %[s0], %[s1] \n\t"
+ "lw %[temp_reg1], 4*8*4(%[buf]) \n\t"
+ "mult %[t1], %[temp_reg5] \n\t"
+ "lw %[temp_reg2], 8*4(%[win]) \n\t"
+ "mfhi %[temp_reg3], $ac2 \n\t"
+ "mult $ac3, %[t0], %[temp_reg4] \n\t"
+ "add %[t0], %[s2], %[s3] \n\t"
+ "mfhi %[temp_reg5] \n\t"
+ "mult $ac1, %[t1], %[temp_reg2] \n\t"
+ "sub %[t1], %[s2], %[s3] \n\t"
+ "sw %[temp_reg3], 4*9*4(%[buf]) \n\t"
+ "mfhi %[temp_reg4], $ac3 \n\t"
+ "lw %[temp_reg3], 37*4(%[win]) \n\t"
+ "mfhi %[temp_reg2], $ac1 \n\t"
+ "add %[temp_reg5], %[temp_reg5], %[temp_reg6] \n\t"
+ "lw %[temp_reg6], 17*4(%[win]) \n\t"
+ "sw %[temp_reg5], 32*9*4(%[out]) \n\t"
+ "sw %[temp_reg4], 4*8*4(%[buf]) \n\t"
+ "mult %[t1], %[temp_reg6] \n\t"
+ "add %[temp_reg1], %[temp_reg1], %[temp_reg2] \n\t"
+ "lw %[temp_reg2], 0*4(%[win]) \n\t"
+ "lw %[temp_reg5], 4*17*4(%[buf]) \n\t"
+ "sw %[temp_reg1], 8*32*4(%[out]) \n\t"
+ "mfhi %[temp_reg6] \n\t"
+ "mult $ac1, %[t1], %[temp_reg2] \n\t"
+ "lw %[temp_reg4], 20*4(%[win]) \n\t"
+ "lw %[temp_reg1], 0(%[buf]) \n\t"
+ "mult $ac2, %[t0], %[temp_reg3] \n\t"
+ "mult %[t0], %[temp_reg4] \n\t"
+ "mfhi %[temp_reg2], $ac1 \n\t"
+ "lw %[t0], 4*4(%[tmp]) \n\t"
+ "add %[temp_reg5], %[temp_reg5], %[temp_reg6] \n\t"
+ "mfhi %[temp_reg3], $ac2 \n\t"
+ "mfhi %[temp_reg4] \n\t"
+ "sw %[temp_reg5], 17*32*4(%[out]) \n\t"
+ "lw %[t1], 6*4(%[tmp]) \n\t"
+ "add %[temp_reg1], %[temp_reg1], %[temp_reg2] \n\t"
+ "lw %[t2], 5*4(%[tmp]) \n\t"
+ "sw %[temp_reg1], 0*32*4(%[out]) \n\t"
+ "addu %[s0], %[t1], %[t0] \n\t"
+ "sw %[temp_reg3], 4*17*4(%[buf]) \n\t"
+ "lw %[t3], 7*4(%[tmp]) \n\t"
+ "sub %[s2], %[t1], %[t0] \n\t"
+ "sw %[temp_reg4], 0(%[buf]) \n\t"
+ "addu %[temp_reg5], %[t3], %[t2] \n\t"
+ "li %[temp_reg6], 0x8483EE0C \n\t"
+ "move %[s1], $0 \n\t"
+ "multu %[temp_reg6], %[temp_reg5] \n\t"
+ "sub %[temp_reg1], %[t3], %[t2] \n\t"
+ "li %[temp_reg2], 0xf746ea \n\t"
+ "sra %[temp_reg5], %[temp_reg5], 31 \n\t"
+ "mult $ac1, %[temp_reg2], %[temp_reg1] \n\t"
+ "movn %[s1], %[temp_reg6], %[temp_reg5] \n\t"
+ "mfhi %[temp_reg5] \n\t"
+ "lw %[temp_reg3], 10*4(%[win]) \n\t"
+ "lw %[temp_reg4], 4*10*4(%[buf]) \n\t"
+ "extr.w %[s3], $ac1, 23 \n\t"
+ "lw %[temp_reg1], 4*7*4(%[buf]) \n\t"
+ "lw %[temp_reg2], 7*4(%[win]) \n\t"
+ "lw %[temp_reg6], 30*4(%[win]) \n\t"
+ "subu %[s1], %[temp_reg5], %[s1] \n\t"
+ "sub %[t1], %[s0], %[s1] \n\t"
+ "add %[t0], %[s0], %[s1] \n\t"
+ "mult $ac2, %[t1], %[temp_reg3] \n\t"
+ "mult $ac3, %[t1], %[temp_reg2] \n\t"
+ "mult %[t0], %[temp_reg6] \n\t"
+ "lw %[temp_reg5], 27*4(%[win]) \n\t"
+ "mult $ac1, %[t0], %[temp_reg5] \n\t"
+ "mfhi %[temp_reg3], $ac2 \n\t"
+ "mfhi %[temp_reg2], $ac3 \n\t"
+ "mfhi %[temp_reg6] \n\t"
+ "add %[t0], %[s2], %[s3] \n\t"
+ "sub %[t1], %[s2], %[s3] \n\t"
+ "add %[temp_reg3], %[temp_reg3], %[temp_reg4] \n\t"
+ "lw %[temp_reg4], 16*4(%[win]) \n\t"
+ "mfhi %[temp_reg5], $ac1 \n\t"
+ "sw %[temp_reg3], 32*10*4(%[out]) \n\t"
+ "add %[temp_reg1], %[temp_reg1], %[temp_reg2] \n\t"
+ "lw %[temp_reg3], 4*16*4(%[buf]) \n\t"
+ "sw %[temp_reg6], 4*10*4(%[buf]) \n\t"
+ "sw %[temp_reg1], 7*32*4(%[out]) \n\t"
+ "mult $ac2, %[t1], %[temp_reg4] \n\t"
+ "sw %[temp_reg5], 4*7*4(%[buf]) \n\t"
+ "lw %[temp_reg6], 1*4(%[win]) \n\t"
+ "lw %[temp_reg5], 4*1*4(%[buf]) \n\t"
+ "lw %[temp_reg1], 36*4(%[win]) \n\t"
+ "mult $ac3, %[t1], %[temp_reg6] \n\t"
+ "lw %[temp_reg2], 21*4(%[win]) \n\t"
+ "mfhi %[temp_reg4], $ac2 \n\t"
+ "mult %[t0], %[temp_reg1] \n\t"
+ "mult $ac1, %[t0],%[temp_reg2] \n\t"
+ "lw %[t0], 8*4(%[tmp]) \n\t"
+ "mfhi %[temp_reg6], $ac3 \n\t"
+ "lw %[t1], 10*4(%[tmp]) \n\t"
+ "lw %[t3], 11*4(%[tmp]) \n\t"
+ "mfhi %[temp_reg1] \n\t"
+ "add %[temp_reg3], %[temp_reg3], %[temp_reg4] \n\t"
+ "lw %[t2], 9*4(%[tmp]) \n\t"
+ "mfhi %[temp_reg2], $ac1 \n\t"
+ "add %[temp_reg5], %[temp_reg5], %[temp_reg6] \n\t"
+ "sw %[temp_reg3], 16*32*4(%[out]) \n\t"
+ "sw %[temp_reg5], 1*32*4(%[out]) \n\t"
+ "sw %[temp_reg1], 4*16*4(%[buf]) \n\t"
+ "addu %[temp_reg3], %[t3], %[t2] \n\t"
+ "li %[temp_reg4], 0x8D3B7CD6 \n\t"
+ "sw %[temp_reg2], 4*1*4(%[buf]) \n\t"
+ "multu %[temp_reg4],%[temp_reg3] \n\t"
+ "sra %[temp_reg3], %[temp_reg3], 31 \n\t"
+ "move %[s1], $0 \n\t"
+ "movn %[s1], %[temp_reg4], %[temp_reg3] \n\t"
+ "addu %[s0], %[t1], %[t0] \n\t"
+ "mfhi %[temp_reg3] \n\t"
+ "sub %[s2], %[t1], %[t0] \n\t"
+ "sub %[temp_reg5], %[t3], %[t2] \n\t"
+ "li %[temp_reg6], 0x976fd9 \n\t"
+ "lw %[temp_reg2], 11*4(%[win]) \n\t"
+ "lw %[temp_reg1], 4*11*4(%[buf]) \n\t"
+ "mult $ac1, %[temp_reg6], %[temp_reg5] \n\t"
+ "subu %[s1], %[temp_reg3], %[s1] \n\t"
+ "lw %[temp_reg5], 31*4(%[win]) \n\t"
+ "sub %[t1], %[s0], %[s1] \n\t"
+ "add %[t0], %[s0], %[s1] \n\t"
+ "mult $ac2, %[t1], %[temp_reg2] \n\t"
+ "mult %[t0], %[temp_reg5] \n\t"
+ "lw %[temp_reg4], 6*4(%[win]) \n\t"
+ "extr.w %[s3], $ac1, 23 \n\t"
+ "lw %[temp_reg3], 4*6*4(%[buf]) \n\t"
+ "mfhi %[temp_reg2], $ac2 \n\t"
+ "lw %[temp_reg6], 26*4(%[win]) \n\t"
+ "mfhi %[temp_reg5] \n\t"
+ "mult $ac3, %[t1], %[temp_reg4] \n\t"
+ "mult $ac1, %[t0], %[temp_reg6] \n\t"
+ "add %[t0], %[s2], %[s3] \n\t"
+ "sub %[t1], %[s2], %[s3] \n\t"
+ "add %[temp_reg2], %[temp_reg2], %[temp_reg1] \n\t"
+ "mfhi %[temp_reg4], $ac3 \n\t"
+ "mfhi %[temp_reg6], $ac1 \n\t"
+ "sw %[temp_reg5], 4*11*4(%[buf]) \n\t"
+ "sw %[temp_reg2], 32*11*4(%[out]) \n\t"
+ "lw %[temp_reg1], 4*15*4(%[buf]) \n\t"
+ "add %[temp_reg3], %[temp_reg3], %[temp_reg4] \n\t"
+ "lw %[temp_reg2], 15*4(%[win]) \n\t"
+ "sw %[temp_reg3], 6*32*4(%[out]) \n\t"
+ "sw %[temp_reg6], 4*6*4(%[buf]) \n\t"
+ "mult %[t1], %[temp_reg2] \n\t"
+ "lw %[temp_reg3], 2*4(%[win]) \n\t"
+ "lw %[temp_reg4], 4*2*4(%[buf]) \n\t"
+ "lw %[temp_reg5], 35*4(%[win]) \n\t"
+ "mult $ac1, %[t1], %[temp_reg3] \n\t"
+ "mfhi %[temp_reg2] \n\t"
+ "lw %[temp_reg6], 22*4(%[win]) \n\t"
+ "mult $ac2, %[t0], %[temp_reg5] \n\t"
+ "lw %[t1], 14*4(%[tmp]) \n\t"
+ "mult $ac3, %[t0], %[temp_reg6] \n\t"
+ "lw %[t0], 12*4(%[tmp]) \n\t"
+ "mfhi %[temp_reg3], $ac1 \n\t"
+ "add %[temp_reg1], %[temp_reg1], %[temp_reg2] \n\t"
+ "mfhi %[temp_reg5], $ac2 \n\t"
+ "sw %[temp_reg1], 15*32*4(%[out]) \n\t"
+ "mfhi %[temp_reg6], $ac3 \n\t"
+ "lw %[t2], 13*4(%[tmp]) \n\t"
+ "lw %[t3], 15*4(%[tmp]) \n\t"
+ "add %[temp_reg4], %[temp_reg4], %[temp_reg3] \n\t"
+ "sw %[temp_reg5], 4*15*4(%[buf]) \n\t"
+ "addu %[temp_reg1], %[t3], %[t2] \n\t"
+ "li %[temp_reg2], 0x9C42577C \n\t"
+ "move %[s1], $0 \n\t"
+ "multu %[temp_reg2], %[temp_reg1] \n\t"
+ "sw %[temp_reg4], 2*32*4(%[out]) \n\t"
+ "sra %[temp_reg1], %[temp_reg1], 31 \n\t"
+ "movn %[s1], %[temp_reg2], %[temp_reg1] \n\t"
+ "sub %[temp_reg3], %[t3], %[t2] \n\t"
+ "li %[temp_reg4], 0x6f94a2 \n\t"
+ "mfhi %[temp_reg1] \n\t"
+ "addu %[s0], %[t1], %[t0] \n\t"
+ "sw %[temp_reg6], 4*2*4(%[buf]) \n\t"
+ "mult $ac1, %[temp_reg4], %[temp_reg3] \n\t"
+ "sub %[s2], %[t1], %[t0] \n\t"
+ "lw %[temp_reg5], 12*4(%[win]) \n\t"
+ "lw %[temp_reg6], 4*12*4(%[buf]) \n\t"
+ "subu %[s1], %[temp_reg1], %[s1] \n\t"
+ "sub %[t1], %[s0], %[s1] \n\t"
+ "lw %[temp_reg3], 32*4(%[win]) \n\t"
+ "mult $ac2, %[t1], %[temp_reg5] \n\t"
+ "add %[t0], %[s0], %[s1] \n\t"
+ "extr.w %[s3], $ac1, 23 \n\t"
+ "lw %[temp_reg2], 5*4(%[win]) \n\t"
+ "mult %[t0], %[temp_reg3] \n\t"
+ "mfhi %[temp_reg5], $ac2 \n\t"
+ "lw %[temp_reg4], 25*4(%[win]) \n\t"
+ "lw %[temp_reg1], 4*5*4(%[buf]) \n\t"
+ "mult $ac3, %[t1], %[temp_reg2] \n\t"
+ "mult $ac1, %[t0], %[temp_reg4] \n\t"
+ "mfhi %[temp_reg3] \n\t"
+ "add %[t0], %[s2], %[s3] \n\t"
+ "add %[temp_reg5], %[temp_reg5], %[temp_reg6] \n\t"
+ "mfhi %[temp_reg2], $ac3 \n\t"
+ "mfhi %[temp_reg4], $ac1 \n\t"
+ "sub %[t1], %[s2], %[s3] \n\t"
+ "sw %[temp_reg5], 32*12*4(%[out]) \n\t"
+ "sw %[temp_reg3], 4*12*4(%[buf]) \n\t"
+ "lw %[temp_reg6], 14*4(%[win]) \n\t"
+ "lw %[temp_reg5], 4*14*4(%[buf]) \n\t"
+ "add %[temp_reg1], %[temp_reg1], %[temp_reg2] \n\t"
+ "sw %[temp_reg4], 4*5*4(%[buf]) \n\t"
+ "sw %[temp_reg1], 5*32*4(%[out]) \n\t"
+ "mult %[t1], %[temp_reg6] \n\t"
+ "lw %[temp_reg4], 34*4(%[win]) \n\t"
+ "lw %[temp_reg2], 3*4(%[win]) \n\t"
+ "lw %[temp_reg1], 4*3*4(%[buf]) \n\t"
+ "mult $ac2, %[t0], %[temp_reg4] \n\t"
+ "mfhi %[temp_reg6] \n\t"
+ "mult $ac1, %[t1], %[temp_reg2] \n\t"
+ "lw %[temp_reg3], 23*4(%[win]) \n\t"
+ "lw %[s0], 16*4(%[tmp]) \n\t"
+ "mfhi %[temp_reg4], $ac2 \n\t"
+ "lw %[t1], 17*4(%[tmp]) \n\t"
+ "mult $ac3, %[t0], %[temp_reg3] \n\t"
+ "move %[s1], $0 \n\t"
+ "add %[temp_reg5], %[temp_reg5], %[temp_reg6] \n\t"
+ "mfhi %[temp_reg2], $ac1 \n\t"
+ "sw %[temp_reg5], 14*32*4(%[out]) \n\t"
+ "sw %[temp_reg4], 4*14*4(%[buf]) \n\t"
+ "mfhi %[temp_reg3], $ac3 \n\t"
+ "li %[temp_reg5], 0xB504F334 \n\t"
+ "add %[temp_reg1], %[temp_reg1], %[temp_reg2] \n\t"
+ "multu %[temp_reg5], %[t1] \n\t"
+ "lw %[temp_reg2], 4*13*4(%[buf]) \n\t"
+ "sw %[temp_reg1], 3*32*4(%[out]) \n\t"
+ "sra %[t1], %[t1], 31 \n\t"
+ "mfhi %[temp_reg6] \n\t"
+ "movn %[s1], %[temp_reg5], %[t1] \n\t"
+ "sw %[temp_reg3], 4*3*4(%[buf]) \n\t"
+ "lw %[temp_reg1], 13*4(%[win]) \n\t"
+ "lw %[temp_reg4], 4*4*4(%[buf]) \n\t"
+ "lw %[temp_reg3], 4*4(%[win]) \n\t"
+ "lw %[temp_reg5], 33*4(%[win]) \n\t"
+ "subu %[s1], %[temp_reg6], %[s1] \n\t"
+ "lw %[temp_reg6], 24*4(%[win]) \n\t"
+ "sub %[t1], %[s0], %[s1] \n\t"
+ "add %[t0], %[s0], %[s1] \n\t"
+ "mult $ac1, %[t1], %[temp_reg1] \n\t"
+ "mult $ac2, %[t1], %[temp_reg3] \n\t"
+ "mult $ac3, %[t0], %[temp_reg5] \n\t"
+ "mult %[t0], %[temp_reg6] \n\t"
+ "mfhi %[temp_reg1], $ac1 \n\t"
+ "mfhi %[temp_reg3], $ac2 \n\t"
+ "mfhi %[temp_reg5], $ac3 \n\t"
+ "mfhi %[temp_reg6] \n\t"
+ "add %[temp_reg2], %[temp_reg2], %[temp_reg1] \n\t"
+ "add %[temp_reg4], %[temp_reg4], %[temp_reg3] \n\t"
+ "sw %[temp_reg2], 13*32*4(%[out]) \n\t"
+ "sw %[temp_reg4], 4*32*4(%[out]) \n\t"
+ "sw %[temp_reg5], 4*13*4(%[buf]) \n\t"
+ "sw %[temp_reg6], 4*4*4(%[buf]) \n\t"
+
+ : [t0] "=&r" (t0), [t1] "=&r" (t1), [t2] "=&r" (t2), [t3] "=&r" (t3),
+ [s0] "=&r" (s0), [s2] "=&r" (s2), [temp_reg1] "=&r" (temp_reg1),
+ [temp_reg2] "=&r" (temp_reg2), [s1] "=&r" (s1), [s3] "=&r" (s3),
+ [temp_reg3] "=&r" (temp_reg3), [temp_reg4] "=&r" (temp_reg4),
+ [temp_reg5] "=&r" (temp_reg5), [temp_reg6] "=&r" (temp_reg6),
+ [out] "+r" (out)
+ : [tmp] "r" (tmp), [win] "r" (win), [buf] "r" (buf)
+ : "memory", "hi", "lo", "$ac1hi", "$ac1lo", "$ac2hi", "$ac2lo",
+ "$ac3hi", "$ac3lo"
+ );
+}
+
+static void ff_imdct36_blocks_mips_fixed(int *out, int *buf, int *in,
+ int count, int switch_point, int block_type)
+{
+ int j;
+    for (j = 0; j < count; j++) {
+ /* apply window & overlap with previous buffer */
+
+ /* select window */
+ int win_idx = (switch_point && j < 2) ? 0 : block_type;
+ int *win = ff_mdct_win_fixed[win_idx + (4 & -(j & 1))];
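+        /* (4 & -(j & 1)) evaluates to 4 for odd j and to 0 for even j,
+         * selecting the second half of ff_mdct_win_fixed[] (the window
+         * variants used for odd output blocks). */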
+
+ imdct36_mips_fixed(out, buf, in, win);
+
+ in += 18;
+ buf += ((j&3) != 3 ? 1 : (72-3));
+ out++;
+ }
+}
+
+void ff_mpadsp_init_mipsdspr1(MPADSPContext *s)
+{
+ s->apply_window_fixed = ff_mpadsp_apply_window_mips_fixed;
+ s->imdct36_blocks_fixed = ff_imdct36_blocks_mips_fixed;
+}
diff --git a/libavcodec/mips/mpegaudiodsp_mips_float.c b/libavcodec/mips/mpegaudiodsp_mips_float.c
new file mode 100644
index 0000000000..beb8e782f2
--- /dev/null
+++ b/libavcodec/mips/mpegaudiodsp_mips_float.c
@@ -0,0 +1,1250 @@
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Author: Bojan Zivkovic (bojan@mips.com)
+ *
+ * MPEG Audio decoder optimized for MIPS floating-point architecture
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Reference: libavcodec/mpegaudiodsp_template.c
+ * libavcodec/dct32.c
+ */
+
+#include <string.h>
+
+#include "libavcodec/mpegaudiodsp.h"
+
+static void ff_mpadsp_apply_window_mips_float(float *synth_buf, float *window,
+ int *dither_state, float *samples, int incr)
+{
+ register const float *w, *w2, *p;
+ int j;
+ float *samples2;
+ float sum, sum2;
+ /* temporary variables */
+ int incr1 = incr << 2;
+ int t_sample;
+ float in1, in2, in3, in4, in5, in6, in7, in8;
+ float *p2;
+
+ /* copy to avoid wrap */
+ memcpy(synth_buf + 512, synth_buf, 32 * sizeof(*synth_buf));
+
+ /**
+     * instructions are scheduled to minimize pipeline stalls.
+     * use of the round_sample function from the original code is
+     * replaced with equivalent assembly instructions.
+ */
+
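+    /* incr1 = incr << 2 is the sample stride in bytes (sizeof(float) == 4);
+     * t_sample below becomes 32*incr1 - incr1 = 31*incr1, which places
+     * samples2 at the last of the 32 output samples. */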
+ __asm__ volatile (
+ "lwc1 %[sum], 0(%[dither_state]) \t\n"
+ "sll %[t_sample], %[incr1], 5 \t\n"
+ "sub %[t_sample], %[t_sample], %[incr1] \n\t"
+ "li %[j], 4 \t\n"
+ "lwc1 %[in1], 0(%[window]) \t\n"
+ "lwc1 %[in2], 16*4(%[synth_buf]) \t\n"
+ "sw $zero, 0(%[dither_state]) \t\n"
+ "lwc1 %[in3], 64*4(%[window]) \t\n"
+ "lwc1 %[in4], 80*4(%[synth_buf]) \t\n"
+ "addu %[samples2], %[samples], %[t_sample] \t\n"
+ "madd.s %[sum], %[sum], %[in1], %[in2] \t\n"
+ "lwc1 %[in5], 128*4(%[window]) \t\n"
+ "lwc1 %[in6], 144*4(%[synth_buf]) \t\n"
+ "lwc1 %[in7], 192*4(%[window]) \t\n"
+ "madd.s %[sum], %[sum], %[in3], %[in4] \t\n"
+ "lwc1 %[in8], 208*4(%[synth_buf]) \t\n"
+ "lwc1 %[in1], 256*4(%[window]) \t\n"
+ "lwc1 %[in2], 272*4(%[synth_buf]) \t\n"
+ "madd.s %[sum], %[sum], %[in5], %[in6] \t\n"
+ "lwc1 %[in3], 320*4(%[window]) \t\n"
+ "lwc1 %[in4], 336*4(%[synth_buf]) \t\n"
+ "lwc1 %[in5], 384*4(%[window]) \t\n"
+ "madd.s %[sum], %[sum], %[in7], %[in8] \t\n"
+ "lwc1 %[in6], 400*4(%[synth_buf]) \t\n"
+ "lwc1 %[in7], 448*4(%[window]) \t\n"
+ "lwc1 %[in8], 464*4(%[synth_buf]) \t\n"
+ "madd.s %[sum], %[sum], %[in1], %[in2] \t\n"
+ "lwc1 %[in1], 32*4(%[window]) \t\n"
+ "lwc1 %[in2], 48*4(%[synth_buf]) \t\n"
+ "madd.s %[sum], %[sum], %[in3], %[in4] \t\n"
+ "lwc1 %[in3], 96*4(%[window]) \t\n"
+ "lwc1 %[in4], 112*4(%[synth_buf]) \t\n"
+ "madd.s %[sum], %[sum], %[in5], %[in6] \t\n"
+ "lwc1 %[in5], 160*4(%[window]) \t\n"
+ "lwc1 %[in6], 176*4(%[synth_buf]) \t\n"
+ "madd.s %[sum], %[sum], %[in7], %[in8] \t\n"
+ "lwc1 %[in7], 224*4(%[window]) \t\n"
+ "lwc1 %[in8], 240*4(%[synth_buf]) \t\n"
+ "nmsub.s %[sum], %[sum], %[in1], %[in2] \t\n"
+ "lwc1 %[in1], 288*4(%[window]) \t\n"
+ "lwc1 %[in2], 304*4(%[synth_buf]) \t\n"
+ "nmsub.s %[sum], %[sum], %[in3], %[in4] \t\n"
+ "lwc1 %[in3], 352*4(%[window]) \t\n"
+ "lwc1 %[in4], 368*4(%[synth_buf]) \t\n"
+ "nmsub.s %[sum], %[sum], %[in5], %[in6] \t\n"
+ "lwc1 %[in5], 416*4(%[window]) \t\n"
+ "lwc1 %[in6], 432*4(%[synth_buf]) \t\n"
+ "nmsub.s %[sum], %[sum], %[in7], %[in8] \t\n"
+ "lwc1 %[in7], 480*4(%[window]) \t\n"
+ "lwc1 %[in8], 496*4(%[synth_buf]) \t\n"
+ "nmsub.s %[sum], %[sum], %[in1], %[in2] \t\n"
+ "addu %[w], %[window], 4 \t\n"
+ "nmsub.s %[sum], %[sum], %[in3], %[in4] \t\n"
+ "addu %[w2], %[window], 124 \t\n"
+ "addiu %[p], %[synth_buf], 68 \t\n"
+ "addiu %[p2], %[synth_buf], 188 \t\n"
+ "nmsub.s %[sum], %[sum], %[in5], %[in6] \t\n"
+ "nmsub.s %[sum], %[sum], %[in7], %[in8] \t\n"
+ "swc1 %[sum], 0(%[samples]) \t\n"
+ "addu %[samples], %[samples], %[incr1] \t\n"
+
+    /* two samples are calculated at the same time, saving one memory
+       access per pair of samples */
+
+ "ff_mpadsp_apply_window_loop%=: \t\n"
+ "lwc1 %[in1], 0(%[w]) \t\n"
+ "lwc1 %[in2], 0(%[p]) \t\n"
+ "lwc1 %[in3], 0(%[w2]) \t\n"
+ "lwc1 %[in4], 64*4(%[w]) \t\n"
+ "lwc1 %[in5], 64*4(%[p]) \t\n"
+ "lwc1 %[in6], 64*4(%[w2]) \t\n"
+ "mul.s %[sum], %[in1], %[in2] \t\n"
+ "mul.s %[sum2], %[in2], %[in3] \t\n"
+ "lwc1 %[in1], 128*4(%[w]) \t\n"
+ "lwc1 %[in2], 128*4(%[p]) \t\n"
+ "madd.s %[sum], %[sum], %[in4], %[in5] \t\n"
+ "nmadd.s %[sum2], %[sum2], %[in5], %[in6] \t\n"
+ "lwc1 %[in3], 128*4(%[w2]) \t\n"
+ "lwc1 %[in4], 192*4(%[w]) \t\n"
+ "madd.s %[sum], %[sum], %[in1], %[in2] \t\n"
+ "lwc1 %[in5], 192*4(%[p]) \t\n"
+ "lwc1 %[in6], 192*4(%[w2]) \t\n"
+ "nmsub.s %[sum2], %[sum2], %[in2], %[in3] \t\n"
+ "lwc1 %[in1], 256*4(%[w]) \t\n"
+ "lwc1 %[in2], 256*4(%[p]) \t\n"
+ "madd.s %[sum], %[sum], %[in4], %[in5] \t\n"
+ "nmsub.s %[sum2], %[sum2], %[in5], %[in6] \t\n"
+ "lwc1 %[in3], 256*4(%[w2]) \t\n"
+ "lwc1 %[in4], 320*4(%[w]) \t\n"
+ "madd.s %[sum], %[sum], %[in1], %[in2] \t\n"
+ "lwc1 %[in5], 320*4(%[p]) \t\n"
+ "lwc1 %[in6], 320*4(%[w2]) \t\n"
+ "nmsub.s %[sum2], %[sum2], %[in2], %[in3] \t\n"
+ "lwc1 %[in1], 384*4(%[w]) \t\n"
+ "lwc1 %[in2], 384*4(%[p]) \t\n"
+ "madd.s %[sum], %[sum], %[in4], %[in5] \t\n"
+ "nmsub.s %[sum2], %[sum2], %[in5], %[in6] \t\n"
+ "lwc1 %[in3], 384*4(%[w2]) \t\n"
+ "lwc1 %[in4], 448*4(%[w]) \t\n"
+ "madd.s %[sum], %[sum], %[in1], %[in2] \t\n"
+ "lwc1 %[in5], 448*4(%[p]) \t\n"
+ "lwc1 %[in6], 448*4(%[w2]) \t\n"
+ "nmsub.s %[sum2], %[sum2], %[in2], %[in3] \t\n"
+ "madd.s %[sum], %[sum], %[in4], %[in5] \t\n"
+ "lwc1 %[in1], 32*4(%[w]) \t\n"
+ "lwc1 %[in2], 0(%[p2]) \t\n"
+ "nmsub.s %[sum2], %[sum2], %[in5], %[in6] \t\n"
+ "lwc1 %[in3], 32*4(%[w2]) \t\n"
+ "lwc1 %[in4], 96*4(%[w]) \t\n"
+ "lwc1 %[in5], 64*4(%[p2]) \t\n"
+ "nmsub.s %[sum], %[sum], %[in1], %[in2] \t\n"
+ "lwc1 %[in6], 96*4(%[w2]) \t\n"
+ "nmsub.s %[sum2], %[sum2], %[in2], %[in3] \t\n"
+ "lwc1 %[in1], 160*4(%[w]) \t\n"
+ "nmsub.s %[sum], %[sum], %[in4], %[in5] \t\n"
+ "lwc1 %[in2], 128*4(%[p2]) \t\n"
+ "nmsub.s %[sum2], %[sum2], %[in5], %[in6] \t\n"
+ "lwc1 %[in3], 160*4(%[w2]) \t\n"
+ "lwc1 %[in4], 224*4(%[w]) \t\n"
+ "lwc1 %[in5], 192*4(%[p2]) \t\n"
+ "nmsub.s %[sum], %[sum], %[in1], %[in2] \t\n"
+ "lwc1 %[in6], 224*4(%[w2]) \t\n"
+ "nmsub.s %[sum2], %[sum2], %[in2], %[in3] \t\n"
+ "lwc1 %[in1], 288*4(%[w]) \t\n"
+ "nmsub.s %[sum], %[sum], %[in4], %[in5] \t\n"
+ "lwc1 %[in2], 256*4(%[p2]) \t\n"
+ "nmsub.s %[sum2], %[sum2], %[in5], %[in6] \t\n"
+ "lwc1 %[in3], 288*4(%[w2]) \t\n"
+ "lwc1 %[in4], 352*4(%[w]) \t\n"
+ "lwc1 %[in5], 320*4(%[p2]) \t\n"
+ "nmsub.s %[sum], %[sum], %[in1], %[in2] \t\n"
+ "lwc1 %[in6], 352*4(%[w2]) \t\n"
+ "nmsub.s %[sum2], %[sum2], %[in2], %[in3] \t\n"
+ "lwc1 %[in1], 416*4(%[w]) \t\n"
+ "nmsub.s %[sum], %[sum], %[in4], %[in5] \t\n"
+ "lwc1 %[in2], 384*4(%[p2]) \t\n"
+ "nmsub.s %[sum2], %[sum2], %[in5], %[in6] \t\n"
+ "lwc1 %[in3], 416*4(%[w2]) \t\n"
+ "lwc1 %[in4], 480*4(%[w]) \t\n"
+ "lwc1 %[in5], 448*4(%[p2]) \t\n"
+ "nmsub.s %[sum], %[sum], %[in1], %[in2] \t\n"
+ "lwc1 %[in6], 480*4(%[w2]) \t\n"
+ "nmsub.s %[sum2], %[sum2], %[in2], %[in3] \t\n"
+ "addiu %[w], %[w], 4 \t\n"
+ "nmsub.s %[sum], %[sum], %[in4], %[in5] \t\n"
+ "addiu %[w2], %[w2], -4 \t\n"
+ "nmsub.s %[sum2], %[sum2], %[in5], %[in6] \t\n"
+ "addu %[j], %[j], 4 \t\n"
+ "addiu %[p], 4 \t\n"
+ "swc1 %[sum], 0(%[samples]) \t\n"
+ "addiu %[p2], -4 \t\n"
+ "swc1 %[sum2], 0(%[samples2]) \t\n"
+ "addu %[samples], %[samples], %[incr1] \t\n"
+ "subu %[samples2], %[samples2], %[incr1] \t\n"
+ "bne %[j], 64, ff_mpadsp_apply_window_loop%= \t\n"
+
+ "lwc1 %[in1], 48*4(%[window]) \t\n"
+ "lwc1 %[in2], 32*4(%[synth_buf]) \t\n"
+ "lwc1 %[in3], 112*4(%[window]) \t\n"
+ "lwc1 %[in4], 96*4(%[synth_buf]) \t\n"
+ "lwc1 %[in5], 176*4(%[window]) \t\n"
+ "lwc1 %[in6], 160*4(%[synth_buf]) \t\n"
+ "mul.s %[sum], %[in1], %[in2] \t\n"
+ "lwc1 %[in7], 240*4(%[window]) \t\n"
+ "lwc1 %[in8], 224*4(%[synth_buf]) \t\n"
+ "lwc1 %[in1], 304*4(%[window]) \t\n"
+ "nmadd.s %[sum], %[sum], %[in3], %[in4] \t\n"
+ "lwc1 %[in2], 288*4(%[synth_buf]) \t\n"
+ "lwc1 %[in3], 368*4(%[window]) \t\n"
+ "lwc1 %[in4], 352*4(%[synth_buf]) \t\n"
+ "nmsub.s %[sum], %[sum], %[in5], %[in6] \t\n"
+ "nmsub.s %[sum], %[sum], %[in7], %[in8] \t\n"
+ "lwc1 %[in5], 432*4(%[window]) \t\n"
+ "lwc1 %[in6], 416*4(%[synth_buf]) \t\n"
+ "nmsub.s %[sum], %[sum], %[in1], %[in2] \t\n"
+ "lwc1 %[in7], 496*4(%[window]) \t\n"
+ "lwc1 %[in8], 480*4(%[synth_buf]) \t\n"
+ "nmsub.s %[sum], %[sum], %[in3], %[in4] \t\n"
+ "nmsub.s %[sum], %[sum], %[in5], %[in6] \t\n"
+ "nmsub.s %[sum], %[sum], %[in7], %[in8] \t\n"
+ "swc1 %[sum], 0(%[samples]) \t\n"
+
+ : [sum] "=&f" (sum), [sum2] "=&f" (sum2),
+ [w2] "=&r" (w2), [w] "=&r" (w),
+ [p] "=&r" (p), [p2] "=&r" (p2), [j] "=&r" (j),
+ [samples] "+r" (samples), [samples2] "=&r" (samples2),
+ [in1] "=&f" (in1), [in2] "=&f" (in2),
+ [in3] "=&f" (in3), [in4] "=&f" (in4),
+ [in5] "=&f" (in5), [in6] "=&f" (in6),
+ [in7] "=&f" (in7), [in8] "=&f" (in8),
+ [t_sample] "=&r" (t_sample)
+ : [synth_buf] "r" (synth_buf), [window] "r" (window),
+ [dither_state] "r" (dither_state), [incr1] "r" (incr1)
+ : "memory"
+ );
+}
+
+static void ff_dct32_mips_float(float *out, const float *tab)
+{
+ float val0 , val1 , val2 , val3 , val4 , val5 , val6 , val7,
+ val8 , val9 , val10, val11, val12, val13, val14, val15,
+ val16, val17, val18, val19, val20, val21, val22, val23,
+ val24, val25, val26, val27, val28, val29, val30, val31;
+ float fTmp1, fTmp2, fTmp3, fTmp4, fTmp5, fTmp6, fTmp7, fTmp8,
+ fTmp9, fTmp10, fTmp11;
+
+ /**
+ * instructions are scheduled to minimize pipeline stalls.
+ */
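+    /* The li.s literals in this and the following asm blocks appear to be
+     * the DCT32 butterfly coefficients 0.5 / cos(pi * (2k + 1) / 2^n)
+     * (cf. libavcodec/dct32_template.c), e.g.
+     * 0.50060299823519630134 ~= 0.5 / cos(pi *  1/64) and
+     * 10.19000812354805681150 ~= 0.5 / cos(pi * 31/64). */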
+ __asm__ volatile (
+ "lwc1 %[fTmp1], 0*4(%[tab]) \n\t"
+ "lwc1 %[fTmp2], 31*4(%[tab]) \n\t"
+ "lwc1 %[fTmp3], 15*4(%[tab]) \n\t"
+ "lwc1 %[fTmp4], 16*4(%[tab]) \n\t"
+ "li.s %[fTmp7], 0.50241928618815570551 \n\t"
+ "add.s %[fTmp5], %[fTmp1], %[fTmp2] \n\t"
+ "sub.s %[fTmp8], %[fTmp1], %[fTmp2] \n\t"
+ "add.s %[fTmp6], %[fTmp3], %[fTmp4] \n\t"
+ "sub.s %[fTmp9], %[fTmp3], %[fTmp4] \n\t"
+ "li.s %[fTmp10], 0.50060299823519630134 \n\t"
+ "li.s %[fTmp11], 10.19000812354805681150 \n\t"
+ "mul.s %[fTmp8], %[fTmp8], %[fTmp10] \n\t"
+ "add.s %[val0], %[fTmp5], %[fTmp6] \n\t"
+ "sub.s %[val15], %[fTmp5], %[fTmp6] \n\t"
+ "lwc1 %[fTmp1], 7*4(%[tab]) \n\t"
+ "lwc1 %[fTmp2], 24*4(%[tab]) \n\t"
+ "madd.s %[val16], %[fTmp8], %[fTmp9], %[fTmp11] \n\t"
+ "nmsub.s %[val31], %[fTmp8], %[fTmp9], %[fTmp11] \n\t"
+ "mul.s %[val15], %[val15], %[fTmp7] \n\t"
+ "lwc1 %[fTmp3], 8*4(%[tab]) \n\t"
+ "lwc1 %[fTmp4], 23*4(%[tab]) \n\t"
+ "add.s %[fTmp5], %[fTmp1], %[fTmp2] \n\t"
+ "mul.s %[val31], %[val31], %[fTmp7] \n\t"
+ "sub.s %[fTmp8], %[fTmp1], %[fTmp2] \n\t"
+ "add.s %[fTmp6], %[fTmp3], %[fTmp4] \n\t"
+ "sub.s %[fTmp9], %[fTmp3], %[fTmp4] \n\t"
+ "li.s %[fTmp7], 5.10114861868916385802 \n\t"
+ "li.s %[fTmp10], 0.67480834145500574602 \n\t"
+ "li.s %[fTmp11], 0.74453627100229844977 \n\t"
+ "add.s %[val7], %[fTmp5], %[fTmp6] \n\t"
+ "sub.s %[val8], %[fTmp5], %[fTmp6] \n\t"
+ "mul.s %[fTmp8], %[fTmp8], %[fTmp10] \n\t"
+ "li.s %[fTmp1], 0.50979557910415916894 \n\t"
+ "sub.s %[fTmp2], %[val0], %[val7] \n\t"
+ "mul.s %[val8], %[val8], %[fTmp7] \n\t"
+ "madd.s %[val23], %[fTmp8], %[fTmp9], %[fTmp11] \n\t"
+ "nmsub.s %[val24], %[fTmp8], %[fTmp9], %[fTmp11] \n\t"
+ "add.s %[val0], %[val0], %[val7] \n\t"
+ "mul.s %[val7], %[fTmp1], %[fTmp2] \n\t"
+ "sub.s %[fTmp2], %[val15], %[val8] \n\t"
+ "add.s %[val8], %[val15], %[val8] \n\t"
+ "mul.s %[val24], %[val24], %[fTmp7] \n\t"
+ "sub.s %[fTmp3], %[val16], %[val23] \n\t"
+ "add.s %[val16], %[val16], %[val23] \n\t"
+ "mul.s %[val15], %[fTmp1], %[fTmp2] \n\t"
+ "sub.s %[fTmp4], %[val31], %[val24] \n\t"
+ "mul.s %[val23], %[fTmp1], %[fTmp3] \n\t"
+ "add.s %[val24], %[val31], %[val24] \n\t"
+ "mul.s %[val31], %[fTmp1], %[fTmp4] \n\t"
+
+ : [fTmp1] "=&f" (fTmp1), [fTmp2] "=&f" (fTmp2), [fTmp3] "=&f" (fTmp3),
+ [fTmp4] "=&f" (fTmp4), [fTmp5] "=&f" (fTmp5), [fTmp6] "=&f" (fTmp6),
+ [fTmp7] "=&f" (fTmp7), [fTmp8] "=&f" (fTmp8), [fTmp9] "=&f" (fTmp9),
+ [fTmp10] "=&f" (fTmp10), [fTmp11] "=&f" (fTmp11),
+ [val0] "=f" (val0), [val7] "=f" (val7),
+ [val8] "=f" (val8), [val15] "=f" (val15),
+ [val16] "=f" (val16), [val23] "=f" (val23),
+ [val24] "=f" (val24), [val31] "=f" (val31)
+ : [tab] "r" (tab)
+ : "memory"
+ );
+
+ __asm__ volatile (
+ "lwc1 %[fTmp1], 3*4(%[tab]) \n\t"
+ "lwc1 %[fTmp2], 28*4(%[tab]) \n\t"
+ "lwc1 %[fTmp3], 12*4(%[tab]) \n\t"
+ "lwc1 %[fTmp4], 19*4(%[tab]) \n\t"
+ "li.s %[fTmp7], 0.64682178335999012954 \n\t"
+ "add.s %[fTmp5], %[fTmp1], %[fTmp2] \n\t"
+ "sub.s %[fTmp8], %[fTmp1], %[fTmp2] \n\t"
+ "add.s %[fTmp6], %[fTmp3], %[fTmp4] \n\t"
+ "sub.s %[fTmp9], %[fTmp3], %[fTmp4] \n\t"
+ "li.s %[fTmp10], 0.53104259108978417447 \n\t"
+ "li.s %[fTmp11], 1.48416461631416627724 \n\t"
+ "mul.s %[fTmp8], %[fTmp8], %[fTmp10] \n\t"
+ "add.s %[val3], %[fTmp5], %[fTmp6] \n\t"
+ "sub.s %[val12], %[fTmp5], %[fTmp6] \n\t"
+ "lwc1 %[fTmp1], 4*4(%[tab]) \n\t"
+ "lwc1 %[fTmp2], 27*4(%[tab]) \n\t"
+ "madd.s %[val19], %[fTmp8], %[fTmp9], %[fTmp11] \n\t"
+ "nmsub.s %[val28], %[fTmp8], %[fTmp9], %[fTmp11] \n\t"
+ "mul.s %[val12], %[val12], %[fTmp7] \n\t"
+ "lwc1 %[fTmp3], 11*4(%[tab]) \n\t"
+ "lwc1 %[fTmp4], 20*4(%[tab]) \n\t"
+ "add.s %[fTmp5], %[fTmp1], %[fTmp2] \n\t"
+ "mul.s %[val28], %[val28], %[fTmp7] \n\t"
+ "sub.s %[fTmp8], %[fTmp1], %[fTmp2] \n\t"
+ "li.s %[fTmp7], 0.78815462345125022473 \n\t"
+ "add.s %[fTmp6], %[fTmp3], %[fTmp4] \n\t"
+ "sub.s %[fTmp9], %[fTmp3], %[fTmp4] \n\t"
+ "li.s %[fTmp10], 0.55310389603444452782 \n\t"
+ "li.s %[fTmp11], 1.16943993343288495515 \n\t"
+ "mul.s %[fTmp8], %[fTmp8], %[fTmp10] \n\t"
+ "add.s %[val4], %[fTmp5], %[fTmp6] \n\t"
+ "sub.s %[val11], %[fTmp5], %[fTmp6] \n\t"
+ "li.s %[fTmp1], 2.56291544774150617881 \n\t"
+ "madd.s %[val20], %[fTmp8], %[fTmp9], %[fTmp11] \n\t"
+ "nmsub.s %[val27], %[fTmp8], %[fTmp9], %[fTmp11] \n\t"
+ "mul.s %[val11], %[val11], %[fTmp7] \n\t"
+ "sub.s %[fTmp2], %[val3], %[val4] \n\t"
+ "add.s %[val3], %[val3], %[val4] \n\t"
+ "sub.s %[fTmp4], %[val19], %[val20] \n\t"
+ "mul.s %[val27], %[val27], %[fTmp7] \n\t"
+ "sub.s %[fTmp3], %[val12], %[val11] \n\t"
+ "mul.s %[val4], %[fTmp1], %[fTmp2] \n\t"
+ "add.s %[val11], %[val12], %[val11] \n\t"
+ "add.s %[val19], %[val19], %[val20] \n\t"
+ "mul.s %[val20], %[fTmp1], %[fTmp4] \n\t"
+ "mul.s %[val12], %[fTmp1], %[fTmp3] \n\t"
+ "sub.s %[fTmp2], %[val28], %[val27] \n\t"
+ "add.s %[val27], %[val28], %[val27] \n\t"
+ "mul.s %[val28], %[fTmp1], %[fTmp2] \n\t"
+
+ : [fTmp1] "=&f" (fTmp1), [fTmp2] "=&f" (fTmp2), [fTmp3] "=&f" (fTmp3),
+ [fTmp4] "=&f" (fTmp4), [fTmp5] "=&f" (fTmp5), [fTmp6] "=&f" (fTmp6),
+ [fTmp7] "=&f" (fTmp7), [fTmp8] "=&f" (fTmp8), [fTmp9] "=&f" (fTmp9),
+ [fTmp10] "=&f" (fTmp10), [fTmp11] "=&f" (fTmp11),
+ [val3] "=f" (val3), [val4] "=f" (val4),
+ [val11] "=f" (val11), [val12] "=f" (val12),
+ [val19] "=f" (val19), [val20] "=f" (val20),
+ [val27] "=f" (val27), [val28] "=f" (val28)
+ : [tab] "r" (tab)
+ : "memory"
+ );
+
+ __asm__ volatile (
+ "li.s %[fTmp1], 0.54119610014619698439 \n\t"
+ "sub.s %[fTmp2], %[val0], %[val3] \n\t"
+ "add.s %[val0], %[val0], %[val3] \n\t"
+ "sub.s %[fTmp3], %[val7], %[val4] \n\t"
+ "add.s %[val4], %[val7], %[val4] \n\t"
+ "sub.s %[fTmp4], %[val8], %[val11] \n\t"
+ "mul.s %[val3], %[fTmp1], %[fTmp2] \n\t"
+ "add.s %[val8], %[val8], %[val11] \n\t"
+ "mul.s %[val7], %[fTmp1], %[fTmp3] \n\t"
+ "sub.s %[fTmp2], %[val15], %[val12] \n\t"
+ "mul.s %[val11], %[fTmp1], %[fTmp4] \n\t"
+ "add.s %[val12], %[val15], %[val12] \n\t"
+ "mul.s %[val15], %[fTmp1], %[fTmp2] \n\t"
+
+ : [val0] "+f" (val0), [val3] "+f" (val3),
+ [val4] "+f" (val4), [val7] "+f" (val7),
+ [val8] "+f" (val8), [val11] "+f" (val11),
+ [val12] "+f" (val12), [val15] "+f" (val15),
+ [fTmp1] "=f" (fTmp1), [fTmp2] "=&f" (fTmp2),
+ [fTmp3] "=&f" (fTmp3), [fTmp4] "=&f" (fTmp4)
+ :
+ );
+
+ __asm__ volatile (
+ "sub.s %[fTmp2], %[val16], %[val19] \n\t"
+ "add.s %[val16], %[val16], %[val19] \n\t"
+ "sub.s %[fTmp3], %[val23], %[val20] \n\t"
+ "add.s %[val20], %[val23], %[val20] \n\t"
+ "sub.s %[fTmp4], %[val24], %[val27] \n\t"
+ "mul.s %[val19], %[fTmp1], %[fTmp2] \n\t"
+ "add.s %[val24], %[val24], %[val27] \n\t"
+ "mul.s %[val23], %[fTmp1], %[fTmp3] \n\t"
+ "sub.s %[fTmp2], %[val31], %[val28] \n\t"
+ "mul.s %[val27], %[fTmp1], %[fTmp4] \n\t"
+ "add.s %[val28], %[val31], %[val28] \n\t"
+ "mul.s %[val31], %[fTmp1], %[fTmp2] \n\t"
+
+ : [fTmp2] "=&f" (fTmp2), [fTmp3] "=&f" (fTmp3), [fTmp4] "=&f" (fTmp4),
+ [val16] "+f" (val16), [val19] "+f" (val19), [val20] "+f" (val20),
+ [val23] "+f" (val23), [val24] "+f" (val24), [val27] "+f" (val27),
+ [val28] "+f" (val28), [val31] "+f" (val31)
+ : [fTmp1] "f" (fTmp1)
+ );
+
+ __asm__ volatile (
+ "lwc1 %[fTmp1], 1*4(%[tab]) \n\t"
+ "lwc1 %[fTmp2], 30*4(%[tab]) \n\t"
+ "lwc1 %[fTmp3], 14*4(%[tab]) \n\t"
+ "lwc1 %[fTmp4], 17*4(%[tab]) \n\t"
+ "li.s %[fTmp7], 0.52249861493968888062 \n\t"
+ "add.s %[fTmp5], %[fTmp1], %[fTmp2] \n\t"
+ "sub.s %[fTmp8], %[fTmp1], %[fTmp2] \n\t"
+ "add.s %[fTmp6], %[fTmp3], %[fTmp4] \n\t"
+ "sub.s %[fTmp9], %[fTmp3], %[fTmp4] \n\t"
+ "li.s %[fTmp10], 0.50547095989754365998 \n\t"
+ "li.s %[fTmp11], 3.40760841846871878570 \n\t"
+ "mul.s %[fTmp8], %[fTmp8], %[fTmp10] \n\t"
+ "add.s %[val1], %[fTmp5], %[fTmp6] \n\t"
+ "sub.s %[val14], %[fTmp5], %[fTmp6] \n\t"
+ "lwc1 %[fTmp1], 6*4(%[tab]) \n\t"
+ "lwc1 %[fTmp2], 25*4(%[tab]) \n\t"
+ "madd.s %[val17], %[fTmp8], %[fTmp9], %[fTmp11] \n\t"
+ "nmsub.s %[val30], %[fTmp8], %[fTmp9], %[fTmp11] \n\t"
+ "mul.s %[val14], %[val14], %[fTmp7] \n\t"
+ "lwc1 %[fTmp3], 9*4(%[tab]) \n\t"
+ "lwc1 %[fTmp4], 22*4(%[tab]) \n\t"
+ "add.s %[fTmp5], %[fTmp1], %[fTmp2] \n\t"
+ "mul.s %[val30], %[val30], %[fTmp7] \n\t"
+ "sub.s %[fTmp8], %[fTmp1], %[fTmp2] \n\t"
+ "add.s %[fTmp6], %[fTmp3], %[fTmp4] \n\t"
+ "sub.s %[fTmp9], %[fTmp3], %[fTmp4] \n\t"
+ "li.s %[fTmp7], 1.72244709823833392782 \n\t"
+ "li.s %[fTmp10], 0.62250412303566481615 \n\t"
+ "li.s %[fTmp11], 0.83934964541552703873 \n\t"
+ "add.s %[val6], %[fTmp5], %[fTmp6] \n\t"
+ "sub.s %[val9], %[fTmp5], %[fTmp6] \n\t"
+ "mul.s %[fTmp8], %[fTmp8], %[fTmp10] \n\t"
+ "li.s %[fTmp1], 0.60134488693504528054 \n\t"
+ "sub.s %[fTmp2], %[val1], %[val6] \n\t"
+ "add.s %[val1], %[val1], %[val6] \n\t"
+ "mul.s %[val9], %[val9], %[fTmp7] \n\t"
+ "madd.s %[val22], %[fTmp8], %[fTmp9], %[fTmp11] \n\t"
+ "nmsub.s %[val25], %[fTmp8], %[fTmp9], %[fTmp11] \n\t"
+ "mul.s %[val6], %[fTmp1], %[fTmp2] \n\t"
+ "sub.s %[fTmp2], %[val14], %[val9] \n\t"
+ "add.s %[val9], %[val14], %[val9] \n\t"
+ "mul.s %[val25], %[val25], %[fTmp7] \n\t"
+ "sub.s %[fTmp3], %[val17], %[val22] \n\t"
+ "add.s %[val17], %[val17], %[val22] \n\t"
+ "mul.s %[val14], %[fTmp1], %[fTmp2] \n\t"
+ "sub.s %[fTmp2], %[val30], %[val25] \n\t"
+ "mul.s %[val22], %[fTmp1], %[fTmp3] \n\t"
+ "add.s %[val25], %[val30], %[val25] \n\t"
+ "mul.s %[val30], %[fTmp1], %[fTmp2] \n\t"
+
+ : [fTmp1] "=&f" (fTmp1), [fTmp2] "=&f" (fTmp2), [fTmp3] "=&f" (fTmp3),
+ [fTmp4] "=&f" (fTmp4), [fTmp5] "=&f" (fTmp5), [fTmp6] "=&f" (fTmp6),
+ [fTmp7] "=&f" (fTmp7), [fTmp8] "=&f" (fTmp8), [fTmp9] "=&f" (fTmp9),
+ [fTmp10] "=&f" (fTmp10), [fTmp11] "=&f" (fTmp11),
+ [val1] "=f" (val1), [val6] "=f" (val6),
+ [val9] "=f" (val9), [val14] "=f" (val14),
+ [val17] "=f" (val17), [val22] "=f" (val22),
+ [val25] "=f" (val25), [val30] "=f" (val30)
+ : [tab] "r" (tab)
+ : "memory"
+ );
+
+ __asm__ volatile (
+ "lwc1 %[fTmp1], 2*4(%[tab]) \n\t"
+ "lwc1 %[fTmp2], 29*4(%[tab]) \n\t"
+ "lwc1 %[fTmp3], 13*4(%[tab]) \n\t"
+ "lwc1 %[fTmp4], 18*4(%[tab]) \n\t"
+ "li.s %[fTmp7], 0.56694403481635770368 \n\t"
+ "add.s %[fTmp5], %[fTmp1], %[fTmp2] \n\t"
+ "sub.s %[fTmp8], %[fTmp1], %[fTmp2] \n\t"
+ "add.s %[fTmp6], %[fTmp3], %[fTmp4] \n\t"
+ "sub.s %[fTmp9], %[fTmp3], %[fTmp4] \n\t"
+ "li.s %[fTmp10], 0.51544730992262454697 \n\t"
+ "li.s %[fTmp11], 2.05778100995341155085 \n\t"
+ "mul.s %[fTmp8], %[fTmp8], %[fTmp10] \n\t"
+ "add.s %[val2], %[fTmp5], %[fTmp6] \n\t"
+ "sub.s %[val13], %[fTmp5], %[fTmp6] \n\t"
+ "lwc1 %[fTmp1], 5*4(%[tab]) \n\t"
+ "lwc1 %[fTmp2], 26*4(%[tab]) \n\t"
+ "madd.s %[val18], %[fTmp8], %[fTmp9], %[fTmp11] \n\t"
+ "nmsub.s %[val29], %[fTmp8], %[fTmp9], %[fTmp11] \n\t"
+ "mul.s %[val13], %[val13], %[fTmp7] \n\t"
+ "lwc1 %[fTmp3], 10*4(%[tab]) \n\t"
+ "lwc1 %[fTmp4], 21*4(%[tab]) \n\t"
+ "mul.s %[val29], %[val29], %[fTmp7] \n\t"
+ "add.s %[fTmp5], %[fTmp1], %[fTmp2] \n\t"
+ "sub.s %[fTmp8], %[fTmp1], %[fTmp2] \n\t"
+ "add.s %[fTmp6], %[fTmp3], %[fTmp4] \n\t"
+ "sub.s %[fTmp9], %[fTmp3], %[fTmp4] \n\t"
+ "li.s %[fTmp7], 1.06067768599034747134 \n\t"
+ "li.s %[fTmp10], 0.58293496820613387367 \n\t"
+ "li.s %[fTmp11], 0.97256823786196069369 \n\t"
+ "add.s %[val5], %[fTmp5], %[fTmp6] \n\t"
+ "sub.s %[val10], %[fTmp5], %[fTmp6] \n\t"
+ "mul.s %[fTmp8], %[fTmp8], %[fTmp10] \n\t"
+ "li.s %[fTmp1], 0.89997622313641570463 \n\t"
+ "sub.s %[fTmp2], %[val2], %[val5] \n\t"
+ "mul.s %[val10], %[val10], %[fTmp7] \n\t"
+ "madd.s %[val21], %[fTmp8], %[fTmp9], %[fTmp11] \n\t"
+ "nmsub.s %[val26], %[fTmp8], %[fTmp9], %[fTmp11] \n\t"
+ "add.s %[val2], %[val2], %[val5] \n\t"
+ "mul.s %[val5], %[fTmp1], %[fTmp2] \n\t"
+ "sub.s %[fTmp3], %[val13], %[val10] \n\t"
+ "add.s %[val10], %[val13], %[val10] \n\t"
+ "mul.s %[val26], %[val26], %[fTmp7] \n\t"
+ "sub.s %[fTmp4], %[val18], %[val21] \n\t"
+ "add.s %[val18], %[val18], %[val21] \n\t"
+ "mul.s %[val13], %[fTmp1], %[fTmp3] \n\t"
+ "sub.s %[fTmp2], %[val29], %[val26] \n\t"
+ "add.s %[val26], %[val29], %[val26] \n\t"
+ "mul.s %[val21], %[fTmp1], %[fTmp4] \n\t"
+ "mul.s %[val29], %[fTmp1], %[fTmp2] \n\t"
+
+ : [fTmp1] "=&f" (fTmp1), [fTmp2] "=&f" (fTmp2), [fTmp3] "=&f" (fTmp3),
+ [fTmp4] "=&f" (fTmp4), [fTmp5] "=&f" (fTmp5), [fTmp6] "=&f" (fTmp6),
+ [fTmp7] "=&f" (fTmp7), [fTmp8] "=&f" (fTmp8), [fTmp9] "=&f" (fTmp9),
+ [fTmp10] "=&f" (fTmp10), [fTmp11] "=&f" (fTmp11),
+ [val2] "=f" (val2), [val5] "=f" (val5),
+ [val10] "=f" (val10), [val13] "=f" (val13),
+ [val18] "=f" (val18), [val21] "=f" (val21),
+ [val26] "=f" (val26), [val29] "=f" (val29)
+ : [tab] "r" (tab)
+ : "memory"
+ );
+
+ __asm__ volatile (
+ "li.s %[fTmp1], 1.30656296487637652785 \n\t"
+ "sub.s %[fTmp2], %[val1], %[val2] \n\t"
+ "add.s %[val1], %[val1], %[val2] \n\t"
+ "sub.s %[fTmp3], %[val6], %[val5] \n\t"
+ "add.s %[val5], %[val6], %[val5] \n\t"
+ "sub.s %[fTmp4], %[val9], %[val10] \n\t"
+ "mul.s %[val2], %[fTmp1], %[fTmp2] \n\t"
+ "add.s %[val9], %[val9], %[val10] \n\t"
+ "mul.s %[val6], %[fTmp1], %[fTmp3] \n\t"
+ "sub.s %[fTmp2], %[val14], %[val13] \n\t"
+ "mul.s %[val10], %[fTmp1], %[fTmp4] \n\t"
+ "add.s %[val13], %[val14], %[val13] \n\t"
+ "mul.s %[val14], %[fTmp1], %[fTmp2] \n\t"
+
+ : [fTmp1] "=f" (fTmp1), [fTmp2] "=&f" (fTmp2),
+ [fTmp3] "=&f" (fTmp3), [fTmp4] "=&f" (fTmp4),
+ [val1] "+f" (val1), [val2] "+f" (val2),
+ [val5] "+f" (val5), [val6] "+f" (val6),
+ [val9] "+f" (val9), [val10] "+f" (val10),
+ [val13] "+f" (val13), [val14] "+f" (val14)
+ :
+ );
+
+ __asm__ volatile (
+ "sub.s %[fTmp2], %[val17], %[val18] \n\t"
+ "add.s %[val17], %[val17], %[val18] \n\t"
+ "sub.s %[fTmp3], %[val22], %[val21] \n\t"
+ "add.s %[val21], %[val22], %[val21] \n\t"
+ "sub.s %[fTmp4], %[val25], %[val26] \n\t"
+ "mul.s %[val18], %[fTmp1], %[fTmp2] \n\t"
+ "add.s %[val25], %[val25], %[val26] \n\t"
+ "mul.s %[val22], %[fTmp1], %[fTmp3] \n\t"
+ "sub.s %[fTmp2], %[val30], %[val29] \n\t"
+ "mul.s %[val26], %[fTmp1], %[fTmp4] \n\t"
+ "add.s %[val29], %[val30], %[val29] \n\t"
+ "mul.s %[val30], %[fTmp1], %[fTmp2] \n\t"
+
+ : [fTmp2] "=&f" (fTmp2), [fTmp3] "=&f" (fTmp3), [fTmp4] "=&f" (fTmp4),
+ [val17] "+f" (val17), [val18] "+f" (val18), [val21] "+f" (val21),
+ [val22] "+f" (val22), [val25] "+f" (val25), [val26] "+f" (val26),
+ [val29] "+f" (val29), [val30] "+f" (val30)
+ : [fTmp1] "f" (fTmp1)
+ );
+
+ __asm__ volatile (
+ "li.s %[fTmp1], 0.70710678118654752439 \n\t"
+ "sub.s %[fTmp2], %[val0], %[val1] \n\t"
+ "add.s %[val0], %[val0], %[val1] \n\t"
+ "sub.s %[fTmp3], %[val3], %[val2] \n\t"
+ "add.s %[val2], %[val3], %[val2] \n\t"
+ "sub.s %[fTmp4], %[val4], %[val5] \n\t"
+ "mul.s %[val1], %[fTmp1], %[fTmp2] \n\t"
+ "swc1 %[val0], 0(%[out]) \n\t"
+ "mul.s %[val3], %[fTmp3], %[fTmp1] \n\t"
+ "add.s %[val4], %[val4], %[val5] \n\t"
+ "mul.s %[val5], %[fTmp1], %[fTmp4] \n\t"
+ "swc1 %[val1], 16*4(%[out]) \n\t"
+ "sub.s %[fTmp2], %[val7], %[val6] \n\t"
+ "add.s %[val2], %[val2], %[val3] \n\t"
+ "swc1 %[val3], 24*4(%[out]) \n\t"
+ "add.s %[val6], %[val7], %[val6] \n\t"
+ "mul.s %[val7], %[fTmp1], %[fTmp2] \n\t"
+ "swc1 %[val2], 8*4(%[out]) \n\t"
+ "add.s %[val6], %[val6], %[val7] \n\t"
+ "swc1 %[val7], 28*4(%[out]) \n\t"
+ "add.s %[val4], %[val4], %[val6] \n\t"
+ "add.s %[val6], %[val6], %[val5] \n\t"
+ "add.s %[val5], %[val5], %[val7] \n\t"
+ "swc1 %[val4], 4*4(%[out]) \n\t"
+ "swc1 %[val5], 20*4(%[out]) \n\t"
+ "swc1 %[val6], 12*4(%[out]) \n\t"
+
+ : [fTmp1] "=f" (fTmp1), [fTmp2] "=&f" (fTmp2),
+ [fTmp3] "=&f" (fTmp3), [fTmp4] "=&f" (fTmp4),
+ [val0] "+f" (val0), [val1] "+f" (val1),
+ [val2] "+f" (val2), [val3] "+f" (val3),
+ [val4] "+f" (val4), [val5] "+f" (val5),
+ [val6] "+f" (val6), [val7] "+f" (val7)
+ : [out] "r" (out)
+ );
+
+ __asm__ volatile (
+ "sub.s %[fTmp2], %[val8], %[val9] \n\t"
+ "add.s %[val8], %[val8], %[val9] \n\t"
+ "sub.s %[fTmp3], %[val11], %[val10] \n\t"
+ "add.s %[val10], %[val11], %[val10] \n\t"
+ "sub.s %[fTmp4], %[val12], %[val13] \n\t"
+ "mul.s %[val9], %[fTmp1], %[fTmp2] \n\t"
+ "add.s %[val12], %[val12], %[val13] \n\t"
+ "mul.s %[val11], %[fTmp1], %[fTmp3] \n\t"
+ "sub.s %[fTmp2], %[val15], %[val14] \n\t"
+ "mul.s %[val13], %[fTmp1], %[fTmp4] \n\t"
+ "add.s %[val14], %[val15], %[val14] \n\t"
+ "add.s %[val10], %[val10], %[val11] \n\t"
+ "mul.s %[val15], %[fTmp1], %[fTmp2] \n\t"
+ "add.s %[val14], %[val14], %[val15] \n\t"
+ "add.s %[val12], %[val12], %[val14] \n\t"
+ "add.s %[val14], %[val14], %[val13] \n\t"
+ "add.s %[val13], %[val13], %[val15] \n\t"
+ "add.s %[val8], %[val8], %[val12] \n\t"
+ "add.s %[val12], %[val12], %[val10] \n\t"
+ "add.s %[val10], %[val10], %[val14] \n\t"
+ "add.s %[val14], %[val14], %[val9] \n\t"
+ "add.s %[val9], %[val9], %[val13] \n\t"
+ "add.s %[val13], %[val13], %[val11] \n\t"
+ "add.s %[val11], %[val11], %[val15] \n\t"
+ "swc1 %[val8], 2*4(%[out]) \n\t"
+ "swc1 %[val9], 18*4(%[out]) \n\t"
+ "swc1 %[val10], 10*4(%[out]) \n\t"
+ "swc1 %[val11], 26*4(%[out]) \n\t"
+ "swc1 %[val12], 6*4(%[out]) \n\t"
+ "swc1 %[val13], 22*4(%[out]) \n\t"
+ "swc1 %[val14], 14*4(%[out]) \n\t"
+ "swc1 %[val15], 30*4(%[out]) \n\t"
+
+ : [fTmp2] "=&f" (fTmp2), [fTmp3] "=&f" (fTmp3), [fTmp4] "=&f" (fTmp4),
+ [val8] "+f" (val8), [val9] "+f" (val9), [val10] "+f" (val10),
+ [val11] "+f" (val11), [val12] "+f" (val12), [val13] "+f" (val13),
+ [val14] "+f" (val14), [val15] "+f" (val15)
+ : [fTmp1] "f" (fTmp1), [out] "r" (out)
+ );
+
+ __asm__ volatile (
+ "sub.s %[fTmp2], %[val16], %[val17] \n\t"
+ "add.s %[val16], %[val16], %[val17] \n\t"
+ "sub.s %[fTmp3], %[val19], %[val18] \n\t"
+ "add.s %[val18], %[val19], %[val18] \n\t"
+ "sub.s %[fTmp4], %[val20], %[val21] \n\t"
+ "mul.s %[val17], %[fTmp1], %[fTmp2] \n\t"
+ "add.s %[val20], %[val20], %[val21] \n\t"
+ "mul.s %[val19], %[fTmp1], %[fTmp3] \n\t"
+ "sub.s %[fTmp2], %[val23], %[val22] \n\t"
+ "mul.s %[val21], %[fTmp1], %[fTmp4] \n\t"
+ "add.s %[val22], %[val23], %[val22] \n\t"
+ "add.s %[val18], %[val18], %[val19] \n\t"
+ "mul.s %[val23], %[fTmp1], %[fTmp2] \n\t"
+ "add.s %[val22], %[val22], %[val23] \n\t"
+ "add.s %[val20], %[val20], %[val22] \n\t"
+ "add.s %[val22], %[val22], %[val21] \n\t"
+ "add.s %[val21], %[val21], %[val23] \n\t"
+
+ : [fTmp2] "=&f" (fTmp2), [fTmp3] "=&f" (fTmp3), [fTmp4] "=&f" (fTmp4),
+ [val16] "+f" (val16), [val17] "+f" (val17), [val18] "+f" (val18),
+ [val19] "+f" (val19), [val20] "+f" (val20), [val21] "+f" (val21),
+ [val22] "+f" (val22), [val23] "+f" (val23)
+ : [fTmp1] "f" (fTmp1)
+ );
+
+ __asm__ volatile (
+ "sub.s %[fTmp2], %[val24], %[val25] \n\t"
+ "add.s %[val24], %[val24], %[val25] \n\t"
+ "sub.s %[fTmp3], %[val27], %[val26] \n\t"
+ "add.s %[val26], %[val27], %[val26] \n\t"
+ "sub.s %[fTmp4], %[val28], %[val29] \n\t"
+ "mul.s %[val25], %[fTmp1], %[fTmp2] \n\t"
+ "add.s %[val28], %[val28], %[val29] \n\t"
+ "mul.s %[val27], %[fTmp1], %[fTmp3] \n\t"
+ "sub.s %[fTmp2], %[val31], %[val30] \n\t"
+ "mul.s %[val29], %[fTmp1], %[fTmp4] \n\t"
+ "add.s %[val30], %[val31], %[val30] \n\t"
+ "add.s %[val26], %[val26], %[val27] \n\t"
+ "mul.s %[val31], %[fTmp1], %[fTmp2] \n\t"
+ "add.s %[val30], %[val30], %[val31] \n\t"
+ "add.s %[val28], %[val28], %[val30] \n\t"
+ "add.s %[val30], %[val30], %[val29] \n\t"
+ "add.s %[val29], %[val29], %[val31] \n\t"
+ "add.s %[val24], %[val24], %[val28] \n\t"
+ "add.s %[val28], %[val28], %[val26] \n\t"
+ "add.s %[val26], %[val26], %[val30] \n\t"
+ "add.s %[val30], %[val30], %[val25] \n\t"
+ "add.s %[val25], %[val25], %[val29] \n\t"
+ "add.s %[val29], %[val29], %[val27] \n\t"
+ "add.s %[val27], %[val27], %[val31] \n\t"
+
+ : [fTmp2] "=&f" (fTmp2), [fTmp3] "=&f" (fTmp3), [fTmp4] "=&f" (fTmp4),
+ [val24] "+f" (val24), [val25] "+f" (val25), [val26] "+f" (val26),
+ [val27] "+f" (val27), [val28] "+f" (val28), [val29] "+f" (val29),
+ [val30] "+f" (val30), [val31] "+f" (val31)
+ : [fTmp1] "f" (fTmp1)
+ );
+
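+    /* Scalar epilogue: the remaining odd-indexed outputs are formed as
+     * pairwise sums of the butterfly results computed above; val31 is
+     * the last term and is stored without a partner. */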
+ out[ 1] = val16 + val24;
+ out[17] = val17 + val25;
+ out[ 9] = val18 + val26;
+ out[25] = val19 + val27;
+ out[ 5] = val20 + val28;
+ out[21] = val21 + val29;
+ out[13] = val22 + val30;
+ out[29] = val23 + val31;
+ out[ 3] = val24 + val20;
+ out[19] = val25 + val21;
+ out[11] = val26 + val22;
+ out[27] = val27 + val23;
+ out[ 7] = val28 + val18;
+ out[23] = val29 + val19;
+ out[15] = val30 + val17;
+ out[31] = val31;
+}
+
+static void imdct36_mips_float(float *out, float *buf, float *in, float *win)
+{
+ float t0, t1, t2, t3, s0, s1, s2, s3;
+ float tmp[18];
+ /* temporary variables */
+ float in1, in2, in3, in4, in5, in6;
+ float out1, out2, out3, out4, out5;
+ float c1, c2, c3, c4, c5, c6, c7, c8, c9;
+
+    /**
+     * All loops are fully unrolled and the instructions are scheduled to
+     * minimize pipeline stalls. The instructions of the first two loops
+     * are reorganized to eliminate unnecessary reads from and writes to
+     * the array. Values defined in macros and tables are eliminated;
+     * they are loaded directly into the appropriate variables.
+     */
+
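+    /* Loops 1 and 2 below are the MIPS scheduling of the prologue of the
+     * generic imdct36 (cf. libavcodec/mpegaudiodsp_template.c), which in
+     * plain C is roughly:
+     *
+     *     for (i = 17; i >= 1; i--)
+     *         in[i] += in[i-1];
+     *     for (i = 17; i >= 3; i -= 2)
+     *         in[i] += in[i-2];
+     */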
+ /* loop 1 and 2 */
+ __asm__ volatile (
+ "lwc1 %[in1], 17*4(%[in]) \t\n"
+ "lwc1 %[in2], 16*4(%[in]) \t\n"
+ "lwc1 %[in3], 15*4(%[in]) \t\n"
+ "lwc1 %[in4], 14*4(%[in]) \t\n"
+ "lwc1 %[in5], 13*4(%[in]) \t\n"
+ "lwc1 %[in6], 12*4(%[in]) \t\n"
+ "add.s %[out1], %[in1], %[in2] \t\n"
+ "add.s %[out2], %[in2], %[in3] \t\n"
+ "add.s %[out3], %[in3], %[in4] \t\n"
+ "add.s %[out4], %[in4], %[in5] \t\n"
+ "add.s %[out5], %[in5], %[in6] \t\n"
+ "lwc1 %[in1], 11*4(%[in]) \t\n"
+ "swc1 %[out2], 16*4(%[in]) \t\n"
+ "add.s %[out1], %[out1], %[out3] \t\n"
+ "swc1 %[out4], 14*4(%[in]) \t\n"
+ "add.s %[out3], %[out3], %[out5] \t\n"
+ "lwc1 %[in2], 10*4(%[in]) \t\n"
+ "lwc1 %[in3], 9*4(%[in]) \t\n"
+ "swc1 %[out1], 17*4(%[in]) \t\n"
+ "lwc1 %[in4], 8*4(%[in]) \t\n"
+ "swc1 %[out3], 15*4(%[in]) \t\n"
+ "add.s %[out1], %[in6], %[in1] \t\n"
+ "add.s %[out2], %[in1], %[in2] \t\n"
+ "add.s %[out3], %[in2], %[in3] \t\n"
+ "add.s %[out4], %[in3], %[in4] \t\n"
+ "lwc1 %[in5], 7*4(%[in]) \t\n"
+ "swc1 %[out1], 12*4(%[in]) \t\n"
+ "add.s %[out5], %[out5], %[out2] \t\n"
+ "swc1 %[out3], 10*4(%[in]) \t\n"
+ "add.s %[out2], %[out2], %[out4] \t\n"
+ "lwc1 %[in6], 6*4(%[in]) \t\n"
+ "lwc1 %[in1], 5*4(%[in]) \t\n"
+ "swc1 %[out5], 13*4(%[in]) \t\n"
+ "lwc1 %[in2], 4*4(%[in]) \t\n"
+ "swc1 %[out2], 11*4(%[in]) \t\n"
+ "add.s %[out5], %[in4], %[in5] \t\n"
+ "add.s %[out1], %[in5], %[in6] \t\n"
+ "add.s %[out2], %[in6], %[in1] \t\n"
+ "add.s %[out3], %[in1], %[in2] \t\n"
+ "lwc1 %[in3], 3*4(%[in]) \t\n"
+ "swc1 %[out5], 8*4(%[in]) \t\n"
+ "add.s %[out4], %[out4], %[out1] \t\n"
+ "swc1 %[out2], 6*4(%[in]) \t\n"
+ "add.s %[out1], %[out1], %[out3] \t\n"
+ "lwc1 %[in4], 2*4(%[in]) \t\n"
+ "lwc1 %[in5], 1*4(%[in]) \t\n"
+ "swc1 %[out4], 9*4(%[in]) \t\n"
+ "lwc1 %[in6], 0(%[in]) \t\n"
+ "swc1 %[out1], 7*4(%[in]) \t\n"
+ "add.s %[out4], %[in2], %[in3] \t\n"
+ "add.s %[out5], %[in3], %[in4] \t\n"
+ "add.s %[out1], %[in4], %[in5] \t\n"
+ "add.s %[out2], %[in5], %[in6] \t\n"
+ "swc1 %[out4], 4*4(%[in]) \t\n"
+ "add.s %[out3], %[out3], %[out5] \t\n"
+ "swc1 %[out1], 2*4(%[in]) \t\n"
+ "add.s %[out5], %[out5], %[out2] \t\n"
+ "swc1 %[out2], 1*4(%[in]) \t\n"
+ "swc1 %[out3], 5*4(%[in]) \t\n"
+ "swc1 %[out5], 3*4(%[in]) \t\n"
+
+ : [in1] "=&f" (in1), [in2] "=&f" (in2),
+ [in3] "=&f" (in3), [in4] "=&f" (in4),
+ [in5] "=&f" (in5), [in6] "=&f" (in6),
+ [out1] "=&f" (out1), [out2] "=&f" (out2),
+ [out3] "=&f" (out3), [out4] "=&f" (out4),
+ [out5] "=&f" (out5)
+ : [in] "r" (in)
+ : "memory"
+ );
+
+ /* loop 3 */
+ __asm__ volatile (
+ "li.s %[c1], 0.5 \t\n"
+ "lwc1 %[in1], 8*4(%[in]) \t\n"
+ "lwc1 %[in2], 16*4(%[in]) \t\n"
+ "lwc1 %[in3], 4*4(%[in]) \t\n"
+ "lwc1 %[in4], 0(%[in]) \t\n"
+ "lwc1 %[in5], 12*4(%[in]) \t\n"
+ "li.s %[c2], 0.93969262078590838405 \t\n"
+ "add.s %[t2], %[in1], %[in2] \t\n"
+ "add.s %[t0], %[in1], %[in3] \t\n"
+ "li.s %[c3], -0.76604444311897803520 \t\n"
+ "madd.s %[t3], %[in4], %[in5], %[c1] \t\n"
+ "sub.s %[t1], %[in4], %[in5] \t\n"
+ "sub.s %[t2], %[t2], %[in3] \t\n"
+ "mul.s %[t0], %[t0], %[c2] \t\n"
+ "li.s %[c4], -0.17364817766693034885 \t\n"
+ "li.s %[c5], -0.86602540378443864676 \t\n"
+ "li.s %[c6], 0.98480775301220805936 \t\n"
+ "nmsub.s %[out1], %[t1], %[t2], %[c1] \t\n"
+ "add.s %[out2], %[t1], %[t2] \t\n"
+ "add.s %[t2], %[in2], %[in3] \t\n"
+ "sub.s %[t1], %[in1], %[in2] \t\n"
+ "sub.s %[out3], %[t3], %[t0] \t\n"
+ "swc1 %[out1], 6*4(%[tmp]) \t\n"
+ "swc1 %[out2], 16*4(%[tmp]) \t\n"
+ "mul.s %[t2], %[t2], %[c3] \t\n"
+ "mul.s %[t1], %[t1], %[c4] \t\n"
+ "add.s %[out1], %[t3], %[t0] \t\n"
+ "lwc1 %[in1], 10*4(%[in]) \t\n"
+ "lwc1 %[in2], 14*4(%[in]) \t\n"
+ "sub.s %[out3], %[out3], %[t2] \t\n"
+ "add.s %[out2], %[t3], %[t2] \t\n"
+ "add.s %[out1], %[out1], %[t1] \t\n"
+ "lwc1 %[in3], 2*4(%[in]) \t\n"
+ "lwc1 %[in4], 6*4(%[in]) \t\n"
+ "swc1 %[out3], 10*4(%[tmp]) \t\n"
+ "sub.s %[out2], %[out2], %[t1] \t\n"
+ "swc1 %[out1], 2*4(%[tmp]) \t\n"
+ "add.s %[out1], %[in1], %[in2] \t\n"
+ "add.s %[t2], %[in1], %[in3] \t\n"
+ "sub.s %[t3], %[in1], %[in2] \t\n"
+ "swc1 %[out2], 14*4(%[tmp]) \t\n"
+ "li.s %[c7], -0.34202014332566873304 \t\n"
+ "sub.s %[out1], %[out1], %[in3] \t\n"
+ "mul.s %[t2], %[t2], %[c6] \t\n"
+ "mul.s %[t3], %[t3], %[c7] \t\n"
+ "li.s %[c8], 0.86602540378443864676 \t\n"
+ "mul.s %[t0], %[in4], %[c8] \t\n"
+ "mul.s %[out1], %[out1], %[c5] \t\n"
+ "add.s %[t1], %[in2], %[in3] \t\n"
+ "li.s %[c9], -0.64278760968653932632 \t\n"
+ "add.s %[out2], %[t2], %[t3] \t\n"
+ "lwc1 %[in1], 9*4(%[in]) \t\n"
+ "swc1 %[out1], 4*4(%[tmp]) \t\n"
+ "mul.s %[t1], %[t1], %[c9] \t\n"
+ "lwc1 %[in2], 17*4(%[in]) \t\n"
+ "add.s %[out2], %[out2], %[t0] \t\n"
+ "lwc1 %[in3], 5*4(%[in]) \t\n"
+ "lwc1 %[in4], 1*4(%[in]) \t\n"
+ "add.s %[out3], %[t2], %[t1] \t\n"
+ "sub.s %[out1], %[t3], %[t1] \t\n"
+ "swc1 %[out2], 0(%[tmp]) \t\n"
+ "lwc1 %[in5], 13*4(%[in]) \t\n"
+ "add.s %[t2], %[in1], %[in2] \t\n"
+ "sub.s %[out3], %[out3], %[t0] \t\n"
+ "sub.s %[out1], %[out1], %[t0] \t\n"
+ "add.s %[t0], %[in1], %[in3] \t\n"
+ "madd.s %[t3], %[in4], %[in5], %[c1] \t\n"
+ "sub.s %[t2], %[t2], %[in3] \t\n"
+ "swc1 %[out3], 12*4(%[tmp]) \t\n"
+ "swc1 %[out1], 8*4(%[tmp]) \t\n"
+ "sub.s %[t1], %[in4], %[in5] \t\n"
+ "mul.s %[t0], %[t0], %[c2] \t\n"
+ "nmsub.s %[out1], %[t1], %[t2], %[c1] \t\n"
+ "add.s %[out2], %[t1], %[t2] \t\n"
+ "add.s %[t2], %[in2], %[in3] \t\n"
+ "sub.s %[t1], %[in1], %[in2] \t\n"
+ "sub.s %[out3], %[t3], %[t0] \t\n"
+ "swc1 %[out1], 7*4(%[tmp]) \t\n"
+ "swc1 %[out2], 17*4(%[tmp]) \t\n"
+ "mul.s %[t2], %[t2], %[c3] \t\n"
+ "mul.s %[t1], %[t1], %[c4] \t\n"
+ "add.s %[out1], %[t3], %[t0] \t\n"
+ "lwc1 %[in1], 11*4(%[in]) \t\n"
+ "lwc1 %[in2], 15*4(%[in]) \t\n"
+ "sub.s %[out3], %[out3], %[t2] \t\n"
+ "add.s %[out2], %[t3], %[t2] \t\n"
+ "add.s %[out1], %[out1], %[t1] \t\n"
+ "lwc1 %[in3], 3*4(%[in]) \t\n"
+ "lwc1 %[in4], 7*4(%[in]) \t\n"
+ "swc1 %[out3], 11*4(%[tmp]) \t\n"
+ "sub.s %[out2], %[out2], %[t1] \t\n"
+ "swc1 %[out1], 3*4(%[tmp]) \t\n"
+ "add.s %[out3], %[in1], %[in2] \t\n"
+ "add.s %[t2], %[in1], %[in3] \t\n"
+ "sub.s %[t3], %[in1], %[in2] \t\n"
+ "swc1 %[out2], 15*4(%[tmp]) \t\n"
+ "mul.s %[t0], %[in4], %[c8] \t\n"
+ "sub.s %[out3], %[out3], %[in3] \t\n"
+ "mul.s %[t2], %[t2], %[c6] \t\n"
+ "mul.s %[t3], %[t3], %[c7] \t\n"
+ "add.s %[t1], %[in2], %[in3] \t\n"
+ "mul.s %[out3], %[out3], %[c5] \t\n"
+ "add.s %[out1], %[t2], %[t3] \t\n"
+ "mul.s %[t1], %[t1], %[c9] \t\n"
+ "swc1 %[out3], 5*4(%[tmp]) \t\n"
+ "add.s %[out1], %[out1], %[t0] \t\n"
+ "add.s %[out2], %[t2], %[t1] \t\n"
+ "sub.s %[out3], %[t3], %[t1] \t\n"
+ "swc1 %[out1], 1*4(%[tmp]) \t\n"
+ "sub.s %[out2], %[out2], %[t0] \t\n"
+ "sub.s %[out3], %[out3], %[t0] \t\n"
+ "swc1 %[out2], 13*4(%[tmp]) \t\n"
+ "swc1 %[out3], 9*4(%[tmp]) \t\n"
+
+ : [t0] "=&f" (t0), [t1] "=&f" (t1),
+ [t2] "=&f" (t2), [t3] "=&f" (t3),
+ [in1] "=&f" (in1), [in2] "=&f" (in2),
+ [in3] "=&f" (in3), [in4] "=&f" (in4),
+ [in5] "=&f" (in5),
+ [out1] "=&f" (out1), [out2] "=&f" (out2),
+ [out3] "=&f" (out3),
+ [c1] "=&f" (c1), [c2] "=&f" (c2),
+ [c3] "=&f" (c3), [c4] "=&f" (c4),
+ [c5] "=&f" (c5), [c6] "=&f" (c6),
+ [c7] "=&f" (c7), [c8] "=&f" (c8),
+ [c9] "=&f" (c9)
+ : [in] "r" (in), [tmp] "r" (tmp)
+ : "memory"
+ );
+
+ /* loop 4 */
+ __asm__ volatile (
+ "lwc1 %[in1], 2*4(%[tmp]) \t\n"
+ "lwc1 %[in2], 0(%[tmp]) \t\n"
+ "lwc1 %[in3], 3*4(%[tmp]) \t\n"
+ "lwc1 %[in4], 1*4(%[tmp]) \t\n"
+ "li.s %[c1], 0.50190991877167369479 \t\n"
+ "li.s %[c2], 5.73685662283492756461 \t\n"
+ "add.s %[s0], %[in1], %[in2] \t\n"
+ "sub.s %[s2], %[in1], %[in2] \t\n"
+ "add.s %[s1], %[in3], %[in4] \t\n"
+ "sub.s %[s3], %[in3], %[in4] \t\n"
+ "lwc1 %[in1], 9*4(%[win]) \t\n"
+ "lwc1 %[in2], 4*9*4(%[buf]) \t\n"
+ "lwc1 %[in3], 8*4(%[win]) \t\n"
+ "mul.s %[s1], %[s1], %[c1] \t\n"
+ "mul.s %[s3], %[s3], %[c2] \t\n"
+ "lwc1 %[in4], 4*8*4(%[buf]) \t\n"
+ "lwc1 %[in5], 29*4(%[win]) \t\n"
+ "lwc1 %[in6], 28*4(%[win]) \t\n"
+ "add.s %[t0], %[s0], %[s1] \t\n"
+ "sub.s %[t1], %[s0], %[s1] \t\n"
+ "li.s %[c1], 0.51763809020504152469 \t\n"
+ "li.s %[c2], 1.93185165257813657349 \t\n"
+ "mul.s %[out3], %[in5], %[t0] \t\n"
+ "madd.s %[out1], %[in2], %[in1], %[t1] \t\n"
+ "madd.s %[out2], %[in4], %[in3], %[t1] \t\n"
+ "mul.s %[out4], %[in6], %[t0] \t\n"
+ "add.s %[t0], %[s2], %[s3] \t\n"
+ "swc1 %[out3], 4*9*4(%[buf]) \t\n"
+ "swc1 %[out1], 288*4(%[out]) \t\n"
+ "swc1 %[out2], 256*4(%[out]) \t\n"
+ "swc1 %[out4], 4*8*4(%[buf]) \t\n"
+ "sub.s %[t1], %[s2], %[s3] \t\n"
+ "lwc1 %[in1], 17*4(%[win]) \t\n"
+ "lwc1 %[in2], 4*17*4(%[buf]) \t\n"
+ "lwc1 %[in3], 0(%[win]) \t\n"
+ "lwc1 %[in4], 0(%[buf]) \t\n"
+ "lwc1 %[in5], 37*4(%[win]) \t\n"
+ "lwc1 %[in6], 20*4(%[win]) \t\n"
+ "madd.s %[out1], %[in2], %[in1], %[t1] \t\n"
+ "lwc1 %[in1], 6*4(%[tmp]) \t\n"
+ "madd.s %[out2], %[in4], %[in3], %[t1] \t\n"
+ "mul.s %[out3], %[t0], %[in5] \t\n"
+ "mul.s %[out4], %[t0], %[in6] \t\n"
+ "swc1 %[out1], 544*4(%[out]) \t\n"
+ "lwc1 %[in2], 4*4(%[tmp]) \t\n"
+ "swc1 %[out2], 0(%[out]) \t\n"
+ "swc1 %[out3], 4*17*4(%[buf]) \t\n"
+ "swc1 %[out4], 0(%[buf]) \t\n"
+ "lwc1 %[in3], 7*4(%[tmp]) \t\n"
+ "add.s %[s0], %[in1], %[in2] \t\n"
+ "sub.s %[s2], %[in1], %[in2] \t\n"
+ "lwc1 %[in4], 5*4(%[tmp]) \t\n"
+ "add.s %[s1], %[in3], %[in4] \t\n"
+ "sub.s %[s3], %[in3], %[in4] \t\n"
+ "lwc1 %[in1], 10*4(%[win]) \t\n"
+ "lwc1 %[in2], 4*10*4(%[buf]) \t\n"
+ "lwc1 %[in3], 7*4(%[win]) \t\n"
+ "mul.s %[s1], %[s1], %[c1] \t\n"
+ "mul.s %[s3], %[s3], %[c2] \t\n"
+ "add.s %[t0], %[s0], %[s1] \t\n"
+ "sub.s %[t1], %[s0], %[s1] \t\n"
+ "lwc1 %[in4], 4*7*4(%[buf]) \t\n"
+ "lwc1 %[in5], 30*4(%[win]) \t\n"
+ "lwc1 %[in6], 27*4(%[win]) \t\n"
+ "li.s %[c1], 0.55168895948124587824 \t\n"
+ "madd.s %[out1], %[in2], %[in1], %[t1] \t\n"
+ "madd.s %[out2], %[in4], %[in3], %[t1] \t\n"
+ "mul.s %[out3], %[t0], %[in5] \t\n"
+ "mul.s %[out4], %[t0], %[in6] \t\n"
+ "add.s %[t0], %[s2], %[s3] \t\n"
+ "swc1 %[out1], 320*4(%[out]) \t\n"
+ "swc1 %[out2], 224*4(%[out]) \t\n"
+ "swc1 %[out3], 4*10*4(%[buf]) \t\n"
+ "swc1 %[out4], 4*7*4(%[buf]) \t\n"
+ "sub.s %[t1], %[s2], %[s3] \t\n"
+ "lwc1 %[in1], 16*4(%[win]) \t\n"
+ "lwc1 %[in2], 4*16*4(%[buf]) \t\n"
+ "lwc1 %[in3], 1*4(%[win]) \t\n"
+ "lwc1 %[in4], 4*1*4(%[buf]) \t\n"
+ "lwc1 %[in5], 36*4(%[win]) \t\n"
+ "lwc1 %[in6], 21*4(%[win]) \t\n"
+ "madd.s %[out1], %[in2], %[in1], %[t1] \t\n"
+ "lwc1 %[in1], 10*4(%[tmp]) \t\n"
+ "madd.s %[out2], %[in4], %[in3], %[t1] \t\n"
+ "mul.s %[out3], %[in5], %[t0] \t\n"
+ "mul.s %[out4], %[in6], %[t0] \t\n"
+ "swc1 %[out1], 512*4(%[out]) \t\n"
+ "lwc1 %[in2], 8*4(%[tmp]) \t\n"
+ "swc1 %[out2], 32*4(%[out]) \t\n"
+ "swc1 %[out3], 4*16*4(%[buf]) \t\n"
+ "swc1 %[out4], 4*1*4(%[buf]) \t\n"
+ "li.s %[c2], 1.18310079157624925896 \t\n"
+ "add.s %[s0], %[in1], %[in2] \t\n"
+ "sub.s %[s2], %[in1], %[in2] \t\n"
+ "lwc1 %[in3], 11*4(%[tmp]) \t\n"
+ "lwc1 %[in4], 9*4(%[tmp]) \t\n"
+ "add.s %[s1], %[in3], %[in4] \t\n"
+ "sub.s %[s3], %[in3], %[in4] \t\n"
+ "lwc1 %[in1], 11*4(%[win]) \t\n"
+ "lwc1 %[in2], 4*11*4(%[buf]) \t\n"
+ "lwc1 %[in3], 6*4(%[win]) \t\n"
+ "mul.s %[s1], %[s1], %[c1] \t\n"
+ "mul.s %[s3], %[s3], %[c2] \t\n"
+ "lwc1 %[in4], 4*6*4(%[buf]) \t\n"
+ "lwc1 %[in5], 31*4(%[win]) \t\n"
+ "lwc1 %[in6], 26*4(%[win]) \t\n"
+ "add.s %[t0], %[s0], %[s1] \t\n"
+ "sub.s %[t1], %[s0], %[s1] \t\n"
+ "mul.s %[out3], %[t0], %[in5] \t\n"
+ "mul.s %[out4], %[t0], %[in6] \t\n"
+ "add.s %[t0], %[s2], %[s3] \t\n"
+ "madd.s %[out1], %[in2], %[in1], %[t1] \t\n"
+ "madd.s %[out2], %[in4], %[in3], %[t1] \t\n"
+ "swc1 %[out3], 4*11*4(%[buf]) \t\n"
+ "swc1 %[out4], 4*6*4(%[buf]) \t\n"
+ "sub.s %[t1], %[s2], %[s3] \t\n"
+ "swc1 %[out1], 352*4(%[out]) \t\n"
+ "swc1 %[out2], 192*4(%[out]) \t\n"
+ "lwc1 %[in1], 15*4(%[win]) \t\n"
+ "lwc1 %[in2], 4*15*4(%[buf]) \t\n"
+ "lwc1 %[in3], 2*4(%[win]) \t\n"
+ "lwc1 %[in4], 4*2*4(%[buf]) \t\n"
+ "lwc1 %[in5], 35*4(%[win]) \t\n"
+ "lwc1 %[in6], 22*4(%[win]) \t\n"
+ "madd.s %[out1], %[in2], %[in1], %[t1] \t\n"
+ "lwc1 %[in1], 14*4(%[tmp]) \t\n"
+ "madd.s %[out2], %[in4], %[in3], %[t1] \t\n"
+ "mul.s %[out3], %[t0], %[in5] \t\n"
+ "mul.s %[out4], %[t0], %[in6] \t\n"
+ "swc1 %[out1], 480*4(%[out]) \t\n"
+ "lwc1 %[in2], 12*4(%[tmp]) \t\n"
+ "swc1 %[out2], 64*4(%[out]) \t\n"
+ "swc1 %[out3], 4*15*4(%[buf]) \t\n"
+ "swc1 %[out4], 4*2*4(%[buf]) \t\n"
+ "lwc1 %[in3], 15*4(%[tmp]) \t\n"
+ "add.s %[s0], %[in1], %[in2] \t\n"
+ "sub.s %[s2], %[in1], %[in2] \t\n"
+ "lwc1 %[in4], 13*4(%[tmp]) \t\n"
+ "li.s %[c1], 0.61038729438072803416 \t\n"
+ "li.s %[c2], 0.87172339781054900991 \t\n"
+ "add.s %[s1], %[in3], %[in4] \t\n"
+ "sub.s %[s3], %[in3], %[in4] \t\n"
+ "lwc1 %[in1], 12*4(%[win]) \t\n"
+ "lwc1 %[in2], 4*12*4(%[buf]) \t\n"
+ "lwc1 %[in3], 5*4(%[win]) \t\n"
+ "mul.s %[s1], %[s1], %[c1] \t\n"
+ "mul.s %[s3], %[s3], %[c2] \t\n"
+ "lwc1 %[in4], 4*5*4(%[buf]) \t\n"
+ "lwc1 %[in5], 32*4(%[win]) \t\n"
+ "lwc1 %[in6], 25*4(%[win]) \t\n"
+ "add.s %[t0], %[s0], %[s1] \t\n"
+ "sub.s %[t1], %[s0], %[s1] \t\n"
+ "lwc1 %[s0], 16*4(%[tmp]) \t\n"
+ "lwc1 %[s1], 17*4(%[tmp]) \t\n"
+ "li.s %[c1], 0.70710678118654752439 \t\n"
+ "mul.s %[out3], %[t0], %[in5] \t\n"
+ "madd.s %[out1], %[in2], %[in1], %[t1] \t\n"
+ "madd.s %[out2], %[in4], %[in3], %[t1] \t\n"
+ "mul.s %[out4], %[t0], %[in6] \t\n"
+ "add.s %[t0], %[s2], %[s3] \t\n"
+ "swc1 %[out3], 4*12*4(%[buf]) \t\n"
+ "swc1 %[out1], 384*4(%[out]) \t\n"
+ "swc1 %[out2], 160*4(%[out]) \t\n"
+ "swc1 %[out4], 4*5*4(%[buf]) \t\n"
+ "sub.s %[t1], %[s2], %[s3] \t\n"
+ "lwc1 %[in1], 14*4(%[win]) \t\n"
+ "lwc1 %[in2], 4*14*4(%[buf]) \t\n"
+ "lwc1 %[in3], 3*4(%[win]) \t\n"
+ "lwc1 %[in4], 4*3*4(%[buf]) \t\n"
+ "lwc1 %[in5], 34*4(%[win]) \t\n"
+ "lwc1 %[in6], 23*4(%[win]) \t\n"
+ "madd.s %[out1], %[in2], %[in1], %[t1] \t\n"
+ "mul.s %[s1], %[s1], %[c1] \t\n"
+ "madd.s %[out2], %[in4], %[in3], %[t1] \t\n"
+ "mul.s %[out3], %[in5], %[t0] \t\n"
+ "mul.s %[out4], %[in6], %[t0] \t\n"
+ "swc1 %[out1], 448*4(%[out]) \t\n"
+ "add.s %[t0], %[s0], %[s1] \t\n"
+ "swc1 %[out2], 96*4(%[out]) \t\n"
+ "swc1 %[out3], 4*14*4(%[buf]) \t\n"
+ "swc1 %[out4], 4*3*4(%[buf]) \t\n"
+ "sub.s %[t1], %[s0], %[s1] \t\n"
+ "lwc1 %[in1], 13*4(%[win]) \t\n"
+ "lwc1 %[in2], 4*13*4(%[buf]) \t\n"
+ "lwc1 %[in3], 4*4(%[win]) \t\n"
+ "lwc1 %[in4], 4*4*4(%[buf]) \t\n"
+ "lwc1 %[in5], 33*4(%[win]) \t\n"
+ "lwc1 %[in6], 24*4(%[win]) \t\n"
+ "madd.s %[out1], %[in2], %[in1], %[t1] \t\n"
+ "madd.s %[out2], %[in4], %[in3], %[t1] \t\n"
+ "mul.s %[out3], %[t0], %[in5] \t\n"
+ "mul.s %[out4], %[t0], %[in6] \t\n"
+ "swc1 %[out1], 416*4(%[out]) \t\n"
+ "swc1 %[out2], 128*4(%[out]) \t\n"
+ "swc1 %[out3], 4*13*4(%[buf]) \t\n"
+ "swc1 %[out4], 4*4*4(%[buf]) \t\n"
+
+ : [c1] "=&f" (c1), [c2] "=&f" (c2),
+ [in1] "=&f" (in1), [in2] "=&f" (in2),
+ [in3] "=&f" (in3), [in4] "=&f" (in4),
+ [in5] "=&f" (in5), [in6] "=&f" (in6),
+ [out1] "=&f" (out1), [out2] "=&f" (out2),
+ [out3] "=&f" (out3), [out4] "=&f" (out4),
+ [t0] "=&f" (t0), [t1] "=&f" (t1),
+ [t2] "=&f" (t2), [t3] "=&f" (t3),
+ [s0] "=&f" (s0), [s1] "=&f" (s1),
+ [s2] "=&f" (s2), [s3] "=&f" (s3)
+ : [tmp] "r" (tmp), [win] "r" (win),
+ [buf] "r" (buf), [out] "r" (out)
+ : "memory"
+ );
+}
+
+static void ff_imdct36_blocks_mips_float(float *out, float *buf, float *in,
+ int count, int switch_point, int block_type)
+{
+ int j;
+    for (j = 0; j < count; j++) {
+ /* apply window & overlap with previous buffer */
+
+ /* select window */
+ int win_idx = (switch_point && j < 2) ? 0 : block_type;
+ float *win = ff_mdct_win_float[win_idx + (4 & -(j & 1))];
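+        /* (4 & -(j & 1)) evaluates to 4 when j is odd, selecting the
+         * alternate window set used for odd output samples */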
+
+ imdct36_mips_float(out, buf, in, win);
+
+ in += 18;
+ buf += ((j&3) != 3 ? 1 : (72-3));
+ out++;
+ }
+}
+
+void ff_mpadsp_init_mipsfpu(MPADSPContext *s)
+{
+ s->apply_window_float = ff_mpadsp_apply_window_mips_float;
+ s->imdct36_blocks_float = ff_imdct36_blocks_mips_float;
+ s->dct32_float = ff_dct32_mips_float;
+}
diff --git a/libavcodec/mips/sbrdsp_mips.c b/libavcodec/mips/sbrdsp_mips.c
new file mode 100644
index 0000000000..d4460bad6e
--- /dev/null
+++ b/libavcodec/mips/sbrdsp_mips.c
@@ -0,0 +1,940 @@
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Authors: Darko Laus (darko@mips.com)
+ * Djordje Pesut (djordje@mips.com)
+ * Mirjana Vulin (mvulin@mips.com)
+ *
+ * AAC Spectral Band Replication decoding functions optimized for MIPS
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Reference: libavcodec/sbrdsp.c
+ */
+
+#include "config.h"
+#include "libavcodec/sbrdsp.h"
+
+#if HAVE_INLINE_ASM
+static void sbr_neg_odd_64_mips(float *x)
+{
+ int Temp1, Temp2, Temp3, Temp4, Temp5;
+ float *x1 = &x[1];
+ float *x_end = x1 + 64;
+
+ /* loop unrolled 4 times */
+ __asm__ volatile (
+ "lui %[Temp5], 0x8000 \n\t"
+ "1: \n\t"
+ "lw %[Temp1], 0(%[x1]) \n\t"
+ "lw %[Temp2], 8(%[x1]) \n\t"
+ "lw %[Temp3], 16(%[x1]) \n\t"
+ "lw %[Temp4], 24(%[x1]) \n\t"
+ "xor %[Temp1], %[Temp1], %[Temp5] \n\t"
+ "xor %[Temp2], %[Temp2], %[Temp5] \n\t"
+ "xor %[Temp3], %[Temp3], %[Temp5] \n\t"
+ "xor %[Temp4], %[Temp4], %[Temp5] \n\t"
+ "sw %[Temp1], 0(%[x1]) \n\t"
+ "sw %[Temp2], 8(%[x1]) \n\t"
+ "sw %[Temp3], 16(%[x1]) \n\t"
+ "sw %[Temp4], 24(%[x1]) \n\t"
+ "addiu %[x1], %[x1], 32 \n\t"
+ "bne %[x1], %[x_end], 1b \n\t"
+
+ : [Temp1]"=&r"(Temp1), [Temp2]"=&r"(Temp2),
+ [Temp3]"=&r"(Temp3), [Temp4]"=&r"(Temp4),
+ [Temp5]"=&r"(Temp5), [x1]"+r"(x1)
+ : [x_end]"r"(x_end)
+ : "memory"
+ );
+}
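+
+/* A plain-C sketch of the operation above (cf. the generic version in
+ * libavcodec/sbrdsp.c): XOR-ing 0x80000000 into every odd-indexed
+ * element flips its IEEE-754 sign bit, i.e.
+ *
+ *     for (i = 1; i < 64; i += 2)
+ *         x[i] = -x[i];
+ */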
+
+static void sbr_qmf_pre_shuffle_mips(float *z)
+{
+ int Temp1, Temp2, Temp3, Temp4, Temp5, Temp6;
+ float *z1 = &z[66];
+ float *z2 = &z[59];
+ float *z3 = &z[2];
+ float *z4 = z1 + 60;
+
+ /* loop unrolled 5 times */
+ __asm__ volatile (
+ "lui %[Temp6], 0x8000 \n\t"
+ "1: \n\t"
+ "lw %[Temp1], 0(%[z2]) \n\t"
+ "lw %[Temp2], 4(%[z2]) \n\t"
+ "lw %[Temp3], 8(%[z2]) \n\t"
+ "lw %[Temp4], 12(%[z2]) \n\t"
+ "lw %[Temp5], 16(%[z2]) \n\t"
+ "xor %[Temp1], %[Temp1], %[Temp6] \n\t"
+ "xor %[Temp2], %[Temp2], %[Temp6] \n\t"
+ "xor %[Temp3], %[Temp3], %[Temp6] \n\t"
+ "xor %[Temp4], %[Temp4], %[Temp6] \n\t"
+ "xor %[Temp5], %[Temp5], %[Temp6] \n\t"
+ "addiu %[z2], %[z2], -20 \n\t"
+ "sw %[Temp1], 32(%[z1]) \n\t"
+ "sw %[Temp2], 24(%[z1]) \n\t"
+ "sw %[Temp3], 16(%[z1]) \n\t"
+ "sw %[Temp4], 8(%[z1]) \n\t"
+ "sw %[Temp5], 0(%[z1]) \n\t"
+ "lw %[Temp1], 0(%[z3]) \n\t"
+ "lw %[Temp2], 4(%[z3]) \n\t"
+ "lw %[Temp3], 8(%[z3]) \n\t"
+ "lw %[Temp4], 12(%[z3]) \n\t"
+ "lw %[Temp5], 16(%[z3]) \n\t"
+ "sw %[Temp1], 4(%[z1]) \n\t"
+ "sw %[Temp2], 12(%[z1]) \n\t"
+ "sw %[Temp3], 20(%[z1]) \n\t"
+ "sw %[Temp4], 28(%[z1]) \n\t"
+ "sw %[Temp5], 36(%[z1]) \n\t"
+ "addiu %[z3], %[z3], 20 \n\t"
+ "addiu %[z1], %[z1], 40 \n\t"
+ "bne %[z1], %[z4], 1b \n\t"
+ "lw %[Temp1], 132(%[z]) \n\t"
+ "lw %[Temp2], 128(%[z]) \n\t"
+ "lw %[Temp3], 0(%[z]) \n\t"
+ "lw %[Temp4], 4(%[z]) \n\t"
+ "xor %[Temp1], %[Temp1], %[Temp6] \n\t"
+ "sw %[Temp1], 504(%[z]) \n\t"
+ "sw %[Temp2], 508(%[z]) \n\t"
+ "sw %[Temp3], 256(%[z]) \n\t"
+ "sw %[Temp4], 260(%[z]) \n\t"
+
+ : [Temp1]"=&r"(Temp1), [Temp2]"=&r"(Temp2),
+ [Temp3]"=&r"(Temp3), [Temp4]"=&r"(Temp4),
+ [Temp5]"=&r"(Temp5), [Temp6]"=&r"(Temp6),
+ [z1]"+r"(z1), [z2]"+r"(z2), [z3]"+r"(z3)
+ : [z4]"r"(z4), [z]"r"(z)
+ : "memory"
+ );
+}
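+
+/* Roughly equivalent plain C (cf. libavcodec/sbrdsp.c): the upper half
+ * of z[] interleaves the negated, reversed first half with the
+ * ascending values that follow z[0]:
+ *
+ *     z[64] = z[0];
+ *     z[65] = z[1];
+ *     for (k = 1; k < 32; k++) {
+ *         z[64 + 2*k]     = -z[64 - k];
+ *         z[64 + 2*k + 1] =  z[k + 1];
+ *     }
+ */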
+
+static void sbr_qmf_post_shuffle_mips(float W[32][2], const float *z)
+{
+ int Temp1, Temp2, Temp3, Temp4, Temp5;
+ float *W_ptr = (float *)W;
+ float *z1 = (float *)z;
+ float *z2 = (float *)&z[60];
+ float *z_end = z1 + 32;
+
+ /* loop unrolled 4 times */
+ __asm__ volatile (
+ "lui %[Temp5], 0x8000 \n\t"
+ "1: \n\t"
+ "lw %[Temp1], 0(%[z2]) \n\t"
+ "lw %[Temp2], 4(%[z2]) \n\t"
+ "lw %[Temp3], 8(%[z2]) \n\t"
+ "lw %[Temp4], 12(%[z2]) \n\t"
+ "xor %[Temp1], %[Temp1], %[Temp5] \n\t"
+ "xor %[Temp2], %[Temp2], %[Temp5] \n\t"
+ "xor %[Temp3], %[Temp3], %[Temp5] \n\t"
+ "xor %[Temp4], %[Temp4], %[Temp5] \n\t"
+ "addiu %[z2], %[z2], -16 \n\t"
+ "sw %[Temp1], 24(%[W_ptr]) \n\t"
+ "sw %[Temp2], 16(%[W_ptr]) \n\t"
+ "sw %[Temp3], 8(%[W_ptr]) \n\t"
+ "sw %[Temp4], 0(%[W_ptr]) \n\t"
+ "lw %[Temp1], 0(%[z1]) \n\t"
+ "lw %[Temp2], 4(%[z1]) \n\t"
+ "lw %[Temp3], 8(%[z1]) \n\t"
+ "lw %[Temp4], 12(%[z1]) \n\t"
+ "sw %[Temp1], 4(%[W_ptr]) \n\t"
+ "sw %[Temp2], 12(%[W_ptr]) \n\t"
+ "sw %[Temp3], 20(%[W_ptr]) \n\t"
+ "sw %[Temp4], 28(%[W_ptr]) \n\t"
+ "addiu %[z1], %[z1], 16 \n\t"
+ "addiu %[W_ptr], %[W_ptr], 32 \n\t"
+ "bne %[z1], %[z_end], 1b \n\t"
+
+ : [Temp1]"=&r"(Temp1), [Temp2]"=&r"(Temp2),
+ [Temp3]"=&r"(Temp3), [Temp4]"=&r"(Temp4),
+ [Temp5]"=&r"(Temp5), [z1]"+r"(z1),
+ [z2]"+r"(z2), [W_ptr]"+r"(W_ptr)
+ : [z_end]"r"(z_end)
+ : "memory"
+ );
+}
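+
+/* Roughly equivalent plain C (cf. libavcodec/sbrdsp.c):
+ *
+ *     for (k = 0; k < 32; k++) {
+ *         W[k][0] = -z[63 - k];
+ *         W[k][1] =  z[k];
+ *     }
+ */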
+
+#if HAVE_MIPSFPU
+static void sbr_sum64x5_mips(float *z)
+{
+ int k;
+ float *z1;
+ float f1, f2, f3, f4, f5, f6, f7, f8;
+ for (k = 0; k < 64; k += 8) {
+
+ z1 = &z[k];
+
+ /* loop unrolled 8 times */
+ __asm__ volatile (
+ "lwc1 $f0, 0(%[z1]) \n\t"
+ "lwc1 $f1, 256(%[z1]) \n\t"
+ "lwc1 $f2, 4(%[z1]) \n\t"
+ "lwc1 $f3, 260(%[z1]) \n\t"
+ "lwc1 $f4, 8(%[z1]) \n\t"
+ "add.s %[f1], $f0, $f1 \n\t"
+ "lwc1 $f5, 264(%[z1]) \n\t"
+ "add.s %[f2], $f2, $f3 \n\t"
+ "lwc1 $f6, 12(%[z1]) \n\t"
+ "lwc1 $f7, 268(%[z1]) \n\t"
+ "add.s %[f3], $f4, $f5 \n\t"
+ "lwc1 $f8, 16(%[z1]) \n\t"
+ "lwc1 $f9, 272(%[z1]) \n\t"
+ "add.s %[f4], $f6, $f7 \n\t"
+ "lwc1 $f10, 20(%[z1]) \n\t"
+ "lwc1 $f11, 276(%[z1]) \n\t"
+ "add.s %[f5], $f8, $f9 \n\t"
+ "lwc1 $f12, 24(%[z1]) \n\t"
+ "lwc1 $f13, 280(%[z1]) \n\t"
+ "add.s %[f6], $f10, $f11 \n\t"
+ "lwc1 $f14, 28(%[z1]) \n\t"
+ "lwc1 $f15, 284(%[z1]) \n\t"
+ "add.s %[f7], $f12, $f13 \n\t"
+ "lwc1 $f0, 512(%[z1]) \n\t"
+ "lwc1 $f1, 516(%[z1]) \n\t"
+ "add.s %[f8], $f14, $f15 \n\t"
+ "lwc1 $f2, 520(%[z1]) \n\t"
+ "add.s %[f1], %[f1], $f0 \n\t"
+ "add.s %[f2], %[f2], $f1 \n\t"
+ "lwc1 $f3, 524(%[z1]) \n\t"
+ "add.s %[f3], %[f3], $f2 \n\t"
+ "lwc1 $f4, 528(%[z1]) \n\t"
+ "lwc1 $f5, 532(%[z1]) \n\t"
+ "add.s %[f4], %[f4], $f3 \n\t"
+ "lwc1 $f6, 536(%[z1]) \n\t"
+ "add.s %[f5], %[f5], $f4 \n\t"
+ "add.s %[f6], %[f6], $f5 \n\t"
+ "lwc1 $f7, 540(%[z1]) \n\t"
+ "add.s %[f7], %[f7], $f6 \n\t"
+ "lwc1 $f0, 768(%[z1]) \n\t"
+ "lwc1 $f1, 772(%[z1]) \n\t"
+ "add.s %[f8], %[f8], $f7 \n\t"
+ "lwc1 $f2, 776(%[z1]) \n\t"
+ "add.s %[f1], %[f1], $f0 \n\t"
+ "add.s %[f2], %[f2], $f1 \n\t"
+ "lwc1 $f3, 780(%[z1]) \n\t"
+ "add.s %[f3], %[f3], $f2 \n\t"
+ "lwc1 $f4, 784(%[z1]) \n\t"
+ "lwc1 $f5, 788(%[z1]) \n\t"
+ "add.s %[f4], %[f4], $f3 \n\t"
+ "lwc1 $f6, 792(%[z1]) \n\t"
+ "add.s %[f5], %[f5], $f4 \n\t"
+ "add.s %[f6], %[f6], $f5 \n\t"
+ "lwc1 $f7, 796(%[z1]) \n\t"
+ "add.s %[f7], %[f7], $f6 \n\t"
+ "lwc1 $f0, 1024(%[z1]) \n\t"
+ "lwc1 $f1, 1028(%[z1]) \n\t"
+ "add.s %[f8], %[f8], $f7 \n\t"
+ "lwc1 $f2, 1032(%[z1]) \n\t"
+ "add.s %[f1], %[f1], $f0 \n\t"
+ "add.s %[f2], %[f2], $f1 \n\t"
+ "lwc1 $f3, 1036(%[z1]) \n\t"
+ "add.s %[f3], %[f3], $f2 \n\t"
+ "lwc1 $f4, 1040(%[z1]) \n\t"
+ "lwc1 $f5, 1044(%[z1]) \n\t"
+ "add.s %[f4], %[f4], $f3 \n\t"
+ "lwc1 $f6, 1048(%[z1]) \n\t"
+ "add.s %[f5], %[f5], $f4 \n\t"
+ "add.s %[f6], %[f6], $f5 \n\t"
+ "lwc1 $f7, 1052(%[z1]) \n\t"
+ "add.s %[f7], %[f7], $f6 \n\t"
+ "swc1 %[f1], 0(%[z1]) \n\t"
+ "swc1 %[f2], 4(%[z1]) \n\t"
+ "add.s %[f8], %[f8], $f7 \n\t"
+ "swc1 %[f3], 8(%[z1]) \n\t"
+ "swc1 %[f4], 12(%[z1]) \n\t"
+ "swc1 %[f5], 16(%[z1]) \n\t"
+ "swc1 %[f6], 20(%[z1]) \n\t"
+ "swc1 %[f7], 24(%[z1]) \n\t"
+ "swc1 %[f8], 28(%[z1]) \n\t"
+
+ : [f1]"=&f"(f1), [f2]"=&f"(f2), [f3]"=&f"(f3),
+ [f4]"=&f"(f4), [f5]"=&f"(f5), [f6]"=&f"(f6),
+ [f7]"=&f"(f7), [f8]"=&f"(f8)
+ : [z1]"r"(z1)
+ : "$f0", "$f1", "$f2", "$f3", "$f4", "$f5",
+ "$f6", "$f7", "$f8", "$f9", "$f10", "$f11",
+ "$f12", "$f13", "$f14", "$f15",
+ "memory"
+ );
+ }
+}
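+
+/* Each pass of the asm block above handles eight k values of the plain-C
+ * accumulation (cf. libavcodec/sbrdsp.c):
+ *
+ *     for (k = 0; k < 64; k++)
+ *         z[k] += z[k + 64] + z[k + 128] + z[k + 192] + z[k + 256];
+ */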
+
+static float sbr_sum_square_mips(float (*x)[2], int n)
+{
+ float sum0 = 0.0f, sum1 = 0.0f;
+ float *p_x;
+ float temp0, temp1, temp2, temp3;
+ float *loop_end;
+ p_x = &x[0][0];
+ loop_end = p_x + (n >> 1)*4 - 4;
+
+ __asm__ volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "lwc1 %[temp0], 0(%[p_x]) \n\t"
+ "lwc1 %[temp1], 4(%[p_x]) \n\t"
+ "lwc1 %[temp2], 8(%[p_x]) \n\t"
+ "lwc1 %[temp3], 12(%[p_x]) \n\t"
+ "1: \n\t"
+ "addiu %[p_x], %[p_x], 16 \n\t"
+ "madd.s %[sum0], %[sum0], %[temp0], %[temp0] \n\t"
+ "lwc1 %[temp0], 0(%[p_x]) \n\t"
+ "madd.s %[sum1], %[sum1], %[temp1], %[temp1] \n\t"
+ "lwc1 %[temp1], 4(%[p_x]) \n\t"
+ "madd.s %[sum0], %[sum0], %[temp2], %[temp2] \n\t"
+ "lwc1 %[temp2], 8(%[p_x]) \n\t"
+ "madd.s %[sum1], %[sum1], %[temp3], %[temp3] \n\t"
+ "bne %[p_x], %[loop_end], 1b \n\t"
+ " lwc1 %[temp3], 12(%[p_x]) \n\t"
+ "madd.s %[sum0], %[sum0], %[temp0], %[temp0] \n\t"
+ "madd.s %[sum1], %[sum1], %[temp1], %[temp1] \n\t"
+ "madd.s %[sum0], %[sum0], %[temp2], %[temp2] \n\t"
+ "madd.s %[sum1], %[sum1], %[temp3], %[temp3] \n\t"
+ ".set pop \n\t"
+
+ : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
+ [temp3]"=&f"(temp3), [sum0]"+f"(sum0), [sum1]"+f"(sum1),
+ [p_x]"+r"(p_x)
+ : [loop_end]"r"(loop_end)
+ : "memory"
+ );
+ return sum0 + sum1;
+}
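+
+/* The loop above is software pipelined: the loads for the next group of
+ * four madd.s are interleaved with the multiply-accumulates (one sits in
+ * the branch delay slot) and the last group is peeled after the loop.
+ * Two accumulators, sum0 and sum1, break the floating-point dependency
+ * chain; the result is the plain-C
+ *
+ *     for (i = 0; i < n; i++)
+ *         sum += x[i][0] * x[i][0] + x[i][1] * x[i][1];
+ *
+ * (cf. libavcodec/sbrdsp.c), modulo summation order. */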
+
+static void sbr_qmf_deint_bfly_mips(float *v, const float *src0, const float *src1)
+{
+ int i;
+ float temp0, temp1, temp2, temp3, temp4, temp5;
+ float temp6, temp7, temp8, temp9, temp10, temp11;
+ float *v0 = v;
+ float *v1 = &v[127];
+ float *psrc0 = (float*)src0;
+ float *psrc1 = (float*)&src1[63];
+
+ for (i = 0; i < 4; i++) {
+
+ /* loop unrolled 16 times */
+ __asm__ volatile(
+ "lwc1 %[temp0], 0(%[src0]) \n\t"
+ "lwc1 %[temp1], 0(%[src1]) \n\t"
+ "lwc1 %[temp3], 4(%[src0]) \n\t"
+ "lwc1 %[temp4], -4(%[src1]) \n\t"
+ "lwc1 %[temp6], 8(%[src0]) \n\t"
+ "lwc1 %[temp7], -8(%[src1]) \n\t"
+ "lwc1 %[temp9], 12(%[src0]) \n\t"
+ "lwc1 %[temp10], -12(%[src1]) \n\t"
+ "add.s %[temp2], %[temp0], %[temp1] \n\t"
+ "add.s %[temp5], %[temp3], %[temp4] \n\t"
+ "add.s %[temp8], %[temp6], %[temp7] \n\t"
+ "add.s %[temp11], %[temp9], %[temp10] \n\t"
+ "sub.s %[temp0], %[temp0], %[temp1] \n\t"
+ "sub.s %[temp3], %[temp3], %[temp4] \n\t"
+ "sub.s %[temp6], %[temp6], %[temp7] \n\t"
+ "sub.s %[temp9], %[temp9], %[temp10] \n\t"
+ "swc1 %[temp2], 0(%[v1]) \n\t"
+ "swc1 %[temp0], 0(%[v0]) \n\t"
+ "swc1 %[temp5], -4(%[v1]) \n\t"
+ "swc1 %[temp3], 4(%[v0]) \n\t"
+ "swc1 %[temp8], -8(%[v1]) \n\t"
+ "swc1 %[temp6], 8(%[v0]) \n\t"
+ "swc1 %[temp11], -12(%[v1]) \n\t"
+ "swc1 %[temp9], 12(%[v0]) \n\t"
+ "lwc1 %[temp0], 16(%[src0]) \n\t"
+ "lwc1 %[temp1], -16(%[src1]) \n\t"
+ "lwc1 %[temp3], 20(%[src0]) \n\t"
+ "lwc1 %[temp4], -20(%[src1]) \n\t"
+ "lwc1 %[temp6], 24(%[src0]) \n\t"
+ "lwc1 %[temp7], -24(%[src1]) \n\t"
+ "lwc1 %[temp9], 28(%[src0]) \n\t"
+ "lwc1 %[temp10], -28(%[src1]) \n\t"
+ "add.s %[temp2], %[temp0], %[temp1] \n\t"
+ "add.s %[temp5], %[temp3], %[temp4] \n\t"
+ "add.s %[temp8], %[temp6], %[temp7] \n\t"
+ "add.s %[temp11], %[temp9], %[temp10] \n\t"
+ "sub.s %[temp0], %[temp0], %[temp1] \n\t"
+ "sub.s %[temp3], %[temp3], %[temp4] \n\t"
+ "sub.s %[temp6], %[temp6], %[temp7] \n\t"
+ "sub.s %[temp9], %[temp9], %[temp10] \n\t"
+ "swc1 %[temp2], -16(%[v1]) \n\t"
+ "swc1 %[temp0], 16(%[v0]) \n\t"
+ "swc1 %[temp5], -20(%[v1]) \n\t"
+ "swc1 %[temp3], 20(%[v0]) \n\t"
+ "swc1 %[temp8], -24(%[v1]) \n\t"
+ "swc1 %[temp6], 24(%[v0]) \n\t"
+ "swc1 %[temp11], -28(%[v1]) \n\t"
+ "swc1 %[temp9], 28(%[v0]) \n\t"
+ "lwc1 %[temp0], 32(%[src0]) \n\t"
+ "lwc1 %[temp1], -32(%[src1]) \n\t"
+ "lwc1 %[temp3], 36(%[src0]) \n\t"
+ "lwc1 %[temp4], -36(%[src1]) \n\t"
+ "lwc1 %[temp6], 40(%[src0]) \n\t"
+ "lwc1 %[temp7], -40(%[src1]) \n\t"
+ "lwc1 %[temp9], 44(%[src0]) \n\t"
+ "lwc1 %[temp10], -44(%[src1]) \n\t"
+ "add.s %[temp2], %[temp0], %[temp1] \n\t"
+ "add.s %[temp5], %[temp3], %[temp4] \n\t"
+ "add.s %[temp8], %[temp6], %[temp7] \n\t"
+ "add.s %[temp11], %[temp9], %[temp10] \n\t"
+ "sub.s %[temp0], %[temp0], %[temp1] \n\t"
+ "sub.s %[temp3], %[temp3], %[temp4] \n\t"
+ "sub.s %[temp6], %[temp6], %[temp7] \n\t"
+ "sub.s %[temp9], %[temp9], %[temp10] \n\t"
+ "swc1 %[temp2], -32(%[v1]) \n\t"
+ "swc1 %[temp0], 32(%[v0]) \n\t"
+ "swc1 %[temp5], -36(%[v1]) \n\t"
+ "swc1 %[temp3], 36(%[v0]) \n\t"
+ "swc1 %[temp8], -40(%[v1]) \n\t"
+ "swc1 %[temp6], 40(%[v0]) \n\t"
+ "swc1 %[temp11], -44(%[v1]) \n\t"
+ "swc1 %[temp9], 44(%[v0]) \n\t"
+ "lwc1 %[temp0], 48(%[src0]) \n\t"
+ "lwc1 %[temp1], -48(%[src1]) \n\t"
+ "lwc1 %[temp3], 52(%[src0]) \n\t"
+ "lwc1 %[temp4], -52(%[src1]) \n\t"
+ "lwc1 %[temp6], 56(%[src0]) \n\t"
+ "lwc1 %[temp7], -56(%[src1]) \n\t"
+ "lwc1 %[temp9], 60(%[src0]) \n\t"
+ "lwc1 %[temp10], -60(%[src1]) \n\t"
+ "add.s %[temp2], %[temp0], %[temp1] \n\t"
+ "add.s %[temp5], %[temp3], %[temp4] \n\t"
+ "add.s %[temp8], %[temp6], %[temp7] \n\t"
+ "add.s %[temp11], %[temp9], %[temp10] \n\t"
+ "sub.s %[temp0], %[temp0], %[temp1] \n\t"
+ "sub.s %[temp3], %[temp3], %[temp4] \n\t"
+ "sub.s %[temp6], %[temp6], %[temp7] \n\t"
+ "sub.s %[temp9], %[temp9], %[temp10] \n\t"
+ "swc1 %[temp2], -48(%[v1]) \n\t"
+ "swc1 %[temp0], 48(%[v0]) \n\t"
+ "swc1 %[temp5], -52(%[v1]) \n\t"
+ "swc1 %[temp3], 52(%[v0]) \n\t"
+ "swc1 %[temp8], -56(%[v1]) \n\t"
+ "swc1 %[temp6], 56(%[v0]) \n\t"
+ "swc1 %[temp11], -60(%[v1]) \n\t"
+ "swc1 %[temp9], 60(%[v0]) \n\t"
+ "addiu %[src0], %[src0], 64 \n\t"
+ "addiu %[src1], %[src1], -64 \n\t"
+ "addiu %[v0], %[v0], 64 \n\t"
+ "addiu %[v1], %[v1], -64 \n\t"
+
+ : [v0]"+r"(v0), [v1]"+r"(v1), [src0]"+r"(psrc0), [src1]"+r"(psrc1),
+ [temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
+ [temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
+ [temp6]"=&f"(temp6), [temp7]"=&f"(temp7), [temp8]"=&f"(temp8),
+ [temp9]"=&f"(temp9), [temp10]"=&f"(temp10), [temp11]"=&f"(temp11)
+ :
+ :"memory"
+ );
+ }
+}
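+
+/* Roughly equivalent plain C for the deinterleaving butterfly above
+ * (cf. libavcodec/sbrdsp.c):
+ *
+ *     for (i = 0; i < 64; i++) {
+ *         v[i]       = src0[i] - src1[63 - i];
+ *         v[127 - i] = src0[i] + src1[63 - i];
+ *     }
+ */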
+
+static void sbr_autocorrelate_mips(const float x[40][2], float phi[3][2][2])
+{
+ int i;
+ float real_sum_0 = 0.0f;
+ float real_sum_1 = 0.0f;
+ float real_sum_2 = 0.0f;
+ float imag_sum_1 = 0.0f;
+ float imag_sum_2 = 0.0f;
+ float *p_x, *p_phi;
+ float temp0, temp1, temp2, temp3, temp4, temp5, temp6;
+ float temp7, temp_r, temp_r1, temp_r2, temp_r3, temp_r4;
+ p_x = (float*)&x[0][0];
+ p_phi = &phi[0][0][0];
+
+ __asm__ volatile (
+ "lwc1 %[temp0], 8(%[p_x]) \n\t"
+ "lwc1 %[temp1], 12(%[p_x]) \n\t"
+ "lwc1 %[temp2], 16(%[p_x]) \n\t"
+ "lwc1 %[temp3], 20(%[p_x]) \n\t"
+ "lwc1 %[temp4], 24(%[p_x]) \n\t"
+ "lwc1 %[temp5], 28(%[p_x]) \n\t"
+ "mul.s %[temp_r], %[temp1], %[temp1] \n\t"
+ "mul.s %[temp_r1], %[temp1], %[temp3] \n\t"
+ "mul.s %[temp_r2], %[temp1], %[temp2] \n\t"
+ "mul.s %[temp_r3], %[temp1], %[temp5] \n\t"
+ "mul.s %[temp_r4], %[temp1], %[temp4] \n\t"
+ "madd.s %[temp_r], %[temp_r], %[temp0], %[temp0] \n\t"
+ "madd.s %[temp_r1], %[temp_r1], %[temp0], %[temp2] \n\t"
+ "msub.s %[temp_r2], %[temp_r2], %[temp0], %[temp3] \n\t"
+ "madd.s %[temp_r3], %[temp_r3], %[temp0], %[temp4] \n\t"
+ "msub.s %[temp_r4], %[temp_r4], %[temp0], %[temp5] \n\t"
+ "add.s %[real_sum_0], %[real_sum_0], %[temp_r] \n\t"
+ "add.s %[real_sum_1], %[real_sum_1], %[temp_r1] \n\t"
+ "add.s %[imag_sum_1], %[imag_sum_1], %[temp_r2] \n\t"
+ "add.s %[real_sum_2], %[real_sum_2], %[temp_r3] \n\t"
+ "add.s %[imag_sum_2], %[imag_sum_2], %[temp_r4] \n\t"
+ "addiu %[p_x], %[p_x], 8 \n\t"
+
+ : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
+ [temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
+ [real_sum_0]"+f"(real_sum_0), [real_sum_1]"+f"(real_sum_1),
+ [imag_sum_1]"+f"(imag_sum_1), [real_sum_2]"+f"(real_sum_2),
+ [temp_r]"=&f"(temp_r), [temp_r1]"=&f"(temp_r1), [temp_r2]"=&f"(temp_r2),
+ [temp_r3]"=&f"(temp_r3), [temp_r4]"=&f"(temp_r4),
+ [p_x]"+r"(p_x), [imag_sum_2]"+f"(imag_sum_2)
+ :
+ : "memory"
+ );
+
+ for (i = 0; i < 12; i++) {
+ __asm__ volatile (
+ "lwc1 %[temp0], 8(%[p_x]) \n\t"
+ "lwc1 %[temp1], 12(%[p_x]) \n\t"
+ "lwc1 %[temp2], 16(%[p_x]) \n\t"
+ "lwc1 %[temp3], 20(%[p_x]) \n\t"
+ "lwc1 %[temp4], 24(%[p_x]) \n\t"
+ "lwc1 %[temp5], 28(%[p_x]) \n\t"
+ "mul.s %[temp_r], %[temp1], %[temp1] \n\t"
+ "mul.s %[temp_r1], %[temp1], %[temp3] \n\t"
+ "mul.s %[temp_r2], %[temp1], %[temp2] \n\t"
+ "mul.s %[temp_r3], %[temp1], %[temp5] \n\t"
+ "mul.s %[temp_r4], %[temp1], %[temp4] \n\t"
+ "madd.s %[temp_r], %[temp_r], %[temp0], %[temp0] \n\t"
+ "madd.s %[temp_r1], %[temp_r1], %[temp0], %[temp2] \n\t"
+ "msub.s %[temp_r2], %[temp_r2], %[temp0], %[temp3] \n\t"
+ "madd.s %[temp_r3], %[temp_r3], %[temp0], %[temp4] \n\t"
+ "msub.s %[temp_r4], %[temp_r4], %[temp0], %[temp5] \n\t"
+ "add.s %[real_sum_0], %[real_sum_0], %[temp_r] \n\t"
+ "add.s %[real_sum_1], %[real_sum_1], %[temp_r1] \n\t"
+ "add.s %[imag_sum_1], %[imag_sum_1], %[temp_r2] \n\t"
+ "add.s %[real_sum_2], %[real_sum_2], %[temp_r3] \n\t"
+ "add.s %[imag_sum_2], %[imag_sum_2], %[temp_r4] \n\t"
+ "lwc1 %[temp0], 32(%[p_x]) \n\t"
+ "lwc1 %[temp1], 36(%[p_x]) \n\t"
+ "mul.s %[temp_r], %[temp3], %[temp3] \n\t"
+ "mul.s %[temp_r1], %[temp3], %[temp5] \n\t"
+ "mul.s %[temp_r2], %[temp3], %[temp4] \n\t"
+ "mul.s %[temp_r3], %[temp3], %[temp1] \n\t"
+ "mul.s %[temp_r4], %[temp3], %[temp0] \n\t"
+ "madd.s %[temp_r], %[temp_r], %[temp2], %[temp2] \n\t"
+ "madd.s %[temp_r1], %[temp_r1], %[temp2], %[temp4] \n\t"
+ "msub.s %[temp_r2], %[temp_r2], %[temp2], %[temp5] \n\t"
+ "madd.s %[temp_r3], %[temp_r3], %[temp2], %[temp0] \n\t"
+ "msub.s %[temp_r4], %[temp_r4], %[temp2], %[temp1] \n\t"
+ "add.s %[real_sum_0], %[real_sum_0], %[temp_r] \n\t"
+ "add.s %[real_sum_1], %[real_sum_1], %[temp_r1] \n\t"
+ "add.s %[imag_sum_1], %[imag_sum_1], %[temp_r2] \n\t"
+ "add.s %[real_sum_2], %[real_sum_2], %[temp_r3] \n\t"
+ "add.s %[imag_sum_2], %[imag_sum_2], %[temp_r4] \n\t"
+ "lwc1 %[temp2], 40(%[p_x]) \n\t"
+ "lwc1 %[temp3], 44(%[p_x]) \n\t"
+ "mul.s %[temp_r], %[temp5], %[temp5] \n\t"
+ "mul.s %[temp_r1], %[temp5], %[temp1] \n\t"
+ "mul.s %[temp_r2], %[temp5], %[temp0] \n\t"
+ "mul.s %[temp_r3], %[temp5], %[temp3] \n\t"
+ "mul.s %[temp_r4], %[temp5], %[temp2] \n\t"
+ "madd.s %[temp_r], %[temp_r], %[temp4], %[temp4] \n\t"
+ "madd.s %[temp_r1], %[temp_r1], %[temp4], %[temp0] \n\t"
+ "msub.s %[temp_r2], %[temp_r2], %[temp4], %[temp1] \n\t"
+ "madd.s %[temp_r3], %[temp_r3], %[temp4], %[temp2] \n\t"
+ "msub.s %[temp_r4], %[temp_r4], %[temp4], %[temp3] \n\t"
+ "add.s %[real_sum_0], %[real_sum_0], %[temp_r] \n\t"
+ "add.s %[real_sum_1], %[real_sum_1], %[temp_r1] \n\t"
+ "add.s %[imag_sum_1], %[imag_sum_1], %[temp_r2] \n\t"
+ "add.s %[real_sum_2], %[real_sum_2], %[temp_r3] \n\t"
+ "add.s %[imag_sum_2], %[imag_sum_2], %[temp_r4] \n\t"
+ "addiu %[p_x], %[p_x], 24 \n\t"
+
+ : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
+ [temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
+ [real_sum_0]"+f"(real_sum_0), [real_sum_1]"+f"(real_sum_1),
+ [imag_sum_1]"+f"(imag_sum_1), [real_sum_2]"+f"(real_sum_2),
+ [temp_r]"=&f"(temp_r), [temp_r1]"=&f"(temp_r1),
+ [temp_r2]"=&f"(temp_r2), [temp_r3]"=&f"(temp_r3),
+ [temp_r4]"=&f"(temp_r4), [p_x]"+r"(p_x),
+ [imag_sum_2]"+f"(imag_sum_2)
+ :
+ : "memory"
+ );
+ }
+ __asm__ volatile (
+ "lwc1 %[temp0], -296(%[p_x]) \n\t"
+ "lwc1 %[temp1], -292(%[p_x]) \n\t"
+ "lwc1 %[temp2], 8(%[p_x]) \n\t"
+ "lwc1 %[temp3], 12(%[p_x]) \n\t"
+ "lwc1 %[temp4], -288(%[p_x]) \n\t"
+ "lwc1 %[temp5], -284(%[p_x]) \n\t"
+ "lwc1 %[temp6], -280(%[p_x]) \n\t"
+ "lwc1 %[temp7], -276(%[p_x]) \n\t"
+ "madd.s %[temp_r], %[real_sum_0], %[temp0], %[temp0] \n\t"
+ "madd.s %[temp_r1], %[real_sum_0], %[temp2], %[temp2] \n\t"
+ "madd.s %[temp_r2], %[real_sum_1], %[temp0], %[temp4] \n\t"
+ "madd.s %[temp_r3], %[imag_sum_1], %[temp0], %[temp5] \n\t"
+ "madd.s %[temp_r], %[temp_r], %[temp1], %[temp1] \n\t"
+ "madd.s %[temp_r1], %[temp_r1], %[temp3], %[temp3] \n\t"
+ "madd.s %[temp_r2], %[temp_r2], %[temp1], %[temp5] \n\t"
+ "nmsub.s %[temp_r3], %[temp_r3], %[temp1], %[temp4] \n\t"
+ "lwc1 %[temp4], 16(%[p_x]) \n\t"
+ "lwc1 %[temp5], 20(%[p_x]) \n\t"
+ "swc1 %[temp_r], 40(%[p_phi]) \n\t"
+ "swc1 %[temp_r1], 16(%[p_phi]) \n\t"
+ "swc1 %[temp_r2], 24(%[p_phi]) \n\t"
+ "swc1 %[temp_r3], 28(%[p_phi]) \n\t"
+ "madd.s %[temp_r], %[real_sum_1], %[temp2], %[temp4] \n\t"
+ "madd.s %[temp_r1], %[imag_sum_1], %[temp2], %[temp5] \n\t"
+ "madd.s %[temp_r2], %[real_sum_2], %[temp0], %[temp6] \n\t"
+ "madd.s %[temp_r3], %[imag_sum_2], %[temp0], %[temp7] \n\t"
+ "madd.s %[temp_r], %[temp_r], %[temp3], %[temp5] \n\t"
+ "nmsub.s %[temp_r1], %[temp_r1], %[temp3], %[temp4] \n\t"
+ "madd.s %[temp_r2], %[temp_r2], %[temp1], %[temp7] \n\t"
+ "nmsub.s %[temp_r3], %[temp_r3], %[temp1], %[temp6] \n\t"
+ "swc1 %[temp_r], 0(%[p_phi]) \n\t"
+ "swc1 %[temp_r1], 4(%[p_phi]) \n\t"
+ "swc1 %[temp_r2], 8(%[p_phi]) \n\t"
+ "swc1 %[temp_r3], 12(%[p_phi]) \n\t"
+
+ : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
+ [temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
+ [temp6]"=&f"(temp6), [temp7]"=&f"(temp7), [temp_r]"=&f"(temp_r),
+ [real_sum_0]"+f"(real_sum_0), [real_sum_1]"+f"(real_sum_1),
+ [real_sum_2]"+f"(real_sum_2), [imag_sum_1]"+f"(imag_sum_1),
+ [temp_r2]"=&f"(temp_r2), [temp_r3]"=&f"(temp_r3),
+ [temp_r1]"=&f"(temp_r1), [p_phi]"+r"(p_phi),
+ [imag_sum_2]"+f"(imag_sum_2)
+ : [p_x]"r"(p_x)
+ : "memory"
+ );
+}
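+
+/* The three asm blocks above accumulate, in a single pass over
+ * x[1]..x[37], the complex autocorrelation sums for lags 0, 1 and 2 that
+ * the generic code in libavcodec/sbrdsp.c computes in one pass per lag:
+ *
+ *     real_sum[lag] += x[i][0] * x[i+lag][0] + x[i][1] * x[i+lag][1];
+ *     imag_sum[lag] += x[i][0] * x[i+lag][1] - x[i][1] * x[i+lag][0];
+ *
+ * The final block folds in the boundary terms involving x[0], x[38] and
+ * x[39] and stores the results into phi. */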
+
+static void sbr_hf_gen_mips(float (*X_high)[2], const float (*X_low)[2],
+ const float alpha0[2], const float alpha1[2],
+ float bw, int start, int end)
+{
+ float alpha[4];
+ int i;
+ float *p_x_low = (float*)&X_low[0][0] + 2*start;
+ float *p_x_high = &X_high[0][0] + 2*start;
+    float temp0, temp1, temp2, temp3, temp5, temp6;
+ float temp7, temp8, temp9, temp10, temp11, temp12;
+
+ alpha[0] = alpha1[0] * bw * bw;
+ alpha[1] = alpha1[1] * bw * bw;
+ alpha[2] = alpha0[0] * bw;
+ alpha[3] = alpha0[1] * bw;
+
+ for (i = start; i < end; i++) {
+ __asm__ volatile (
+ "lwc1 %[temp0], -16(%[p_x_low]) \n\t"
+ "lwc1 %[temp1], -12(%[p_x_low]) \n\t"
+ "lwc1 %[temp2], -8(%[p_x_low]) \n\t"
+ "lwc1 %[temp3], -4(%[p_x_low]) \n\t"
+ "lwc1 %[temp5], 0(%[p_x_low]) \n\t"
+ "lwc1 %[temp6], 4(%[p_x_low]) \n\t"
+ "lwc1 %[temp7], 0(%[alpha]) \n\t"
+ "lwc1 %[temp8], 4(%[alpha]) \n\t"
+ "lwc1 %[temp9], 8(%[alpha]) \n\t"
+ "lwc1 %[temp10], 12(%[alpha]) \n\t"
+ "addiu %[p_x_high], %[p_x_high], 8 \n\t"
+ "addiu %[p_x_low], %[p_x_low], 8 \n\t"
+ "mul.s %[temp11], %[temp1], %[temp8] \n\t"
+ "msub.s %[temp11], %[temp11], %[temp0], %[temp7] \n\t"
+ "madd.s %[temp11], %[temp11], %[temp2], %[temp9] \n\t"
+ "nmsub.s %[temp11], %[temp11], %[temp3], %[temp10] \n\t"
+ "add.s %[temp11], %[temp11], %[temp5] \n\t"
+ "swc1 %[temp11], -8(%[p_x_high]) \n\t"
+ "mul.s %[temp12], %[temp1], %[temp7] \n\t"
+ "madd.s %[temp12], %[temp12], %[temp0], %[temp8] \n\t"
+ "madd.s %[temp12], %[temp12], %[temp3], %[temp9] \n\t"
+ "madd.s %[temp12], %[temp12], %[temp2], %[temp10] \n\t"
+ "add.s %[temp12], %[temp12], %[temp6] \n\t"
+ "swc1 %[temp12], -4(%[p_x_high]) \n\t"
+
+ : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
+              [temp3]"=&f"(temp3), [temp5]"=&f"(temp5), [temp6]"=&f"(temp6),
+              [temp7]"=&f"(temp7), [temp8]"=&f"(temp8), [temp9]"=&f"(temp9),
+              [temp10]"=&f"(temp10), [temp11]"=&f"(temp11),
+ [temp12]"=&f"(temp12), [p_x_high]"+r"(p_x_high),
+ [p_x_low]"+r"(p_x_low)
+ : [alpha]"r"(alpha)
+ : "memory"
+ );
+ }
+}
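+
+/* Equivalent plain C for the two-tap complex prediction above
+ * (cf. libavcodec/sbrdsp.c), using the bw-scaled alpha[] set up before
+ * the loop:
+ *
+ *     X_high[i][0] = X_low[i-2][0] * alpha[0] - X_low[i-2][1] * alpha[1]
+ *                  + X_low[i-1][0] * alpha[2] - X_low[i-1][1] * alpha[3]
+ *                  + X_low[i][0];
+ *     X_high[i][1] = X_low[i-2][1] * alpha[0] + X_low[i-2][0] * alpha[1]
+ *                  + X_low[i-1][1] * alpha[2] + X_low[i-1][0] * alpha[3]
+ *                  + X_low[i][1];
+ */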
+
+static void sbr_hf_g_filt_mips(float (*Y)[2], const float (*X_high)[40][2],
+ const float *g_filt, int m_max, intptr_t ixh)
+{
+ float *p_y, *p_x, *p_g;
+ float temp0, temp1, temp2;
+ int loop_end;
+
+ p_g = (float*)&g_filt[0];
+ p_y = &Y[0][0];
+ p_x = (float*)&X_high[0][ixh][0];
+    loop_end = (int)((int *)p_g + m_max);
+
+ __asm__ volatile(
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "1: \n\t"
+ "lwc1 %[temp0], 0(%[p_g]) \n\t"
+ "lwc1 %[temp1], 0(%[p_x]) \n\t"
+ "lwc1 %[temp2], 4(%[p_x]) \n\t"
+ "mul.s %[temp1], %[temp1], %[temp0] \n\t"
+ "mul.s %[temp2], %[temp2], %[temp0] \n\t"
+ "addiu %[p_g], %[p_g], 4 \n\t"
+ "addiu %[p_x], %[p_x], 320 \n\t"
+ "swc1 %[temp1], 0(%[p_y]) \n\t"
+ "swc1 %[temp2], 4(%[p_y]) \n\t"
+ "bne %[p_g], %[loop_end], 1b \n\t"
+ " addiu %[p_y], %[p_y], 8 \n\t"
+ ".set pop \n\t"
+
+ : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1),
+ [temp2]"=&f"(temp2), [p_x]"+r"(p_x),
+ [p_y]"+r"(p_y), [p_g]"+r"(p_g)
+ : [loop_end]"r"(loop_end)
+ : "memory"
+ );
+}
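+
+/* Roughly equivalent plain C (cf. libavcodec/sbrdsp.c); the 320-byte
+ * stride on p_x steps X_high by one m at a fixed ixh. Note that the
+ * loop bound is carried in a 32-bit int, which assumes 32-bit pointers:
+ *
+ *     for (m = 0; m < m_max; m++) {
+ *         Y[m][0] = X_high[m][ixh][0] * g_filt[m];
+ *         Y[m][1] = X_high[m][ixh][1] * g_filt[m];
+ *     }
+ */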
+
+static void sbr_hf_apply_noise_0_mips(float (*Y)[2], const float *s_m,
+ const float *q_filt, int noise,
+ int kx, int m_max)
+{
+ int m;
+
+    for (m = 0; m < m_max; m++) {
+
+        float *Y1 = &Y[m][0];
+        float *ff_table;
+        float y0, y1, temp1, temp2, temp4, temp5;
+        int temp0, temp3;
+        const float *s_m1 = &s_m[m];
+        const float *q_filt1 = &q_filt[m];
+
+ __asm__ volatile(
+ "lwc1 %[y0], 0(%[Y1]) \n\t"
+ "lwc1 %[temp1], 0(%[s_m1]) \n\t"
+ "addiu %[noise], %[noise], 1 \n\t"
+ "andi %[noise], %[noise], 0x1ff \n\t"
+ "sll %[temp0], %[noise], 3 \n\t"
+ "addu %[ff_table], %[ff_sbr_noise_table], %[temp0] \n\t"
+ "add.s %[y0], %[y0], %[temp1] \n\t"
+ "mfc1 %[temp3], %[temp1] \n\t"
+ "bne %[temp3], $0, 1f \n\t"
+ "lwc1 %[y1], 4(%[Y1]) \n\t"
+ "lwc1 %[temp2], 0(%[q_filt1]) \n\t"
+ "lwc1 %[temp4], 0(%[ff_table]) \n\t"
+ "lwc1 %[temp5], 4(%[ff_table]) \n\t"
+ "madd.s %[y0], %[y0], %[temp2], %[temp4] \n\t"
+ "madd.s %[y1], %[y1], %[temp2], %[temp5] \n\t"
+ "swc1 %[y1], 4(%[Y1]) \n\t"
+ "1: \n\t"
+ "swc1 %[y0], 0(%[Y1]) \n\t"
+
+ : [ff_table]"=&r"(ff_table), [y0]"=&f"(y0), [y1]"=&f"(y1),
+ [temp0]"=&r"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
+ [temp3]"=&r"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5)
+ : [ff_sbr_noise_table]"r"(ff_sbr_noise_table), [noise]"r"(noise),
+ [Y1]"r"(Y1), [s_m1]"r"(s_m1), [q_filt1]"r"(q_filt1)
+ : "memory"
+ );
+ }
+}
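+
+/* sbr_hf_apply_noise_0_mips above and the three variants that follow all
+ * implement the same pattern as the generic helper in
+ * libavcodec/sbrdsp.c; for variant 0 it reduces to roughly:
+ *
+ *     for (m = 0; m < m_max; m++) {
+ *         noise = (noise + 1) & 0x1ff;
+ *         if (s_m[m]) {
+ *             Y[m][0] += s_m[m];
+ *         } else {
+ *             Y[m][0] += q_filt[m] * ff_sbr_noise_table[noise][0];
+ *             Y[m][1] += q_filt[m] * ff_sbr_noise_table[noise][1];
+ *         }
+ *     }
+ *
+ * The asm adds s_m[m] to Y[m][0] unconditionally (adding 0.0f is
+ * harmless) and branches over the noise injection when the bit pattern
+ * of s_m[m] is non-zero. Note that [noise] is written inside the asm
+ * although it is only declared as an input operand; a "+r" read-write
+ * constraint would express this more safely. */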
+
+static void sbr_hf_apply_noise_1_mips(float (*Y)[2], const float *s_m,
+ const float *q_filt, int noise,
+ int kx, int m_max)
+{
+    float y0, y1, temp1, temp2, temp4, temp5;
+ int temp0, temp3, m;
+ float phi_sign = 1 - 2 * (kx & 1);
+
+ for (m = 0; m < m_max; m++) {
+
+ float *ff_table;
+        float *Y1 = &Y[m][0];
+        const float *s_m1 = &s_m[m];
+        const float *q_filt1 = &q_filt[m];
+
+ __asm__ volatile(
+ "lwc1 %[y1], 4(%[Y1]) \n\t"
+ "lwc1 %[temp1], 0(%[s_m1]) \n\t"
+ "lw %[temp3], 0(%[s_m1]) \n\t"
+ "addiu %[noise], %[noise], 1 \n\t"
+ "andi %[noise], %[noise], 0x1ff \n\t"
+ "sll %[temp0], %[noise], 3 \n\t"
+ "addu %[ff_table], %[ff_sbr_noise_table], %[temp0] \n\t"
+ "madd.s %[y1], %[y1], %[temp1], %[phi_sign] \n\t"
+ "bne %[temp3], $0, 1f \n\t"
+ "lwc1 %[y0], 0(%[Y1]) \n\t"
+ "lwc1 %[temp2], 0(%[q_filt1]) \n\t"
+ "lwc1 %[temp4], 0(%[ff_table]) \n\t"
+ "lwc1 %[temp5], 4(%[ff_table]) \n\t"
+ "madd.s %[y0], %[y0], %[temp2], %[temp4] \n\t"
+ "madd.s %[y1], %[y1], %[temp2], %[temp5] \n\t"
+ "swc1 %[y0], 0(%[Y1]) \n\t"
+ "1: \n\t"
+ "swc1 %[y1], 4(%[Y1]) \n\t"
+
+ : [ff_table] "=&r" (ff_table), [y0] "=&f" (y0), [y1] "=&f" (y1),
+ [temp0] "=&r" (temp0), [temp1] "=&f" (temp1), [temp2] "=&f" (temp2),
+ [temp3] "=&r" (temp3), [temp4] "=&f" (temp4), [temp5] "=&f" (temp5)
+ : [ff_sbr_noise_table] "r" (ff_sbr_noise_table), [noise] "r" (noise),
+ [Y1] "r" (Y1), [s_m1] "r" (s_m1), [q_filt1] "r" (q_filt1),
+ [phi_sign] "f" (phi_sign)
+ : "memory"
+ );
+ phi_sign = -phi_sign;
+ }
+}
+
+static void sbr_hf_apply_noise_2_mips(float (*Y)[2], const float *s_m,
+ const float *q_filt, int noise,
+ int kx, int m_max)
+{
+    int m, temp0, temp3;
+    float *ff_table;
+    float y0, y1, temp1, temp2, temp4, temp5;
+
+ for (m = 0; m < m_max; m++) {
+
+        float *Y1 = &Y[m][0];
+        const float *s_m1 = &s_m[m];
+        const float *q_filt1 = &q_filt[m];
+
+ __asm__ volatile(
+ "lwc1 %[y0], 0(%[Y1]) \n\t"
+ "lwc1 %[temp1], 0(%[s_m1]) \n\t"
+ "addiu %[noise], %[noise], 1 \n\t"
+ "andi %[noise], %[noise], 0x1ff \n\t"
+ "sll %[temp0], %[noise], 3 \n\t"
+ "addu %[ff_table], %[ff_sbr_noise_table], %[temp0] \n\t"
+ "sub.s %[y0], %[y0], %[temp1] \n\t"
+ "mfc1 %[temp3], %[temp1] \n\t"
+ "bne %[temp3], $0, 1f \n\t"
+ "lwc1 %[y1], 4(%[Y1]) \n\t"
+ "lwc1 %[temp2], 0(%[q_filt1]) \n\t"
+ "lwc1 %[temp4], 0(%[ff_table]) \n\t"
+ "lwc1 %[temp5], 4(%[ff_table]) \n\t"
+ "madd.s %[y0], %[y0], %[temp2], %[temp4] \n\t"
+ "madd.s %[y1], %[y1], %[temp2], %[temp5] \n\t"
+ "swc1 %[y1], 4(%[Y1]) \n\t"
+ "1: \n\t"
+ "swc1 %[y0], 0(%[Y1]) \n\t"
+
+ : [temp0]"=&r"(temp0), [ff_table]"=&r"(ff_table), [y0]"=&f"(y0),
+ [y1]"=&f"(y1), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
+ [temp3]"=&r"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5)
+ : [ff_sbr_noise_table]"r"(ff_sbr_noise_table), [noise]"r"(noise),
+ [Y1]"r"(Y1), [s_m1]"r"(s_m1), [q_filt1]"r"(q_filt1)
+ : "memory"
+ );
+ }
+}
+
+static void sbr_hf_apply_noise_3_mips(float (*Y)[2], const float *s_m,
+ const float *q_filt, int noise,
+ int kx, int m_max)
+{
+ float phi_sign = 1 - 2 * (kx & 1);
+ int m;
+
+ for (m = 0; m < m_max; m++) {
+
+        float *Y1 = &Y[m][0];
+        float *ff_table;
+        float y0, y1, temp1, temp2, temp4, temp5;
+        int temp0, temp3;
+        const float *s_m1 = &s_m[m];
+        const float *q_filt1 = &q_filt[m];
+
+ __asm__ volatile(
+ "lwc1 %[y1], 4(%[Y1]) \n\t"
+ "lwc1 %[temp1], 0(%[s_m1]) \n\t"
+ "addiu %[noise], %[noise], 1 \n\t"
+ "andi %[noise], %[noise], 0x1ff \n\t"
+ "sll %[temp0], %[noise], 3 \n\t"
+ "addu %[ff_table], %[ff_sbr_noise_table], %[temp0] \n\t"
+ "nmsub.s %[y1], %[y1], %[temp1], %[phi_sign] \n\t"
+ "mfc1 %[temp3], %[temp1] \n\t"
+ "bne %[temp3], $0, 1f \n\t"
+ "lwc1 %[y0], 0(%[Y1]) \n\t"
+ "lwc1 %[temp2], 0(%[q_filt1]) \n\t"
+ "lwc1 %[temp4], 0(%[ff_table]) \n\t"
+ "lwc1 %[temp5], 4(%[ff_table]) \n\t"
+ "madd.s %[y0], %[y0], %[temp2], %[temp4] \n\t"
+ "madd.s %[y1], %[y1], %[temp2], %[temp5] \n\t"
+ "swc1 %[y0], 0(%[Y1]) \n\t"
+ "1: \n\t"
+ "swc1 %[y1], 4(%[Y1]) \n\t"
+
+ : [ff_table]"=&r"(ff_table), [y0]"=&f"(y0), [y1]"=&f"(y1),
+ [temp0]"=&r"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
+ [temp3]"=&r"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5)
+ : [ff_sbr_noise_table]"r"(ff_sbr_noise_table), [noise]"r"(noise),
+ [Y1]"r"(Y1), [s_m1]"r"(s_m1), [q_filt1]"r"(q_filt1),
+ [phi_sign]"f"(phi_sign)
+ : "memory"
+ );
+ phi_sign = -phi_sign;
+ }
+}
+#endif /* HAVE_MIPSFPU */
+#endif /* HAVE_INLINE_ASM */
+
+void ff_sbrdsp_init_mips(SBRDSPContext *s)
+{
+#if HAVE_INLINE_ASM
+ s->neg_odd_64 = sbr_neg_odd_64_mips;
+ s->qmf_pre_shuffle = sbr_qmf_pre_shuffle_mips;
+ s->qmf_post_shuffle = sbr_qmf_post_shuffle_mips;
+#if HAVE_MIPSFPU
+ s->sum64x5 = sbr_sum64x5_mips;
+ s->sum_square = sbr_sum_square_mips;
+ s->qmf_deint_bfly = sbr_qmf_deint_bfly_mips;
+ s->autocorrelate = sbr_autocorrelate_mips;
+ s->hf_gen = sbr_hf_gen_mips;
+ s->hf_g_filt = sbr_hf_g_filt_mips;
+
+ s->hf_apply_noise[0] = sbr_hf_apply_noise_0_mips;
+ s->hf_apply_noise[1] = sbr_hf_apply_noise_1_mips;
+ s->hf_apply_noise[2] = sbr_hf_apply_noise_2_mips;
+ s->hf_apply_noise[3] = sbr_hf_apply_noise_3_mips;
+#endif /* HAVE_MIPSFPU */
+#endif /* HAVE_INLINE_ASM */
+}