/*
 * Copyright (C) 2010 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_ARM_VP56_ARITH_H
#define AVCODEC_ARM_VP56_ARITH_H

#if HAVE_ARMV6 && HAVE_INLINE_ASM

/*
 * ARMv6 version of vp56_rac_get_prob(): decode one bit from the range
 * coder, refilling the code word from the bitstream when required.
 * A larger pr makes a zero bit more likely.
 */
#define vp56_rac_get_prob vp56_rac_get_prob_armv6
static inline int vp56_rac_get_prob_armv6(VP56RangeCoder *c, int pr)
{
    /* Renormalise so that the top bit of the 8-bit range is set. */
    unsigned shift     = ff_vp56_norm_shift[c->high];
    unsigned code_word = c->code_word << shift;
    unsigned high      = c->high << shift;
    unsigned bit;

    __asm__ volatile ("adds    %3,  %3,  %0           \n"
                      "cmpcs   %7,  %4                \n"
                      "ldrcsh  %2,  [%4], #2          \n"
                      "rsb     %0,  %6,  #256         \n"
                      "smlabb  %0,  %5,  %6,  %0      \n"
                      "rev16cs %2,  %2                \n"
                      "orrcs   %1,  %1,  %2,  lsl %3  \n"
                      "subcs   %3,  %3,  #16          \n"
                      "lsr     %0,  %0,  #8           \n"
                      "cmp     %1,  %0,  lsl #16      \n"
                      "subge   %1,  %1,  %0,  lsl #16 \n"
                      "subge   %0,  %5,  %0           \n"
                      "movge   %2,  #1                \n"
                      "movlt   %2,  #0                \n"
                      : "=&r"(c->high), "=&r"(c->code_word), "=&r"(bit),
                        "+&r"(c->bits), "+&r"(c->buffer)
                      : "r"(high), "r"(pr), "r"(c->end - 1),
                        "0"(shift), "1"(code_word));

    return bit;
}
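
/*
 * For reference, a rough portable-C sketch of what the assembly above
 * computes.  It is kept under #if 0 so it is never built; the function
 * name is illustrative and AV_RB16() (libavutil/intreadwrite.h) stands
 * in for the ldrh + rev16 refill.
 */
#if 0
static inline int vp56_rac_get_prob_c_sketch(VP56RangeCoder *c, int pr)
{
    unsigned shift     = ff_vp56_norm_shift[c->high];
    unsigned code_word = c->code_word << shift;
    unsigned high      = c->high << shift;
    unsigned low, low_shift;
    int bit;

    /* Refill: c->bits is kept negated, so a non-negative value after the
     * renormalisation shift means 16 more bits should be pulled in. */
    c->bits += shift;
    if (c->bits >= 0 && c->buffer < c->end) {
        code_word |= AV_RB16(c->buffer) << c->bits;
        c->buffer += 2;
        c->bits   -= 16;
    }

    /* Split the range: low = 1 + (((high - 1) * pr) >> 8). */
    low       = (high * pr + 256 - pr) >> 8;
    low_shift = low << 16;

    bit          = code_word >= low_shift;
    c->high      = bit ? high - low            : low;
    c->code_word = bit ? code_word - low_shift : code_word;

    return bit;
}
#endif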

/*
 * Branchy variant: the assembly performs the refill and computes the
 * split point, while the final compare and state update are left in C
 * so the compiler can branch directly on the decoded bit.
 */
#define vp56_rac_get_prob_branchy vp56_rac_get_prob_branchy_armv6
static inline int vp56_rac_get_prob_branchy_armv6(VP56RangeCoder *c, int pr)
{
    unsigned shift     = ff_vp56_norm_shift[c->high];
    unsigned code_word = c->code_word << shift;
    unsigned high      = c->high << shift;
    unsigned low;
    unsigned tmp;

    __asm__ volatile ("adds    %3,  %3,  %0           \n"
                      "cmpcs   %7,  %4                \n"
                      "ldrcsh  %2,  [%4], #2          \n"
                      "rsb     %0,  %6,  #256         \n"
                      "smlabb  %0,  %5,  %6,  %0      \n"
                      "rev16cs %2,  %2                \n"
                      "orrcs   %1,  %1,  %2,  lsl %3  \n"
                      "subcs   %3,  %3,  #16          \n"
                      "lsr     %0,  %0,  #8           \n"
                      "lsl     %2,  %0,  #16          \n"
                      : "=&r"(low), "+&r"(code_word), "=&r"(tmp),
                        "+&r"(c->bits), "+&r"(c->buffer)
                      : "r"(high), "r"(pr), "r"(c->end - 1), "0"(shift));

    if (code_word >= tmp) {
        c->high      = high - low;
        c->code_word = code_word - tmp;
        return 1;
    }

    c->high      = low;
    c->code_word = code_word;
    return 0;
}

#endif /* HAVE_ARMV6 && HAVE_INLINE_ASM */

#endif /* AVCODEC_ARM_VP56_ARITH_H */