/*Daala video codec
Copyright (c) 2001-2013 Daala project contributors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

- Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

- Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.*/

#if !defined(_entcode_H)
#define _entcode_H (1)
#include <limits.h>
#include <stddef.h>
#include "av1/common/odintrin.h"

/*Set this flag to 1 to enable a "reduced overhead" version of the entropy
  coder.
  This uses a partition function that more accurately follows the input
  probability estimates at the expense of some additional CPU cost (though
  still an order of magnitude less than a full division).

  In classic arithmetic coding, the partition function maps a value x in the
  range [0, ft] to a value y in [0, r] with 0 < ft <= r via
    y = x*r/ft.
  Any deviation from this value increases coding inefficiency.

  To avoid divisions, we require ft <= r < 2*ft (enforcing it by shifting up
  ft if necessary), and replace that function with
    y = x + OD_MINI(x, r - ft).
  This counts values of x smaller than r - ft double compared to values larger
  than r - ft, which over-estimates the probability of symbols at the start of
  the alphabet, and under-estimates the probability of symbols at the end of
  the alphabet.
  The overall coding inefficiency assuming accurate probability models and
  independent symbols is in the 1% range, which is similar to that of CABAC.
  (An illustrative sketch of both of these partition functions follows this
  definition.)

  To reduce overhead even further, we split this into two cases:
  1) r - ft > ft - (r - ft).
     That is, we have more values of x that are double-counted than
     single-counted.
     In this case, we still double-count the first 2*r - 3*ft values of x, but
     after that we alternate between single-counting and double-counting for
     the rest.
  2) r - ft < ft - (r - ft).
     That is, we have more values of x that are single-counted than
     double-counted.
     In this case, we alternate between single-counting and double-counting
     for the first 2*(r - ft) values of x, and single-count the rest.
  For two equiprobable symbols in different places in the alphabet, this
  reduces the maximum ratio of over-estimation to under-estimation from 2:1
  for the previous partition function to either 4:3 or 3:2 (for each of the
  two cases above, respectively), assuming symbol probabilities significantly
  greater than 1/32768.
  That reduces the worst-case per-symbol overhead from 1 bit to 0.58 bits.

  The resulting function is
    e = OD_MAXI(2*r - 3*ft, 0);
    y = x + OD_MINI(x, e) + OD_MINI(OD_MAXI(x - e, 0) >> 1, r - ft).
  Here, e is a value that is greater than 0 in case 1, and 0 in case 2.
  This function is about 3 times as expensive to evaluate as the high-overhead
  version, but still an order of magnitude cheaper than a division, since it
  is composed only of very simple operations.
  Because we want to fit in 16-bit registers and must use unsigned values to
  do so, we use saturating subtraction to enforce the maximums with 0 (see the
  sketch following OD_SUBSATU below).

  Enabling this reduces the measured overhead in ectest from 0.805% to 0.621%
  (vs. 0.022% for the division-based partition function with r much greater
  than ft).
  It improves performance on ntt-short-1 by about 0.3%.*/
#define OD_EC_REDUCED_OVERHEAD (1)
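
/*Illustrative sketch only (not part of the coder; the helper names below are
  ours): a direct transcription of the two partition functions discussed
  above, assuming 0 < ft <= r < 2*ft and 0 <= x <= ft.*/
static inline uint32_t od_ec_partition_div_example(uint32_t x, uint32_t r,
                                                   uint32_t ft) {
  /*Classic arithmetic coding: exact scaling of [0, ft] onto [0, r], at the
    cost of a division.*/
  return (uint32_t)((uint64_t)x * r / ft);
}

static inline uint32_t od_ec_partition_nodiv_example(uint32_t x, uint32_t r,
                                                     uint32_t ft) {
  /*Division-free approximation: values of x below r - ft are counted twice,
    the rest once, so x == ft still maps exactly to r.*/
  return x + OD_MINI(x, r - ft);
}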

/*OPT: od_ec_window must be at least 32 bits, but if you have fast arithmetic
  on a larger type, you can speed up the decoder by using it here.*/
typedef uint32_t od_ec_window;

#define OD_EC_WINDOW_SIZE ((int)sizeof(od_ec_window) * CHAR_BIT)
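
/*For example, on a 64-bit target one could try
    typedef uint64_t od_ec_window;
  instead; OD_EC_WINDOW_SIZE above adapts automatically via sizeof.*/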

/*Unsigned subtraction with unsigned saturation.
  This implementation of the macro is intentionally chosen to increase the
  number of common subexpressions in the reduced-overhead partition function.
  This matters for C code, but it would not for hardware with a saturating
  subtraction instruction.*/
#define OD_SUBSATU(a, b) ((a)-OD_MINI(a, b))
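
/*Illustrative sketch only (not part of the coder; the helper name is ours):
  the reduced-overhead partition function documented above (see
  OD_EC_REDUCED_OVERHEAD), written with OD_SUBSATU so that the
  OD_MAXI(..., 0) terms become saturating subtractions, as they must be on
  unsigned values.
  Assumes 0 < ft <= r < 2*ft and 0 <= x <= ft, with r small enough (e.g., the
  coder's 16-bit ranges) that 2*(r - ft) cannot overflow.*/
static inline uint32_t od_ec_partition_reduced_example(uint32_t x, uint32_t r,
                                                       uint32_t ft) {
  uint32_t d;
  uint32_t e;
  /*d is the total number of values of x that can be double-counted.*/
  d = r - ft;
  /*e = max(2*r - 3*ft, 0) = max(2*d - ft, 0): the count of values that are
    unconditionally double-counted (positive in case 1, zero in case 2).*/
  e = OD_SUBSATU(2*d, ft);
  /*Double-count the first e values of x, then alternate single- and
    double-counting, never handing out more than d extra counts in total.*/
  return x + OD_MINI(x, e) + OD_MINI(OD_SUBSATU(x, e) >> 1, d);
}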

/*The number of bits to use for the range-coded part of unsigned integers.*/
#define OD_EC_UINT_BITS (4)

/*The resolution of fractional-precision bit usage measurements, i.e.,
  3 => 1/8th bits.*/
#define OD_BITRES (3)

extern const uint16_t OD_UNIFORM_CDFS_Q15[135];

/*Returns a Q15 CDF for a uniform probability distribution of the given size.
  n: The size of the distribution.
     This must be at least 2, and no more than 16.*/
#define OD_UNIFORM_CDF_Q15(n) (OD_UNIFORM_CDFS_Q15 + ((n) * ((n)-1) >> 1) - 1)
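
/*For illustration: the uniform CDFs are stored back to back in
  OD_UNIFORM_CDFS_Q15, with the n-entry CDF for a size-n alphabet starting at
  index n*(n - 1)/2 - 1 (0 for n == 2, 2 for n == 3, 5 for n == 4, ..., 119
  for n == 16, giving the 135 entries declared above).
  For example, OD_UNIFORM_CDF_Q15(4) expands to OD_UNIFORM_CDFS_Q15 + 5, the
  start of the 4-entry CDF whose final value is 32768.*/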

/*See entcode.c for further documentation.*/

OD_WARN_UNUSED_RESULT uint32_t od_ec_tell_frac(uint32_t nbits_total,
                                               uint32_t rng);
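
/*For illustration (the helper name is ours, not part of the API):
  od_ec_tell_frac() reports bit usage in units of 1/8th of a bit (see
  OD_BITRES above), so a caller could round up to whole bits like this.*/
static inline uint32_t od_ec_frac_to_bits_example(uint32_t tell_frac) {
  /*Adding (1 << OD_BITRES) - 1 rounds any fractional remainder up.*/
  return (tell_frac + ((1 << OD_BITRES) - 1)) >> OD_BITRES;
}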

#endif