Bit accounting.
This patch adds bit accounting infrastructure to the bit reader API.
When configured with --enable-accounting, every bit reader API
function records the number of bits needed to decode a symbol.
Accounting symbol entries are collected in a global accounting data
structure that can be used to understand exactly where bits are
spent (http://aomanalyzer.org). The data structure is cleared and
reused each frame to reduce memory usage. When configured without
--enable-accounting, bit accounting incurs no runtime overhead.
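
As a rough sketch (names here are illustrative, not the actual
accounting module added by this patch), the per-symbol bookkeeping
amounts to an append-only table that is reset once per frame:

    /* Hypothetical sketch of the accounting bookkeeping. */
    #include <stdint.h>

    #define MAX_ACCT_ENTRIES 16384 /* illustrative bound */

    typedef struct {
      const char *symbol_name; /* e.g. __func__, supplied via ACCT_STR */
      uint32_t bits;           /* bits spent decoding this symbol */
    } AccountingEntry;

    typedef struct {
      AccountingEntry entries[MAX_ACCT_ENTRIES];
      int num_entries;
    } Accounting;

    /* Called by each aom_read_xxx() when built with --enable-accounting. */
    static void accounting_record(Accounting *acct, const char *name,
                                  uint32_t bits) {
      if (acct->num_entries < MAX_ACCT_ENTRIES) {
        acct->entries[acct->num_entries].symbol_name = name;
        acct->entries[acct->num_entries].bits = bits;
        ++acct->num_entries;
      }
    }

    /* Cleared at the start of every frame to bound memory usage. */
    static void accounting_reset(Accounting *acct) { acct->num_entries = 0; }
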
All aom_read_xxx functions now have an additional string parameter
that specifies the symbol name. By default, the ACCT_STR macro is
used (which expands to __func__). For more precise accounting,
these should be replaced with more descriptive names.
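
For example (a sketch; read_skip_flag and its arguments are
hypothetical, but the aom_read() signature matches this patch), a call
site can keep the default function-name label or pass a descriptive one:

    /* Assumes aom_reader, aom_prob and aom_read() from the tree's
     * aom_dsp bit reader. */
    #define ACCT_STR __func__

    static int read_skip_flag(aom_reader *r, const aom_prob *skip_probs,
                              int ctx) {
      /* Default: accounted under the caller's name, "read_skip_flag".
       * For more precise accounting, replace ACCT_STR with a descriptive
       * literal such as "skip_flag". */
      return aom_read(r, skip_probs[ctx], ACCT_STR);
    }
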
Change-Id: Ia2e1343cb842c9391b12b77272587dfbe307a56d
diff --git a/av1/decoder/decodemv.c b/av1/decoder/decodemv.c
index a358f8d..e19b3e3 100644
--- a/av1/decoder/decodemv.c
+++ b/av1/decoder/decodemv.c
@@ -25,23 +25,25 @@
#include "aom_dsp/aom_dsp_common.h"
+#define ACCT_STR __func__
+
#if CONFIG_EXT_INTRA || CONFIG_PALETTE
static INLINE int read_uniform(aom_reader *r, int n) {
int l = get_unsigned_bits(n);
int m = (1 << l) - n;
- int v = aom_read_literal(r, l - 1);
+ int v = aom_read_literal(r, l - 1, ACCT_STR);
assert(l != 0);
if (v < m)
return v;
else
- return (v << 1) - m + aom_read_literal(r, 1);
+ return (v << 1) - m + aom_read_literal(r, 1, ACCT_STR);
}
#endif // CONFIG_EXT_INTRA || CONFIG_PALETTE
static PREDICTION_MODE read_intra_mode(aom_reader *r, const aom_prob *p) {
- return (PREDICTION_MODE)aom_read_tree(r, av1_intra_mode_tree, p);
+ return (PREDICTION_MODE)aom_read_tree(r, av1_intra_mode_tree, p, ACCT_STR);
}
static PREDICTION_MODE read_intra_mode_y(AV1_COMMON *cm, MACROBLOCKD *xd,
@@ -67,7 +69,8 @@
static INTERINTRA_MODE read_interintra_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
aom_reader *r, int size_group) {
const INTERINTRA_MODE ii_mode = (INTERINTRA_MODE)aom_read_tree(
- r, av1_interintra_mode_tree, cm->fc->interintra_mode_prob[size_group]);
+ r, av1_interintra_mode_tree, cm->fc->interintra_mode_prob[size_group],
+ ACCT_STR);
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->interintra_mode[size_group][ii_mode];
return ii_mode;
@@ -84,7 +87,7 @@
int16_t mode_ctx = ctx & NEWMV_CTX_MASK;
aom_prob mode_prob = cm->fc->newmv_prob[mode_ctx];
- if (aom_read(r, mode_prob) == 0) {
+ if (aom_read(r, mode_prob, ACCT_STR) == 0) {
if (counts) ++counts->newmv_mode[mode_ctx][0];
#if CONFIG_EXT_INTER
@@ -94,7 +97,7 @@
#if CONFIG_EXT_INTER
} else {
mode_prob = cm->fc->new2mv_prob;
- if (aom_read(r, mode_prob) == 0) {
+ if (aom_read(r, mode_prob, ACCT_STR) == 0) {
if (counts) ++counts->new2mv_mode[0];
return NEWMV;
} else {
@@ -111,7 +114,7 @@
mode_ctx = (ctx >> ZEROMV_OFFSET) & ZEROMV_CTX_MASK;
mode_prob = cm->fc->zeromv_prob[mode_ctx];
- if (aom_read(r, mode_prob) == 0) {
+ if (aom_read(r, mode_prob, ACCT_STR) == 0) {
if (counts) ++counts->zeromv_mode[mode_ctx][0];
return ZEROMV;
}
@@ -125,7 +128,7 @@
mode_prob = cm->fc->refmv_prob[mode_ctx];
- if (aom_read(r, mode_prob) == 0) {
+ if (aom_read(r, mode_prob, ACCT_STR) == 0) {
if (counts) ++counts->refmv_mode[mode_ctx][0];
return NEARESTMV;
@@ -137,8 +140,8 @@
// Invalid prediction mode.
assert(0);
#else
- const int mode =
- aom_read_tree(r, av1_inter_mode_tree, cm->fc->inter_mode_probs[ctx]);
+ const int mode = aom_read_tree(r, av1_inter_mode_tree,
+ cm->fc->inter_mode_probs[ctx], ACCT_STR);
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->inter_mode[ctx][mode];
@@ -158,7 +161,7 @@
if (xd->ref_mv_count[ref_frame_type] > idx + 1) {
uint8_t drl_ctx = av1_drl_ctx(xd->ref_mv_stack[ref_frame_type], idx);
aom_prob drl_prob = cm->fc->drl_prob[drl_ctx];
- if (!aom_read(r, drl_prob)) {
+ if (!aom_read(r, drl_prob, ACCT_STR)) {
mbmi->ref_mv_idx = idx;
if (xd->counts) ++xd->counts->drl_mode[drl_ctx][0];
return;
@@ -178,7 +181,7 @@
if (xd->ref_mv_count[ref_frame_type] > idx + 1) {
uint8_t drl_ctx = av1_drl_ctx(xd->ref_mv_stack[ref_frame_type], idx);
aom_prob drl_prob = cm->fc->drl_prob[drl_ctx];
- if (!aom_read(r, drl_prob)) {
+ if (!aom_read(r, drl_prob, ACCT_STR)) {
mbmi->ref_mv_idx = idx - 1;
if (xd->counts) ++xd->counts->drl_mode[drl_ctx][0];
return;
@@ -194,8 +197,9 @@
#if CONFIG_EXT_INTER
static PREDICTION_MODE read_inter_compound_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
aom_reader *r, int16_t ctx) {
- const int mode = aom_read_tree(r, av1_inter_compound_mode_tree,
- cm->fc->inter_compound_mode_probs[ctx]);
+ const int mode =
+ aom_read_tree(r, av1_inter_compound_mode_tree,
+ cm->fc->inter_compound_mode_probs[ctx], ACCT_STR);
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->inter_compound_mode[ctx][mode];
@@ -208,9 +212,9 @@
static int read_segment_id(aom_reader *r,
const struct segmentation_probs *segp) {
#if CONFIG_DAALA_EC
- return aom_read_symbol(r, segp->tree_cdf, MAX_SEGMENTS);
+ return aom_read_symbol(r, segp->tree_cdf, MAX_SEGMENTS, ACCT_STR);
#else
- return aom_read_tree(r, av1_segment_tree, segp->tree_probs);
+ return aom_read_tree(r, av1_segment_tree, segp->tree_probs, ACCT_STR);
#endif
}
@@ -235,7 +239,7 @@
if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
- is_split = aom_read(r, cm->fc->txfm_partition_prob[ctx]);
+ is_split = aom_read(r, cm->fc->txfm_partition_prob[ctx], ACCT_STR);
if (is_split) {
BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
@@ -278,8 +282,9 @@
int tx_size_cat, aom_reader *r) {
FRAME_COUNTS *counts = xd->counts;
const int ctx = get_tx_size_context(xd);
- int tx_size = aom_read_tree(r, av1_tx_size_tree[tx_size_cat],
- cm->fc->tx_size_probs[tx_size_cat][ctx]);
+ int tx_size =
+ aom_read_tree(r, av1_tx_size_tree[tx_size_cat],
+ cm->fc->tx_size_probs[tx_size_cat][ctx], ACCT_STR);
if (counts) ++counts->tx_size[tx_size_cat][ctx][tx_size];
return (TX_SIZE)tx_size;
}
@@ -420,7 +425,7 @@
if (seg->temporal_update) {
const int ctx = av1_get_pred_context_seg_id(xd);
const aom_prob pred_prob = segp->pred_probs[ctx];
- mbmi->seg_id_predicted = aom_read(r, pred_prob);
+ mbmi->seg_id_predicted = aom_read(r, pred_prob, ACCT_STR);
if (counts) ++counts->seg.pred[ctx][mbmi->seg_id_predicted];
if (mbmi->seg_id_predicted) {
segment_id = predicted_segment_id;
@@ -442,7 +447,7 @@
return 1;
} else {
const int ctx = av1_get_skip_context(xd);
- const int skip = aom_read(r, cm->fc->skip_probs[ctx]);
+ const int skip = aom_read(r, cm->fc->skip_probs[ctx], ACCT_STR);
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->skip[ctx][skip];
return skip;
@@ -465,15 +470,17 @@
palette_ctx += (above_mi->mbmi.palette_mode_info.palette_size[0] > 0);
if (left_mi)
palette_ctx += (left_mi->mbmi.palette_mode_info.palette_size[0] > 0);
- if (aom_read(r, av1_default_palette_y_mode_prob[bsize - BLOCK_8X8]
- [palette_ctx])) {
+ if (aom_read(
+ r, av1_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx],
+ ACCT_STR)) {
pmi->palette_size[0] =
aom_read_tree(r, av1_palette_size_tree,
- av1_default_palette_y_size_prob[bsize - BLOCK_8X8]) +
+ av1_default_palette_y_size_prob[bsize - BLOCK_8X8],
+ ACCT_STR) +
2;
n = pmi->palette_size[0];
for (i = 0; i < n; ++i)
- pmi->palette_colors[i] = aom_read_literal(r, cm->bit_depth);
+ pmi->palette_colors[i] = aom_read_literal(r, cm->bit_depth, ACCT_STR);
xd->plane[0].color_index_map[0] = read_uniform(r, n);
assert(xd->plane[0].color_index_map[0] < n);
@@ -481,18 +488,19 @@
}
if (mbmi->uv_mode == DC_PRED) {
- if (aom_read(r,
- av1_default_palette_uv_mode_prob[pmi->palette_size[0] > 0])) {
+ if (aom_read(r, av1_default_palette_uv_mode_prob[pmi->palette_size[0] > 0],
+ ACCT_STR)) {
pmi->palette_size[1] =
aom_read_tree(r, av1_palette_size_tree,
- av1_default_palette_uv_size_prob[bsize - BLOCK_8X8]) +
+ av1_default_palette_uv_size_prob[bsize - BLOCK_8X8],
+ ACCT_STR) +
2;
n = pmi->palette_size[1];
for (i = 0; i < n; ++i) {
pmi->palette_colors[PALETTE_MAX_SIZE + i] =
- aom_read_literal(r, cm->bit_depth);
+ aom_read_literal(r, cm->bit_depth, ACCT_STR);
pmi->palette_colors[2 * PALETTE_MAX_SIZE + i] =
- aom_read_literal(r, cm->bit_depth);
+ aom_read_literal(r, cm->bit_depth, ACCT_STR);
}
xd->plane[1].color_index_map[0] = read_uniform(r, n);
assert(xd->plane[1].color_index_map[0] < n);
@@ -517,7 +525,7 @@
#endif // CONFIG_PALETTE
) {
mbmi->ext_intra_mode_info.use_ext_intra_mode[0] =
- aom_read(r, cm->fc->ext_intra_probs[0]);
+ aom_read(r, cm->fc->ext_intra_probs[0], ACCT_STR);
if (mbmi->ext_intra_mode_info.use_ext_intra_mode[0]) {
mbmi->ext_intra_mode_info.ext_intra_mode[0] =
read_uniform(r, FILTER_INTRA_MODES);
@@ -531,7 +539,7 @@
#endif // CONFIG_PALETTE
) {
mbmi->ext_intra_mode_info.use_ext_intra_mode[1] =
- aom_read(r, cm->fc->ext_intra_probs[1]);
+ aom_read(r, cm->fc->ext_intra_probs[1], ACCT_STR);
if (mbmi->ext_intra_mode_info.use_ext_intra_mode[1]) {
mbmi->ext_intra_mode_info.ext_intra_mode[1] =
read_uniform(r, FILTER_INTRA_MODES);
@@ -556,8 +564,8 @@
p_angle = mode_to_angle_map[mbmi->mode] + mbmi->angle_delta[0] * ANGLE_STEP;
if (av1_is_intra_filter_switchable(p_angle)) {
FRAME_COUNTS *counts = xd->counts;
- mbmi->intra_filter = aom_read_tree(r, av1_intra_filter_tree,
- cm->fc->intra_filter_probs[ctx]);
+ mbmi->intra_filter = aom_read_tree(
+ r, av1_intra_filter_tree, cm->fc->intra_filter_probs[ctx], ACCT_STR);
if (counts) ++counts->intra_filter[ctx][mbmi->intra_filter];
} else {
mbmi->intra_filter = INTRA_FILTER_LINEAR;
@@ -645,7 +653,8 @@
if (eset > 0) {
mbmi->tx_type = aom_read_tree(
r, av1_ext_tx_intra_tree[eset],
- cm->fc->intra_ext_tx_prob[eset][mbmi->tx_size][mbmi->mode]);
+ cm->fc->intra_ext_tx_prob[eset][mbmi->tx_size][mbmi->mode],
+ ACCT_STR);
if (counts)
++counts->intra_ext_tx[eset][mbmi->tx_size][mbmi->mode]
[mbmi->tx_type];
@@ -658,9 +667,9 @@
!segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
FRAME_COUNTS *counts = xd->counts;
TX_TYPE tx_type_nom = intra_mode_to_tx_type_context[mbmi->mode];
- mbmi->tx_type =
- aom_read_tree(r, av1_ext_tx_tree,
- cm->fc->intra_ext_tx_prob[mbmi->tx_size][tx_type_nom]);
+ mbmi->tx_type = aom_read_tree(
+ r, av1_ext_tx_tree,
+ cm->fc->intra_ext_tx_prob[mbmi->tx_size][tx_type_nom], ACCT_STR);
if (counts)
++counts->intra_ext_tx[mbmi->tx_size][tx_type_nom][mbmi->tx_type];
} else {
@@ -673,29 +682,31 @@
static int read_mv_component(aom_reader *r, const nmv_component *mvcomp,
int usehp) {
int mag, d, fr, hp;
- const int sign = aom_read(r, mvcomp->sign);
- const int mv_class = aom_read_tree(r, av1_mv_class_tree, mvcomp->classes);
+ const int sign = aom_read(r, mvcomp->sign, ACCT_STR);
+ const int mv_class =
+ aom_read_tree(r, av1_mv_class_tree, mvcomp->classes, ACCT_STR);
const int class0 = mv_class == MV_CLASS_0;
// Integer part
if (class0) {
- d = aom_read_tree(r, av1_mv_class0_tree, mvcomp->class0);
+ d = aom_read_tree(r, av1_mv_class0_tree, mvcomp->class0, ACCT_STR);
mag = 0;
} else {
int i;
const int n = mv_class + CLASS0_BITS - 1; // number of bits
d = 0;
- for (i = 0; i < n; ++i) d |= aom_read(r, mvcomp->bits[i]) << i;
+ for (i = 0; i < n; ++i) d |= aom_read(r, mvcomp->bits[i], ACCT_STR) << i;
mag = CLASS0_SIZE << (mv_class + 2);
}
// Fractional part
fr = aom_read_tree(r, av1_mv_fp_tree,
- class0 ? mvcomp->class0_fp[d] : mvcomp->fp);
+ class0 ? mvcomp->class0_fp[d] : mvcomp->fp, ACCT_STR);
// High precision part (if hp is not used, the default value of the hp is 1)
- hp = usehp ? aom_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp) : 1;
+ hp = usehp ? aom_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp, ACCT_STR)
+ : 1;
// Result
mag += ((d << 3) | (fr << 1) | hp) + 1;
@@ -708,7 +719,8 @@
MV_JOINT_TYPE joint_type;
const int use_hp = allow_hp && av1_use_mv_hp(ref);
MV diff = { 0, 0 };
- joint_type = (MV_JOINT_TYPE)aom_read_tree(r, av1_mv_joint_tree, ctx->joints);
+ joint_type =
+ (MV_JOINT_TYPE)aom_read_tree(r, av1_mv_joint_tree, ctx->joints, ACCT_STR);
if (mv_joint_vertical(joint_type))
diff.row = read_mv_component(r, &ctx->comps[0], use_hp);
@@ -728,7 +740,7 @@
if (cm->reference_mode == REFERENCE_MODE_SELECT) {
const int ctx = av1_get_reference_mode_context(cm, xd);
const REFERENCE_MODE mode =
- (REFERENCE_MODE)aom_read(r, cm->fc->comp_inter_prob[ctx]);
+ (REFERENCE_MODE)aom_read(r, cm->fc->comp_inter_prob[ctx], ACCT_STR);
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->comp_inter[ctx][mode];
return mode; // SINGLE_REFERENCE or COMPOUND_REFERENCE
@@ -758,7 +770,7 @@
const int idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref];
#endif // CONFIG_EXT_REFS
const int ctx = av1_get_pred_context_comp_ref_p(cm, xd);
- const int bit = aom_read(r, fc->comp_ref_prob[ctx][0]);
+ const int bit = aom_read(r, fc->comp_ref_prob[ctx][0], ACCT_STR);
if (counts) ++counts->comp_ref[ctx][0][bit];
@@ -766,12 +778,12 @@
// Decode forward references.
if (!bit) {
const int ctx1 = av1_get_pred_context_comp_ref_p1(cm, xd);
- const int bit1 = aom_read(r, fc->comp_ref_prob[ctx1][1]);
+ const int bit1 = aom_read(r, fc->comp_ref_prob[ctx1][1], ACCT_STR);
if (counts) ++counts->comp_ref[ctx1][1][bit1];
ref_frame[!idx] = cm->comp_fwd_ref[bit1 ? 0 : 1];
} else {
const int ctx2 = av1_get_pred_context_comp_ref_p2(cm, xd);
- const int bit2 = aom_read(r, fc->comp_ref_prob[ctx2][2]);
+ const int bit2 = aom_read(r, fc->comp_ref_prob[ctx2][2], ACCT_STR);
if (counts) ++counts->comp_ref[ctx2][2][bit2];
ref_frame[!idx] = cm->comp_fwd_ref[bit2 ? 3 : 2];
}
@@ -779,7 +791,8 @@
// Decode backward references.
{
const int ctx_bwd = av1_get_pred_context_comp_bwdref_p(cm, xd);
- const int bit_bwd = aom_read(r, fc->comp_bwdref_prob[ctx_bwd][0]);
+ const int bit_bwd =
+ aom_read(r, fc->comp_bwdref_prob[ctx_bwd][0], ACCT_STR);
if (counts) ++counts->comp_bwdref[ctx_bwd][0][bit_bwd];
ref_frame[idx] = cm->comp_bwd_ref[bit_bwd];
}
@@ -790,38 +803,38 @@
} else if (mode == SINGLE_REFERENCE) {
#if CONFIG_EXT_REFS
const int ctx0 = av1_get_pred_context_single_ref_p1(xd);
- const int bit0 = aom_read(r, fc->single_ref_prob[ctx0][0]);
+ const int bit0 = aom_read(r, fc->single_ref_prob[ctx0][0], ACCT_STR);
if (counts) ++counts->single_ref[ctx0][0][bit0];
if (bit0) {
const int ctx1 = av1_get_pred_context_single_ref_p2(xd);
- const int bit1 = aom_read(r, fc->single_ref_prob[ctx1][1]);
+ const int bit1 = aom_read(r, fc->single_ref_prob[ctx1][1], ACCT_STR);
if (counts) ++counts->single_ref[ctx1][1][bit1];
ref_frame[0] = bit1 ? ALTREF_FRAME : BWDREF_FRAME;
} else {
const int ctx2 = av1_get_pred_context_single_ref_p3(xd);
- const int bit2 = aom_read(r, fc->single_ref_prob[ctx2][2]);
+ const int bit2 = aom_read(r, fc->single_ref_prob[ctx2][2], ACCT_STR);
if (counts) ++counts->single_ref[ctx2][2][bit2];
if (bit2) {
const int ctx4 = av1_get_pred_context_single_ref_p5(xd);
- const int bit4 = aom_read(r, fc->single_ref_prob[ctx4][4]);
+ const int bit4 = aom_read(r, fc->single_ref_prob[ctx4][4], ACCT_STR);
if (counts) ++counts->single_ref[ctx4][4][bit4];
ref_frame[0] = bit4 ? GOLDEN_FRAME : LAST3_FRAME;
} else {
const int ctx3 = av1_get_pred_context_single_ref_p4(xd);
- const int bit3 = aom_read(r, fc->single_ref_prob[ctx3][3]);
+ const int bit3 = aom_read(r, fc->single_ref_prob[ctx3][3], ACCT_STR);
if (counts) ++counts->single_ref[ctx3][3][bit3];
ref_frame[0] = bit3 ? LAST2_FRAME : LAST_FRAME;
}
}
#else
const int ctx0 = av1_get_pred_context_single_ref_p1(xd);
- const int bit0 = aom_read(r, fc->single_ref_prob[ctx0][0]);
+ const int bit0 = aom_read(r, fc->single_ref_prob[ctx0][0], ACCT_STR);
if (counts) ++counts->single_ref[ctx0][0][bit0];
if (bit0) {
const int ctx1 = av1_get_pred_context_single_ref_p2(xd);
- const int bit1 = aom_read(r, fc->single_ref_prob[ctx1][1]);
+ const int bit1 = aom_read(r, fc->single_ref_prob[ctx1][1], ACCT_STR);
if (counts) ++counts->single_ref[ctx1][1][bit1];
ref_frame[0] = bit1 ? ALTREF_FRAME : GOLDEN_FRAME;
} else {
@@ -843,8 +856,9 @@
int motion_mode;
FRAME_COUNTS *counts = xd->counts;
- motion_mode = aom_read_tree(r, av1_motion_mode_tree,
- cm->fc->motion_mode_prob[mbmi->sb_type]);
+ motion_mode =
+ aom_read_tree(r, av1_motion_mode_tree,
+ cm->fc->motion_mode_prob[mbmi->sb_type], ACCT_STR);
if (counts) ++counts->motion_mode[mbmi->sb_type][motion_mode];
return (MOTION_MODE)(SIMPLE_TRANSLATION + motion_mode);
} else {
@@ -873,11 +887,13 @@
FRAME_COUNTS *counts = xd->counts;
#if CONFIG_DAALA_EC
const InterpFilter type =
- (InterpFilter)av1_switchable_interp_inv[aom_read_tree_cdf(
- r, cm->fc->switchable_interp_cdf[ctx], SWITCHABLE_FILTERS)];
+ (InterpFilter)av1_switchable_interp_inv[aom_read_symbol(
+ r, cm->fc->switchable_interp_cdf[ctx], SWITCHABLE_FILTERS,
+ ACCT_STR)];
#else
const InterpFilter type = (InterpFilter)aom_read_tree(
- r, av1_switchable_interp_tree, cm->fc->switchable_interp_prob[ctx]);
+ r, av1_switchable_interp_tree, cm->fc->switchable_interp_prob[ctx],
+ ACCT_STR);
#endif
if (counts) ++counts->switchable_interp[ctx][type];
return type;
@@ -1173,7 +1189,7 @@
return get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) != INTRA_FRAME;
} else {
const int ctx = av1_get_intra_inter_context(xd);
- const int is_inter = aom_read(r, cm->fc->intra_inter_prob[ctx]);
+ const int is_inter = aom_read(r, cm->fc->intra_inter_prob[ctx], ACCT_STR);
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->intra_inter[ctx][is_inter];
return is_inter;
@@ -1533,7 +1549,8 @@
#endif
is_interintra_allowed(mbmi)) {
const int bsize_group = size_group_lookup[bsize];
- const int interintra = aom_read(r, cm->fc->interintra_prob[bsize_group]);
+ const int interintra =
+ aom_read(r, cm->fc->interintra_prob[bsize_group], ACCT_STR);
if (xd->counts) xd->counts->interintra[bsize_group][interintra]++;
assert(mbmi->ref_frame[1] == NONE);
if (interintra) {
@@ -1550,12 +1567,12 @@
#endif // CONFIG_EXT_INTRA
if (is_interintra_wedge_used(bsize)) {
mbmi->use_wedge_interintra =
- aom_read(r, cm->fc->wedge_interintra_prob[bsize]);
+ aom_read(r, cm->fc->wedge_interintra_prob[bsize], ACCT_STR);
if (xd->counts)
xd->counts->wedge_interintra[bsize][mbmi->use_wedge_interintra]++;
if (mbmi->use_wedge_interintra) {
mbmi->interintra_wedge_index =
- aom_read_literal(r, get_wedge_bits_lookup(bsize));
+ aom_read_literal(r, get_wedge_bits_lookup(bsize), ACCT_STR);
mbmi->interintra_wedge_sign = 0;
}
}
@@ -1584,13 +1601,13 @@
#endif // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
is_interinter_wedge_used(bsize)) {
mbmi->use_wedge_interinter =
- aom_read(r, cm->fc->wedge_interinter_prob[bsize]);
+ aom_read(r, cm->fc->wedge_interinter_prob[bsize], ACCT_STR);
if (xd->counts)
xd->counts->wedge_interinter[bsize][mbmi->use_wedge_interinter]++;
if (mbmi->use_wedge_interinter) {
mbmi->interinter_wedge_index =
- aom_read_literal(r, get_wedge_bits_lookup(bsize));
- mbmi->interinter_wedge_sign = aom_read_bit(r);
+ aom_read_literal(r, get_wedge_bits_lookup(bsize), ACCT_STR);
+ mbmi->interinter_wedge_sign = aom_read_bit(r, ACCT_STR);
}
}
#endif // CONFIG_EXT_INTER
@@ -1660,7 +1677,7 @@
int use_rect_tx = 0;
if (is_rect_tx_allowed) {
- use_rect_tx = aom_read(r, cm->fc->rect_tx_prob[tx_size_cat]);
+ use_rect_tx = aom_read(r, cm->fc->rect_tx_prob[tx_size_cat], ACCT_STR);
if (xd->counts) {
++xd->counts->rect_tx[tx_size_cat][use_rect_tx];
}
@@ -1747,7 +1764,8 @@
if (eset > 0) {
mbmi->tx_type = aom_read_tree(
r, av1_ext_tx_inter_tree[eset],
- cm->fc->inter_ext_tx_prob[eset][txsize_sqr_map[mbmi->tx_size]]);
+ cm->fc->inter_ext_tx_prob[eset][txsize_sqr_map[mbmi->tx_size]],
+ ACCT_STR);
if (counts)
++counts->inter_ext_tx[eset][txsize_sqr_map[mbmi->tx_size]]
[mbmi->tx_type];
@@ -1756,7 +1774,8 @@
if (eset > 0) {
mbmi->tx_type = aom_read_tree(
r, av1_ext_tx_intra_tree[eset],
- cm->fc->intra_ext_tx_prob[eset][mbmi->tx_size][mbmi->mode]);
+ cm->fc->intra_ext_tx_prob[eset][mbmi->tx_size][mbmi->mode],
+ ACCT_STR);
if (counts)
++counts->intra_ext_tx[eset][mbmi->tx_size][mbmi->mode]
[mbmi->tx_type];
@@ -1774,22 +1793,24 @@
FRAME_COUNTS *counts = xd->counts;
if (inter_block) {
#if CONFIG_DAALA_EC
- mbmi->tx_type = av1_ext_tx_inv[aom_read_tree_cdf(
- r, cm->fc->inter_ext_tx_cdf[mbmi->tx_size], TX_TYPES)];
+ mbmi->tx_type = av1_ext_tx_inv[aom_read_symbol(
+ r, cm->fc->inter_ext_tx_cdf[mbmi->tx_size], TX_TYPES, ACCT_STR)];
#else
- mbmi->tx_type = aom_read_tree(r, av1_ext_tx_tree,
- cm->fc->inter_ext_tx_prob[mbmi->tx_size]);
+ mbmi->tx_type =
+ aom_read_tree(r, av1_ext_tx_tree,
+ cm->fc->inter_ext_tx_prob[mbmi->tx_size], ACCT_STR);
#endif
if (counts) ++counts->inter_ext_tx[mbmi->tx_size][mbmi->tx_type];
} else {
const TX_TYPE tx_type_nom = intra_mode_to_tx_type_context[mbmi->mode];
#if CONFIG_DAALA_EC
- mbmi->tx_type = av1_ext_tx_inv[aom_read_tree_cdf(
- r, cm->fc->intra_ext_tx_cdf[mbmi->tx_size][tx_type_nom], TX_TYPES)];
+ mbmi->tx_type = av1_ext_tx_inv[aom_read_symbol(
+ r, cm->fc->intra_ext_tx_cdf[mbmi->tx_size][tx_type_nom], TX_TYPES,
+ ACCT_STR)];
#else
mbmi->tx_type = aom_read_tree(
r, av1_ext_tx_tree,
- cm->fc->intra_ext_tx_prob[mbmi->tx_size][tx_type_nom]);
+ cm->fc->intra_ext_tx_prob[mbmi->tx_size][tx_type_nom], ACCT_STR);
#endif
if (counts)
++counts->intra_ext_tx[mbmi->tx_size][tx_type_nom][mbmi->tx_type];