Remove ZERO_TOKEN from the ANS tokenset
ZERO_TOKEN can be re-added once AOM's ANS implementation is aligned
with the one in nextgenv2.
This partially reverts commit 3829cd2f2f9904572019aa047d068baeee843767.
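
With this change the zero/non-zero decision is coded as a plain binary
node again (aom_write on context_tree[1] in the encoder, a uabs_read on
ZERO_CONTEXT_NODE loop in the decoder, replacing the old skip_eob
flag), and the per-context rANS CDF spans only
ONE_TOKEN..CATEGORY6_TOKEN, built directly from
av1_pareto8_token_probs without merging in the zero-node probability.
The sketch below is a minimal, self-contained illustration of the
resulting CDF layout and the encoder-side lookup; it is not the AOM
API, and the table values and the build_cdf/RANS_PRECISION names are
illustrative only.

    /* Toy sketch (not AOM code): CDF over the 10 Pareto-model tokens
     * ONE_TOKEN..CATEGORY6_TOKEN, now that ZERO_TOKEN and EOB are
     * coded as separate binary nodes before the rANS symbol. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RANS_PRECISION 1024 /* 10-bit probabilities, as in AnsP10 */

    /* Exclusive prefix sum: cdf[i] = sum of pdf[0..i-1], cdf[n] = total. */
    static void build_cdf(const uint16_t *pdf, int n, uint16_t *cdf) {
      uint16_t sum = 0;
      for (int i = 0; i < n; ++i) {
        cdf[i] = sum;
        sum += pdf[i];
      }
      cdf[n] = sum;
    }

    int main(void) {
      /* Hypothetical stand-in for one row of av1_pareto8_token_probs. */
      const uint16_t pareto_pdf[10] = { 340, 200, 130, 90, 75,
                                        60,  50,  40,  25, 14 };
      uint16_t cdf[11];
      build_cdf(pareto_pdf, 10, cdf);
      assert(cdf[10] == RANS_PRECISION);

      /* Encoder side now indexes with t - ONE_TOKEN, e.g. t = THREE_TOKEN
       * gives offset 2: cum_prob = cdf[2], prob = cdf[3] - cdf[2]. */
      const int offset = 2;
      printf("cum_prob=%u prob=%u\n", (unsigned)cdf[offset],
             (unsigned)(cdf[offset + 1] - cdf[offset]));
      return 0;
    }
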
Change-Id: I78afc587f1abfe33ffcd53b3262910cfae135534
diff --git a/av1/common/entropy.c b/av1/common/entropy.c
index c9166db..d44cc99 100644
--- a/av1/common/entropy.c
+++ b/av1/common/entropy.c
@@ -2801,17 +2801,6 @@
}
#if CONFIG_ANS
-void av1_build_token_cdfs(const aom_prob *pdf_model, rans_lut cdf) {
- AnsP10 pdf_tab[ENTROPY_TOKENS - 1];
- assert(pdf_model[2] != 0);
- // TODO(aconverse): Investigate making the precision of the zero and EOB tree
- // nodes 10-bits.
- aom_rans_merge_prob8_pdf(pdf_tab, pdf_model[1],
- av1_pareto8_token_probs[pdf_model[2] - 1],
- ENTROPY_TOKENS - 2);
- aom_rans_build_cdf_from_pdf(pdf_tab, cdf);
-}
-
void av1_coef_pareto_cdfs(FRAME_CONTEXT *fc) {
TX_SIZE t;
int i, j, k, l;
@@ -2819,9 +2808,13 @@
for (i = 0; i < PLANE_TYPES; ++i)
for (j = 0; j < REF_TYPES; ++j)
for (k = 0; k < COEF_BANDS; ++k)
- for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l)
- av1_build_token_cdfs(fc->coef_probs[t][i][j][k][l],
- fc->coef_cdfs[t][i][j][k][l]);
+ for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
+ const aom_prob *const tree_probs = fc->coef_probs[t][i][j][k][l];
+ aom_prob pivot = tree_probs[PIVOT_NODE];
+ assert(pivot != 0);
+ aom_rans_build_cdf_from_pdf(av1_pareto8_token_probs[pivot - 1],
+ fc->coef_cdfs[t][i][j][k][l]);
+ }
}
#endif // CONFIG_ANS
diff --git a/av1/decoder/detokenize.c b/av1/decoder/detokenize.c
index b83ab3d..629e3b6 100644
--- a/av1/decoder/detokenize.c
+++ b/av1/decoder/detokenize.c
@@ -261,7 +261,6 @@
const FRAME_CONTEXT *const fc = xd->fc;
const int ref = is_inter_block(&xd->mi[0]->mbmi);
int band, c = 0;
- int skip_eob = 0;
const int tx_size_ctx = txsize_sqr_map[tx_size];
const aom_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
fc->coef_probs[tx_size_ctx][type][ref];
@@ -331,87 +330,89 @@
int val = -1;
band = *band_translate++;
prob = coef_probs[band][ctx];
- if (!skip_eob) {
- if (counts) ++eob_branch_count[band][ctx];
- if (!uabs_read(ans, prob[EOB_CONTEXT_NODE])) {
- INCREMENT_COUNT(EOB_MODEL_TOKEN);
- break;
- }
+ if (counts) ++eob_branch_count[band][ctx];
+ if (!uabs_read(ans, prob[EOB_CONTEXT_NODE])) {
+ INCREMENT_COUNT(EOB_MODEL_TOKEN);
+ break;
}
#if CONFIG_NEW_QUANT
dqv_val = &dq_val[band][0];
#endif // CONFIG_NEW_QUANT
- cdf = &coef_cdfs[band][ctx];
- token = ZERO_TOKEN + rans_read(ans, *cdf);
- if (token == ZERO_TOKEN) {
+ while (!uabs_read(ans, prob[ZERO_CONTEXT_NODE])) {
INCREMENT_COUNT(ZERO_TOKEN);
+ dqv = dq[1];
token_cache[scan[c]] = 0;
- skip_eob = 1;
- } else {
- INCREMENT_COUNT(ONE_TOKEN + (token > ONE_TOKEN));
- switch (token) {
- case ONE_TOKEN:
- case TWO_TOKEN:
- case THREE_TOKEN:
- case FOUR_TOKEN: val = token; break;
- case CATEGORY1_TOKEN:
- val = CAT1_MIN_VAL + read_coeff(cat1_prob, 1, ans);
- break;
- case CATEGORY2_TOKEN:
- val = CAT2_MIN_VAL + read_coeff(cat2_prob, 2, ans);
- break;
- case CATEGORY3_TOKEN:
- val = CAT3_MIN_VAL + read_coeff(cat3_prob, 3, ans);
- break;
- case CATEGORY4_TOKEN:
- val = CAT4_MIN_VAL + read_coeff(cat4_prob, 4, ans);
- break;
- case CATEGORY5_TOKEN:
- val = CAT5_MIN_VAL + read_coeff(cat5_prob, 5, ans);
- break;
- case CATEGORY6_TOKEN: {
- const int skip_bits = TX_SIZES - 1 - txsize_sqr_up_map[tx_size];
- const uint8_t *cat6p = cat6_prob + skip_bits;
+ ++c;
+ if (c >= max_eob) return c; // zero tokens at the end (no eob token)
+ ctx = get_coef_context(nb, token_cache, c);
+ band = *band_translate++;
+ prob = coef_probs[band][ctx];
+ }
+ cdf = &coef_cdfs[band][ctx];
+
+ token = ONE_TOKEN + rans_read(ans, *cdf);
+ INCREMENT_COUNT(ONE_TOKEN + (token > ONE_TOKEN));
+ switch (token) {
+ case ONE_TOKEN:
+ case TWO_TOKEN:
+ case THREE_TOKEN:
+ case FOUR_TOKEN: val = token; break;
+ case CATEGORY1_TOKEN:
+ val = CAT1_MIN_VAL + read_coeff(cat1_prob, 1, ans);
+ break;
+ case CATEGORY2_TOKEN:
+ val = CAT2_MIN_VAL + read_coeff(cat2_prob, 2, ans);
+ break;
+ case CATEGORY3_TOKEN:
+ val = CAT3_MIN_VAL + read_coeff(cat3_prob, 3, ans);
+ break;
+ case CATEGORY4_TOKEN:
+ val = CAT4_MIN_VAL + read_coeff(cat4_prob, 4, ans);
+ break;
+ case CATEGORY5_TOKEN:
+ val = CAT5_MIN_VAL + read_coeff(cat5_prob, 5, ans);
+ break;
+ case CATEGORY6_TOKEN: {
+ const int skip_bits = TX_SIZES - 1 - txsize_sqr_up_map[tx_size];
+ const uint8_t *cat6p = cat6_prob + skip_bits;
#if CONFIG_AOM_HIGHBITDEPTH
- switch (xd->bd) {
- case AOM_BITS_8:
- val = CAT6_MIN_VAL + read_coeff(cat6p, 14 - skip_bits, ans);
- break;
- case AOM_BITS_10:
- val = CAT6_MIN_VAL + read_coeff(cat6p, 16 - skip_bits, ans);
- break;
- case AOM_BITS_12:
- val = CAT6_MIN_VAL + read_coeff(cat6p, 18 - skip_bits, ans);
- break;
- default: assert(0); return -1;
- }
+ switch (xd->bd) {
+ case AOM_BITS_8:
+ val = CAT6_MIN_VAL + read_coeff(cat6p, 14 - skip_bits, ans);
+ break;
+ case AOM_BITS_10:
+ val = CAT6_MIN_VAL + read_coeff(cat6p, 16 - skip_bits, ans);
+ break;
+ case AOM_BITS_12:
+ val = CAT6_MIN_VAL + read_coeff(cat6p, 18 - skip_bits, ans);
+ break;
+ default: assert(0); return -1;
+ }
#else
- val = CAT6_MIN_VAL + read_coeff(cat6p, 14 - skip_bits, ans);
+ val = CAT6_MIN_VAL + read_coeff(cat6p, 14 - skip_bits, ans);
#endif
- } break;
- }
+ } break;
+ }
#if CONFIG_NEW_QUANT
- v = av1_dequant_abscoeff_nuq(val, dqv, dqv_val);
- v = dq_shift ? ROUND_POWER_OF_TWO(v, dq_shift) : v;
+ v = av1_dequant_abscoeff_nuq(val, dqv, dqv_val);
+ v = dq_shift ? ROUND_POWER_OF_TWO(v, dq_shift) : v;
#else
- v = (val * dqv) >> dq_shift;
+ v = (val * dqv) >> dq_shift;
#endif // CONFIG_NEW_QUANT
#if CONFIG_COEFFICIENT_RANGE_CHECKING
#if CONFIG_AOM_HIGHBITDEPTH
- dqcoeff[scan[c]] =
- highbd_check_range((uabs_read_bit(ans) ? -v : v), xd->bd);
+ dqcoeff[scan[c]] =
+ highbd_check_range((uabs_read_bit(ans) ? -v : v), xd->bd);
#else
- dqcoeff[scan[c]] = check_range(uabs_read_bit(ans) ? -v : v);
+ dqcoeff[scan[c]] = check_range(uabs_read_bit(ans) ? -v : v);
#endif // CONFIG_AOM_HIGHBITDEPTH
#else
- dqcoeff[scan[c]] = uabs_read_bit(ans) ? -v : v;
+ dqcoeff[scan[c]] = uabs_read_bit(ans) ? -v : v;
#endif // CONFIG_COEFFICIENT_RANGE_CHECKING
- token_cache[scan[c]] = av1_pt_energy_class[token];
- skip_eob = 0;
- }
+ token_cache[scan[c]] = av1_pt_energy_class[token];
++c;
ctx = get_coef_context(nb, token_cache, c);
dqv = dq[1];
diff --git a/av1/encoder/bitstream.c b/av1/encoder/bitstream.c
index 3954dd0..cd13063 100644
--- a/av1/encoder/bitstream.c
+++ b/av1/encoder/bitstream.c
@@ -654,12 +654,15 @@
if (!p->skip_eob_node) aom_write(w, t != EOB_TOKEN, p->context_tree[0]);
if (t != EOB_TOKEN) {
- struct rans_sym s;
- const rans_lut *token_cdf = p->token_cdf;
- assert(token_cdf);
- s.cum_prob = (*token_cdf)[t - ZERO_TOKEN];
- s.prob = (*token_cdf)[t - ZERO_TOKEN + 1] - s.cum_prob;
- buf_rans_write(w, &s);
+ aom_write(w, t != ZERO_TOKEN, p->context_tree[1]);
+
+ if (t != ZERO_TOKEN) {
+ struct rans_sym s;
+ const rans_lut *token_cdf = p->token_cdf;
+ s.cum_prob = (*token_cdf)[t - ONE_TOKEN];
+ s.prob = (*token_cdf)[t - ONE_TOKEN + 1] - s.cum_prob;
+ buf_rans_write(w, &s);
+ }
}
#else
/* skip one or two nodes */