Merge "Fix avx2 16x16/32x32 fwd txfm coeff output on HBD" into nextgenv2
diff --git a/aom_dsp/entdec.c b/aom_dsp/entdec.c
index 3f2feab..18563b2 100644
--- a/aom_dsp/entdec.c
+++ b/aom_dsp/entdec.c
@@ -206,6 +206,7 @@
od_ec_window dif;
od_ec_window vw;
unsigned r;
+ unsigned r_new;
unsigned v;
int ret;
OD_ASSERT(0 < fz);
@@ -216,10 +217,14 @@
OD_ASSERT(32768U <= r);
v = fz * (uint32_t)r >> 15;
vw = (od_ec_window)v << (OD_EC_WINDOW_SIZE - 16);
- ret = dif >= vw;
- if (ret) dif -= vw;
- r = ret ? r - v : v;
- return od_ec_dec_normalize(dec, dif, r, ret);
+ ret = 0;
+ r_new = v;
+ if (dif >= vw) {
+ r_new = r - v;
+ dif -= vw;
+ ret = 1;
+ }
+ return od_ec_dec_normalize(dec, dif, r_new, ret);
}
/*Decodes a symbol given a cumulative distribution function (CDF) table.
diff --git a/av1/common/entropymode.c b/av1/common/entropymode.c
index 78f4ffe..e812f15 100644
--- a/av1/common/entropymode.c
+++ b/av1/common/entropymode.c
@@ -147,6 +147,9 @@
{ 43, 81, 53, 140, 169, 204, 68, 84, 72 } // left = tm
}
};
+#if CONFIG_DAALA_EC
+aom_cdf_prob av1_kf_y_mode_cdf[INTRA_MODES][INTRA_MODES][INTRA_MODES];
+#endif
static const aom_prob default_if_y_probs[BLOCK_SIZE_GROUPS][INTRA_MODES - 1] = {
{ 65, 32, 18, 144, 162, 194, 41, 51, 98 }, // block_size < 8x8
@@ -368,6 +371,10 @@
static const aom_prob default_delta_q_probs[DELTA_Q_CONTEXTS] = { 220, 220,
220 };
#endif
+int av1_intra_mode_ind[INTRA_MODES];
+int av1_intra_mode_inv[INTRA_MODES];
+int av1_inter_mode_ind[INTER_MODES];
+int av1_inter_mode_inv[INTER_MODES];
/* Array indices are identical to previously-existing INTRAMODECONTEXTNODES. */
const aom_tree_index av1_intra_mode_tree[TREE_SIZE(INTRA_MODES)] = {
@@ -1406,14 +1413,22 @@
av1_copy(fc->switchable_restore_prob, default_switchable_restore_prob);
#endif // CONFIG_LOOP_RESTORATION
#if CONFIG_DAALA_EC
+ av1_tree_to_cdf_1D(av1_intra_mode_tree, fc->y_mode_prob, fc->y_mode_cdf,
+ BLOCK_SIZE_GROUPS);
+ av1_tree_to_cdf_1D(av1_intra_mode_tree, fc->uv_mode_prob, fc->uv_mode_cdf,
+ INTRA_MODES);
av1_tree_to_cdf_1D(av1_switchable_interp_tree, fc->switchable_interp_prob,
fc->switchable_interp_cdf, SWITCHABLE_FILTER_CONTEXTS);
+ av1_tree_to_cdf_1D(av1_partition_tree, fc->partition_prob, fc->partition_cdf,
+ PARTITION_CONTEXTS);
+ av1_tree_to_cdf_1D(av1_inter_mode_tree, fc->inter_mode_probs,
+ fc->inter_mode_cdf, INTER_MODE_CONTEXTS);
av1_tree_to_cdf_2D(av1_ext_tx_tree, fc->intra_ext_tx_prob,
fc->intra_ext_tx_cdf, EXT_TX_SIZES, TX_TYPES);
av1_tree_to_cdf_1D(av1_ext_tx_tree, fc->inter_ext_tx_prob,
fc->inter_ext_tx_cdf, EXT_TX_SIZES);
- av1_tree_to_cdf_1D(av1_partition_tree, fc->partition_prob, fc->partition_cdf,
- PARTITION_CONTEXTS);
+ av1_tree_to_cdf_2D(av1_intra_mode_tree, av1_kf_y_mode_prob, av1_kf_y_mode_cdf,
+ INTRA_MODES, INTRA_MODES);
av1_tree_to_cdf(av1_segment_tree, fc->seg.tree_probs, fc->seg.tree_cdf);
#endif
#if CONFIG_DELTA_Q
diff --git a/av1/common/entropymode.h b/av1/common/entropymode.h
index 68a6400..3043114 100644
--- a/av1/common/entropymode.h
+++ b/av1/common/entropymode.h
@@ -165,9 +165,12 @@
aom_prob switchable_restore_prob[RESTORE_SWITCHABLE_TYPES - 1];
#endif // CONFIG_LOOP_RESTORATION
#if CONFIG_DAALA_EC
+ aom_cdf_prob y_mode_cdf[BLOCK_SIZE_GROUPS][INTRA_MODES];
+ aom_cdf_prob uv_mode_cdf[INTRA_MODES][INTRA_MODES];
aom_cdf_prob partition_cdf[PARTITION_CONTEXTS][PARTITION_TYPES];
aom_cdf_prob switchable_interp_cdf[SWITCHABLE_FILTER_CONTEXTS]
[SWITCHABLE_FILTERS];
+ aom_cdf_prob inter_mode_cdf[INTER_MODE_CONTEXTS][INTER_MODES];
aom_cdf_prob intra_ext_tx_cdf[EXT_TX_SIZES][TX_TYPES][TX_TYPES];
aom_cdf_prob inter_ext_tx_cdf[EXT_TX_SIZES][TX_TYPES];
#endif
@@ -276,6 +279,9 @@
extern const aom_prob av1_kf_y_mode_prob[INTRA_MODES][INTRA_MODES]
[INTRA_MODES - 1];
+#if CONFIG_DAALA_EC
+extern aom_cdf_prob av1_kf_y_mode_cdf[INTRA_MODES][INTRA_MODES][INTRA_MODES];
+#endif
#if CONFIG_PALETTE
extern const aom_prob av1_default_palette_y_mode_prob[PALETTE_BLOCK_SIZES]
[PALETTE_Y_MODE_CONTEXTS];
@@ -294,6 +300,12 @@
extern const aom_tree_index av1_intra_mode_tree[TREE_SIZE(INTRA_MODES)];
extern const aom_tree_index av1_inter_mode_tree[TREE_SIZE(INTER_MODES)];
+#if CONFIG_DAALA_EC
+extern int av1_intra_mode_ind[INTRA_MODES];
+extern int av1_intra_mode_inv[INTRA_MODES];
+extern int av1_inter_mode_ind[INTER_MODES];
+extern int av1_inter_mode_inv[INTER_MODES];
+#endif
#if CONFIG_EXT_INTER
extern const aom_tree_index
av1_interintra_mode_tree[TREE_SIZE(INTERINTRA_MODES)];
diff --git a/av1/common/entropymv.c b/av1/common/entropymv.c
index 34918b3..a80165e 100644
--- a/av1/common/entropymv.c
+++ b/av1/common/entropymv.c
@@ -42,28 +42,45 @@
4, -2, -3 };
static const nmv_context default_nmv_context = {
- { 32, 64, 96 },
+ { 32, 64, 96 }, // joints
+#if CONFIG_DAALA_EC
+ { 0, 0, 0, 0 }, // joint_cdf is computed from joints in av1_init_mv_probs()
+#endif
{ {
// Vertical component
128, // sign
{ 224, 144, 192, 168, 192, 176, 192, 198, 198, 245 }, // class
+#if CONFIG_DAALA_EC
+ { 0 }, // class_cdf is computed from class in av1_init_mv_probs()
+#endif
{ 216 }, // class0
{ 136, 140, 148, 160, 176, 192, 224, 234, 234, 240 }, // bits
{ { 128, 128, 64 }, { 96, 112, 64 } }, // class0_fp
{ 64, 96, 64 }, // fp
- 160, // class0_hp bit
- 128, // hp
+#if CONFIG_DAALA_EC
+ { { 0 }, { 0 } }, // class0_fp_cdf is computed in av1_init_mv_probs()
+ { 0 }, // fp_cdf is computed from fp in av1_init_mv_probs()
+#endif
+ 160, // class0_hp bit
+ 128, // hp
},
{
// Horizontal component
128, // sign
{ 216, 128, 176, 160, 176, 176, 192, 198, 198, 208 }, // class
+#if CONFIG_DAALA_EC
+ { 0 }, // class_cdf is computed from class in av1_init_mv_probs()
+#endif
{ 208 }, // class0
{ 136, 140, 148, 160, 176, 192, 224, 234, 234, 240 }, // bits
{ { 128, 128, 64 }, { 96, 112, 64 } }, // class0_fp
{ 64, 96, 64 }, // fp
- 160, // class0_hp bit
- 128, // hp
+#if CONFIG_DAALA_EC
+ { { 0 }, { 0 } }, // class0_fp_cdf is computed in av1_init_mv_probs()
+ { 0 }, // fp_cdf is computed from fp in av1_init_mv_probs()
+#endif
+ 160, // class0_hp bit
+ 128, // hp
} },
};
@@ -262,6 +279,23 @@
for (i = 0; i < NMV_CONTEXTS; ++i) cm->fc->nmvc[i] = default_nmv_context;
#else
cm->fc->nmvc = default_nmv_context;
+#if CONFIG_DAALA_EC
+ {
+ int i, j;
+ av1_tree_to_cdf(av1_mv_joint_tree, cm->fc->nmvc.joints,
+ cm->fc->nmvc.joint_cdf);
+ for (i = 0; i < 2; i++) {
+ av1_tree_to_cdf(av1_mv_class_tree, cm->fc->nmvc.comps[i].classes,
+ cm->fc->nmvc.comps[i].class_cdf);
+ av1_tree_to_cdf(av1_mv_fp_tree, cm->fc->nmvc.comps[i].fp,
+ cm->fc->nmvc.comps[i].fp_cdf);
+ for (j = 0; j < CLASS0_SIZE; j++) {
+ av1_tree_to_cdf(av1_mv_fp_tree, cm->fc->nmvc.comps[i].class0_fp[j],
+ cm->fc->nmvc.comps[i].class0_fp_cdf[j]);
+ }
+ }
+ }
+#endif
#endif
#if CONFIG_GLOBAL_MOTION
av1_copy(cm->fc->global_motion_types_prob, default_global_motion_types_prob);
diff --git a/av1/common/entropymv.h b/av1/common/entropymv.h
index f97dd85..f308ef3 100644
--- a/av1/common/entropymv.h
+++ b/av1/common/entropymv.h
@@ -85,16 +85,26 @@
typedef struct {
aom_prob sign;
aom_prob classes[MV_CLASSES - 1];
+#if CONFIG_DAALA_EC
+ aom_cdf_prob class_cdf[MV_CLASSES];
+#endif
aom_prob class0[CLASS0_SIZE - 1];
aom_prob bits[MV_OFFSET_BITS];
aom_prob class0_fp[CLASS0_SIZE][MV_FP_SIZE - 1];
aom_prob fp[MV_FP_SIZE - 1];
+#if CONFIG_DAALA_EC
+ aom_cdf_prob class0_fp_cdf[CLASS0_SIZE][MV_FP_SIZE];
+ aom_cdf_prob fp_cdf[MV_FP_SIZE];
+#endif
aom_prob class0_hp;
aom_prob hp;
} nmv_component;
typedef struct {
aom_prob joints[MV_JOINTS - 1];
+#if CONFIG_DAALA_EC
+ aom_cdf_prob joint_cdf[MV_JOINTS];
+#endif
nmv_component comps[2];
} nmv_context;
diff --git a/av1/common/onyxc_int.h b/av1/common/onyxc_int.h
index 3c8eac8..b6e73cd 100644
--- a/av1/common/onyxc_int.h
+++ b/av1/common/onyxc_int.h
@@ -378,6 +378,9 @@
// - this is intentionally not placed in FRAME_CONTEXT since it's reset upon
// each keyframe and not used afterwards
aom_prob kf_y_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1];
+#if CONFIG_DAALA_EC
+ aom_cdf_prob kf_y_cdf[INTRA_MODES][INTRA_MODES][INTRA_MODES];
+#endif
#if CONFIG_GLOBAL_MOTION
Global_Motion_Params global_motion[TOTAL_REFS_PER_FRAME];
#endif
@@ -582,6 +585,18 @@
return cm->kf_y_prob[above][left];
}
+#if CONFIG_DAALA_EC
+static INLINE const aom_cdf_prob *get_y_mode_cdf(const AV1_COMMON *cm,
+ const MODE_INFO *mi,
+ const MODE_INFO *above_mi,
+ const MODE_INFO *left_mi,
+ int block) {
+ const PREDICTION_MODE above = av1_above_block_mode(mi, above_mi, block);
+ const PREDICTION_MODE left = av1_left_block_mode(mi, left_mi, block);
+ return cm->kf_y_cdf[above][left];
+}
+#endif
+
static INLINE void update_partition_context(MACROBLOCKD *xd, int mi_row,
int mi_col, BLOCK_SIZE subsize,
BLOCK_SIZE bsize) {
diff --git a/av1/common/scan.h b/av1/common/scan.h
index 7b6698f..407c9ec 100644
--- a/av1/common/scan.h
+++ b/av1/common/scan.h
@@ -80,8 +80,9 @@
}
#endif // CONFIG_EXT_TX
-static INLINE const SCAN_ORDER *get_scan(TX_SIZE tx_size, TX_TYPE tx_type,
- int is_inter) {
+static INLINE const SCAN_ORDER *get_scan(const AV1_COMMON *cm, TX_SIZE tx_size,
+ TX_TYPE tx_type, int is_inter) {
+ (void)cm;
#if CONFIG_EXT_TX
return is_inter ? &av1_inter_scan_orders[tx_size][tx_type]
: &av1_intra_scan_orders[tx_size][tx_type];
diff --git a/av1/decoder/decodeframe.c b/av1/decoder/decodeframe.c
index 146ca23..b62abe0 100644
--- a/av1/decoder/decodeframe.c
+++ b/av1/decoder/decodeframe.c
@@ -134,9 +134,14 @@
#endif // CONFIG_EXT_INTER
#else
int j;
- for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
+ for (i = 0; i < INTER_MODE_CONTEXTS; ++i) {
for (j = 0; j < INTER_MODES - 1; ++j)
av1_diff_update_prob(r, &fc->inter_mode_probs[i][j], ACCT_STR);
+#if CONFIG_DAALA_EC
+ av1_tree_to_cdf(av1_inter_mode_tree, fc->inter_mode_probs[i],
+ fc->inter_mode_cdf[i]);
+#endif
+ }
#endif
}
@@ -204,6 +209,9 @@
int i, j;
update_mv_probs(ctx->joints, MV_JOINTS - 1, r);
+#if CONFIG_DAALA_EC
+ av1_tree_to_cdf(av1_mv_joint_tree, ctx->joints, ctx->joint_cdf);
+#endif
for (i = 0; i < 2; ++i) {
nmv_component *const comp_ctx = &ctx->comps[i];
@@ -211,13 +219,24 @@
update_mv_probs(comp_ctx->classes, MV_CLASSES - 1, r);
update_mv_probs(comp_ctx->class0, CLASS0_SIZE - 1, r);
update_mv_probs(comp_ctx->bits, MV_OFFSET_BITS, r);
+#if CONFIG_DAALA_EC
+ av1_tree_to_cdf(av1_mv_class_tree, comp_ctx->classes, comp_ctx->class_cdf);
+#endif
}
for (i = 0; i < 2; ++i) {
nmv_component *const comp_ctx = &ctx->comps[i];
- for (j = 0; j < CLASS0_SIZE; ++j)
+ for (j = 0; j < CLASS0_SIZE; ++j) {
update_mv_probs(comp_ctx->class0_fp[j], MV_FP_SIZE - 1, r);
+#if CONFIG_DAALA_EC
+ av1_tree_to_cdf(av1_mv_fp_tree, comp_ctx->class0_fp[j],
+ comp_ctx->class0_fp_cdf[j]);
+#endif
+ }
update_mv_probs(comp_ctx->fp, MV_FP_SIZE - 1, r);
+#if CONFIG_DAALA_EC
+ av1_tree_to_cdf(av1_mv_fp_tree, comp_ctx->fp, comp_ctx->fp_cdf);
+#endif
}
if (allow_hp) {
@@ -232,37 +251,30 @@
static void inverse_transform_block(MACROBLOCKD *xd, int plane,
const TX_TYPE tx_type,
const TX_SIZE tx_size, uint8_t *dst,
- int stride, int eob) {
+ int stride, int16_t scan_line, int eob) {
struct macroblockd_plane *const pd = &xd->plane[plane];
- if (eob > 0) {
- tran_low_t *const dqcoeff = pd->dqcoeff;
- INV_TXFM_PARAM inv_txfm_param;
- inv_txfm_param.tx_type = tx_type;
- inv_txfm_param.tx_size = tx_size;
- inv_txfm_param.eob = eob;
- inv_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
+ tran_low_t *const dqcoeff = pd->dqcoeff;
+ INV_TXFM_PARAM inv_txfm_param;
+ inv_txfm_param.tx_type = tx_type;
+ inv_txfm_param.tx_size = tx_size;
+ inv_txfm_param.eob = eob;
+ inv_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
#if CONFIG_AOM_HIGHBITDEPTH
- if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- inv_txfm_param.bd = xd->bd;
- highbd_inv_txfm_add(dqcoeff, dst, stride, &inv_txfm_param);
- } else {
+ if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+ inv_txfm_param.bd = xd->bd;
+ highbd_inv_txfm_add(dqcoeff, dst, stride, &inv_txfm_param);
+ } else {
#endif // CONFIG_AOM_HIGHBITDEPTH
- inv_txfm_add(dqcoeff, dst, stride, &inv_txfm_param);
+ inv_txfm_add(dqcoeff, dst, stride, &inv_txfm_param);
#if CONFIG_AOM_HIGHBITDEPTH
- }
-#endif // CONFIG_AOM_HIGHBITDEPTH
-
- // TODO(jingning): This cleans up different reset requests from various
- // experiments, but incurs unnecessary memset size.
- if (eob == 1)
- dqcoeff[0] = 0;
- else
- memset(dqcoeff, 0, tx_size_2d[tx_size] * sizeof(dqcoeff[0]));
}
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ memset(dqcoeff, 0, (scan_line + 1) * sizeof(dqcoeff[0]));
}
-static void predict_and_reconstruct_intra_block(MACROBLOCKD *const xd,
+static void predict_and_reconstruct_intra_block(AV1_COMMON *cm,
+ MACROBLOCKD *const xd,
#if CONFIG_ANS
struct AnsDecoder *const r,
#else
@@ -286,18 +298,21 @@
if (!mbmi->skip) {
TX_TYPE tx_type = get_tx_type(plane_type, xd, block_idx, tx_size);
- const SCAN_ORDER *scan_order = get_scan(tx_size, tx_type, 0);
- const int eob = av1_decode_block_tokens(
- xd, plane, scan_order, col, row, tx_size, tx_type, r, mbmi->segment_id);
- inverse_transform_block(xd, plane, tx_type, tx_size, dst, pd->dst.stride,
- eob);
+ const SCAN_ORDER *scan_order = get_scan(cm, tx_size, tx_type, 0);
+ int16_t max_scan_line = 0;
+ const int eob =
+ av1_decode_block_tokens(xd, plane, scan_order, col, row, tx_size,
+ tx_type, &max_scan_line, r, mbmi->segment_id);
+ if (eob)
+ inverse_transform_block(xd, plane, tx_type, tx_size, dst, pd->dst.stride,
+ max_scan_line, eob);
}
}
#if CONFIG_VAR_TX
-static void decode_reconstruct_tx(MACROBLOCKD *const xd, aom_reader *r,
- MB_MODE_INFO *const mbmi, int plane,
- BLOCK_SIZE plane_bsize, int block,
+static void decode_reconstruct_tx(AV1_COMMON *cm, MACROBLOCKD *const xd,
+ aom_reader *r, MB_MODE_INFO *const mbmi,
+ int plane, BLOCK_SIZE plane_bsize, int block,
int blk_row, int blk_col, TX_SIZE tx_size,
int *eob_total) {
const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -320,14 +335,15 @@
if (tx_size == plane_tx_size) {
PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
TX_TYPE tx_type = get_tx_type(plane_type, xd, block, plane_tx_size);
- const SCAN_ORDER *sc = get_scan(plane_tx_size, tx_type, 1);
+ const SCAN_ORDER *sc = get_scan(cm, plane_tx_size, tx_type, 1);
+ int16_t max_scan_line = 0;
const int eob =
av1_decode_block_tokens(xd, plane, sc, blk_col, blk_row, plane_tx_size,
- tx_type, r, mbmi->segment_id);
+ tx_type, &max_scan_line, r, mbmi->segment_id);
inverse_transform_block(
xd, plane, tx_type, plane_tx_size,
&pd->dst.buf[4 * blk_row * pd->dst.stride + 4 * blk_col],
- pd->dst.stride, eob);
+ pd->dst.stride, max_scan_line, eob);
*eob_total += eob;
} else {
int bsl = b_width_log2_lookup[bsize];
@@ -343,15 +359,16 @@
if (offsetr >= max_blocks_high || offsetc >= max_blocks_wide) continue;
- decode_reconstruct_tx(xd, r, mbmi, plane, plane_bsize, block + i * step,
- offsetr, offsetc, tx_size - 1, eob_total);
+ decode_reconstruct_tx(cm, xd, r, mbmi, plane, plane_bsize,
+ block + i * step, offsetr, offsetc, tx_size - 1,
+ eob_total);
}
}
}
#endif // CONFIG_VAR_TX
#if !CONFIG_VAR_TX || CONFIG_SUPERTX || (CONFIG_EXT_TX && CONFIG_RECT_TX)
-static int reconstruct_inter_block(MACROBLOCKD *const xd,
+static int reconstruct_inter_block(AV1_COMMON *cm, MACROBLOCKD *const xd,
#if CONFIG_ANS
struct AnsDecoder *const r,
#else
@@ -363,13 +380,15 @@
PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
int block_idx = (row << 1) + col;
TX_TYPE tx_type = get_tx_type(plane_type, xd, block_idx, tx_size);
- const SCAN_ORDER *scan_order = get_scan(tx_size, tx_type, 1);
- const int eob = av1_decode_block_tokens(xd, plane, scan_order, col, row,
- tx_size, tx_type, r, segment_id);
-
- inverse_transform_block(xd, plane, tx_type, tx_size,
- &pd->dst.buf[4 * row * pd->dst.stride + 4 * col],
- pd->dst.stride, eob);
+ const SCAN_ORDER *scan_order = get_scan(cm, tx_size, tx_type, 1);
+ int16_t max_scan_line = 0;
+ const int eob =
+ av1_decode_block_tokens(xd, plane, scan_order, col, row, tx_size, tx_type,
+ &max_scan_line, r, segment_id);
+ if (eob)
+ inverse_transform_block(xd, plane, tx_type, tx_size,
+ &pd->dst.buf[4 * row * pd->dst.stride + 4 * col],
+ pd->dst.stride, max_scan_line, eob);
return eob;
}
#endif // !CONFIG_VAR_TX || CONFIG_SUPER_TX
@@ -1146,11 +1165,12 @@
const int bh = 1 << (bhl - 1);
const int x_mis = AOMMIN(bw, cm->mi_cols - mi_col);
const int y_mis = AOMMIN(bh, cm->mi_rows - mi_row);
+ MB_MODE_INFO *mbmi;
+
#if CONFIG_ACCOUNTING
aom_accounting_set_context(&pbi->accounting, mi_col, mi_row);
#endif
#if CONFIG_SUPERTX
- MB_MODE_INFO *mbmi;
if (supertx_enabled) {
mbmi = set_mb_offsets(cm, xd, bsize, mi_row, mi_col, bw, bh, x_mis, y_mis);
} else {
@@ -1162,8 +1182,8 @@
#endif
av1_read_mode_info(pbi, xd, supertx_enabled, mi_row, mi_col, r, x_mis, y_mis);
#else
- MB_MODE_INFO *mbmi = set_offsets(cm, xd, bsize, mi_row, mi_col, bw, bh, x_mis,
- y_mis, bwl, bhl);
+ mbmi = set_offsets(cm, xd, bsize, mi_row, mi_col, bw, bh, x_mis, y_mis, bwl,
+ bhl);
#if CONFIG_EXT_PARTITION_TYPES
xd->mi[0]->mbmi.partition = partition;
#endif
@@ -1237,7 +1257,7 @@
for (row = 0; row < max_blocks_high; row += stepr)
for (col = 0; col < max_blocks_wide; col += stepc)
- predict_and_reconstruct_intra_block(xd, r, mbmi, plane, row, col,
+ predict_and_reconstruct_intra_block(cm, xd, r, mbmi, plane, row, col,
tx_size);
}
} else {
@@ -1327,14 +1347,14 @@
for (row = 0; row < max_blocks_high; row += stepr)
for (col = 0; col < max_blocks_wide; col += stepc)
- eobtotal += reconstruct_inter_block(xd, r, mbmi->segment_id,
+ eobtotal += reconstruct_inter_block(cm, xd, r, mbmi->segment_id,
plane, row, col, tx_size);
} else {
#endif
for (row = 0; row < num_4x4_h; row += bh_var_tx) {
for (col = 0; col < num_4x4_w; col += bw_var_tx) {
- decode_reconstruct_tx(xd, r, mbmi, plane, plane_bsize, block, row,
- col, max_tx_size, &eobtotal);
+ decode_reconstruct_tx(cm, xd, r, mbmi, plane, plane_bsize, block,
+ row, col, max_tx_size, &eobtotal);
block += step;
}
}
@@ -1357,8 +1377,8 @@
for (row = 0; row < max_blocks_high; row += stepr)
for (col = 0; col < max_blocks_wide; col += stepc)
- eobtotal += reconstruct_inter_block(xd, r, mbmi->segment_id, plane,
- row, col, tx_size);
+ eobtotal += reconstruct_inter_block(cm, xd, r, mbmi->segment_id,
+ plane, row, col, tx_size);
#endif
}
}
@@ -1754,8 +1774,8 @@
for (row = 0; row < max_blocks_high; row += stepr)
for (col = 0; col < max_blocks_wide; col += stepc)
- eobtotal += reconstruct_inter_block(xd, r, mbmi->segment_id_supertx,
- i, row, col, tx_size);
+ eobtotal += reconstruct_inter_block(
+ cm, xd, r, mbmi->segment_id_supertx, i, row, col, tx_size);
}
if (!(subsize < BLOCK_8X8) && eobtotal == 0) skip = 1;
}
@@ -3715,9 +3735,14 @@
#endif
}
- for (j = 0; j < INTRA_MODES; j++)
+ for (j = 0; j < INTRA_MODES; j++) {
for (i = 0; i < INTRA_MODES - 1; ++i)
av1_diff_update_prob(&r, &fc->uv_mode_prob[j][i], ACCT_STR);
+#if CONFIG_DAALA_EC
+ av1_tree_to_cdf(av1_intra_mode_tree, fc->uv_mode_prob[j],
+ fc->uv_mode_cdf[j]);
+#endif
+ }
#if CONFIG_EXT_PARTITION_TYPES
for (i = 0; i < PARTITION_TYPES - 1; ++i)
@@ -3744,10 +3769,18 @@
if (frame_is_intra_only(cm)) {
av1_copy(cm->kf_y_prob, av1_kf_y_mode_prob);
+#if CONFIG_DAALA_EC
+ av1_copy(cm->kf_y_cdf, av1_kf_y_mode_cdf);
+#endif
for (k = 0; k < INTRA_MODES; k++)
- for (j = 0; j < INTRA_MODES; j++)
+ for (j = 0; j < INTRA_MODES; j++) {
for (i = 0; i < INTRA_MODES - 1; ++i)
av1_diff_update_prob(&r, &cm->kf_y_prob[k][j][i], ACCT_STR);
+#if CONFIG_DAALA_EC
+ av1_tree_to_cdf(av1_intra_mode_tree, cm->kf_y_prob[k][j],
+ cm->kf_y_cdf[k][j]);
+#endif
+ }
} else {
#if !CONFIG_REF_MV
nmv_context *const nmvc = &fc->nmvc;
@@ -3799,9 +3832,14 @@
read_frame_reference_mode_probs(cm, &r);
- for (j = 0; j < BLOCK_SIZE_GROUPS; j++)
+ for (j = 0; j < BLOCK_SIZE_GROUPS; j++) {
for (i = 0; i < INTRA_MODES - 1; ++i)
av1_diff_update_prob(&r, &fc->y_mode_prob[j][i], ACCT_STR);
+#if CONFIG_DAALA_EC
+ av1_tree_to_cdf(av1_intra_mode_tree, fc->y_mode_prob[j],
+ fc->y_mode_cdf[j]);
+#endif
+ }
#if CONFIG_REF_MV
for (i = 0; i < NMV_CONTEXTS; ++i)
diff --git a/av1/decoder/decodemv.c b/av1/decoder/decodemv.c
index 8260f9d..07c745d 100644
--- a/av1/decoder/decodemv.c
+++ b/av1/decoder/decodemv.c
@@ -26,7 +26,6 @@
#include "aom_dsp/aom_dsp_common.h"
#define ACCT_STR __func__
-
#if CONFIG_EXT_INTRA || CONFIG_FILTER_INTRA || CONFIG_PALETTE
static INLINE int read_uniform(aom_reader *r, int n) {
int l = get_unsigned_bits(n);
@@ -42,9 +41,16 @@
}
#endif // CONFIG_EXT_INTRA || CONFIG_FILTER_INTRA || CONFIG_PALETTE
+#if CONFIG_DAALA_EC
+static PREDICTION_MODE read_intra_mode(aom_reader *r, const aom_cdf_prob *cdf) {
+ return (PREDICTION_MODE)
+ av1_intra_mode_inv[aom_read_symbol(r, cdf, INTRA_MODES, ACCT_STR)];
+}
+#else
static PREDICTION_MODE read_intra_mode(aom_reader *r, const aom_prob *p) {
return (PREDICTION_MODE)aom_read_tree(r, av1_intra_mode_tree, p, ACCT_STR);
}
+#endif
#if CONFIG_DELTA_Q
static int read_delta_qindex(AV1_COMMON *cm, MACROBLOCKD *xd, aom_reader *r,
@@ -85,7 +91,11 @@
static PREDICTION_MODE read_intra_mode_y(AV1_COMMON *cm, MACROBLOCKD *xd,
aom_reader *r, int size_group) {
const PREDICTION_MODE y_mode =
+#if CONFIG_DAALA_EC
+ read_intra_mode(r, cm->fc->y_mode_cdf[size_group]);
+#else
read_intra_mode(r, cm->fc->y_mode_prob[size_group]);
+#endif
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->y_mode[size_group][y_mode];
return y_mode;
@@ -95,7 +105,11 @@
aom_reader *r,
PREDICTION_MODE y_mode) {
const PREDICTION_MODE uv_mode =
+#if CONFIG_DAALA_EC
+ read_intra_mode(r, cm->fc->uv_mode_cdf[y_mode]);
+#else
read_intra_mode(r, cm->fc->uv_mode_prob[y_mode]);
+#endif
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->uv_mode[y_mode][uv_mode];
return uv_mode;
@@ -176,8 +190,13 @@
// Invalid prediction mode.
assert(0);
#else
+#if CONFIG_DAALA_EC
+ const int mode = av1_inter_mode_inv[aom_read_symbol(
+ r, cm->fc->inter_mode_cdf[ctx], INTER_MODES, ACCT_STR)];
+#else
const int mode = aom_read_tree(r, av1_inter_mode_tree,
cm->fc->inter_mode_probs[ctx], ACCT_STR);
+#endif
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->inter_mode[ctx][mode];
@@ -657,24 +676,48 @@
case BLOCK_4X4:
for (i = 0; i < 4; ++i)
mi->bmi[i].as_mode =
+#if CONFIG_DAALA_EC
+ read_intra_mode(r, get_y_mode_cdf(cm, mi, above_mi, left_mi, i));
+#else
read_intra_mode(r, get_y_mode_probs(cm, mi, above_mi, left_mi, i));
+#endif
mbmi->mode = mi->bmi[3].as_mode;
break;
case BLOCK_4X8:
mi->bmi[0].as_mode = mi->bmi[2].as_mode =
+#if CONFIG_DAALA_EC
+ read_intra_mode(r, get_y_mode_cdf(cm, mi, above_mi, left_mi, 0));
+#else
read_intra_mode(r, get_y_mode_probs(cm, mi, above_mi, left_mi, 0));
+#endif
mi->bmi[1].as_mode = mi->bmi[3].as_mode = mbmi->mode =
+#if CONFIG_DAALA_EC
+ read_intra_mode(r, get_y_mode_cdf(cm, mi, above_mi, left_mi, 1));
+#else
read_intra_mode(r, get_y_mode_probs(cm, mi, above_mi, left_mi, 1));
+#endif
break;
case BLOCK_8X4:
mi->bmi[0].as_mode = mi->bmi[1].as_mode =
+#if CONFIG_DAALA_EC
+ read_intra_mode(r, get_y_mode_cdf(cm, mi, above_mi, left_mi, 0));
+#else
read_intra_mode(r, get_y_mode_probs(cm, mi, above_mi, left_mi, 0));
+#endif
mi->bmi[2].as_mode = mi->bmi[3].as_mode = mbmi->mode =
+#if CONFIG_DAALA_EC
+ read_intra_mode(r, get_y_mode_cdf(cm, mi, above_mi, left_mi, 2));
+#else
read_intra_mode(r, get_y_mode_probs(cm, mi, above_mi, left_mi, 2));
+#endif
break;
default:
mbmi->mode =
+#if CONFIG_DAALA_EC
+ read_intra_mode(r, get_y_mode_cdf(cm, mi, above_mi, left_mi, 0));
+#else
read_intra_mode(r, get_y_mode_probs(cm, mi, above_mi, left_mi, 0));
+#endif
}
mbmi->uv_mode = read_intra_mode_uv(cm, xd, r, mbmi->mode);
@@ -741,7 +784,11 @@
int mag, d, fr, hp;
const int sign = aom_read(r, mvcomp->sign, ACCT_STR);
const int mv_class =
+#if CONFIG_DAALA_EC
+ aom_read_symbol(r, mvcomp->class_cdf, MV_CLASSES, ACCT_STR);
+#else
aom_read_tree(r, av1_mv_class_tree, mvcomp->classes, ACCT_STR);
+#endif
const int class0 = mv_class == MV_CLASS_0;
// Integer part
@@ -757,9 +804,14 @@
mag = CLASS0_SIZE << (mv_class + 2);
}
- // Fractional part
+// Fractional part
+#if CONFIG_DAALA_EC
+ fr = aom_read_symbol(r, class0 ? mvcomp->class0_fp_cdf[d] : mvcomp->fp_cdf,
+ MV_FP_SIZE, ACCT_STR);
+#else
fr = aom_read_tree(r, av1_mv_fp_tree,
class0 ? mvcomp->class0_fp[d] : mvcomp->fp, ACCT_STR);
+#endif
// High precision part (if hp is not used, the default value of the hp is 1)
hp = usehp ? aom_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp, ACCT_STR)
@@ -777,7 +829,11 @@
const int use_hp = allow_hp && av1_use_mv_hp(ref);
MV diff = { 0, 0 };
joint_type =
+#if CONFIG_DAALA_EC
+ (MV_JOINT_TYPE)aom_read_symbol(r, ctx->joint_cdf, MV_JOINTS, ACCT_STR);
+#else
(MV_JOINT_TYPE)aom_read_tree(r, av1_mv_joint_tree, ctx->joints, ACCT_STR);
+#endif
if (mv_joint_vertical(joint_type))
diff.row = read_mv_component(r, &ctx->comps[0], use_hp);
diff --git a/av1/decoder/decoder.c b/av1/decoder/decoder.c
index 9952650..bcc6a1b 100644
--- a/av1/decoder/decoder.c
+++ b/av1/decoder/decoder.c
@@ -52,6 +52,10 @@
SWITCHABLE_FILTERS, av1_switchable_interp_tree);
av1_indices_from_tree(av1_ext_tx_ind, av1_ext_tx_inv, TX_TYPES,
av1_ext_tx_tree);
+ av1_indices_from_tree(av1_intra_mode_ind, av1_intra_mode_inv, INTRA_MODES,
+ av1_intra_mode_tree);
+ av1_indices_from_tree(av1_inter_mode_ind, av1_inter_mode_inv, INTER_MODES,
+ av1_inter_mode_tree);
#endif
}
}
diff --git a/av1/decoder/detokenize.c b/av1/decoder/detokenize.c
index f2f74f5..9c01b93 100644
--- a/av1/decoder/detokenize.c
+++ b/av1/decoder/detokenize.c
@@ -61,7 +61,7 @@
dequant_val_type_nuq *dq_val,
#endif // CONFIG_NEW_QUANT
int ctx, const int16_t *scan, const int16_t *nb,
- aom_reader *r)
+ int16_t *max_scan_line, aom_reader *r)
#endif
{
FRAME_COUNTS *counts = xd->counts;
@@ -166,6 +166,9 @@
dqv_val = &dq_val[band][0];
#endif // CONFIG_NEW_QUANT
}
+
+ *max_scan_line = AOMMAX(*max_scan_line, scan[c]);
+
#if CONFIG_RANS
cdf = &coef_cdfs[band][ctx];
token = ONE_TOKEN +
@@ -327,7 +330,8 @@
int av1_decode_block_tokens(MACROBLOCKD *const xd, int plane,
const SCAN_ORDER *sc, int x, int y, TX_SIZE tx_size,
- TX_TYPE tx_type, aom_reader *r, int seg_id) {
+ TX_TYPE tx_type, int16_t *max_scan_line,
+ aom_reader *r, int seg_id) {
struct macroblockd_plane *const pd = &xd->plane[plane];
const int16_t *const dequant = pd->seg_dequant[seg_id];
const int ctx =
@@ -339,16 +343,16 @@
#endif // CONFIG_NEW_QUANT
#if CONFIG_AOM_QM
- const int eob =
- decode_coefs(xd, pd->plane_type, pd->dqcoeff, tx_size, tx_type, dequant,
- ctx, sc->scan, sc->neighbors, r, pd->seg_iqmatrix[seg_id]);
+ const int eob = decode_coefs(xd, pd->plane_type, pd->dqcoeff, tx_size,
+ tx_type, dequant, ctx, sc->scan, sc->neighbors,
+ &sc->max_scan_line, r, pd->seg_iqmatrix[seg_id]);
#else
const int eob =
decode_coefs(xd, pd->plane_type, pd->dqcoeff, tx_size, tx_type, dequant,
#if CONFIG_NEW_QUANT
pd->seg_dequant_nuq[seg_id][dq],
#endif // CONFIG_NEW_QUANT
- ctx, sc->scan, sc->neighbors, r);
+ ctx, sc->scan, sc->neighbors, max_scan_line, r);
#endif // CONFIG_AOM_QM
av1_set_contexts(xd, pd, tx_size, eob > 0, x, y);
return eob;
diff --git a/av1/decoder/detokenize.h b/av1/decoder/detokenize.h
index 9c08ff9..1eb1e6c 100644
--- a/av1/decoder/detokenize.h
+++ b/av1/decoder/detokenize.h
@@ -28,7 +28,7 @@
int av1_decode_block_tokens(MACROBLOCKD *const xd, int plane,
const SCAN_ORDER *sc, int x, int y, TX_SIZE tx_size,
- TX_TYPE tx_type,
+ TX_TYPE tx_type, int16_t *max_scan_line,
#if CONFIG_ANS
struct AnsDecoder *const r,
#else
diff --git a/av1/encoder/bitstream.c b/av1/encoder/bitstream.c
index e0fb7ec..5ae920b 100644
--- a/av1/encoder/bitstream.c
+++ b/av1/encoder/bitstream.c
@@ -192,13 +192,19 @@
structure. */
av1_indices_from_tree(av1_ext_tx_ind, av1_ext_tx_inv, TX_TYPES,
av1_ext_tx_tree);
+ av1_indices_from_tree(av1_intra_mode_ind, av1_intra_mode_inv, INTRA_MODES,
+ av1_intra_mode_tree);
+ av1_indices_from_tree(av1_inter_mode_ind, av1_inter_mode_inv, INTER_MODES,
+ av1_inter_mode_tree);
#endif
}
+#if !CONFIG_DAALA_EC
static void write_intra_mode(aom_writer *w, PREDICTION_MODE mode,
const aom_prob *probs) {
av1_write_token(w, av1_intra_mode_tree, probs, &intra_mode_encodings[mode]);
}
+#endif
#if CONFIG_EXT_INTER
static void write_interintra_mode(aom_writer *w, INTERINTRA_MODE mode,
@@ -252,10 +258,17 @@
}
}
#else
- const aom_prob *const inter_probs = cm->fc->inter_mode_probs[mode_ctx];
assert(is_inter_mode(mode));
- av1_write_token(w, av1_inter_mode_tree, inter_probs,
- &inter_mode_encodings[INTER_OFFSET(mode)]);
+#if CONFIG_DAALA_EC
+ aom_write_symbol(w, av1_inter_mode_ind[INTER_OFFSET(mode)],
+ cm->fc->inter_mode_cdf[mode_ctx], INTER_MODES);
+#else
+ {
+ const aom_prob *const inter_probs = cm->fc->inter_mode_probs[mode_ctx];
+ av1_write_token(w, av1_inter_mode_tree, inter_probs,
+ &inter_mode_encodings[INTER_OFFSET(mode)]);
+ }
+#endif
#endif
}
@@ -1233,7 +1246,13 @@
if (!is_inter) {
if (bsize >= BLOCK_8X8) {
+#if CONFIG_DAALA_EC
+ aom_write_symbol(w, av1_intra_mode_ind[mode],
+ cm->fc->y_mode_cdf[size_group_lookup[bsize]],
+ INTRA_MODES);
+#else
write_intra_mode(w, mode, cm->fc->y_mode_prob[size_group_lookup[bsize]]);
+#endif
} else {
int idx, idy;
const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
@@ -1241,11 +1260,21 @@
for (idy = 0; idy < 2; idy += num_4x4_h) {
for (idx = 0; idx < 2; idx += num_4x4_w) {
const PREDICTION_MODE b_mode = mi->bmi[idy * 2 + idx].as_mode;
+#if CONFIG_DAALA_EC
+ aom_write_symbol(w, av1_intra_mode_ind[b_mode], cm->fc->y_mode_cdf[0],
+ INTRA_MODES);
+#else
write_intra_mode(w, b_mode, cm->fc->y_mode_prob[0]);
+#endif
}
}
}
+#if CONFIG_DAALA_EC
+ aom_write_symbol(w, av1_intra_mode_ind[mbmi->uv_mode],
+ cm->fc->uv_mode_cdf[mode], INTRA_MODES);
+#else
write_intra_mode(w, mbmi->uv_mode, cm->fc->uv_mode_prob[mode]);
+#endif
#if CONFIG_EXT_INTRA
write_intra_angle_info(cm, xd, w);
#endif // CONFIG_EXT_INTRA
@@ -1622,8 +1651,13 @@
write_selected_tx_size(cm, xd, w);
if (bsize >= BLOCK_8X8) {
+#if CONFIG_DAALA_EC
+ aom_write_symbol(w, av1_intra_mode_ind[mbmi->mode],
+ get_y_mode_cdf(cm, mi, above_mi, left_mi, 0), INTRA_MODES);
+#else
write_intra_mode(w, mbmi->mode,
get_y_mode_probs(cm, mi, above_mi, left_mi, 0));
+#endif
} else {
const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
@@ -1632,13 +1666,23 @@
for (idy = 0; idy < 2; idy += num_4x4_h) {
for (idx = 0; idx < 2; idx += num_4x4_w) {
const int block = idy * 2 + idx;
+#if CONFIG_DAALA_EC
+ aom_write_symbol(w, av1_intra_mode_ind[mi->bmi[block].as_mode],
+ get_y_mode_cdf(cm, mi, above_mi, left_mi, block),
+ INTRA_MODES);
+#else
write_intra_mode(w, mi->bmi[block].as_mode,
get_y_mode_probs(cm, mi, above_mi, left_mi, block));
+#endif
}
}
}
-
+#if CONFIG_DAALA_EC
+ aom_write_symbol(w, av1_intra_mode_ind[mbmi->uv_mode],
+ cm->fc->uv_mode_cdf[mbmi->mode], INTRA_MODES);
+#else
write_intra_mode(w, mbmi->uv_mode, cm->fc->uv_mode_prob[mbmi->mode]);
+#endif
#if CONFIG_EXT_INTRA
write_intra_angle_info(cm, xd, w);
#endif // CONFIG_EXT_INTRA
@@ -3592,9 +3636,14 @@
#endif
update_seg_probs(cpi, header_bc);
- for (i = 0; i < INTRA_MODES; ++i)
+ for (i = 0; i < INTRA_MODES; ++i) {
prob_diff_update(av1_intra_mode_tree, fc->uv_mode_prob[i],
counts->uv_mode[i], INTRA_MODES, header_bc);
+#if CONFIG_DAALA_EC
+ av1_tree_to_cdf(av1_intra_mode_tree, fc->uv_mode_prob[i],
+ fc->uv_mode_cdf[i]);
+#endif
+ }
#if CONFIG_EXT_PARTITION_TYPES
prob_diff_update(av1_partition_tree, fc->partition_prob[0],
@@ -3621,17 +3670,30 @@
if (frame_is_intra_only(cm)) {
av1_copy(cm->kf_y_prob, av1_kf_y_mode_prob);
+#if CONFIG_DAALA_EC
+ av1_copy(cm->kf_y_cdf, av1_kf_y_mode_cdf);
+#endif
for (i = 0; i < INTRA_MODES; ++i)
- for (j = 0; j < INTRA_MODES; ++j)
+ for (j = 0; j < INTRA_MODES; ++j) {
prob_diff_update(av1_intra_mode_tree, cm->kf_y_prob[i][j],
counts->kf_y_mode[i][j], INTRA_MODES, header_bc);
+#if CONFIG_DAALA_EC
+ av1_tree_to_cdf(av1_intra_mode_tree, cm->kf_y_prob[i][j],
+ cm->kf_y_cdf[i][j]);
+#endif
+ }
} else {
#if CONFIG_REF_MV
update_inter_mode_probs(cm, header_bc, counts);
#else
- for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
+ for (i = 0; i < INTER_MODE_CONTEXTS; ++i) {
prob_diff_update(av1_inter_mode_tree, cm->fc->inter_mode_probs[i],
counts->inter_mode[i], INTER_MODES, header_bc);
+#if CONFIG_DAALA_EC
+ av1_tree_to_cdf(av1_inter_mode_tree, cm->fc->inter_mode_probs[i],
+ cm->fc->inter_mode_cdf[i]);
+#endif
+ }
#endif
#if CONFIG_EXT_INTER
@@ -3713,9 +3775,14 @@
}
}
- for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
+ for (i = 0; i < BLOCK_SIZE_GROUPS; ++i) {
prob_diff_update(av1_intra_mode_tree, cm->fc->y_mode_prob[i],
counts->y_mode[i], INTRA_MODES, header_bc);
+#if CONFIG_DAALA_EC
+ av1_tree_to_cdf(av1_intra_mode_tree, cm->fc->y_mode_prob[i],
+ cm->fc->y_mode_cdf[i]);
+#endif
+ }
av1_write_nmv_probs(cm, cm->allow_high_precision_mv, header_bc,
#if CONFIG_REF_MV
@@ -3723,6 +3790,10 @@
#else
&counts->mv);
#endif
+#if CONFIG_DAALA_EC
+ av1_tree_to_cdf(av1_mv_joint_tree, cm->fc->nmvc.joints,
+ cm->fc->nmvc.joint_cdf);
+#endif
update_ext_tx_probs(cm, header_bc);
#if CONFIG_SUPERTX
if (!xd->lossless[0]) update_supertx_probs(cm, header_bc);
diff --git a/av1/encoder/encodeframe.c b/av1/encoder/encodeframe.c
index d3b97d6..9dc6a2e 100644
--- a/av1/encoder/encodeframe.c
+++ b/av1/encoder/encodeframe.c
@@ -2213,7 +2213,7 @@
int this_rate = 0;
x->use_lp32x32fdct = cpi->sf.use_lp32x32fdct;
- av1_encode_sb_supertx(x, bsize);
+ av1_encode_sb_supertx((AV1_COMMON *)cm, x, bsize);
av1_tokenize_sb_supertx(cpi, td, tp, dry_run, bsize, rate);
if (rate) *rate += this_rate;
} else {
@@ -4489,6 +4489,10 @@
#define MIN_TRANS_THRESH 8
#define GLOBAL_MOTION_ADVANTAGE_THRESH 0.60
#define GLOBAL_MOTION_MODEL ROTZOOM
+// TODO(sarahparker) This function needs to be adjusted
+// to accommodate changes in the parameter integerization.
+// Commenting it out until the fix is made.
+/*
static void refine_integerized_param(WarpedMotionParams *wm,
#if CONFIG_AOM_HIGHBITDEPTH
int use_hbd, int bd,
@@ -4565,6 +4569,7 @@
*param = best_param;
}
}
+*/
static void convert_to_params(const double *params, TransformationType type,
int16_t *model) {
@@ -4579,7 +4584,7 @@
GM_TRANS_DECODE_FACTOR;
for (i = 2; i < n_params; ++i) {
- diag_value = ((i && 1) ? (1 << GM_ALPHA_PREC_BITS) : 0);
+ diag_value = ((i & 1) ? (1 << GM_ALPHA_PREC_BITS) : 0);
model[i] = (int16_t)floor(params[i] * (1 << GM_ALPHA_PREC_BITS) + 0.5);
model[i] =
(int16_t)(clamp(model[i] - diag_value, GM_ALPHA_MIN, GM_ALPHA_MAX) +
@@ -4643,14 +4648,6 @@
convert_model_to_params(params, GLOBAL_MOTION_MODEL,
&cm->global_motion[frame]);
if (get_gmtype(&cm->global_motion[frame]) > GLOBAL_ZERO) {
- refine_integerized_param(
- &cm->global_motion[frame].motion_params,
-#if CONFIG_AOM_HIGHBITDEPTH
- xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH, xd->bd,
-#endif // CONFIG_AOM_HIGHBITDEPTH
- ref_buf->y_buffer, ref_buf->y_width, ref_buf->y_height,
- ref_buf->y_stride, cpi->Source->y_buffer, cpi->Source->y_width,
- cpi->Source->y_height, cpi->Source->y_stride, 3);
// compute the advantage of using gm parameters over 0 motion
erroradvantage = av1_warp_erroradv(
&cm->global_motion[frame].motion_params,
@@ -5107,7 +5104,8 @@
int plane;
mbmi->skip = 1;
for (plane = 0; plane < MAX_MB_PLANE; ++plane)
- av1_encode_intra_block_plane(x, AOMMAX(bsize, BLOCK_8X8), plane, 1);
+ av1_encode_intra_block_plane((AV1_COMMON *)cm, x,
+ AOMMAX(bsize, BLOCK_8X8), plane, 1);
if (!dry_run)
sum_intra_stats(td->counts, mi, xd->above_mi, xd->left_mi,
frame_is_intra_only(cm));
@@ -5229,7 +5227,7 @@
}
#endif // CONFIG_MOTION_VAR
- av1_encode_sb(x, AOMMAX(bsize, BLOCK_8X8));
+ av1_encode_sb((AV1_COMMON *)cm, x, AOMMAX(bsize, BLOCK_8X8));
#if CONFIG_VAR_TX
#if CONFIG_EXT_TX && CONFIG_RECT_TX
if (is_rect_tx(mbmi->tx_size))
diff --git a/av1/encoder/encodemb.c b/av1/encoder/encodemb.c
index 4ab1b48..8914ba5 100644
--- a/av1/encoder/encodemb.c
+++ b/av1/encoder/encodemb.c
@@ -67,8 +67,8 @@
rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1); \
}
-int av1_optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
- int ctx) {
+int av1_optimize_b(const AV1_COMMON *cm, MACROBLOCK *mb, int plane, int block,
+ TX_SIZE tx_size, int ctx) {
MACROBLOCKD *const xd = &mb->e_mbd;
struct macroblock_plane *const p = &mb->plane[plane];
struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -86,7 +86,7 @@
const uint8_t *const band_translate = get_band_translate(tx_size);
TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
const SCAN_ORDER *const scan_order =
- get_scan(tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
+ get_scan(cm, tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
const int16_t *const scan = scan_order->scan;
const int16_t *const nb = scan_order->neighbors;
#if CONFIG_AOM_QM
@@ -432,16 +432,16 @@
FWD_TXFM_OPT_NORMAL, FWD_TXFM_OPT_NORMAL, FWD_TXFM_OPT_DC, FWD_TXFM_OPT_NORMAL
};
-void av1_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
- int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
- AV1_XFORM_QUANT xform_quant_idx) {
+void av1_xform_quant(const AV1_COMMON *cm, MACROBLOCK *x, int plane, int block,
+ int blk_row, int blk_col, BLOCK_SIZE plane_bsize,
+ TX_SIZE tx_size, AV1_XFORM_QUANT xform_quant_idx) {
MACROBLOCKD *const xd = &x->e_mbd;
const struct macroblock_plane *const p = &x->plane[plane];
const struct macroblockd_plane *const pd = &xd->plane[plane];
PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
const int is_inter = is_inter_block(&xd->mi[0]->mbmi);
- const SCAN_ORDER *const scan_order = get_scan(tx_size, tx_type, is_inter);
+ const SCAN_ORDER *const scan_order = get_scan(cm, tx_size, tx_type, is_inter);
tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
@@ -505,16 +505,16 @@
}
#if CONFIG_NEW_QUANT
-void av1_xform_quant_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
- int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
- int ctx) {
+void av1_xform_quant_nuq(const AV1_COMMON *cm, MACROBLOCK *x, int plane,
+ int block, int blk_row, int blk_col,
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int ctx) {
MACROBLOCKD *const xd = &x->e_mbd;
const struct macroblock_plane *const p = &x->plane[plane];
const struct macroblockd_plane *const pd = &xd->plane[plane];
PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
const int is_inter = is_inter_block(&xd->mi[0]->mbmi);
- const SCAN_ORDER *const scan_order = get_scan(tx_size, tx_type, is_inter);
+ const SCAN_ORDER *const scan_order = get_scan(cm, tx_size, tx_type, is_inter);
tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
@@ -578,16 +578,16 @@
}
}
-void av1_xform_quant_fp_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
- int blk_col, BLOCK_SIZE plane_bsize,
- TX_SIZE tx_size, int ctx) {
+void av1_xform_quant_fp_nuq(const AV1_COMMON *cm, MACROBLOCK *x, int plane,
+ int block, int blk_row, int blk_col,
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int ctx) {
MACROBLOCKD *const xd = &x->e_mbd;
const struct macroblock_plane *const p = &x->plane[plane];
const struct macroblockd_plane *const pd = &xd->plane[plane];
const int is_inter = is_inter_block(&xd->mi[0]->mbmi);
PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
- const SCAN_ORDER *const scan_order = get_scan(tx_size, tx_type, is_inter);
+ const SCAN_ORDER *const scan_order = get_scan(cm, tx_size, tx_type, is_inter);
int dq = get_dq_profile_from_ctx(xd->qindex[xd->mi[0]->mbmi.segment_id], ctx,
is_inter, plane_type);
tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
@@ -784,6 +784,7 @@
static void encode_block(int plane, int block, int blk_row, int blk_col,
BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) {
struct encode_b_args *const args = arg;
+ AV1_COMMON *cm = args->cm;
MACROBLOCK *const x = args->x;
MACROBLOCKD *const xd = &x->e_mbd;
int ctx;
@@ -815,10 +816,10 @@
{
#endif
#if CONFIG_NEW_QUANT
- av1_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
+ av1_xform_quant_fp_nuq(cm, x, plane, block, blk_row, blk_col, plane_bsize,
tx_size, ctx);
#else
- av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
+ av1_xform_quant(cm, x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
AV1_XFORM_QUANT_FP);
#endif // CONFIG_NEW_QUANT
}
@@ -829,7 +830,7 @@
#endif
if (p->eobs[block]) {
- *a = *l = av1_optimize_b(x, plane, block, tx_size, ctx) > 0;
+ *a = *l = av1_optimize_b(cm, x, plane, block, tx_size, ctx) > 0;
} else {
*a = *l = p->eobs[block] > 0;
}
@@ -918,10 +919,17 @@
}
#endif
+typedef struct encode_block_pass1_args {
+ AV1_COMMON *cm;
+ MACROBLOCK *x;
+} encode_block_pass1_args;
+
static void encode_block_pass1(int plane, int block, int blk_row, int blk_col,
BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
void *arg) {
- MACROBLOCK *const x = (MACROBLOCK *)arg;
+ encode_block_pass1_args *args = (encode_block_pass1_args *)arg;
+ AV1_COMMON *cm = args->cm;
+ MACROBLOCK *const x = args->x;
MACROBLOCKD *const xd = &x->e_mbd;
struct macroblock_plane *const p = &x->plane[plane];
struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -934,10 +942,10 @@
#if CONFIG_NEW_QUANT
ctx = 0;
- av1_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
+ av1_xform_quant_fp_nuq(cm, x, plane, block, blk_row, blk_col, plane_bsize,
tx_size, ctx);
#else
- av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
+ av1_xform_quant(cm, x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
AV1_XFORM_QUANT_B);
#endif // CONFIG_NEW_QUANT
@@ -962,17 +970,18 @@
}
}
-void av1_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize) {
+void av1_encode_sby_pass1(AV1_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE bsize) {
+ encode_block_pass1_args args = { cm, x };
av1_subtract_plane(x, bsize, 0);
av1_foreach_transformed_block_in_plane(&x->e_mbd, bsize, 0,
- encode_block_pass1, x);
+ encode_block_pass1, &args);
}
-void av1_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize) {
+void av1_encode_sb(AV1_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE bsize) {
MACROBLOCKD *const xd = &x->e_mbd;
struct optimize_ctx ctx;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
- struct encode_b_args arg = { x, &ctx, &mbmi->skip, NULL, NULL, 1 };
+ struct encode_b_args arg = { cm, x, &ctx, &mbmi->skip, NULL, NULL, 1 };
int plane;
mbmi->skip = 1;
@@ -1027,11 +1036,11 @@
}
#if CONFIG_SUPERTX
-void av1_encode_sb_supertx(MACROBLOCK *x, BLOCK_SIZE bsize) {
+void av1_encode_sb_supertx(AV1_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE bsize) {
MACROBLOCKD *const xd = &x->e_mbd;
struct optimize_ctx ctx;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
- struct encode_b_args arg = { x, &ctx, &mbmi->skip, NULL, NULL, 1 };
+ struct encode_b_args arg = { cm, x, &ctx, &mbmi->skip, NULL, NULL, 1 };
int plane;
mbmi->skip = 1;
@@ -1058,6 +1067,7 @@
BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
void *arg) {
struct encode_b_args *const args = arg;
+ AV1_COMMON *cm = args->cm;
MACROBLOCK *const x = args->x;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
@@ -1110,19 +1120,19 @@
if (args->enable_optimize_b) {
#if CONFIG_NEW_QUANT
- av1_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
+ av1_xform_quant_fp_nuq(cm, x, plane, block, blk_row, blk_col, plane_bsize,
tx_size, ctx);
#else // CONFIG_NEW_QUANT
- av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
+ av1_xform_quant(cm, x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
AV1_XFORM_QUANT_FP);
#endif // CONFIG_NEW_QUANT
if (p->eobs[block]) {
- *a = *l = av1_optimize_b(x, plane, block, tx_size, ctx) > 0;
+ *a = *l = av1_optimize_b(cm, x, plane, block, tx_size, ctx) > 0;
} else {
*a = *l = 0;
}
} else {
- av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
+ av1_xform_quant(cm, x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
AV1_XFORM_QUANT_B);
*a = *l = p->eobs[block] > 0;
}
@@ -1148,14 +1158,16 @@
}
}
-void av1_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane,
+void av1_encode_intra_block_plane(AV1_COMMON *cm, MACROBLOCK *x,
+ BLOCK_SIZE bsize, int plane,
int enable_optimize_b) {
const MACROBLOCKD *const xd = &x->e_mbd;
ENTROPY_CONTEXT ta[2 * MAX_MIB_SIZE];
ENTROPY_CONTEXT tl[2 * MAX_MIB_SIZE];
- struct encode_b_args arg = { x, NULL, &xd->mi[0]->mbmi.skip,
- ta, tl, enable_optimize_b };
+ struct encode_b_args arg = {
+ cm, x, NULL, &xd->mi[0]->mbmi.skip, ta, tl, enable_optimize_b
+ };
if (enable_optimize_b) {
const struct macroblockd_plane *const pd = &xd->plane[plane];
const TX_SIZE tx_size =
diff --git a/av1/encoder/encodemb.h b/av1/encoder/encodemb.h
index ac10f94..e9b6bc8 100644
--- a/av1/encoder/encodemb.h
+++ b/av1/encoder/encodemb.h
@@ -25,6 +25,7 @@
};
struct encode_b_args {
+ AV1_COMMON *cm;
MACROBLOCK *x;
struct optimize_ctx *ctx;
int8_t *skip;
@@ -41,38 +42,39 @@
AV1_XFORM_QUANT_LAST = 4
} AV1_XFORM_QUANT;
-void av1_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize);
+void av1_encode_sb(AV1_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE bsize);
#if CONFIG_SUPERTX
-void av1_encode_sb_supertx(MACROBLOCK *x, BLOCK_SIZE bsize);
+void av1_encode_sb_supertx(AV1_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE bsize);
#endif // CONFIG_SUPERTX
-void av1_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize);
-void av1_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
- int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
- AV1_XFORM_QUANT xform_quant_idx);
+void av1_encode_sby_pass1(AV1_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE bsize);
+void av1_xform_quant(const AV1_COMMON *cm, MACROBLOCK *x, int plane, int block,
+ int blk_row, int blk_col, BLOCK_SIZE plane_bsize,
+ TX_SIZE tx_size, AV1_XFORM_QUANT xform_quant_idx);
#if CONFIG_NEW_QUANT
-void av1_xform_quant_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
- int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
- int ctx);
+void av1_xform_quant_nuq(const AV1_COMMON *cm, MACROBLOCK *x, int plane,
+ int block, int blk_row, int blk_col,
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int ctx);
void av1_xform_quant_dc_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
int blk_col, BLOCK_SIZE plane_bsize,
TX_SIZE tx_size, int ctx);
-void av1_xform_quant_fp_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
- int blk_col, BLOCK_SIZE plane_bsize,
- TX_SIZE tx_size, int ctx);
+void av1_xform_quant_fp_nuq(const AV1_COMMON *cm, MACROBLOCK *x, int plane,
+ int block, int blk_row, int blk_col,
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int ctx);
void av1_xform_quant_dc_fp_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
int blk_col, BLOCK_SIZE plane_bsize,
TX_SIZE tx_size, int ctx);
#endif
-int av1_optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
- int ctx);
+int av1_optimize_b(const AV1_COMMON *cm, MACROBLOCK *mb, int plane, int block,
+ TX_SIZE tx_size, int ctx);
void av1_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
void av1_encode_block_intra(int plane, int block, int blk_row, int blk_col,
BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg);
-void av1_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane,
+void av1_encode_intra_block_plane(AV1_COMMON *cm, MACROBLOCK *x,
+ BLOCK_SIZE bsize, int plane,
int enable_optimize_b);
#ifdef __cplusplus
diff --git a/av1/encoder/encodemv.c b/av1/encoder/encodemv.c
index 7276fee..53dac12 100644
--- a/av1/encoder/encodemv.c
+++ b/av1/encoder/encodemv.c
@@ -45,9 +45,13 @@
// Sign
aom_write(w, sign, mvcomp->sign);
- // Class
+// Class
+#if CONFIG_DAALA_EC
+ aom_write_symbol(w, mv_class, mvcomp->class_cdf, MV_CLASSES);
+#else
av1_write_token(w, av1_mv_class_tree, mvcomp->classes,
&mv_class_encodings[mv_class]);
+#endif
// Integer bits
if (mv_class == MV_CLASS_0) {
@@ -58,10 +62,16 @@
for (i = 0; i < n; ++i) aom_write(w, (d >> i) & 1, mvcomp->bits[i]);
}
- // Fractional bits
+// Fractional bits
+#if CONFIG_DAALA_EC
+ aom_write_symbol(
+ w, fr, mv_class == MV_CLASS_0 ? mvcomp->class0_fp_cdf[d] : mvcomp->fp_cdf,
+ MV_FP_SIZE);
+#else
av1_write_token(w, av1_mv_fp_tree,
mv_class == MV_CLASS_0 ? mvcomp->class0_fp[d] : mvcomp->fp,
&mv_fp_encodings[fr]);
+#endif
// High precision bit
if (usehp)
@@ -203,6 +213,9 @@
update_mv(w, comp_counts->sign, &comp->sign, MV_UPDATE_PROB);
write_mv_update(av1_mv_class_tree, comp->classes, comp_counts->classes,
MV_CLASSES, w);
+#if CONFIG_DAALA_EC
+ av1_tree_to_cdf(av1_mv_class_tree, comp->classes, comp->class_cdf);
+#endif
write_mv_update(av1_mv_class0_tree, comp->class0, comp_counts->class0,
CLASS0_SIZE, w);
for (j = 0; j < MV_OFFSET_BITS; ++j)
@@ -210,12 +223,19 @@
}
for (i = 0; i < 2; ++i) {
- for (j = 0; j < CLASS0_SIZE; ++j)
+ for (j = 0; j < CLASS0_SIZE; ++j) {
write_mv_update(av1_mv_fp_tree, mvc->comps[i].class0_fp[j],
counts->comps[i].class0_fp[j], MV_FP_SIZE, w);
-
+#if CONFIG_DAALA_EC
+ av1_tree_to_cdf(av1_mv_fp_tree, mvc->comps[i].class0_fp[j],
+ mvc->comps[i].class0_fp_cdf[j]);
+#endif
+ }
write_mv_update(av1_mv_fp_tree, mvc->comps[i].fp, counts->comps[i].fp,
MV_FP_SIZE, w);
+#if CONFIG_DAALA_EC
+ av1_tree_to_cdf(av1_mv_fp_tree, mvc->comps[i].fp, mvc->comps[i].fp_cdf);
+#endif
}
if (usehp) {
@@ -239,7 +259,11 @@
#if CONFIG_REF_MV
(void)is_compound;
#endif
+#if CONFIG_DAALA_EC
+ aom_write_symbol(w, j, mvctx->joint_cdf, MV_JOINTS);
+#else
av1_write_token(w, av1_mv_joint_tree, mvctx->joints, &mv_joint_encodings[j]);
+#endif
if (mv_joint_vertical(j))
encode_mv_component(w, diff.row, &mvctx->comps[0], usehp);
diff --git a/av1/encoder/firstpass.c b/av1/encoder/firstpass.c
index 1e10531..466cb9c 100644
--- a/av1/encoder/firstpass.c
+++ b/av1/encoder/firstpass.c
@@ -590,7 +590,7 @@
xd->mi[0]->mbmi.mode = DC_PRED;
xd->mi[0]->mbmi.tx_size =
use_dc_pred ? (bsize >= BLOCK_16X16 ? TX_16X16 : TX_8X8) : TX_4X4;
- av1_encode_intra_block_plane(x, bsize, 0, 0);
+ av1_encode_intra_block_plane(cm, x, bsize, 0, 0);
this_error = aom_get_mb_ss(x->plane[0].src_diff);
// Keep a record of blocks that have almost no intra error residual
@@ -811,7 +811,7 @@
xd->mi[0]->mbmi.ref_frame[0] = LAST_FRAME;
xd->mi[0]->mbmi.ref_frame[1] = NONE;
av1_build_inter_predictors_sby(xd, mb_row << 1, mb_col << 1, bsize);
- av1_encode_sby_pass1(x, bsize);
+ av1_encode_sby_pass1(cm, x, bsize);
sum_mvr += mv.row;
sum_mvr_abs += abs(mv.row);
sum_mvc += mv.col;
diff --git a/av1/encoder/rdopt.c b/av1/encoder/rdopt.c
index 8ba6b7b..627352b 100644
--- a/av1/encoder/rdopt.c
+++ b/av1/encoder/rdopt.c
@@ -1122,6 +1122,7 @@
MACROBLOCK *const x = args->x;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+ const AV1_COMMON *cm = &args->cpi->common;
int64_t rd1, rd2, rd;
int rate;
int64_t dist;
@@ -1134,7 +1135,7 @@
if (!is_inter_block(mbmi)) {
struct encode_b_args b_args = {
- x, NULL, &mbmi->skip, args->t_above, args->t_left, 1
+ (AV1_COMMON *)cm, x, NULL, &mbmi->skip, args->t_above, args->t_left, 1
};
av1_encode_block_intra(plane, block, blk_row, blk_col, plane_bsize, tx_size,
&b_args);
@@ -1174,14 +1175,14 @@
} else {
// full forward transform and quantization
#if CONFIG_NEW_QUANT
- av1_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
+ av1_xform_quant_fp_nuq(cm, x, plane, block, blk_row, blk_col, plane_bsize,
tx_size, coeff_ctx);
#else
- av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
+ av1_xform_quant(cm, x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
AV1_XFORM_QUANT_FP);
#endif // CONFIG_NEW_QUANT
if (x->plane[plane].eobs[block])
- av1_optimize_b(x, plane, block, tx_size, coeff_ctx);
+ av1_optimize_b(cm, x, plane, block, tx_size, coeff_ctx);
dist_block(args->cpi, x, plane, block, blk_row, blk_col, tx_size, &dist,
&sse);
}
@@ -1219,6 +1220,7 @@
int64_t *distortion, int *skippable, int64_t *sse,
int64_t ref_best_rd, int plane, BLOCK_SIZE bsize,
TX_SIZE tx_size, int use_fast_coef_casting) {
+ const AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
const struct macroblockd_plane *const pd = &xd->plane[plane];
TX_TYPE tx_type;
@@ -1236,7 +1238,7 @@
tx_type = get_tx_type(pd->plane_type, xd, 0, tx_size);
args.scan_order =
- get_scan(tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
+ get_scan(cm, tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
av1_foreach_transformed_block_in_plane(xd, bsize, plane, block_rd_txfm,
&args);
@@ -1259,6 +1261,7 @@
int64_t *sse, int64_t ref_best_rd, int plane,
BLOCK_SIZE bsize, TX_SIZE tx_size,
int use_fast_coef_casting) {
+ const AV1_COMMON *cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
const struct macroblockd_plane *const pd = &xd->plane[plane];
struct rdcost_block_args args;
@@ -1280,7 +1283,7 @@
tx_type = get_tx_type(pd->plane_type, xd, 0, tx_size);
args.scan_order =
- get_scan(tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
+ get_scan(cm, tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
block_rd_txfm(plane, 0, 0, 0, get_plane_block_size(bsize, pd), tx_size,
&args);
@@ -1947,14 +1950,14 @@
dst_stride, xd->bd);
if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block, TX_4X4);
- const SCAN_ORDER *scan_order = get_scan(TX_4X4, tx_type, 0);
+ const SCAN_ORDER *scan_order = get_scan(cm, TX_4X4, tx_type, 0);
const int coeff_ctx =
combine_entropy_contexts(*(tempa + idx), *(templ + idy));
#if CONFIG_NEW_QUANT
- av1_xform_quant_fp_nuq(x, 0, block, row + idy, col + idx, BLOCK_8X8,
- TX_4X4, coeff_ctx);
+ av1_xform_quant_fp_nuq(cm, x, 0, block, row + idy, col + idx,
+ BLOCK_8X8, TX_4X4, coeff_ctx);
#else
- av1_xform_quant(x, 0, block, row + idy, col + idx, BLOCK_8X8,
+ av1_xform_quant(cm, x, 0, block, row + idy, col + idx, BLOCK_8X8,
TX_4X4, AV1_XFORM_QUANT_FP);
#endif // CONFIG_NEW_QUANT
ratey += av1_cost_coeffs(cm, x, 0, block, coeff_ctx, TX_4X4,
@@ -1972,17 +1975,17 @@
int64_t dist;
unsigned int tmp;
TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block, TX_4X4);
- const SCAN_ORDER *scan_order = get_scan(TX_4X4, tx_type, 0);
+ const SCAN_ORDER *scan_order = get_scan(cm, TX_4X4, tx_type, 0);
const int coeff_ctx =
combine_entropy_contexts(*(tempa + idx), *(templ + idy));
#if CONFIG_NEW_QUANT
- av1_xform_quant_fp_nuq(x, 0, block, row + idy, col + idx, BLOCK_8X8,
- TX_4X4, coeff_ctx);
+ av1_xform_quant_fp_nuq(cm, x, 0, block, row + idy, col + idx,
+ BLOCK_8X8, TX_4X4, coeff_ctx);
#else
- av1_xform_quant(x, 0, block, row + idy, col + idx, BLOCK_8X8,
+ av1_xform_quant(cm, x, 0, block, row + idy, col + idx, BLOCK_8X8,
TX_4X4, AV1_XFORM_QUANT_FP);
#endif // CONFIG_NEW_QUANT
- av1_optimize_b(x, 0, block, TX_4X4, coeff_ctx);
+ av1_optimize_b(cm, x, 0, block, TX_4X4, coeff_ctx);
ratey += av1_cost_coeffs(cm, x, 0, block, coeff_ctx, TX_4X4,
scan_order->scan, scan_order->neighbors,
cpi->sf.use_fast_coef_costing);
@@ -2067,15 +2070,15 @@
if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block, TX_4X4);
- const SCAN_ORDER *scan_order = get_scan(TX_4X4, tx_type, 0);
+ const SCAN_ORDER *scan_order = get_scan(cm, TX_4X4, tx_type, 0);
const int coeff_ctx =
combine_entropy_contexts(*(tempa + idx), *(templ + idy));
#if CONFIG_NEW_QUANT
- av1_xform_quant_fp_nuq(x, 0, block, row + idy, col + idx, BLOCK_8X8,
- TX_4X4, coeff_ctx);
+ av1_xform_quant_fp_nuq(cm, x, 0, block, row + idy, col + idx,
+ BLOCK_8X8, TX_4X4, coeff_ctx);
#else
- av1_xform_quant(x, 0, block, row + idy, col + idx, BLOCK_8X8, TX_4X4,
- AV1_XFORM_QUANT_B);
+ av1_xform_quant(cm, x, 0, block, row + idy, col + idx, BLOCK_8X8,
+ TX_4X4, AV1_XFORM_QUANT_B);
#endif // CONFIG_NEW_QUANT
ratey += av1_cost_coeffs(cm, x, 0, block, coeff_ctx, TX_4X4,
scan_order->scan, scan_order->neighbors,
@@ -2091,17 +2094,17 @@
int64_t dist;
unsigned int tmp;
TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block, TX_4X4);
- const SCAN_ORDER *scan_order = get_scan(TX_4X4, tx_type, 0);
+ const SCAN_ORDER *scan_order = get_scan(cm, TX_4X4, tx_type, 0);
const int coeff_ctx =
combine_entropy_contexts(*(tempa + idx), *(templ + idy));
#if CONFIG_NEW_QUANT
- av1_xform_quant_fp_nuq(x, 0, block, row + idy, col + idx, BLOCK_8X8,
- TX_4X4, coeff_ctx);
+ av1_xform_quant_fp_nuq(cm, x, 0, block, row + idy, col + idx,
+ BLOCK_8X8, TX_4X4, coeff_ctx);
#else
- av1_xform_quant(x, 0, block, row + idy, col + idx, BLOCK_8X8, TX_4X4,
- AV1_XFORM_QUANT_FP);
+ av1_xform_quant(cm, x, 0, block, row + idy, col + idx, BLOCK_8X8,
+ TX_4X4, AV1_XFORM_QUANT_FP);
#endif // CONFIG_NEW_QUANT
- av1_optimize_b(x, 0, block, TX_4X4, coeff_ctx);
+ av1_optimize_b(cm, x, 0, block, TX_4X4, coeff_ctx);
ratey += av1_cost_coeffs(cm, x, 0, block, coeff_ctx, TX_4X4,
scan_order->scan, scan_order->neighbors,
cpi->sf.use_fast_coef_costing);
@@ -2894,7 +2897,7 @@
PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
const SCAN_ORDER *const scan_order =
- get_scan(tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
+ get_scan(cm, tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
BLOCK_SIZE txm_bsize = txsize_to_bsize[tx_size];
int bh = 4 * num_4x4_blocks_wide_lookup[txm_bsize];
@@ -2923,14 +2926,14 @@
max_blocks_wide += xd->mb_to_right_edge >> (5 + pd->subsampling_x);
#if CONFIG_NEW_QUANT
- av1_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
+ av1_xform_quant_fp_nuq(cm, x, plane, block, blk_row, blk_col, plane_bsize,
tx_size, coeff_ctx);
#else
- av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
+ av1_xform_quant(cm, x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
AV1_XFORM_QUANT_FP);
#endif // CONFIG_NEW_QUANT
- av1_optimize_b(x, plane, block, tx_size, coeff_ctx);
+ av1_optimize_b(cm, x, plane, block, tx_size, coeff_ctx);
// TODO(any): Use dist_block to compute distortion
#if CONFIG_AOM_HIGHBITDEPTH
@@ -4361,7 +4364,7 @@
TX_SIZE tx_size = mi->mbmi.tx_size;
TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, i, tx_size);
- const SCAN_ORDER *scan_order = get_scan(tx_size, tx_type, 1);
+ const SCAN_ORDER *scan_order = get_scan(cm, tx_size, tx_type, 1);
const int num_4x4_w = num_4x4_blocks_wide_txsize_lookup[tx_size];
const int num_4x4_h = num_4x4_blocks_high_txsize_lookup[tx_size];
@@ -4409,14 +4412,14 @@
coeff_ctx = combine_entropy_contexts(*(ta + (k & 1)), *(tl + (k >> 1)));
#endif
#if CONFIG_NEW_QUANT
- av1_xform_quant_fp_nuq(x, 0, block, idy + (i >> 1), idx + (i & 0x01),
+ av1_xform_quant_fp_nuq(cm, x, 0, block, idy + (i >> 1), idx + (i & 0x01),
BLOCK_8X8, tx_size, coeff_ctx);
#else
- av1_xform_quant(x, 0, block, idy + (i >> 1), idx + (i & 0x01), BLOCK_8X8,
- tx_size, AV1_XFORM_QUANT_FP);
+ av1_xform_quant(cm, x, 0, block, idy + (i >> 1), idx + (i & 0x01),
+ BLOCK_8X8, tx_size, AV1_XFORM_QUANT_FP);
#endif // CONFIG_NEW_QUANT
if (xd->lossless[xd->mi[0]->mbmi.segment_id] == 0)
- av1_optimize_b(x, 0, block, tx_size, coeff_ctx);
+ av1_optimize_b(cm, x, 0, block, tx_size, coeff_ctx);
dist_block(cpi, x, 0, block, idy + (i >> 1), idx + (i & 0x1), tx_size,
&dist, &ssz);
thisdistortion += dist;
@@ -8069,7 +8072,7 @@
mbmi->filter_intra_mode_info.filter_intra_mode[1]);
}
distortion2 = distortion_y + distortion_uv;
- av1_encode_intra_block_plane(x, bsize, 0, 0);
+ av1_encode_intra_block_plane((AV1_COMMON *)cm, x, bsize, 0, 0);
#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
x->recon_variance = av1_high_get_sby_perpixel_variance(
@@ -8846,7 +8849,7 @@
if (this_mode != DC_PRED && this_mode != TM_PRED)
rate2 += intra_cost_penalty;
distortion2 = distortion_y + distortion_uv;
- av1_encode_intra_block_plane(x, bsize, 0, 1);
+ av1_encode_intra_block_plane((AV1_COMMON *)cm, x, bsize, 0, 1);
#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
x->recon_variance = av1_high_get_sby_perpixel_variance(
diff --git a/av1/encoder/tokenize.c b/av1/encoder/tokenize.c
index 67f4b5d..7707e8f 100644
--- a/av1/encoder/tokenize.c
+++ b/av1/encoder/tokenize.c
@@ -363,7 +363,7 @@
const PLANE_TYPE type = pd->plane_type;
const int ref = is_inter_block(mbmi);
const TX_TYPE tx_type = get_tx_type(type, xd, block, tx_size);
- const SCAN_ORDER *const scan_order = get_scan(tx_size, tx_type, ref);
+ const SCAN_ORDER *const scan_order = get_scan(cm, tx_size, tx_type, ref);
int pt = get_entropy_context(tx_size, pd->above_context + blk_col,
pd->left_context + blk_row);
int rate = av1_cost_coeffs(cm, x, plane, block, pt, tx_size, scan_order->scan,
@@ -453,6 +453,7 @@
BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) {
struct tokenize_b_args *const args = arg;
const AV1_COMP *cpi = args->cpi;
+ const AV1_COMMON *const cm = &cpi->common;
ThreadData *const td = args->td;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -475,7 +476,7 @@
const int16_t *scan, *nb;
const TX_TYPE tx_type = get_tx_type(type, xd, block, tx_size);
const SCAN_ORDER *const scan_order =
- get_scan(tx_size, tx_type, is_inter_block(mbmi));
+ get_scan(cm, tx_size, tx_type, is_inter_block(mbmi));
const int ref = is_inter_block(mbmi);
unsigned int(*const counts)[COEFF_CONTEXTS][ENTROPY_TOKENS] =
td->rd_counts.coef_counts[txsize_sqr_map[tx_size]][type][ref];
diff --git a/test/av1_inv_txfm_test.cc b/test/av1_inv_txfm_test.cc
index f637d51..ff358b6 100644
--- a/test/av1_inv_txfm_test.cc
+++ b/test/av1_inv_txfm_test.cc
@@ -184,8 +184,9 @@
// quantization with maximum allowed step sizes
test_coef_block1[0] = (output_ref_block[0] / 1336) * 1336;
for (int j = 1; j < last_nonzero_; ++j)
- test_coef_block1[get_scan(tx_size_, DCT_DCT, 0)->scan[j]] =
- (output_ref_block[j] / 1828) * 1828;
+ test_coef_block1[get_scan((const AV1_COMMON *)NULL, tx_size_, DCT_DCT,
+ 0)
+ ->scan[j]] = (output_ref_block[j] / 1828) * 1828;
}
ASM_REGISTER_STATE_CHECK(full_itxfm_(test_coef_block1, dst1, size));
@@ -235,7 +236,8 @@
max_energy_leftover = 0;
coef = 0;
}
- test_coef_block1[get_scan(tx_size_, DCT_DCT, 0)->scan[j]] = coef;
+ test_coef_block1[get_scan((const AV1_COMMON *)NULL, tx_size_, DCT_DCT, 0)
+ ->scan[j]] = coef;
}
memcpy(test_coef_block2, test_coef_block1,