Remove compile guards for CONFIG_EXT_INTER
This experiment has been adopted, so we can simplify the code
by dropping the associated preprocessor conditionals.
Change-Id: Ic077963f72e8cc2ae9872b58c8a0241988384110
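
For reviewers, a minimal sketch of the three patterns applied throughout this
patch (illustrative excerpts, not part of the diff below): guards that test
only CONFIG_EXT_INTER disappear, #else fallbacks for the old non-EXT_INTER
path are deleted, and compound guards keep only the remaining experiment flag.

/* Before: a guard testing only the adopted flag ... */
#if CONFIG_EXT_INTER
  write_compound_tools(cm, wb);
#endif  // CONFIG_EXT_INTER
/* ... after: the call is unconditional. */
  write_compound_tools(cm, wb);

/* Before: an obsolete fallback for the non-EXT_INTER path ... */
#if CONFIG_EXT_INTER
    if (mode == NEWMV || mode == NEW_NEWMV) {
#else
    if (mode == NEWMV) {
#endif  // CONFIG_EXT_INTER
/* ... after: only the EXT_INTER branch survives. */
    if (mode == NEWMV || mode == NEW_NEWMV) {

/* Before: a compound guard ... */
#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
/* ... after: the adopted flag drops out of the condition. */
#if CONFIG_COMPOUND_SINGLEREF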
diff --git a/av1/encoder/bitstream.c b/av1/encoder/bitstream.c
index 329f670..8140380 100644
--- a/av1/encoder/bitstream.c
+++ b/av1/encoder/bitstream.c
@@ -59,10 +59,10 @@
#define ENC_MISMATCH_DEBUG 0
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
static struct av1_token
inter_singleref_comp_mode_encodings[INTER_SINGLEREF_COMP_MODES];
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
// TODO(anybody) : remove this flag when PVQ supports pallete coding tool
#if !CONFIG_PVQ || CONFIG_EXT_INTRA
@@ -84,14 +84,12 @@
static struct av1_token intra_filter_encodings[INTRA_FILTERS];
#endif // CONFIG_INTRA_INTERP
#endif // CONFIG_EXT_INTRA
-#if CONFIG_EXT_INTER
#if CONFIG_INTERINTRA
static struct av1_token interintra_mode_encodings[INTERINTRA_MODES];
#endif
#if CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
static struct av1_token compound_type_encodings[COMPOUND_TYPES];
#endif // CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
-#endif // CONFIG_EXT_INTER
#if CONFIG_LOOP_RESTORATION
static struct av1_token switchable_restore_encodings[RESTORE_SWITCHABLE_TYPES];
static void loop_restoration_write_sb_coeffs(const AV1_COMMON *const cm,
@@ -132,7 +130,6 @@
#if CONFIG_EXT_INTRA && CONFIG_INTRA_INTERP
av1_tokens_from_tree(intra_filter_encodings, av1_intra_filter_tree);
#endif // CONFIG_EXT_INTRA && CONFIG_INTRA_INTERP
-#if CONFIG_EXT_INTER
#if CONFIG_INTERINTRA
av1_tokens_from_tree(interintra_mode_encodings, av1_interintra_mode_tree);
#endif // CONFIG_INTERINTRA
@@ -143,7 +140,6 @@
#if CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
av1_tokens_from_tree(compound_type_encodings, av1_compound_type_tree);
#endif // CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
-#endif // CONFIG_EXT_INTER
#if CONFIG_LOOP_RESTORATION
av1_tokens_from_tree(switchable_restore_encodings,
av1_switchable_restore_tree);
@@ -207,16 +203,12 @@
assert(mbmi->ref_mv_idx < 3);
-#if CONFIG_EXT_INTER
#if CONFIG_COMPOUND_SINGLEREF
if (mbmi->mode == NEWMV || mbmi->mode == NEW_NEWMV ||
mbmi->mode == SR_NEW_NEWMV) {
#else // !CONFIG_COMPOUND_SINGLEREF
if (mbmi->mode == NEWMV || mbmi->mode == NEW_NEWMV) {
#endif // CONFIG_COMPOUND_SINGLEREF
-#else // !CONFIG_EXT_INTER
- if (mbmi->mode == NEWMV) {
-#endif // CONFIG_EXT_INTER
int idx;
for (idx = 0; idx < 2; ++idx) {
if (mbmi_ext->ref_mv_count[ref_frame_type] > idx + 1) {
@@ -255,7 +247,6 @@
}
}
-#if CONFIG_EXT_INTER
static void write_inter_compound_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
aom_writer *w, PREDICTION_MODE mode,
const int16_t mode_ctx) {
@@ -278,7 +269,6 @@
inter_singleref_comp_cdf, INTER_SINGLEREF_COMP_MODES);
}
#endif // CONFIG_COMPOUND_SINGLEREF
-#endif // CONFIG_EXT_INTER
static void encode_unsigned_max(struct aom_write_bit_buffer *wb, int data,
int max) {
@@ -1832,16 +1822,15 @@
int16_t mode_ctx;
write_ref_frames(cm, xd, w);
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) {
// NOTE: Handle single ref comp mode
if (!is_compound)
aom_write(w, is_inter_singleref_comp_mode(mode),
av1_get_inter_mode_prob(cm, xd));
}
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
-#if CONFIG_EXT_INTER
#if CONFIG_COMPOUND_SINGLEREF
if (is_compound || is_inter_singleref_comp_mode(mode))
#else // !CONFIG_COMPOUND_SINGLEREF
@@ -1849,7 +1838,6 @@
#endif // CONFIG_COMPOUND_SINGLEREF
mode_ctx = mbmi_ext->compound_mode_context[mbmi->ref_frame[0]];
else
-#endif // CONFIG_EXT_INTER
mode_ctx = av1_mode_context_analyzer(mbmi_ext->mode_context,
mbmi->ref_frame, bsize, -1);
@@ -1857,7 +1845,6 @@
// If segment skip is not enabled code the mode.
if (!segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
if (bsize >= BLOCK_8X8 || unify_bsize) {
-#if CONFIG_EXT_INTER
if (is_inter_compound_mode(mode))
write_inter_compound_mode(cm, xd, w, mode, mode_ctx);
#if CONFIG_COMPOUND_SINGLEREF
@@ -1865,18 +1852,13 @@
write_inter_singleref_comp_mode(xd, w, mode, mode_ctx);
#endif // CONFIG_COMPOUND_SINGLEREF
else if (is_inter_singleref_mode(mode))
-#endif // CONFIG_EXT_INTER
write_inter_mode(w, mode, ec_ctx, mode_ctx);
-#if CONFIG_EXT_INTER
if (mode == NEWMV || mode == NEW_NEWMV ||
#if CONFIG_COMPOUND_SINGLEREF
mbmi->mode == SR_NEW_NEWMV ||
#endif // CONFIG_COMPOUND_SINGLEREF
have_nearmv_in_inter_mode(mode))
-#else // !CONFIG_EXT_INTER
- if (mode == NEARMV || mode == NEWMV)
-#endif // CONFIG_EXT_INTER
write_drl_idx(ec_ctx, mbmi, mbmi_ext, w);
else
assert(mbmi->ref_mv_idx == 0);
@@ -1899,23 +1881,15 @@
for (idx = 0; idx < 2; idx += num_4x4_w) {
const int j = idy * 2 + idx;
const PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
-#if CONFIG_EXT_INTER
if (!is_compound)
-#endif // CONFIG_EXT_INTER
mode_ctx = av1_mode_context_analyzer(mbmi_ext->mode_context,
mbmi->ref_frame, bsize, j);
-#if CONFIG_EXT_INTER
if (is_inter_compound_mode(b_mode))
write_inter_compound_mode(cm, xd, w, b_mode, mode_ctx);
else if (is_inter_singleref_mode(b_mode))
-#endif // CONFIG_EXT_INTER
write_inter_mode(w, b_mode, ec_ctx, mode_ctx);
-#if CONFIG_EXT_INTER
if (b_mode == NEWMV || b_mode == NEW_NEWMV) {
-#else
- if (b_mode == NEWMV) {
-#endif // CONFIG_EXT_INTER
for (ref = 0; ref < 1 + is_compound; ++ref) {
int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame);
int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
@@ -1923,16 +1897,9 @@
mbmi->ref_mv_idx);
nmv_context *nmvc = &ec_ctx->nmvc[nmv_ctx];
av1_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
-#if CONFIG_EXT_INTER
- &mi->bmi[j].ref_mv[ref].as_mv,
-#else
- &mi->bmi[j].pred_mv[ref].as_mv,
-#endif // CONFIG_EXT_INTER
- nmvc, allow_hp);
+ &mi->bmi[j].ref_mv[ref].as_mv, nmvc, allow_hp);
}
- }
-#if CONFIG_EXT_INTER
- else if (b_mode == NEAREST_NEWMV || b_mode == NEAR_NEWMV) {
+ } else if (b_mode == NEAREST_NEWMV || b_mode == NEAR_NEWMV) {
int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame);
int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
mbmi_ext->ref_mv_stack[rf_type], 1,
@@ -1949,15 +1916,10 @@
av1_encode_mv(cpi, w, &mi->bmi[j].as_mv[0].as_mv,
&mi->bmi[j].ref_mv[0].as_mv, nmvc, allow_hp);
}
-#endif // CONFIG_EXT_INTER
}
}
} else {
-#if CONFIG_EXT_INTER
if (mode == NEWMV || mode == NEW_NEWMV) {
-#else
- if (mode == NEWMV) {
-#endif // CONFIG_EXT_INTER
int_mv ref_mv;
for (ref = 0; ref < 1 + is_compound; ++ref) {
int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame);
@@ -1969,7 +1931,6 @@
av1_encode_mv(cpi, w, &mbmi->mv[ref].as_mv, &ref_mv.as_mv, nmvc,
allow_hp);
}
-#if CONFIG_EXT_INTER
} else if (mode == NEAREST_NEWMV || mode == NEAR_NEWMV) {
int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame);
int nmv_ctx =
@@ -2004,11 +1965,10 @@
av1_encode_mv(cpi, w, &mbmi->mv[1].as_mv, &ref_mv.as_mv, nmvc,
allow_hp);
#endif // CONFIG_COMPOUND_SINGLEREF
-#endif // CONFIG_EXT_INTER
}
}
-#if CONFIG_EXT_INTER && CONFIG_INTERINTRA
+#if CONFIG_INTERINTRA
if (cpi->common.reference_mode != COMPOUND_REFERENCE &&
#if CONFIG_SUPERTX
!supertx_enabled &&
@@ -2041,22 +2001,18 @@
}
}
}
-#endif // CONFIG_EXT_INTER && CONFIG_INTERINTRA
+#endif // CONFIG_INTERINTRA
#if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
#if CONFIG_SUPERTX
if (!supertx_enabled)
#endif // CONFIG_SUPERTX
-#if CONFIG_EXT_INTER
- if (mbmi->ref_frame[1] != INTRA_FRAME)
-#endif // CONFIG_EXT_INTER
- write_motion_mode(cm, xd, mi, w);
+ if (mbmi->ref_frame[1] != INTRA_FRAME) write_motion_mode(cm, xd, mi, w);
#if CONFIG_NCOBMC_ADAPT_WEIGHT
write_ncobmc_mode(xd, mi, w);
#endif
#endif // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
-#if CONFIG_EXT_INTER
if (
#if CONFIG_COMPOUND_SINGLEREF
is_inter_anyref_comp_mode(mbmi->mode) &&
@@ -2092,7 +2048,6 @@
}
#endif // CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
}
-#endif // CONFIG_EXT_INTER
#if CONFIG_DUAL_FILTER || CONFIG_WARPED_MOTION || CONFIG_GLOBAL_MOTION
write_mb_interp_filter(cpi, xd, w);
@@ -2423,10 +2378,10 @@
// up if they are scaled. has_subpel_mv_component is in turn needed by
// write_switchable_interp_filter, which is called by pack_inter_mode_mvs.
set_ref_ptrs(cm, xd, m->mbmi.ref_frame[0], m->mbmi.ref_frame[1]);
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
if (!has_second_ref(&m->mbmi) && is_inter_singleref_comp_mode(m->mbmi.mode))
xd->block_refs[1] = xd->block_refs[0];
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
#endif // CONFIG_DUAL_FILTER || CONFIG_WARPED_MOTION
#if ENC_MISMATCH_DEBUG
@@ -4376,7 +4331,6 @@
#endif // CONFIG_EXT_PARTITION
}
-#if CONFIG_EXT_INTER
static void write_compound_tools(const AV1_COMMON *cm,
struct aom_write_bit_buffer *wb) {
(void)cm;
@@ -4400,7 +4354,6 @@
}
#endif // CONFIG_WEDGE || CONFIG_COMPOUND_SEGMENT
}
-#endif // CONFIG_EXT_INTER
#if CONFIG_GLOBAL_MOTION
static void write_global_motion_params(WarpedMotionParams *params,
@@ -4805,9 +4758,7 @@
if (!use_hybrid_pred) aom_wb_write_bit(wb, use_compound_pred);
#endif // !CONFIG_REF_ADAPT
}
-#if CONFIG_EXT_INTER
write_compound_tools(cm, wb);
-#endif // CONFIG_EXT_INTER
#if CONFIG_EXT_TX
aom_wb_write_bit(wb, cm->reduced_tx_set_used);
@@ -5162,9 +5113,7 @@
if (!use_hybrid_pred) aom_wb_write_bit(wb, use_compound_pred);
#endif // !CONFIG_REF_ADAPT
}
-#if CONFIG_EXT_INTER
write_compound_tools(cm, wb);
-#endif // CONFIG_EXT_INTER
#if CONFIG_EXT_TX
aom_wb_write_bit(wb, cm->reduced_tx_set_used);
@@ -5225,7 +5174,6 @@
#if !CONFIG_NEW_MULTISYMBOL
update_inter_mode_probs(cm, header_bc, counts);
#endif
-#if CONFIG_EXT_INTER
#if CONFIG_INTERINTRA
if (cm->reference_mode != COMPOUND_REFERENCE &&
cm->allow_interintra_compound) {
@@ -5251,7 +5199,6 @@
#endif // CONFIG_WEDGE && CONFIG_NEW_MULTISYMBOL
}
#endif // CONFIG_INTERINTRA
-#endif // CONFIG_EXT_INTER
#if !CONFIG_NEW_MULTISYMBOL
for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
@@ -5309,11 +5256,11 @@
}
#endif // CONFIG_NEW_MULTISYMBOL
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
for (i = 0; i < COMP_INTER_MODE_CONTEXTS; i++)
av1_cond_prob_diff_update(header_bc, &fc->comp_inter_mode_prob[i],
counts->comp_inter_mode[i], probwt);
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
#if !CONFIG_NEW_MULTISYMBOL
av1_write_nmv_probs(cm, cm->allow_high_precision_mv, header_bc, counts->mv);
diff --git a/av1/encoder/block.h b/av1/encoder/block.h
index b28848d..3b1672a 100644
--- a/av1/encoder/block.h
+++ b/av1/encoder/block.h
@@ -109,9 +109,7 @@
#endif
uint8_t ref_mv_count[MODE_CTX_REF_FRAMES];
CANDIDATE_MV ref_mv_stack[MODE_CTX_REF_FRAMES][MAX_REF_MV_STACK_SIZE];
-#if CONFIG_EXT_INTER
int16_t compound_mode_context[MODE_CTX_REF_FRAMES];
-#endif // CONFIG_EXT_INTER
} MB_MODE_INFO_EXT;
typedef struct {
@@ -236,7 +234,6 @@
int refmv_mode_cost[REFMV_MODE_CONTEXTS][2];
int drl_mode_cost0[DRL_MODE_CONTEXTS][2];
-#if CONFIG_EXT_INTER
int inter_compound_mode_cost[INTER_MODE_CONTEXTS][INTER_COMPOUND_MODES];
int compound_type_cost[BLOCK_SIZES_ALL][COMPOUND_TYPES];
#if CONFIG_COMPOUND_SINGLEREF
@@ -246,7 +243,6 @@
#if CONFIG_INTERINTRA
int interintra_mode_cost[BLOCK_SIZE_GROUPS][INTERINTRA_MODES];
#endif // CONFIG_INTERINTRA
-#endif // CONFIG_EXT_INTER
#if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
int motion_mode_cost[BLOCK_SIZES_ALL][MOTION_MODES];
#if CONFIG_MOTION_VAR && CONFIG_WARPED_MOTION
diff --git a/av1/encoder/encodeframe.c b/av1/encoder/encodeframe.c
index d903d71..d4b3e4c 100644
--- a/av1/encoder/encodeframe.c
+++ b/av1/encoder/encodeframe.c
@@ -81,10 +81,8 @@
int mi_row, int mi_col, BLOCK_SIZE bsize,
PC_TREE *pc_tree);
static void predict_superblock(const AV1_COMP *const cpi, ThreadData *td,
-#if CONFIG_EXT_INTER
- int mi_row_ori, int mi_col_ori,
-#endif // CONFIG_EXT_INTER
- int mi_row_pred, int mi_col_pred, int plane,
+ int mi_row_ori, int mi_col_ori, int mi_row_pred,
+ int mi_col_pred, int plane,
BLOCK_SIZE bsize_pred, int b_sub8x8, int block);
static int check_supertx_sb(BLOCK_SIZE bsize, TX_SIZE supertx_size,
PC_TREE *pc_tree);
@@ -488,11 +486,7 @@
static void update_global_motion_used(PREDICTION_MODE mode, BLOCK_SIZE bsize,
const MB_MODE_INFO *mbmi,
RD_COUNTS *rdc) {
- if (mode == ZEROMV
-#if CONFIG_EXT_INTER
- || mode == ZERO_ZEROMV
-#endif
- ) {
+ if (mode == ZEROMV || mode == ZERO_ZEROMV) {
const int num_4x4s =
num_4x4_blocks_wide_lookup[bsize] * num_4x4_blocks_high_lookup[bsize];
int ref;
@@ -524,7 +518,6 @@
MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
CANDIDATE_MV *const curr_ref_mv_stack = mbmi_ext->ref_mv_stack[rf_type];
-#if CONFIG_EXT_INTER
if (has_second_ref(mbmi)) {
// Special case: NEAR_NEWMV and NEW_NEARMV modes use 1 + mbmi->ref_mv_idx
// (like NEARMV) instead
@@ -560,7 +553,6 @@
}
#endif // CONFIG_COMPOUND_SINGLEREF
} else {
-#endif // CONFIG_EXT_INTER
if (mbmi->mode == NEWMV) {
int i;
for (i = 0; i < 1 + has_second_ref(mbmi); ++i) {
@@ -572,9 +564,7 @@
mi_pred_mv[i] = this_mv;
}
}
-#if CONFIG_EXT_INTER
}
-#endif // CONFIG_EXT_INTER
}
static void update_state(const AV1_COMP *const cpi, ThreadData *td,
@@ -1771,7 +1761,6 @@
#endif // CONFIG_EXT_REFS
}
-#if CONFIG_EXT_INTER
#if CONFIG_COMPOUND_SINGLEREF
if (!has_second_ref(mbmi))
counts->comp_inter_mode[av1_get_inter_mode_context(xd)]
@@ -1808,7 +1797,6 @@
}
}
#endif // CONFIG_INTERINTRA
-#endif // CONFIG_EXT_INTER
#if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
#if CONFIG_WARPED_MOTION
@@ -1825,9 +1813,7 @@
#if CONFIG_SUPERTX
if (!supertx_enabled)
#endif // CONFIG_SUPERTX
-#if CONFIG_EXT_INTER
if (mbmi->ref_frame[1] != INTRA_FRAME)
-#endif // CONFIG_EXT_INTER
#if CONFIG_MOTION_VAR && CONFIG_WARPED_MOTION
{
if (motion_allowed == WARPED_CAUSAL) {
@@ -1854,11 +1840,11 @@
#endif // CONFIG_NCOBMC_ADAPT_WEIGHT
}
#else
- if (motion_allowed > SIMPLE_TRANSLATION) {
- counts->motion_mode[mbmi->sb_type][mbmi->motion_mode]++;
- update_cdf(fc->motion_mode_cdf[mbmi->sb_type], mbmi->motion_mode,
- MOTION_MODES);
- }
+ if (motion_allowed > SIMPLE_TRANSLATION) {
+ counts->motion_mode[mbmi->sb_type][mbmi->motion_mode]++;
+ update_cdf(fc->motion_mode_cdf[mbmi->sb_type], mbmi->motion_mode,
+ MOTION_MODES);
+ }
#endif // CONFIG_MOTION_VAR && CONFIG_WARPED_MOTION
#if CONFIG_NCOBMC_ADAPT_WEIGHT
@@ -1878,7 +1864,6 @@
#endif // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
-#if CONFIG_EXT_INTER
if (
#if CONFIG_COMPOUND_SINGLEREF
is_inter_anyref_comp_mode(mbmi->mode)
@@ -1903,7 +1888,6 @@
#endif
#endif // CONFIG_WEDGE || CONFIG_COMPOUND_SEGMENT
}
-#endif // CONFIG_EXT_INTER
}
}
@@ -1911,7 +1895,6 @@
!segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
int16_t mode_ctx;
const PREDICTION_MODE mode = mbmi->mode;
-#if CONFIG_EXT_INTER
if (has_second_ref(mbmi)) {
mode_ctx = mbmi_ext->compound_mode_context[mbmi->ref_frame[0]];
++counts->inter_compound_mode[mode_ctx][INTER_COMPOUND_OFFSET(mode)];
@@ -1924,21 +1907,16 @@
[INTER_SINGLEREF_COMP_OFFSET(mode)];
#endif // CONFIG_COMPOUND_SINGLEREF
} else {
-#endif // CONFIG_EXT_INTER
mode_ctx = av1_mode_context_analyzer(mbmi_ext->mode_context,
mbmi->ref_frame, bsize, -1);
update_inter_mode_stats(counts, mode, mode_ctx);
-#if CONFIG_EXT_INTER
}
-#endif // CONFIG_EXT_INTER
int mode_allowed = (mbmi->mode == NEWMV);
-#if CONFIG_EXT_INTER
mode_allowed |= (mbmi->mode == NEW_NEWMV);
#if CONFIG_COMPOUND_SINGLEREF
mode_allowed |= (mbmi->mode == SR_NEW_NEWMV);
#endif // CONFIG_COMPOUND_SINGLEREF
-#endif // CONFIG_EXT_INTER
if (mode_allowed) {
uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
int idx;
@@ -1954,12 +1932,7 @@
}
}
-#if CONFIG_EXT_INTER
- if (have_nearmv_in_inter_mode(mbmi->mode))
-#else
- if (mbmi->mode == NEARMV)
-#endif
- {
+ if (have_nearmv_in_inter_mode(mbmi->mode)) {
uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
int idx;
@@ -5565,7 +5538,6 @@
#endif
}
-#if CONFIG_EXT_INTER
static void make_consistent_compound_tools(AV1_COMMON *cm) {
(void)cm;
#if CONFIG_INTERINTRA
@@ -5581,7 +5553,6 @@
cm->allow_masked_compound = 0;
#endif // CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
}
-#endif // CONFIG_EXT_INTER
void av1_encode_frame(AV1_COMP *cpi) {
AV1_COMMON *const cm = &cpi->common;
@@ -5676,9 +5647,7 @@
cm->interp_filter = SWITCHABLE;
#endif
-#if CONFIG_EXT_INTER
make_consistent_compound_tools(cm);
-#endif // CONFIG_EXT_INTER
rdc->single_ref_used_flag = 0;
rdc->compound_ref_used_flag = 0;
@@ -5701,9 +5670,7 @@
#endif // !CONFIG_REF_ADAPT
}
}
-#if CONFIG_EXT_INTER
make_consistent_compound_tools(cm);
-#endif // CONFIG_EXT_INTER
#if CONFIG_VAR_TX
#if CONFIG_RECT_TX_EXT
@@ -5886,9 +5853,7 @@
}
#endif
} else {
-#if CONFIG_EXT_INTER
make_consistent_compound_tools(cm);
-#endif // CONFIG_EXT_INTER
encode_frame_internal(cpi);
}
}
@@ -6310,7 +6275,7 @@
av1_setup_pre_planes(xd, ref, cfg, mi_row, mi_col,
&xd->block_refs[ref]->sf);
}
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
// Single ref compound mode
if (!is_compound && is_inter_singleref_comp_mode(mbmi->mode)) {
xd->block_refs[1] = xd->block_refs[0];
@@ -6322,7 +6287,7 @@
#endif // !CONFIG_INTRABC
av1_setup_pre_planes(xd, 1, cfg, mi_row, mi_col, &xd->block_refs[1]->sf);
}
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
av1_build_inter_predictors_sb(cm, xd, mi_row, mi_col, NULL, block_size);
@@ -6503,9 +6468,7 @@
#if CONFIG_SUPERTX
static int check_intra_b(PICK_MODE_CONTEXT *ctx) {
if (!is_inter_mode((&ctx->mic)->mbmi.mode)) return 1;
-#if CONFIG_EXT_INTER
if (ctx->mic.mbmi.ref_frame[1] == INTRA_FRAME) return 1;
-#endif // CONFIG_EXT_INTER
return 0;
}
@@ -6638,10 +6601,8 @@
}
static void predict_superblock(const AV1_COMP *const cpi, ThreadData *td,
-#if CONFIG_EXT_INTER
- int mi_row_ori, int mi_col_ori,
-#endif // CONFIG_EXT_INTER
- int mi_row_pred, int mi_col_pred, int plane,
+ int mi_row_ori, int mi_col_ori, int mi_row_pred,
+ int mi_col_pred, int plane,
BLOCK_SIZE bsize_pred, int b_sub8x8, int block) {
// Used in supertx
// (mi_row_ori, mi_col_ori): location for mv
@@ -6663,7 +6624,7 @@
&xd->block_refs[ref]->sf);
}
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
// Single ref compound mode
if (!is_compound && is_inter_singleref_comp_mode(mbmi->mode)) {
xd->block_refs[1] = xd->block_refs[0];
@@ -6671,20 +6632,14 @@
av1_setup_pre_planes(xd, 1, cfg, mi_row_pred, mi_col_pred,
&xd->block_refs[1]->sf);
}
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
if (!b_sub8x8)
- av1_build_inter_predictor_sb_extend(cm, xd,
-#if CONFIG_EXT_INTER
- mi_row_ori, mi_col_ori,
-#endif // CONFIG_EXT_INTER
+ av1_build_inter_predictor_sb_extend(cm, xd, mi_row_ori, mi_col_ori,
mi_row_pred, mi_col_pred, plane,
bsize_pred);
else
- av1_build_inter_predictor_sb_sub8x8_extend(cm, xd,
-#if CONFIG_EXT_INTER
- mi_row_ori, mi_col_ori,
-#endif // CONFIG_EXT_INTER
+ av1_build_inter_predictor_sb_sub8x8_extend(cm, xd, mi_row_ori, mi_col_ori,
mi_row_pred, mi_col_pred, plane,
bsize_pred, block);
}
@@ -6725,12 +6680,8 @@
dst_buf + (r >> xd->plane[plane].subsampling_y) * dst_stride +
(c >> xd->plane[plane].subsampling_x);
- predict_superblock(cpi, td,
-#if CONFIG_EXT_INTER
- mi_row_ori, mi_col_ori,
-#endif // CONFIG_EXT_INTER
- mi_row_pred, mi_col_pred, plane, bsize_pred, b_sub8x8,
- block);
+ predict_superblock(cpi, td, mi_row_ori, mi_col_ori, mi_row_pred, mi_col_pred,
+ plane, bsize_pred, b_sub8x8, block);
if (!dry_run && (plane == 0) && (block == 0 || !b_sub8x8))
update_stats(&cpi->common, td, mi_row_pred, mi_col_pred, 1);
diff --git a/av1/encoder/encodemv.c b/av1/encoder/encodemv.c
index fa14964..da3d0d5 100644
--- a/av1/encoder/encodemv.c
+++ b/av1/encoder/encodemv.c
@@ -233,7 +233,6 @@
build_nmv_component_cost_table(mvcost[1], &ctx->comps[1], precision);
}
-#if CONFIG_EXT_INTER
static void inc_mvs(const MB_MODE_INFO *mbmi, const MB_MODE_INFO_EXT *mbmi_ext,
const int_mv mvs[2], const int_mv pred_mvs[2],
nmv_context_counts *nmv_counts
@@ -372,25 +371,6 @@
#endif
}
}
-#else // !CONFIG_EXT_INTER
-static void inc_mvs(const MB_MODE_INFO *mbmi, const MB_MODE_INFO_EXT *mbmi_ext,
- const int_mv mvs[2], const int_mv pred_mvs[2],
- nmv_context_counts *nmv_counts) {
- int i;
-
- for (i = 0; i < 1 + has_second_ref(mbmi); ++i) {
- int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame);
- int nmv_ctx =
- av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
- mbmi_ext->ref_mv_stack[rf_type], i, mbmi->ref_mv_idx);
- nmv_context_counts *counts = &nmv_counts[nmv_ctx];
- const MV *ref = &pred_mvs[i].as_mv;
- const MV diff = { mvs[i].as_mv.row - ref->row,
- mvs[i].as_mv.col - ref->col };
- av1_inc_mv(&diff, counts, 1);
- }
-}
-#endif // CONFIG_EXT_INTER
void av1_update_mv_count(ThreadData *td) {
const MACROBLOCKD *xd = &td->mb.e_mbd;
@@ -418,7 +398,6 @@
for (idx = 0; idx < 2; idx += num_4x4_w) {
const int i = idy * 2 + idx;
-#if CONFIG_EXT_INTER
if (have_newmv_in_inter_mode(mi->bmi[i].as_mode))
#if CONFIG_AMVR
@@ -427,19 +406,10 @@
#else
inc_mvs_sub8x8(mi, i, mi->bmi[i].as_mv, mbmi_ext, td->counts->mv);
#endif
-#else
- if (mi->bmi[i].as_mode == NEWMV)
- inc_mvs(mbmi, mbmi_ext, mi->bmi[i].as_mv, mi->bmi[i].pred_mv,
- td->counts->mv);
-#endif // CONFIG_EXT_INTER
}
}
} else {
-#if CONFIG_EXT_INTER
if (have_newmv_in_inter_mode(mbmi->mode))
-#else
- if (mbmi->mode == NEWMV)
-#endif // CONFIG_EXT_INTER
#if CONFIG_AMVR
inc_mvs(mbmi, mbmi_ext, mbmi->mv, mbmi->pred_mv, td->counts->mv,
diff --git a/av1/encoder/encoder.c b/av1/encoder/encoder.c
index 866194c..e9eda84 100644
--- a/av1/encoder/encoder.c
+++ b/av1/encoder/encoder.c
@@ -485,9 +485,7 @@
#endif
av1_entropy_mv_init();
av1_encode_token_init();
-#if CONFIG_EXT_INTER
av1_init_wedge_masks();
-#endif
init_done = 1;
}
}
@@ -1378,7 +1376,6 @@
#endif // CONFIG_EXT_PARTITION
#endif // CONFIG_EXT_PARTITION_TYPES
-#if CONFIG_EXT_INTER
#define HIGHBD_MBFP(BT, MCSDF, MCSVF) \
cpi->fn_ptr[BT].msdf = MCSDF; \
cpi->fn_ptr[BT].msvf = MCSVF;
@@ -1439,7 +1436,6 @@
MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad128x32)
#endif // CONFIG_EXT_PARTITION
#endif // CONFIG_EXT_PARTITION_TYPES
-#endif // CONFIG_EXT_INTER
#if CONFIG_MOTION_VAR
#define HIGHBD_OBFP(BT, OSDF, OVF, OSVF) \
@@ -1666,7 +1662,6 @@
aom_highbd_sad64x128x4d_bits8)
#endif // CONFIG_EXT_PARTITION
-#if CONFIG_EXT_INTER
#if CONFIG_EXT_PARTITION
HIGHBD_MBFP(BLOCK_128X128, aom_highbd_masked_sad128x128_bits8,
aom_highbd_8_masked_sub_pixel_variance128x128)
@@ -1728,7 +1723,6 @@
HIGHBD_MBFP(BLOCK_4X16, aom_highbd_masked_sad4x16_bits8,
aom_highbd_8_masked_sub_pixel_variance4x16)
#endif
-#endif // CONFIG_EXT_INTER
#if CONFIG_MOTION_VAR
#if CONFIG_EXT_PARTITION
HIGHBD_OBFP(BLOCK_128X128, aom_highbd_obmc_sad128x128_bits8,
@@ -1989,7 +1983,6 @@
aom_highbd_sad64x128x4d_bits10)
#endif // CONFIG_EXT_PARTITION
-#if CONFIG_EXT_INTER
#if CONFIG_EXT_PARTITION
HIGHBD_MBFP(BLOCK_128X128, aom_highbd_masked_sad128x128_bits10,
aom_highbd_10_masked_sub_pixel_variance128x128)
@@ -2051,7 +2044,6 @@
HIGHBD_MBFP(BLOCK_4X16, aom_highbd_masked_sad4x16_bits10,
aom_highbd_10_masked_sub_pixel_variance4x16)
#endif
-#endif // CONFIG_EXT_INTER
#if CONFIG_MOTION_VAR
#if CONFIG_EXT_PARTITION
HIGHBD_OBFP(BLOCK_128X128, aom_highbd_obmc_sad128x128_bits10,
@@ -2312,7 +2304,6 @@
aom_highbd_sad64x128x4d_bits12)
#endif // CONFIG_EXT_PARTITION
-#if CONFIG_EXT_INTER
#if CONFIG_EXT_PARTITION
HIGHBD_MBFP(BLOCK_128X128, aom_highbd_masked_sad128x128_bits12,
aom_highbd_12_masked_sub_pixel_variance128x128)
@@ -2374,7 +2365,6 @@
HIGHBD_MBFP(BLOCK_4X16, aom_highbd_masked_sad4x16_bits12,
aom_highbd_12_masked_sub_pixel_variance4x16)
#endif
-#endif // CONFIG_EXT_INTER
#if CONFIG_MOTION_VAR
#if CONFIG_EXT_PARTITION
@@ -2493,7 +2483,6 @@
aom_calloc(cm->mi_rows * cm->mi_cols, 1));
}
-#if CONFIG_EXT_INTER
void set_compound_tools(AV1_COMMON *cm) {
(void)cm;
#if CONFIG_INTERINTRA
@@ -2503,7 +2492,6 @@
cm->allow_masked_compound = 1;
#endif // CONFIG_WEDGE || CONFIG_COMPOUND_SEGMENT
}
-#endif // CONFIG_EXT_INTER
void av1_change_config(struct AV1_COMP *cpi, const AV1EncoderConfig *oxcf) {
AV1_COMMON *const cm = &cpi->common;
@@ -2555,9 +2543,7 @@
CHECK_MEM_ERROR(cm, x->palette_buffer,
aom_memalign(16, sizeof(*x->palette_buffer)));
}
-#if CONFIG_EXT_INTER
set_compound_tools(cm);
-#endif // CONFIG_EXT_INTER
av1_reset_segment_features(cm);
#if CONFIG_AMVR
set_high_precision_mv(cpi, 0, 0);
@@ -3038,7 +3024,6 @@
#endif // CONFIG_EXT_PARTITION_TYPES
#endif // CONFIG_MOTION_VAR
-#if CONFIG_EXT_INTER
#define MBFP(BT, MCSDF, MCSVF) \
cpi->fn_ptr[BT].msdf = MCSDF; \
cpi->fn_ptr[BT].msvf = MCSVF;
@@ -3082,7 +3067,6 @@
MBFP(BLOCK_128X32, aom_masked_sad128x32, aom_masked_sub_pixel_variance128x32)
#endif // CONFIG_EXT_PARTITION
#endif // CONFIG_EXT_PARTITION_TYPES
-#endif // CONFIG_EXT_INTER
#if CONFIG_HIGHBITDEPTH
highbd_set_var_fns(cpi);
@@ -4187,9 +4171,7 @@
av1_set_rd_speed_thresholds(cpi);
av1_set_rd_speed_thresholds_sub8x8(cpi);
cpi->common.interp_filter = cpi->sf.default_interp_filter;
-#if CONFIG_EXT_INTER
if (!frame_is_intra_only(&cpi->common)) set_compound_tools(&cpi->common);
-#endif // CONFIG_EXT_INTER
}
static void set_size_dependent_vars(AV1_COMP *cpi, int *q, int *bottom_index,
diff --git a/av1/encoder/mbgraph.c b/av1/encoder/mbgraph.c
index e271304..7d2510a 100644
--- a/av1/encoder/mbgraph.c
+++ b/av1/encoder/mbgraph.c
@@ -59,27 +59,20 @@
#endif
int distortion;
unsigned int sse;
- cpi->find_fractional_mv_step(
- x, ref_mv, cpi->common.allow_high_precision_mv, x->errorperbit,
- &v_fn_ptr, 0, mv_sf->subpel_iters_per_step,
- cond_cost_list(cpi, cost_list), NULL, NULL, &distortion, &sse, NULL,
-#if CONFIG_EXT_INTER
- NULL, 0, 0,
-#endif
- 0, 0, 0);
+ cpi->find_fractional_mv_step(x, ref_mv, cpi->common.allow_high_precision_mv,
+ x->errorperbit, &v_fn_ptr, 0,
+ mv_sf->subpel_iters_per_step,
+ cond_cost_list(cpi, cost_list), NULL, NULL,
+ &distortion, &sse, NULL, NULL, 0, 0, 0, 0, 0);
}
-#if CONFIG_EXT_INTER
if (has_second_ref(&xd->mi[0]->mbmi))
xd->mi[0]->mbmi.mode = NEW_NEWMV;
else
-#endif // CONFIG_EXT_INTER
xd->mi[0]->mbmi.mode = NEWMV;
xd->mi[0]->mbmi.mv[0] = x->best_mv;
-#if CONFIG_EXT_INTER
xd->mi[0]->mbmi.ref_frame[1] = NONE_FRAME;
-#endif // CONFIG_EXT_INTER
av1_build_inter_predictors_sby(&cpi->common, xd, mb_row, mb_col, NULL,
BLOCK_16X16);
diff --git a/av1/encoder/mcomp.c b/av1/encoder/mcomp.c
index 492a996..9f649a6 100644
--- a/av1/encoder/mcomp.c
+++ b/av1/encoder/mcomp.c
@@ -176,7 +176,6 @@
}
/* checks if (r, c) has better score than previous best */
-#if CONFIG_EXT_INTER
#define CHECK_BETTER(v, r, c) \
if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
MV this_mv = { r, c }; \
@@ -202,34 +201,10 @@
} else { \
v = INT_MAX; \
}
-#else
-#define CHECK_BETTER(v, r, c) \
- if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
- MV this_mv = { r, c }; \
- v = mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit); \
- if (second_pred == NULL) \
- thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), \
- src_address, src_stride, &sse); \
- else \
- thismse = vfp->svaf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), \
- src_address, src_stride, &sse, second_pred); \
- v += thismse; \
- if (v < besterr) { \
- besterr = v; \
- br = r; \
- bc = c; \
- *distortion = thismse; \
- *sse1 = sse; \
- } \
- } else { \
- v = INT_MAX; \
- }
-#endif // CONFIG_EXT_INTER
#define CHECK_BETTER0(v, r, c) CHECK_BETTER(v, r, c)
/* checks if (r, c) has better score than previous best */
-#if CONFIG_EXT_INTER
#define CHECK_BETTER1(v, r, c) \
if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
MV this_mv = { r, c }; \
@@ -249,26 +224,6 @@
} else { \
v = INT_MAX; \
}
-#else
-#define CHECK_BETTER1(v, r, c) \
- if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
- MV this_mv = { r, c }; \
- thismse = upsampled_pref_error(xd, vfp, src_address, src_stride, \
- pre(y, y_stride, r, c), y_stride, sp(c), \
- sp(r), second_pred, w, h, &sse); \
- v = mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit); \
- v += thismse; \
- if (v < besterr) { \
- besterr = v; \
- br = r; \
- bc = c; \
- *distortion = thismse; \
- *sse1 = sse; \
- } \
- } else { \
- v = INT_MAX; \
- }
-#endif // CONFIG_EXT_INTER
#define FIRST_LEVEL_CHECKS \
{ \
@@ -372,35 +327,28 @@
const MACROBLOCKD *xd, const MV *bestmv, const MV *ref_mv,
int error_per_bit, const aom_variance_fn_ptr_t *vfp,
const uint8_t *const src, const int src_stride, const uint8_t *const y,
- int y_stride, const uint8_t *second_pred,
-#if CONFIG_EXT_INTER
- const uint8_t *mask, int mask_stride, int invert_mask,
-#endif
- int w, int h, int offset, int *mvjcost, int *mvcost[2], unsigned int *sse1,
- int *distortion) {
+ int y_stride, const uint8_t *second_pred, const uint8_t *mask,
+ int mask_stride, int invert_mask, int w, int h, int offset, int *mvjcost,
+ int *mvcost[2], unsigned int *sse1, int *distortion) {
unsigned int besterr;
#if CONFIG_HIGHBITDEPTH
if (second_pred != NULL) {
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
DECLARE_ALIGNED(16, uint16_t, comp_pred16[MAX_SB_SQUARE]);
-#if CONFIG_EXT_INTER
if (mask)
aom_highbd_comp_mask_pred(comp_pred16, second_pred, w, h, y + offset,
y_stride, mask, mask_stride, invert_mask);
else
-#endif
aom_highbd_comp_avg_pred(comp_pred16, second_pred, w, h, y + offset,
y_stride);
besterr =
vfp->vf(CONVERT_TO_BYTEPTR(comp_pred16), w, src, src_stride, sse1);
} else {
DECLARE_ALIGNED(16, uint8_t, comp_pred[MAX_SB_SQUARE]);
-#if CONFIG_EXT_INTER
if (mask)
aom_comp_mask_pred(comp_pred, second_pred, w, h, y + offset, y_stride,
mask, mask_stride, invert_mask);
else
-#endif
aom_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride);
besterr = vfp->vf(comp_pred, w, src, src_stride, sse1);
}
@@ -413,12 +361,10 @@
(void)xd;
if (second_pred != NULL) {
DECLARE_ALIGNED(16, uint8_t, comp_pred[MAX_SB_SQUARE]);
-#if CONFIG_EXT_INTER
if (mask)
aom_comp_mask_pred(comp_pred, second_pred, w, h, y + offset, y_stride,
mask, mask_stride, invert_mask);
else
-#endif
aom_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride);
besterr = vfp->vf(comp_pred, w, src, src_stride, sse1);
} else {
@@ -458,19 +404,13 @@
MACROBLOCK *x, const MV *ref_mv, int allow_hp, int error_per_bit,
const aom_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
int *cost_list, int *mvjcost, int *mvcost[2], int *distortion,
- unsigned int *sse1, const uint8_t *second_pred,
-#if CONFIG_EXT_INTER
- const uint8_t *mask, int mask_stride, int invert_mask,
-#endif
- int w, int h, int use_upsampled_ref) {
+ unsigned int *sse1, const uint8_t *second_pred, const uint8_t *mask,
+ int mask_stride, int invert_mask, int w, int h, int use_upsampled_ref) {
SETUP_SUBPEL_SEARCH;
- besterr =
- setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, src_address,
- src_stride, y, y_stride, second_pred,
-#if CONFIG_EXT_INTER
- mask, mask_stride, invert_mask,
-#endif
- w, h, offset, mvjcost, mvcost, sse1, distortion);
+ besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp,
+ src_address, src_stride, y, y_stride,
+ second_pred, mask, mask_stride, invert_mask, w,
+ h, offset, mvjcost, mvcost, sse1, distortion);
(void)halfiters;
(void)quarteriters;
(void)eighthiters;
@@ -531,21 +471,15 @@
MACROBLOCK *x, const MV *ref_mv, int allow_hp, int error_per_bit,
const aom_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
int *cost_list, int *mvjcost, int *mvcost[2], int *distortion,
- unsigned int *sse1, const uint8_t *second_pred,
-#if CONFIG_EXT_INTER
- const uint8_t *mask, int mask_stride, int invert_mask,
-#endif
- int w, int h, int use_upsampled_ref) {
+ unsigned int *sse1, const uint8_t *second_pred, const uint8_t *mask,
+ int mask_stride, int invert_mask, int w, int h, int use_upsampled_ref) {
SETUP_SUBPEL_SEARCH;
(void)use_upsampled_ref;
- besterr =
- setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, src_address,
- src_stride, y, y_stride, second_pred,
-#if CONFIG_EXT_INTER
- mask, mask_stride, invert_mask,
-#endif
- w, h, offset, mvjcost, mvcost, sse1, distortion);
+ besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp,
+ src_address, src_stride, y, y_stride,
+ second_pred, mask, mask_stride, invert_mask, w,
+ h, offset, mvjcost, mvcost, sse1, distortion);
if (cost_list && cost_list[0] != INT_MAX && cost_list[1] != INT_MAX &&
cost_list[2] != INT_MAX && cost_list[3] != INT_MAX &&
cost_list[4] != INT_MAX && is_cost_list_wellbehaved(cost_list)) {
@@ -600,21 +534,15 @@
MACROBLOCK *x, const MV *ref_mv, int allow_hp, int error_per_bit,
const aom_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
int *cost_list, int *mvjcost, int *mvcost[2], int *distortion,
- unsigned int *sse1, const uint8_t *second_pred,
-#if CONFIG_EXT_INTER
- const uint8_t *mask, int mask_stride, int invert_mask,
-#endif
- int w, int h, int use_upsampled_ref) {
+ unsigned int *sse1, const uint8_t *second_pred, const uint8_t *mask,
+ int mask_stride, int invert_mask, int w, int h, int use_upsampled_ref) {
SETUP_SUBPEL_SEARCH;
(void)use_upsampled_ref;
- besterr =
- setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, src_address,
- src_stride, y, y_stride, second_pred,
-#if CONFIG_EXT_INTER
- mask, mask_stride, invert_mask,
-#endif
- w, h, offset, mvjcost, mvcost, sse1, distortion);
+ besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp,
+ src_address, src_stride, y, y_stride,
+ second_pred, mask, mask_stride, invert_mask, w,
+ h, offset, mvjcost, mvcost, sse1, distortion);
if (cost_list && cost_list[0] != INT_MAX && cost_list[1] != INT_MAX &&
cost_list[2] != INT_MAX && cost_list[3] != INT_MAX &&
cost_list[4] != INT_MAX) {
@@ -696,26 +624,24 @@
};
/* clang-format on */
-static int upsampled_pref_error(
- const MACROBLOCKD *xd, const aom_variance_fn_ptr_t *vfp,
- const uint8_t *const src, const int src_stride, const uint8_t *const y,
- int y_stride, int subpel_x_q3, int subpel_y_q3, const uint8_t *second_pred,
-#if CONFIG_EXT_INTER
- const uint8_t *mask, int mask_stride, int invert_mask,
-#endif
- int w, int h, unsigned int *sse) {
+static int upsampled_pref_error(const MACROBLOCKD *xd,
+ const aom_variance_fn_ptr_t *vfp,
+ const uint8_t *const src, const int src_stride,
+ const uint8_t *const y, int y_stride,
+ int subpel_x_q3, int subpel_y_q3,
+ const uint8_t *second_pred, const uint8_t *mask,
+ int mask_stride, int invert_mask, int w, int h,
+ unsigned int *sse) {
unsigned int besterr;
#if CONFIG_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
DECLARE_ALIGNED(16, uint16_t, pred16[MAX_SB_SQUARE]);
if (second_pred != NULL) {
-#if CONFIG_EXT_INTER
if (mask)
aom_highbd_comp_mask_upsampled_pred(
pred16, second_pred, w, h, subpel_x_q3, subpel_y_q3, y, y_stride,
mask, mask_stride, invert_mask, xd->bd);
else
-#endif
aom_highbd_comp_avg_upsampled_pred(pred16, second_pred, w, h,
subpel_x_q3, subpel_y_q3, y,
y_stride, xd->bd);
@@ -732,13 +658,11 @@
(void)xd;
#endif // CONFIG_HIGHBITDEPTH
if (second_pred != NULL) {
-#if CONFIG_EXT_INTER
if (mask)
aom_comp_mask_upsampled_pred(pred, second_pred, w, h, subpel_x_q3,
subpel_y_q3, y, y_stride, mask,
mask_stride, invert_mask);
else
-#endif
aom_comp_avg_upsampled_pred(pred, second_pred, w, h, subpel_x_q3,
subpel_y_q3, y, y_stride);
} else {
@@ -756,18 +680,12 @@
const MACROBLOCKD *xd, const MV *bestmv, const MV *ref_mv,
int error_per_bit, const aom_variance_fn_ptr_t *vfp,
const uint8_t *const src, const int src_stride, const uint8_t *const y,
- int y_stride, const uint8_t *second_pred,
-#if CONFIG_EXT_INTER
- const uint8_t *mask, int mask_stride, int invert_mask,
-#endif
- int w, int h, int offset, int *mvjcost, int *mvcost[2], unsigned int *sse1,
- int *distortion) {
+ int y_stride, const uint8_t *second_pred, const uint8_t *mask,
+ int mask_stride, int invert_mask, int w, int h, int offset, int *mvjcost,
+ int *mvcost[2], unsigned int *sse1, int *distortion) {
unsigned int besterr = upsampled_pref_error(
- xd, vfp, src, src_stride, y + offset, y_stride, 0, 0, second_pred,
-#if CONFIG_EXT_INTER
- mask, mask_stride, invert_mask,
-#endif
- w, h, sse1);
+ xd, vfp, src, src_stride, y + offset, y_stride, 0, 0, second_pred, mask,
+ mask_stride, invert_mask, w, h, sse1);
*distortion = besterr;
besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
return besterr;
@@ -777,11 +695,8 @@
MACROBLOCK *x, const MV *ref_mv, int allow_hp, int error_per_bit,
const aom_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
int *cost_list, int *mvjcost, int *mvcost[2], int *distortion,
- unsigned int *sse1, const uint8_t *second_pred,
-#if CONFIG_EXT_INTER
- const uint8_t *mask, int mask_stride, int invert_mask,
-#endif
- int w, int h, int use_upsampled_ref) {
+ unsigned int *sse1, const uint8_t *second_pred, const uint8_t *mask,
+ int mask_stride, int invert_mask, int w, int h, int use_upsampled_ref) {
const uint8_t *const src_address = x->plane[0].src.buf;
const int src_stride = x->plane[0].src.stride;
const MACROBLOCKD *xd = &x->e_mbd;
@@ -818,19 +733,13 @@
if (use_upsampled_ref)
besterr = upsampled_setup_center_error(
xd, bestmv, ref_mv, error_per_bit, vfp, src_address, src_stride, y,
- y_stride, second_pred,
-#if CONFIG_EXT_INTER
- mask, mask_stride, invert_mask,
-#endif
- w, h, offset, mvjcost, mvcost, sse1, distortion);
+ y_stride, second_pred, mask, mask_stride, invert_mask, w, h, offset,
+ mvjcost, mvcost, sse1, distortion);
else
- besterr =
- setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, src_address,
- src_stride, y, y_stride, second_pred,
-#if CONFIG_EXT_INTER
- mask, mask_stride, invert_mask,
-#endif
- w, h, offset, mvjcost, mvcost, sse1, distortion);
+ besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp,
+ src_address, src_stride, y, y_stride,
+ second_pred, mask, mask_stride, invert_mask, w,
+ h, offset, mvjcost, mvcost, sse1, distortion);
(void)cost_list; // to silence compiler warning
@@ -845,22 +754,17 @@
if (use_upsampled_ref) {
thismse = upsampled_pref_error(xd, vfp, src_address, src_stride,
pre(y, y_stride, tr, tc), y_stride,
- sp(tc), sp(tr), second_pred,
-#if CONFIG_EXT_INTER
- mask, mask_stride, invert_mask,
-#endif
- w, h, &sse);
+ sp(tc), sp(tr), second_pred, mask,
+ mask_stride, invert_mask, w, h, &sse);
} else {
const uint8_t *const pre_address = pre(y, y_stride, tr, tc);
if (second_pred == NULL)
thismse = vfp->svf(pre_address, y_stride, sp(tc), sp(tr),
src_address, src_stride, &sse);
-#if CONFIG_EXT_INTER
else if (mask)
thismse = vfp->msvf(pre_address, y_stride, sp(tc), sp(tr),
src_address, src_stride, second_pred, mask,
mask_stride, invert_mask, &sse);
-#endif
else
thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr),
src_address, src_stride, &sse, second_pred);
@@ -892,23 +796,18 @@
if (use_upsampled_ref) {
thismse = upsampled_pref_error(xd, vfp, src_address, src_stride,
pre(y, y_stride, tr, tc), y_stride,
- sp(tc), sp(tr), second_pred,
-#if CONFIG_EXT_INTER
- mask, mask_stride, invert_mask,
-#endif
- w, h, &sse);
+ sp(tc), sp(tr), second_pred, mask,
+ mask_stride, invert_mask, w, h, &sse);
} else {
const uint8_t *const pre_address = pre(y, y_stride, tr, tc);
if (second_pred == NULL)
thismse = vfp->svf(pre_address, y_stride, sp(tc), sp(tr), src_address,
src_stride, &sse);
-#if CONFIG_EXT_INTER
else if (mask)
thismse = vfp->msvf(pre_address, y_stride, sp(tc), sp(tr),
src_address, src_stride, second_pred, mask,
mask_stride, invert_mask, &sse);
-#endif
else
thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr),
src_address, src_stride, &sse, second_pred);
@@ -1493,7 +1392,6 @@
: 0);
}
-#if CONFIG_EXT_INTER
int av1_get_mvpred_mask_var(const MACROBLOCK *x, const MV *best_mv,
const MV *center_mv, const uint8_t *second_pred,
const uint8_t *mask, int mask_stride,
@@ -1512,7 +1410,6 @@
x->errorperbit)
: 0);
}
-#endif
int av1_hex_search(MACROBLOCK *x, MV *start_mv, int search_param,
int sad_per_bit, int do_init_search, int *cost_list,
@@ -2481,11 +2378,9 @@
// mode, or when searching for one component of an ext-inter compound mode.
int av1_refining_search_8p_c(MACROBLOCK *x, int error_per_bit, int search_range,
const aom_variance_fn_ptr_t *fn_ptr,
-#if CONFIG_EXT_INTER
const uint8_t *mask, int mask_stride,
- int invert_mask,
-#endif
- const MV *center_mv, const uint8_t *second_pred) {
+ int invert_mask, const MV *center_mv,
+ const uint8_t *second_pred) {
const MV neighbors[8] = { { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 },
{ -1, -1 }, { 1, -1 }, { -1, 1 }, { 1, 1 } };
const MACROBLOCKD *const xd = &x->e_mbd;
@@ -2498,14 +2393,12 @@
clamp_mv(best_mv, x->mv_limits.col_min, x->mv_limits.col_max,
x->mv_limits.row_min, x->mv_limits.row_max);
-#if CONFIG_EXT_INTER
if (mask)
best_sad = fn_ptr->msdf(what->buf, what->stride,
get_buf_from_mv(in_what, best_mv), in_what->stride,
second_pred, mask, mask_stride, invert_mask) +
mvsad_err_cost(x, best_mv, &fcenter_mv, error_per_bit);
else
-#endif
best_sad =
fn_ptr->sdaf(what->buf, what->stride, get_buf_from_mv(in_what, best_mv),
in_what->stride, second_pred) +
@@ -2520,13 +2413,11 @@
if (is_mv_in(&x->mv_limits, &mv)) {
unsigned int sad;
-#if CONFIG_EXT_INTER
if (mask)
sad = fn_ptr->msdf(what->buf, what->stride,
get_buf_from_mv(in_what, &mv), in_what->stride,
second_pred, mask, mask_stride, invert_mask);
else
-#endif
sad = fn_ptr->sdaf(what->buf, what->stride,
get_buf_from_mv(in_what, &mv), in_what->stride,
second_pred);
@@ -3272,17 +3163,12 @@
MACROBLOCK *x, const MV *ref_mv, int allow_hp, int error_per_bit,
const aom_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
int *cost_list, int *mvjcost, int *mvcost[2], int *distortion,
- unsigned int *sse1, const uint8_t *second_pred,
-#if CONFIG_EXT_INTER
- const uint8_t *mask, int mask_stride, int invert_mask,
-#endif
- int w, int h, int use_upsampled_ref) {
+ unsigned int *sse1, const uint8_t *second_pred, const uint8_t *mask,
+ int mask_stride, int invert_mask, int w, int h, int use_upsampled_ref) {
COMMON_MV_TEST;
-#if CONFIG_EXT_INTER
(void)mask;
(void)mask_stride;
(void)invert_mask;
-#endif
(void)minr;
(void)minc;
bestmv->row = maxr;
@@ -3302,19 +3188,14 @@
MACROBLOCK *x, const MV *ref_mv, int allow_hp, int error_per_bit,
const aom_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
int *cost_list, int *mvjcost, int *mvcost[2], int *distortion,
- unsigned int *sse1, const uint8_t *second_pred,
-#if CONFIG_EXT_INTER
- const uint8_t *mask, int mask_stride, int invert_mask,
-#endif
- int w, int h, int use_upsampled_ref) {
+ unsigned int *sse1, const uint8_t *second_pred, const uint8_t *mask,
+ int mask_stride, int invert_mask, int w, int h, int use_upsampled_ref) {
COMMON_MV_TEST;
(void)maxr;
(void)maxc;
-#if CONFIG_EXT_INTER
(void)mask;
(void)mask_stride;
(void)invert_mask;
-#endif
bestmv->row = minr;
bestmv->col = minc;
besterr = 0;
diff --git a/av1/encoder/mcomp.h b/av1/encoder/mcomp.h
index 46a41d2..2c53075 100644
--- a/av1/encoder/mcomp.h
+++ b/av1/encoder/mcomp.h
@@ -58,13 +58,11 @@
int av1_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
const MV *center_mv, const uint8_t *second_pred,
const aom_variance_fn_ptr_t *vfp, int use_mvcost);
-#if CONFIG_EXT_INTER
int av1_get_mvpred_mask_var(const MACROBLOCK *x, const MV *best_mv,
const MV *center_mv, const uint8_t *second_pred,
const uint8_t *mask, int mask_stride,
int invert_mask, const aom_variance_fn_ptr_t *vfp,
int use_mvcost);
-#endif
struct AV1_COMP;
struct SPEED_FEATURES;
@@ -99,10 +97,8 @@
int forced_stop, // 0 - full, 1 - qtr only, 2 - half only
int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
int *distortion, unsigned int *sse1, const uint8_t *second_pred,
-#if CONFIG_EXT_INTER
- const uint8_t *mask, int mask_stride, int invert_mask,
-#endif
- int w, int h, int use_upsampled_ref);
+ const uint8_t *mask, int mask_stride, int invert_mask, int w, int h,
+ int use_upsampled_ref);
extern fractional_mv_step_fp av1_find_best_sub_pixel_tree;
extern fractional_mv_step_fp av1_find_best_sub_pixel_tree_pruned;
@@ -123,11 +119,9 @@
int av1_refining_search_8p_c(MACROBLOCK *x, int error_per_bit, int search_range,
const aom_variance_fn_ptr_t *fn_ptr,
-#if CONFIG_EXT_INTER
const uint8_t *mask, int mask_stride,
- int invert_mask,
-#endif
- const MV *center_mv, const uint8_t *second_pred);
+ int invert_mask, const MV *center_mv,
+ const uint8_t *second_pred);
struct AV1_COMP;
diff --git a/av1/encoder/rd.c b/av1/encoder/rd.c
index e250ded..d2fde41 100644
--- a/av1/encoder/rd.c
+++ b/av1/encoder/rd.c
@@ -294,7 +294,6 @@
x->drl_mode_cost0[i][1] = av1_cost_bit(fc->drl_prob[i], 1);
#endif
}
-#if CONFIG_EXT_INTER
for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
av1_cost_tokens_from_cdf(x->inter_compound_mode_cost[i],
fc->inter_compound_mode_cdf[i], NULL);
@@ -313,7 +312,6 @@
av1_cost_tokens_from_cdf(x->interintra_mode_cost[i],
fc->interintra_mode_cdf[i], NULL);
#endif // CONFIG_INTERINTRA
-#endif // CONFIG_EXT_INTER
#if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
for (i = BLOCK_8X8; i < BLOCK_SIZES_ALL; i++) {
av1_cost_tokens_from_cdf(x->motion_mode_cost[i], fc->motion_mode_cdf[i],
@@ -1302,8 +1300,6 @@
rd->thresh_mult[THR_TM] += 1000;
-#if CONFIG_EXT_INTER
-
#if CONFIG_COMPOUND_SINGLEREF
rd->thresh_mult[THR_SR_NEAREST_NEARMV] += 1200;
#if CONFIG_EXT_REFS
@@ -1376,36 +1372,6 @@
#endif // CONFIG_EXT_COMP_REFS
#endif // CONFIG_EXT_REFS
-#else // CONFIG_EXT_INTER
-
- rd->thresh_mult[THR_COMP_NEARESTLA] += 1000;
-#if CONFIG_EXT_REFS
- rd->thresh_mult[THR_COMP_NEARESTL2A] += 1000;
- rd->thresh_mult[THR_COMP_NEARESTL3A] += 1000;
-#endif // CONFIG_EXT_REFS
- rd->thresh_mult[THR_COMP_NEARESTGA] += 1000;
-#if CONFIG_EXT_REFS
- rd->thresh_mult[THR_COMP_NEARESTLB] += 1000;
- rd->thresh_mult[THR_COMP_NEARESTL2B] += 1000;
- rd->thresh_mult[THR_COMP_NEARESTL3B] += 1000;
- rd->thresh_mult[THR_COMP_NEARESTGB] += 1000;
- rd->thresh_mult[THR_COMP_NEARESTLA2] += 1000;
- rd->thresh_mult[THR_COMP_NEARESTL2A2] += 1000;
- rd->thresh_mult[THR_COMP_NEARESTL3A2] += 1000;
- rd->thresh_mult[THR_COMP_NEARESTGA2] += 1000;
-
-#if CONFIG_EXT_COMP_REFS
- rd->thresh_mult[THR_COMP_NEARESTLL2] += 1000;
- rd->thresh_mult[THR_COMP_NEARESTLL3] += 1000;
- rd->thresh_mult[THR_COMP_NEARESTLG] += 1000;
- rd->thresh_mult[THR_COMP_NEARESTBA] += 1000;
-#endif // CONFIG_EXT_COMP_REFS
-#endif // CONFIG_EXT_REFS
-
-#endif // CONFIG_EXT_INTER
-
-#if CONFIG_EXT_INTER
-
rd->thresh_mult[THR_COMP_NEAR_NEARLA] += 1200;
rd->thresh_mult[THR_COMP_NEAREST_NEWLA] += 1500;
rd->thresh_mult[THR_COMP_NEW_NEARESTLA] += 1500;
@@ -1540,78 +1506,6 @@
#endif // CONFIG_EXT_COMP_REFS
#endif // CONFIG_EXT_REFS
-#else // CONFIG_EXT_INTER
-
- rd->thresh_mult[THR_COMP_NEARLA] += 1500;
- rd->thresh_mult[THR_COMP_NEWLA] += 2000;
-#if CONFIG_EXT_REFS
- rd->thresh_mult[THR_COMP_NEARL2A] += 1500;
- rd->thresh_mult[THR_COMP_NEWL2A] += 2000;
- rd->thresh_mult[THR_COMP_NEARL3A] += 1500;
- rd->thresh_mult[THR_COMP_NEWL3A] += 2000;
-#endif // CONFIG_EXT_REFS
- rd->thresh_mult[THR_COMP_NEARGA] += 1500;
- rd->thresh_mult[THR_COMP_NEWGA] += 2000;
-
-#if CONFIG_EXT_REFS
- rd->thresh_mult[THR_COMP_NEARLB] += 1500;
- rd->thresh_mult[THR_COMP_NEWLB] += 2000;
- rd->thresh_mult[THR_COMP_NEARL2B] += 1500;
- rd->thresh_mult[THR_COMP_NEWL2B] += 2000;
- rd->thresh_mult[THR_COMP_NEARL3B] += 1500;
- rd->thresh_mult[THR_COMP_NEWL3B] += 2000;
- rd->thresh_mult[THR_COMP_NEARGB] += 1500;
- rd->thresh_mult[THR_COMP_NEWGB] += 2000;
-
- rd->thresh_mult[THR_COMP_NEARLA2] += 1500;
- rd->thresh_mult[THR_COMP_NEWLA2] += 2000;
- rd->thresh_mult[THR_COMP_NEARL2A2] += 1500;
- rd->thresh_mult[THR_COMP_NEWL2A2] += 2000;
- rd->thresh_mult[THR_COMP_NEARL3A2] += 1500;
- rd->thresh_mult[THR_COMP_NEWL3A2] += 2000;
- rd->thresh_mult[THR_COMP_NEARGA2] += 1500;
- rd->thresh_mult[THR_COMP_NEWGA2] += 2000;
-
-#if CONFIG_EXT_COMP_REFS
- rd->thresh_mult[THR_COMP_NEARLL2] += 1500;
- rd->thresh_mult[THR_COMP_NEWLL2] += 2000;
- rd->thresh_mult[THR_COMP_NEARLL3] += 1500;
- rd->thresh_mult[THR_COMP_NEWLL3] += 2000;
- rd->thresh_mult[THR_COMP_NEARLG] += 1500;
- rd->thresh_mult[THR_COMP_NEWLG] += 2000;
- rd->thresh_mult[THR_COMP_NEARBA] += 1500;
- rd->thresh_mult[THR_COMP_NEWBA] += 2000;
-#endif // CONFIG_EXT_COMP_REFS
-#endif // CONFIG_EXT_REFS
-
- rd->thresh_mult[THR_COMP_ZEROLA] += 2500;
-#if CONFIG_EXT_REFS
- rd->thresh_mult[THR_COMP_ZEROL2A] += 2500;
- rd->thresh_mult[THR_COMP_ZEROL3A] += 2500;
-#endif // CONFIG_EXT_REFS
- rd->thresh_mult[THR_COMP_ZEROGA] += 2500;
-
-#if CONFIG_EXT_REFS
- rd->thresh_mult[THR_COMP_ZEROLB] += 2500;
- rd->thresh_mult[THR_COMP_ZEROL2B] += 2500;
- rd->thresh_mult[THR_COMP_ZEROL3B] += 2500;
- rd->thresh_mult[THR_COMP_ZEROGB] += 2500;
-
- rd->thresh_mult[THR_COMP_ZEROLA2] += 2500;
- rd->thresh_mult[THR_COMP_ZEROL2A2] += 2500;
- rd->thresh_mult[THR_COMP_ZEROL3A2] += 2500;
- rd->thresh_mult[THR_COMP_ZEROGA2] += 2500;
-
-#if CONFIG_EXT_COMP_REFS
- rd->thresh_mult[THR_COMP_ZEROLL2] += 2500;
- rd->thresh_mult[THR_COMP_ZEROLL3] += 2500;
- rd->thresh_mult[THR_COMP_ZEROLG] += 2500;
- rd->thresh_mult[THR_COMP_ZEROBA] += 2500;
-#endif // CONFIG_EXT_COMP_REFS
-#endif // CONFIG_EXT_REFS
-
-#endif // CONFIG_EXT_INTER
-
rd->thresh_mult[THR_H_PRED] += 2000;
rd->thresh_mult[THR_V_PRED] += 2000;
rd->thresh_mult[THR_D135_PRED] += 2500;
@@ -1621,7 +1515,6 @@
rd->thresh_mult[THR_D117_PRED] += 2500;
rd->thresh_mult[THR_D45_PRED] += 2500;
-#if CONFIG_EXT_INTER
rd->thresh_mult[THR_COMP_INTERINTRA_ZEROL] += 1500;
rd->thresh_mult[THR_COMP_INTERINTRA_NEARESTL] += 1500;
rd->thresh_mult[THR_COMP_INTERINTRA_NEARL] += 1500;
@@ -1660,7 +1553,6 @@
rd->thresh_mult[THR_COMP_INTERINTRA_NEARESTA] += 1500;
rd->thresh_mult[THR_COMP_INTERINTRA_NEARA] += 1500;
rd->thresh_mult[THR_COMP_INTERINTRA_NEWA] += 2000;
-#endif // CONFIG_EXT_INTER
}
void av1_set_rd_speed_thresholds_sub8x8(AV1_COMP *cpi) {
diff --git a/av1/encoder/rd.h b/av1/encoder/rd.h
index 0436cec..35ada8e 100644
--- a/av1/encoder/rd.h
+++ b/av1/encoder/rd.h
@@ -91,8 +91,6 @@
THR_ZEROA,
THR_ZEROG,
-#if CONFIG_EXT_INTER
-
#if CONFIG_COMPOUND_SINGLEREF
THR_SR_NEAREST_NEARMV,
#if CONFIG_EXT_REFS
@@ -164,33 +162,6 @@
#endif // CONFIG_EXT_COMP_REFS
#endif // CONFIG_EXT_REFS
-#else // CONFIG_EXT_INTER
-
- THR_COMP_NEARESTLA,
-#if CONFIG_EXT_REFS
- THR_COMP_NEARESTL2A,
- THR_COMP_NEARESTL3A,
-#endif // CONFIG_EXT_REFS
- THR_COMP_NEARESTGA,
-#if CONFIG_EXT_REFS
- THR_COMP_NEARESTLB,
- THR_COMP_NEARESTL2B,
- THR_COMP_NEARESTL3B,
- THR_COMP_NEARESTGB,
- THR_COMP_NEARESTLA2,
- THR_COMP_NEARESTL2A2,
- THR_COMP_NEARESTL3A2,
- THR_COMP_NEARESTGA2,
-#if CONFIG_EXT_COMP_REFS
- THR_COMP_NEARESTLL2,
- THR_COMP_NEARESTLL3,
- THR_COMP_NEARESTLG,
- THR_COMP_NEARESTBA,
-#endif // CONFIG_EXT_COMP_REFS
-#endif // CONFIG_EXT_REFS
-
-#endif // CONFIG_EXT_INTER
-
THR_TM,
THR_SMOOTH,
@@ -199,8 +170,6 @@
THR_SMOOTH_H,
#endif // CONFIG_SMOOTH_HV
-#if CONFIG_EXT_INTER
-
THR_COMP_NEAR_NEARLA,
THR_COMP_NEW_NEARESTLA,
THR_COMP_NEAREST_NEWLA,
@@ -335,78 +304,6 @@
#endif // CONFIG_EXT_COMP_REFS
#endif // CONFIG_EXT_REFS
-#else // CONFIG_EXT_INTER
-
- THR_COMP_NEARLA,
- THR_COMP_NEWLA,
-#if CONFIG_EXT_REFS
- THR_COMP_NEARL2A,
- THR_COMP_NEWL2A,
- THR_COMP_NEARL3A,
- THR_COMP_NEWL3A,
-#endif // CONFIG_EXT_REFS
- THR_COMP_NEARGA,
- THR_COMP_NEWGA,
-
-#if CONFIG_EXT_REFS
- THR_COMP_NEARLB,
- THR_COMP_NEWLB,
- THR_COMP_NEARL2B,
- THR_COMP_NEWL2B,
- THR_COMP_NEARL3B,
- THR_COMP_NEWL3B,
- THR_COMP_NEARGB,
- THR_COMP_NEWGB,
-
- THR_COMP_NEARLA2,
- THR_COMP_NEWLA2,
- THR_COMP_NEARL2A2,
- THR_COMP_NEWL2A2,
- THR_COMP_NEARL3A2,
- THR_COMP_NEWL3A2,
- THR_COMP_NEARGA2,
- THR_COMP_NEWGA2,
-
-#if CONFIG_EXT_COMP_REFS
- THR_COMP_NEARLL2,
- THR_COMP_NEWLL2,
- THR_COMP_NEARLL3,
- THR_COMP_NEWLL3,
- THR_COMP_NEARLG,
- THR_COMP_NEWLG,
- THR_COMP_NEARBA,
- THR_COMP_NEWBA,
-#endif // CONFIG_EXT_COMP_REFS
-#endif // CONFIG_EXT_REFS
-
- THR_COMP_ZEROLA,
-#if CONFIG_EXT_REFS
- THR_COMP_ZEROL2A,
- THR_COMP_ZEROL3A,
-#endif // CONFIG_EXT_REFS
- THR_COMP_ZEROGA,
-
-#if CONFIG_EXT_REFS
- THR_COMP_ZEROLB,
- THR_COMP_ZEROL2B,
- THR_COMP_ZEROL3B,
- THR_COMP_ZEROGB,
-
- THR_COMP_ZEROLA2,
- THR_COMP_ZEROL2A2,
- THR_COMP_ZEROL3A2,
- THR_COMP_ZEROGA2,
-
-#if CONFIG_EXT_COMP_REFS
- THR_COMP_ZEROLL2,
- THR_COMP_ZEROLL3,
- THR_COMP_ZEROLG,
- THR_COMP_ZEROBA,
-#endif // CONFIG_EXT_COMP_REFS
-#endif // CONFIG_EXT_REFS
-
-#endif // CONFIG_EXT_INTER
-
THR_H_PRED,
THR_V_PRED,
THR_D135_PRED,
@@ -416,7 +313,6 @@
THR_D117_PRED,
THR_D45_PRED,
-#if CONFIG_EXT_INTER
THR_COMP_INTERINTRA_ZEROL,
THR_COMP_INTERINTRA_NEARESTL,
THR_COMP_INTERINTRA_NEARL,
@@ -455,7 +351,6 @@
THR_COMP_INTERINTRA_NEARESTA,
THR_COMP_INTERINTRA_NEARA,
THR_COMP_INTERINTRA_NEWA,
-#endif // CONFIG_EXT_INTER
MAX_MODES
} THR_MODES;
diff --git a/av1/encoder/rdopt.c b/av1/encoder/rdopt.c
index b1c0fb3..0ad8d14 100644
--- a/av1/encoder/rdopt.c
+++ b/av1/encoder/rdopt.c
@@ -222,8 +222,6 @@
// TODO(zoeliu): May need to reconsider the order on the modes to check
-#if CONFIG_EXT_INTER
-
#if CONFIG_COMPOUND_SINGLEREF
// Single ref comp mode
{ SR_NEAREST_NEARMV, { LAST_FRAME, NONE_FRAME } },
@@ -297,33 +295,6 @@
#endif // CONFIG_EXT_COMP_REFS
#endif // CONFIG_EXT_REFS
-#else // CONFIG_EXT_INTER
-
- { NEARESTMV, { LAST_FRAME, ALTREF_FRAME } },
-#if CONFIG_EXT_REFS
- { NEARESTMV, { LAST2_FRAME, ALTREF_FRAME } },
- { NEARESTMV, { LAST3_FRAME, ALTREF_FRAME } },
-#endif // CONFIG_EXT_REFS
- { NEARESTMV, { GOLDEN_FRAME, ALTREF_FRAME } },
-#if CONFIG_EXT_REFS
- { NEARESTMV, { LAST_FRAME, BWDREF_FRAME } },
- { NEARESTMV, { LAST2_FRAME, BWDREF_FRAME } },
- { NEARESTMV, { LAST3_FRAME, BWDREF_FRAME } },
- { NEARESTMV, { GOLDEN_FRAME, BWDREF_FRAME } },
- { NEARESTMV, { LAST_FRAME, ALTREF2_FRAME } },
- { NEARESTMV, { LAST2_FRAME, ALTREF2_FRAME } },
- { NEARESTMV, { LAST3_FRAME, ALTREF2_FRAME } },
- { NEARESTMV, { GOLDEN_FRAME, ALTREF2_FRAME } },
-
-#if CONFIG_EXT_COMP_REFS
- { NEARESTMV, { LAST_FRAME, LAST2_FRAME } },
- { NEARESTMV, { LAST_FRAME, LAST3_FRAME } },
- { NEARESTMV, { LAST_FRAME, GOLDEN_FRAME } },
- { NEARESTMV, { BWDREF_FRAME, ALTREF_FRAME } },
-#endif // CONFIG_EXT_COMP_REFS
-#endif // CONFIG_EXT_REFS
-#endif // CONFIG_EXT_INTER
-
{ TM_PRED, { INTRA_FRAME, NONE_FRAME } },
{ SMOOTH_PRED, { INTRA_FRAME, NONE_FRAME } },
@@ -332,7 +303,6 @@
{ SMOOTH_H_PRED, { INTRA_FRAME, NONE_FRAME } },
#endif // CONFIG_SMOOTH_HV
-#if CONFIG_EXT_INTER
{ NEAR_NEARMV, { LAST_FRAME, ALTREF_FRAME } },
{ NEW_NEARESTMV, { LAST_FRAME, ALTREF_FRAME } },
{ NEAREST_NEWMV, { LAST_FRAME, ALTREF_FRAME } },
@@ -467,78 +437,6 @@
#endif // CONFIG_EXT_COMP_REFS
#endif // CONFIG_EXT_REFS
-#else // !CONFIG_EXT_INTER
-
- { NEARMV, { LAST_FRAME, ALTREF_FRAME } },
- { NEWMV, { LAST_FRAME, ALTREF_FRAME } },
-#if CONFIG_EXT_REFS
- { NEARMV, { LAST2_FRAME, ALTREF_FRAME } },
- { NEWMV, { LAST2_FRAME, ALTREF_FRAME } },
- { NEARMV, { LAST3_FRAME, ALTREF_FRAME } },
- { NEWMV, { LAST3_FRAME, ALTREF_FRAME } },
-#endif // CONFIG_EXT_REFS
- { NEARMV, { GOLDEN_FRAME, ALTREF_FRAME } },
- { NEWMV, { GOLDEN_FRAME, ALTREF_FRAME } },
-
-#if CONFIG_EXT_REFS
- { NEARMV, { LAST_FRAME, BWDREF_FRAME } },
- { NEWMV, { LAST_FRAME, BWDREF_FRAME } },
- { NEARMV, { LAST2_FRAME, BWDREF_FRAME } },
- { NEWMV, { LAST2_FRAME, BWDREF_FRAME } },
- { NEARMV, { LAST3_FRAME, BWDREF_FRAME } },
- { NEWMV, { LAST3_FRAME, BWDREF_FRAME } },
- { NEARMV, { GOLDEN_FRAME, BWDREF_FRAME } },
- { NEWMV, { GOLDEN_FRAME, BWDREF_FRAME } },
-
- { NEARMV, { LAST_FRAME, ALTREF2_FRAME } },
- { NEWMV, { LAST_FRAME, ALTREF2_FRAME } },
- { NEARMV, { LAST2_FRAME, ALTREF2_FRAME } },
- { NEWMV, { LAST2_FRAME, ALTREF2_FRAME } },
- { NEARMV, { LAST3_FRAME, ALTREF2_FRAME } },
- { NEWMV, { LAST3_FRAME, ALTREF2_FRAME } },
- { NEARMV, { GOLDEN_FRAME, ALTREF2_FRAME } },
- { NEWMV, { GOLDEN_FRAME, ALTREF2_FRAME } },
-
-#if CONFIG_EXT_COMP_REFS
- { NEARMV, { LAST_FRAME, LAST2_FRAME } },
- { NEWMV, { LAST_FRAME, LAST2_FRAME } },
- { NEARMV, { LAST_FRAME, LAST3_FRAME } },
- { NEWMV, { LAST_FRAME, LAST3_FRAME } },
- { NEARMV, { LAST_FRAME, GOLDEN_FRAME } },
- { NEWMV, { LAST_FRAME, GOLDEN_FRAME } },
- { NEARMV, { BWDREF_FRAME, ALTREF_FRAME } },
- { NEWMV, { BWDREF_FRAME, ALTREF_FRAME } },
-#endif // CONFIG_EXT_COMP_REFS
-#endif // CONFIG_EXT_REFS
-
- { ZEROMV, { LAST_FRAME, ALTREF_FRAME } },
-#if CONFIG_EXT_REFS
- { ZEROMV, { LAST2_FRAME, ALTREF_FRAME } },
- { ZEROMV, { LAST3_FRAME, ALTREF_FRAME } },
-#endif // CONFIG_EXT_REFS
- { ZEROMV, { GOLDEN_FRAME, ALTREF_FRAME } },
-
-#if CONFIG_EXT_REFS
- { ZEROMV, { LAST_FRAME, BWDREF_FRAME } },
- { ZEROMV, { LAST2_FRAME, BWDREF_FRAME } },
- { ZEROMV, { LAST3_FRAME, BWDREF_FRAME } },
- { ZEROMV, { GOLDEN_FRAME, BWDREF_FRAME } },
-
- { ZEROMV, { LAST_FRAME, ALTREF2_FRAME } },
- { ZEROMV, { LAST2_FRAME, ALTREF2_FRAME } },
- { ZEROMV, { LAST3_FRAME, ALTREF2_FRAME } },
- { ZEROMV, { GOLDEN_FRAME, ALTREF2_FRAME } },
-
-#if CONFIG_EXT_COMP_REFS
- { ZEROMV, { LAST_FRAME, LAST2_FRAME } },
- { ZEROMV, { LAST_FRAME, LAST3_FRAME } },
- { ZEROMV, { LAST_FRAME, GOLDEN_FRAME } },
- { ZEROMV, { BWDREF_FRAME, ALTREF_FRAME } },
-#endif // CONFIG_EXT_COMP_REFS
-#endif // CONFIG_EXT_REFS
-
-#endif // CONFIG_EXT_INTER
-
{ H_PRED, { INTRA_FRAME, NONE_FRAME } },
{ V_PRED, { INTRA_FRAME, NONE_FRAME } },
{ D135_PRED, { INTRA_FRAME, NONE_FRAME } },
@@ -548,7 +446,6 @@
{ D117_PRED, { INTRA_FRAME, NONE_FRAME } },
{ D45_PRED, { INTRA_FRAME, NONE_FRAME } },
-#if CONFIG_EXT_INTER
{ ZEROMV, { LAST_FRAME, INTRA_FRAME } },
{ NEARESTMV, { LAST_FRAME, INTRA_FRAME } },
{ NEARMV, { LAST_FRAME, INTRA_FRAME } },
@@ -587,7 +484,6 @@
{ NEARESTMV, { ALTREF_FRAME, INTRA_FRAME } },
{ NEARMV, { ALTREF_FRAME, INTRA_FRAME } },
{ NEWMV, { ALTREF_FRAME, INTRA_FRAME } },
-#endif // CONFIG_EXT_INTER
};
static const PREDICTION_MODE intra_rd_search_mode_order[INTRA_MODES] = {
@@ -2505,8 +2401,7 @@
return 0;
}
-#if CONFIG_EXT_INTER && \
- (CONFIG_WEDGE || CONFIG_COMPOUND_SEGMENT || CONFIG_INTERINTRA)
+#if (CONFIG_WEDGE || CONFIG_COMPOUND_SEGMENT || CONFIG_INTERINTRA)
static int64_t estimate_yrd_for_sb(const AV1_COMP *const cpi, BLOCK_SIZE bs,
MACROBLOCK *x, int *r, int64_t *d, int *s,
int64_t *sse, int64_t ref_best_rd) {
@@ -2519,7 +2414,7 @@
*sse = rd_stats.sse;
return rd;
}
-#endif // CONFIG_EXT_INTER && (CONFIG_WEDGE || CONFIG_COMPOUND_SEGMENT)
+#endif // (CONFIG_WEDGE || CONFIG_COMPOUND_SEGMENT || CONFIG_INTERINTRA)
static void choose_largest_tx_size(const AV1_COMP *const cpi, MACROBLOCK *x,
RD_STATS *rd_stats, int64_t ref_best_rd,
@@ -6166,7 +6061,6 @@
static int cost_mv_ref(const MACROBLOCK *const x, PREDICTION_MODE mode,
int16_t mode_context) {
-#if CONFIG_EXT_INTER
if (is_inter_compound_mode(mode)) {
return x
->inter_compound_mode_cost[mode_context][INTER_COMPOUND_OFFSET(mode)];
@@ -6176,7 +6070,6 @@
[INTER_SINGLEREF_COMP_OFFSET(mode)];
#endif // CONFIG_COMPOUND_SINGLEREF
}
-#endif
int mode_cost = 0;
int16_t mode_ctx = mode_context & NEWMV_CTX_MASK;
@@ -6210,7 +6103,7 @@
}
}
-#if CONFIG_EXT_INTER && (CONFIG_WEDGE || CONFIG_COMPOUND_SEGMENT)
+#if (CONFIG_WEDGE || CONFIG_COMPOUND_SEGMENT)
static int get_interinter_compound_type_bits(BLOCK_SIZE bsize,
COMPOUND_TYPE comp_type) {
(void)bsize;
@@ -6225,7 +6118,7 @@
default: assert(0); return 0;
}
}
-#endif // CONFIG_EXT_INTER && (CONFIG_WEDGE || CONFIG_COMPOUND_SEGMENT)
+#endif // (CONFIG_WEDGE || CONFIG_COMPOUND_SEGMENT)
typedef struct {
int eobs;
@@ -6236,9 +6129,7 @@
int64_t brdcost;
int_mv mvs[2];
int_mv pred_mv[2];
-#if CONFIG_EXT_INTER
int_mv ref_mv[2];
-#endif // CONFIG_EXT_INTER
#if CONFIG_CHROMA_2X2
ENTROPY_CONTEXT ta[4];
@@ -6259,16 +6150,12 @@
int64_t sse;
int segment_yrate;
PREDICTION_MODE modes[4];
-#if CONFIG_EXT_INTER
#if CONFIG_COMPOUND_SINGLEREF
SEG_RDSTAT rdstat[4][INTER_MODES + INTER_SINGLEREF_COMP_MODES +
INTER_COMPOUND_MODES];
#else // !CONFIG_COMPOUND_SINGLEREF
SEG_RDSTAT rdstat[4][INTER_MODES + INTER_COMPOUND_MODES];
#endif // CONFIG_COMPOUND_SINGLEREF
-#else // !CONFIG_EXT_INTER
- SEG_RDSTAT rdstat[4][INTER_MODES];
-#endif // CONFIG_EXT_INTER
int mvthresh;
} BEST_SEG_INFO;
@@ -6284,9 +6171,7 @@
static int check_best_zero_mv(
const AV1_COMP *const cpi, const MACROBLOCK *const x,
const int16_t mode_context[TOTAL_REFS_PER_FRAME],
-#if CONFIG_EXT_INTER
const int16_t compound_mode_context[TOTAL_REFS_PER_FRAME],
-#endif // CONFIG_EXT_INTER
int_mv frame_mv[MB_MODE_COUNT][TOTAL_REFS_PER_FRAME], int this_mode,
const MV_REFERENCE_FRAME ref_frames[2], const BLOCK_SIZE bsize, int block,
int mi_row, int mi_col) {
@@ -6298,11 +6183,7 @@
(void)mi_col;
(void)cpi;
#if CONFIG_GLOBAL_MOTION
- if (this_mode == ZEROMV
-#if CONFIG_EXT_INTER
- || this_mode == ZERO_ZEROMV
-#endif // CONFIG_EXT_INTER
- ) {
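+ // For ZEROMV / ZERO_ZEROMV the candidate to compare against is the
+ // global-motion vector of each reference, which need not be the zero MV.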
+ if (this_mode == ZEROMV || this_mode == ZERO_ZEROMV) {
for (int cur_frm = 0; cur_frm < 1 + comp_pred_mode; cur_frm++) {
zeromv[cur_frm].as_int =
gm_get_motion_vector(&cpi->common.global_motion[ref_frames[cur_frm]],
@@ -6318,9 +6199,6 @@
}
#endif // CONFIG_GLOBAL_MOTION
-#if !CONFIG_EXT_INTER
- assert(ref_frames[1] != INTRA_FRAME); // Just sanity check
-#endif // !CONFIG_EXT_INTER
if ((this_mode == NEARMV || this_mode == NEARESTMV || this_mode == ZEROMV) &&
frame_mv[this_mode][ref_frames[0]].as_int == zeromv[0].as_int &&
(ref_frames[1] <= INTRA_FRAME ||
@@ -6349,12 +6227,10 @@
return 0;
}
}
- }
-#if CONFIG_EXT_INTER
- else if ((this_mode == NEAREST_NEARESTMV || this_mode == NEAR_NEARMV ||
- this_mode == ZERO_ZEROMV) &&
- frame_mv[this_mode][ref_frames[0]].as_int == zeromv[0].as_int &&
- frame_mv[this_mode][ref_frames[1]].as_int == zeromv[1].as_int) {
+ } else if ((this_mode == NEAREST_NEARESTMV || this_mode == NEAR_NEARMV ||
+ this_mode == ZERO_ZEROMV) &&
+ frame_mv[this_mode][ref_frames[0]].as_int == zeromv[0].as_int &&
+ frame_mv[this_mode][ref_frames[1]].as_int == zeromv[1].as_int) {
int16_t rfc = compound_mode_context[ref_frames[0]];
int c2 = cost_mv_ref(x, NEAREST_NEARESTMV, rfc);
int c3 = cost_mv_ref(x, ZERO_ZEROMV, rfc);
@@ -6373,28 +6249,25 @@
return 0;
}
}
-#endif // CONFIG_EXT_INTER
return 1;
}
static void joint_motion_search(const AV1_COMP *cpi, MACROBLOCK *x,
BLOCK_SIZE bsize, int_mv *frame_mv,
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
int_mv *frame_comp_mv,
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
int mi_row, int mi_col,
-#if CONFIG_EXT_INTER
int_mv *ref_mv_sub8x8[2], const uint8_t *mask,
- int mask_stride,
-#endif // CONFIG_EXT_INTER
- int *rate_mv, const int block) {
+ int mask_stride, int *rate_mv,
+ const int block) {
const AV1_COMMON *const cm = &cpi->common;
const int pw = block_size_wide[bsize];
const int ph = block_size_high[bsize];
MACROBLOCKD *xd = &x->e_mbd;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
// This function should only ever be called for compound modes
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
if (!has_second_ref(mbmi)) {
assert(is_inter_singleref_comp_mode(mbmi->mode));
assert(frame_comp_mv);
@@ -6406,7 +6279,7 @@
#else
assert(has_second_ref(mbmi));
const int refs[2] = { mbmi->ref_frame[0], mbmi->ref_frame[1] };
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
int_mv ref_mv[2];
int ite, ref;
struct scale_factors sf;
@@ -6419,18 +6292,18 @@
const int p_row = ((mi_row * MI_SIZE) >> pd->subsampling_y) + 4 * ir;
#if CONFIG_GLOBAL_MOTION
int is_global[2];
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
for (ref = 0; ref < 1 + has_second_ref(mbmi); ++ref) {
#else
for (ref = 0; ref < 2; ++ref) {
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
WarpedMotionParams *const wm =
&xd->global_motion[xd->mi[0]->mbmi.ref_frame[ref]];
is_global[ref] = is_global_mv_block(xd->mi[0], block, wm->wmtype);
}
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
if (!has_second_ref(mbmi)) is_global[1] = is_global[0];
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
#endif // CONFIG_GLOBAL_MOTION
#else // CONFIG_GLOBAL_MOTION || CONFIG_WARPED_MOTION
(void)block;
@@ -6452,20 +6325,20 @@
DECLARE_ALIGNED(16, uint8_t, second_pred[MAX_SB_SQUARE]);
#endif // CONFIG_HIGHBITDEPTH
-#if CONFIG_EXT_INTER && CONFIG_CB4X4
+#if CONFIG_CB4X4
(void)ref_mv_sub8x8;
-#endif // CONFIG_EXT_INTER && CONFIG_CB4X4
+#endif // CONFIG_CB4X4
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
for (ref = 0; ref < 1 + has_second_ref(mbmi); ++ref) {
#else
for (ref = 0; ref < 2; ++ref) {
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
-#if CONFIG_EXT_INTER && !CONFIG_CB4X4
+#endif // CONFIG_COMPOUND_SINGLEREF
+#if !CONFIG_CB4X4
if (bsize < BLOCK_8X8 && ref_mv_sub8x8 != NULL)
ref_mv[ref].as_int = ref_mv_sub8x8[ref]->as_int;
else
-#endif // CONFIG_EXT_INTER && !CONFIG_CB4X4
+#endif // !CONFIG_CB4X4
ref_mv[ref] = x->mbmi_ext->ref_mvs[refs[ref]][0];
if (scaled_ref_frame[ref]) {
@@ -6480,7 +6353,7 @@
}
}
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
if (!has_second_ref(mbmi)) {
assert(is_inter_singleref_comp_mode(mbmi->mode));
// NOTE: For single ref comp mode, set up the 2nd set of ref_mv/pre_planes
@@ -6496,7 +6369,7 @@
av1_setup_pre_planes(xd, 1, scaled_ref_frame[0], mi_row, mi_col, NULL);
}
}
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
// Since we have scaled the reference frames to match the size of the current
// frame we must use a unit scaling factor during mode selection.
@@ -6510,14 +6383,14 @@
// Allow joint search multiple times iteratively for each reference frame
// and break out of the search loop if it couldn't find a better mv.
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
const int num_ites =
(has_second_ref(mbmi) || mbmi->mode == SR_NEW_NEWMV) ? 4 : 1;
const int start_ite = has_second_ref(mbmi) ? 0 : 1;
for (ite = start_ite; ite < (start_ite + num_ites); ite++) {
#else
for (ite = 0; ite < 4; ite++) {
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
struct buf_2d ref_yv12[2];
int bestsme = INT_MAX;
int sadpb = x->sadperbit16;
@@ -6545,22 +6418,22 @@
ref_yv12[1] = xd->plane[plane].pre[1];
// Get the prediction block from the 'other' reference frame.
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
MV *const the_other_mv = (has_second_ref(mbmi) || id)
? &frame_mv[refs[!id]].as_mv
: &frame_comp_mv[refs[0]].as_mv;
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
#if CONFIG_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
second_pred = CONVERT_TO_BYTEPTR(second_pred_alloc_16);
av1_highbd_build_inter_predictor(
ref_yv12[!id].buf, ref_yv12[!id].stride, second_pred, pw,
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
the_other_mv,
-#else // !(CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF)
+#else // !CONFIG_COMPOUND_SINGLEREF
&frame_mv[refs[!id]].as_mv,
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
&sf, pw, ph, 0, mbmi->interp_filter,
#if CONFIG_GLOBAL_MOTION || CONFIG_WARPED_MOTION
&warp_types, p_col, p_row,
@@ -6571,11 +6444,11 @@
#endif // CONFIG_HIGHBITDEPTH
av1_build_inter_predictor(
ref_yv12[!id].buf, ref_yv12[!id].stride, second_pred, pw,
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
the_other_mv,
-#else // !(CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF)
+#else // !CONFIG_COMPOUND_SINGLEREF
&frame_mv[refs[!id]].as_mv,
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
&sf, pw, ph, &conv_params, mbmi->interp_filter,
#if CONFIG_GLOBAL_MOTION || CONFIG_WARPED_MOTION
&warp_types, p_col, p_row, plane, !id,
@@ -6591,38 +6464,33 @@
// Use the mv result from the single mode as mv predictor.
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
if (!has_second_ref(mbmi) && id)
*best_mv = frame_comp_mv[refs[0]].as_mv;
else
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
*best_mv = frame_mv[refs[id]].as_mv;
best_mv->col >>= 3;
best_mv->row >>= 3;
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
if (!has_second_ref(mbmi))
av1_set_mvcost(x, refs[0], 0, mbmi->ref_mv_idx);
else
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
av1_set_mvcost(x, refs[id], id, mbmi->ref_mv_idx);
// Small-range full-pixel motion search.
- bestsme =
- av1_refining_search_8p_c(x, sadpb, search_range, &cpi->fn_ptr[bsize],
-#if CONFIG_EXT_INTER
- mask, mask_stride, id,
-#endif
- &ref_mv[id].as_mv, second_pred);
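+ // mask may be NULL (no masked compound prediction); the mvpred variance
+ // call below then falls back to the simple averaging variant.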
+ bestsme = av1_refining_search_8p_c(x, sadpb, search_range,
+ &cpi->fn_ptr[bsize], mask, mask_stride,
+ id, &ref_mv[id].as_mv, second_pred);
if (bestsme < INT_MAX) {
-#if CONFIG_EXT_INTER
if (mask)
bestsme = av1_get_mvpred_mask_var(x, best_mv, &ref_mv[id].as_mv,
second_pred, mask, mask_stride, id,
&cpi->fn_ptr[bsize], 1);
else
-#endif
bestsme = av1_get_mvpred_av_var(x, best_mv, &ref_mv[id].as_mv,
second_pred, &cpi->fn_ptr[bsize], 1);
}
@@ -6644,29 +6512,26 @@
x, &ref_mv[id].as_mv, cpi->common.allow_high_precision_mv,
x->errorperbit, &cpi->fn_ptr[bsize], 0,
cpi->sf.mv.subpel_iters_per_step, NULL, x->nmvjointcost, x->mvcost,
- &dis, &sse, second_pred,
-#if CONFIG_EXT_INTER
- mask, mask_stride, id,
-#endif
- pw, ph, cpi->sf.use_upsampled_references);
+ &dis, &sse, second_pred, mask, mask_stride, id, pw, ph,
+ cpi->sf.use_upsampled_references);
}
// Restore the pointer to the first (possibly scaled) prediction buffer.
if (id) xd->plane[plane].pre[0] = ref_yv12[0];
if (bestsme < last_besterr[id]) {
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
// NOTE: For single ref comp mode, frame_mv stores the first mv and
// frame_comp_mv stores the second mv.
if (!has_second_ref(mbmi) && id)
frame_comp_mv[refs[0]].as_mv = *best_mv;
else
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
frame_mv[refs[id]].as_mv = *best_mv;
last_besterr[id] = bestsme;
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
if (!has_second_ref(mbmi)) last_besterr[!id] = last_besterr[id];
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
} else {
break;
}
@@ -6674,11 +6539,11 @@
*rate_mv = 0;
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
for (ref = 0; ref < 1 + has_second_ref(mbmi); ++ref) {
#else
for (ref = 0; ref < 2; ++ref) {
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
if (scaled_ref_frame[ref]) {
// Restore the prediction frame pointers to their unscaled versions.
int i;
@@ -6686,14 +6551,14 @@
xd->plane[i].pre[ref] = backup_yv12[ref][i];
}
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
if (!has_second_ref(mbmi))
av1_set_mvcost(x, refs[0], 0, mbmi->ref_mv_idx);
else
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
av1_set_mvcost(x, refs[ref], ref, mbmi->ref_mv_idx);
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
if (!has_second_ref(mbmi)) {
// NOTE: For single ref comp mode, i.e. !has_second_ref(mbmi) is true, the
// first mv is stored in frame_mv[] and the second mv is stored in
@@ -6707,25 +6572,25 @@
&x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
} else {
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
-#if CONFIG_EXT_INTER && !CONFIG_CB4X4
+#endif // CONFIG_COMPOUND_SINGLEREF
+#if !CONFIG_CB4X4
if (bsize >= BLOCK_8X8)
-#endif // CONFIG_EXT_INTER && !CONFIG_CB4X4
+#endif // !CONFIG_CB4X4
*rate_mv += av1_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
&x->mbmi_ext->ref_mvs[refs[ref]][0].as_mv,
x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
-#if CONFIG_EXT_INTER && !CONFIG_CB4X4
+#if !CONFIG_CB4X4
else
*rate_mv += av1_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
&ref_mv_sub8x8[ref]->as_mv, x->nmvjointcost,
x->mvcost, MV_COST_WEIGHT);
-#endif // CONFIG_EXT_INTER && !CONFIG_CB4X4
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // !CONFIG_CB4X4
+#if CONFIG_COMPOUND_SINGLEREF
}
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
}
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
if (!has_second_ref(mbmi)) {
if (scaled_ref_frame[0]) {
// Restore the prediction frame pointers to their unscaled versions.
@@ -6734,7 +6599,7 @@
xd->plane[i].pre[1] = backup_yv12[1][i];
}
}
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
}
static void estimate_ref_frame_costs(
@@ -7016,11 +6881,8 @@
// Gets an initial list of candidate vectors from neighbours and orders them
av1_find_mv_refs(cm, xd, mi, ref_frame, &mbmi_ext->ref_mv_count[ref_frame],
mbmi_ext->ref_mv_stack[ref_frame],
-#if CONFIG_EXT_INTER
- mbmi_ext->compound_mode_context,
-#endif // CONFIG_EXT_INTER
- candidates, mi_row, mi_col, NULL, NULL,
- mbmi_ext->mode_context);
+ mbmi_ext->compound_mode_context, candidates, mi_row, mi_col,
+ NULL, NULL, mbmi_ext->mode_context);
// Candidate refinement carried out at encoder and decoder
#if CONFIG_AMVR
@@ -7047,10 +6909,7 @@
static void single_motion_search(const AV1_COMP *const cpi, MACROBLOCK *x,
BLOCK_SIZE bsize, int mi_row, int mi_col,
-#if CONFIG_EXT_INTER
- int ref_idx,
-#endif // CONFIG_EXT_INTER
- int *rate_mv) {
+ int ref_idx, int *rate_mv) {
MACROBLOCKD *xd = &x->e_mbd;
const AV1_COMMON *cm = &cpi->common;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
@@ -7059,17 +6918,12 @@
int step_param;
int sadpb = x->sadperbit16;
MV mvp_full;
-#if CONFIG_EXT_INTER
#if CONFIG_COMPOUND_SINGLEREF
int ref =
has_second_ref(mbmi) ? mbmi->ref_frame[ref_idx] : mbmi->ref_frame[0];
#else // !CONFIG_COMPOUND_SINGLEREF
int ref = mbmi->ref_frame[ref_idx];
#endif // CONFIG_COMPOUND_SINGLEREF
-#else // !CONFIG_EXT_INTER
- int ref = mbmi->ref_frame[0];
- int ref_idx = 0;
-#endif // CONFIG_EXT_INTER
MV ref_mv = x->mbmi_ext->ref_mvs[ref][0].as_mv;
MvLimits tmp_mv_limits = x->mv_limits;
@@ -7212,11 +7066,8 @@
x, &ref_mv, cm->allow_high_precision_mv, x->errorperbit,
&cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
cpi->sf.mv.subpel_iters_per_step, cond_cost_list(cpi, cost_list),
- x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL,
-#if CONFIG_EXT_INTER
- NULL, 0, 0,
-#endif
- pw, ph, 1);
+ x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, NULL,
+ 0, 0, pw, ph, 1);
if (try_second) {
const int minc =
@@ -7240,11 +7091,7 @@
&cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
cpi->sf.mv.subpel_iters_per_step,
cond_cost_list(cpi, cost_list), x->nmvjointcost, x->mvcost,
- &dis, &x->pred_sse[ref], NULL,
-#if CONFIG_EXT_INTER
- NULL, 0, 0,
-#endif
- pw, ph, 1);
+ &dis, &x->pred_sse[ref], NULL, NULL, 0, 0, pw, ph, 1);
if (this_var < best_mv_var) best_mv = x->best_mv.as_mv;
x->best_mv.as_mv = best_mv;
}
@@ -7254,11 +7101,8 @@
x, &ref_mv, cm->allow_high_precision_mv, x->errorperbit,
&cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
cpi->sf.mv.subpel_iters_per_step, cond_cost_list(cpi, cost_list),
- x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL,
-#if CONFIG_EXT_INTER
- NULL, 0, 0,
-#endif
- 0, 0, 0);
+ x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, NULL,
+ 0, 0, 0, 0, 0);
}
#if CONFIG_MOTION_VAR
break;
@@ -7298,7 +7142,6 @@
}
}
-#if CONFIG_EXT_INTER
static void build_second_inter_pred(const AV1_COMP *cpi, MACROBLOCK *x,
BLOCK_SIZE bsize, const MV *other_mv,
int mi_row, int mi_col, const int block,
@@ -7651,7 +7494,6 @@
tmp_mv[1].as_int = frame_mv[rf[1]].as_int;
}
#endif // CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
-#endif // CONFIG_EXT_INTER
// In some situations we want to discount the apparent cost of a new motion
// vector. Where there is a subtle motion field and especially where there is
@@ -7683,7 +7525,6 @@
xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN);
}
-#if CONFIG_EXT_INTER
#if CONFIG_WEDGE
static int estimate_wedge_sign(const AV1_COMP *cpi, const MACROBLOCK *x,
const BLOCK_SIZE bsize, const uint8_t *pred0,
@@ -7728,7 +7569,6 @@
return (tl + br > 0);
}
#endif // CONFIG_WEDGE
-#endif // CONFIG_EXT_INTER
#if !CONFIG_DUAL_FILTER
static InterpFilter predict_interp_filter(
@@ -7755,16 +7595,11 @@
if (xd->up_available) af = xd->mi[-xd->mi_stride]->mbmi.interp_filter;
if (xd->left_available) lf = xd->mi[-1]->mbmi.interp_filter;
-#if CONFIG_EXT_INTER
if ((this_mode != NEWMV && this_mode != NEW_NEWMV) || (af == lf))
-#else
- if ((this_mode != NEWMV) || (af == lf))
-#endif // CONFIG_EXT_INTER
best_filter = af;
}
if (is_comp_pred) {
if (cpi->sf.adaptive_mode_search) {
-#if CONFIG_EXT_INTER
switch (this_mode) {
case NEAREST_NEARESTMV:
if (single_filter[NEARESTMV][refs[0]] ==
@@ -7807,11 +7642,6 @@
best_filter = single_filter[this_mode][refs[0]];
break;
}
-#else
- if (single_filter[this_mode][refs[0]] ==
- single_filter[this_mode][refs[1]])
- best_filter = single_filter[this_mode][refs[0]];
-#endif // CONFIG_EXT_INTER
}
}
if (x->source_variance < cpi->sf.disable_filter_search_var_thresh) {
@@ -7821,7 +7651,6 @@
}
#endif // !CONFIG_DUAL_FILTER
-#if CONFIG_EXT_INTER
// Choose the best wedge index and sign
#if CONFIG_WEDGE
static int64_t pick_wedge(const AV1_COMP *const cpi, const MACROBLOCK *const x,
@@ -8236,7 +8065,6 @@
return best_rd_cur;
}
#endif // CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
-#endif // CONFIG_EXT_INTER
typedef struct {
#if CONFIG_MOTION_VAR
@@ -8247,23 +8075,21 @@
int left_pred_stride[MAX_MB_PLANE];
#endif // CONFIG_MOTION_VAR
int_mv *single_newmv;
-#if CONFIG_EXT_INTER
// Pointer to array of motion vectors to use for each ref and their rates
// Should point to first of 2 arrays in 2D array
int *single_newmv_rate;
// Pointer to array of predicted rate-distortion
// Should point to first of 2 arrays in 2D array
int64_t (*modelled_rd)[TOTAL_REFS_PER_FRAME];
-#endif // CONFIG_EXT_INTER
InterpFilter single_filter[MB_MODE_COUNT][TOTAL_REFS_PER_FRAME];
} HandleInterModeArgs;
static int64_t handle_newmv(const AV1_COMP *const cpi, MACROBLOCK *const x,
const BLOCK_SIZE bsize,
int_mv (*const mode_mv)[TOTAL_REFS_PER_FRAME],
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
int_mv (*const mode_comp_mv)[TOTAL_REFS_PER_FRAME],
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
const int mi_row, const int mi_col,
int *const rate_mv, int_mv *const single_newmv,
HandleInterModeArgs *const args) {
@@ -8272,13 +8098,11 @@
const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
const int is_comp_pred = has_second_ref(mbmi);
const PREDICTION_MODE this_mode = mbmi->mode;
-#if CONFIG_EXT_INTER
const int is_comp_interintra_pred = (mbmi->ref_frame[1] == INTRA_FRAME);
-#endif // CONFIG_EXT_INTER
int_mv *const frame_mv = mode_mv[this_mode];
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
int_mv *const frame_comp_mv = mode_comp_mv[this_mode];
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
const int refs[2] = { mbmi->ref_frame[0],
mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1] };
int i;
@@ -8286,7 +8110,6 @@
(void)args;
if (is_comp_pred) {
-#if CONFIG_EXT_INTER
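+ // Seed the joint search with the per-reference NEWMV results found by the
+ // earlier single-reference searches.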
for (i = 0; i < 2; ++i) {
single_newmv[refs[i]].as_int = args->single_newmv[refs[i]].as_int;
}
@@ -8297,9 +8120,9 @@
if (cpi->sf.comp_inter_joint_search_thresh <= bsize) {
joint_motion_search(cpi, x, bsize, frame_mv,
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
NULL, // int_mv *frame_comp_mv
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
mi_row, mi_col, NULL, NULL, 0, rate_mv, 0);
} else {
*rate_mv = 0;
@@ -8346,24 +8169,7 @@
x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
}
}
-#else // !CONFIG_EXT_INTER
- // Initialize mv using single prediction mode result.
- frame_mv[refs[0]].as_int = single_newmv[refs[0]].as_int;
- frame_mv[refs[1]].as_int = single_newmv[refs[1]].as_int;
-
- if (cpi->sf.comp_inter_joint_search_thresh <= bsize) {
- joint_motion_search(cpi, x, bsize, frame_mv, mi_row, mi_col, rate_mv, 0);
- } else {
- *rate_mv = 0;
- for (i = 0; i < 2; ++i) {
- av1_set_mvcost(x, refs[i], i, mbmi->ref_mv_idx);
- *rate_mv += av1_mv_bit_cost(&frame_mv[refs[i]].as_mv,
- &mbmi_ext->ref_mvs[refs[i]][0].as_mv,
- x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
- }
- }
-#endif // CONFIG_EXT_INTER
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
} else if (is_inter_singleref_comp_mode(this_mode)) {
// Single ref comp mode
const int mode0 = compound_ref0_mode(this_mode);
@@ -8397,9 +8203,8 @@
&mbmi_ext->ref_mvs[refs[0]][0].as_mv,
x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
}
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
} else {
-#if CONFIG_EXT_INTER
if (is_comp_interintra_pred) {
x->best_mv = args->single_newmv[refs[0]];
*rate_mv = args->single_newmv_rate[refs[0]];
@@ -8408,10 +8213,6 @@
args->single_newmv[refs[0]] = x->best_mv;
args->single_newmv_rate[refs[0]] = *rate_mv;
}
-#else
- single_motion_search(cpi, x, bsize, mi_row, mi_col, rate_mv);
- single_newmv[refs[0]] = x->best_mv;
-#endif // CONFIG_EXT_INTER
if (x->best_mv.as_int == INVALID_MV) return INT64_MAX;
@@ -8554,10 +8355,8 @@
const int *refs, int rate_mv,
#if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
// only used when WARPED_MOTION is on?
- int_mv *const single_newmv,
-#if CONFIG_EXT_INTER
- int rate2_bmc_nocoeff, MB_MODE_INFO *best_bmc_mbmi, int rate_mv_bmc,
-#endif // CONFIG_EXT_INTER
+ int_mv *const single_newmv, int rate2_bmc_nocoeff,
+ MB_MODE_INFO *best_bmc_mbmi, int rate_mv_bmc,
#endif // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
int rs, int *skip_txfm_sb, int64_t *skip_sse_sb, BUFFER_SET *orig_dst) {
const AV1_COMMON *const cm = &cpi->common;
@@ -8613,9 +8412,7 @@
#else
mbmi->num_proj_ref[0] = findSamples(cm, xd, mi_row, mi_col, pts, pts_inref);
#endif // WARPED_MOTION_SORT_SAMPLES
-#if CONFIG_EXT_INTER
best_bmc_mbmi->num_proj_ref[0] = mbmi->num_proj_ref[0];
-#endif // CONFIG_EXT_INTER
#endif // CONFIG_WARPED_MOTION
#if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
rate2_nocoeff = rd_stats->rate;
@@ -8637,12 +8434,8 @@
int64_t tmp_rd = INT64_MAX;
int tmp_rate;
int64_t tmp_dist;
-#if CONFIG_EXT_INTER
int tmp_rate2 =
motion_mode != SIMPLE_TRANSLATION ? rate2_bmc_nocoeff : rate2_nocoeff;
-#else
- int tmp_rate2 = rate2_nocoeff;
-#endif // CONFIG_EXT_INTER
#if CONFIG_NCOBMC_ADAPT_WEIGHT
// We cannot estimate the rd cost for the motion mode NCOBMC_ADAPT_WEIGHT
@@ -8656,32 +8449,22 @@
mbmi->motion_mode = motion_mode;
#if CONFIG_MOTION_VAR
if (mbmi->motion_mode == OBMC_CAUSAL) {
-#if CONFIG_EXT_INTER
*mbmi = *best_bmc_mbmi;
mbmi->motion_mode = OBMC_CAUSAL;
-#endif // CONFIG_EXT_INTER
if (!is_comp_pred &&
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
!is_inter_singleref_comp_mode(this_mode) &&
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
have_newmv_in_inter_mode(this_mode)) {
int tmp_rate_mv = 0;
- single_motion_search(cpi, x, bsize, mi_row, mi_col,
-#if CONFIG_EXT_INTER
- 0,
-#endif // CONFIG_EXT_INTER
- &tmp_rate_mv);
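+ // This path is only reached for single-reference prediction
+ // (!is_comp_pred), so ref_idx 0 is the only reference to refine.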
+ single_motion_search(cpi, x, bsize, mi_row, mi_col, 0, &tmp_rate_mv);
mbmi->mv[0].as_int = x->best_mv.as_int;
if (discount_newmv_test(cpi, this_mode, mbmi->mv[0], mode_mv,
refs[0])) {
tmp_rate_mv = AOMMAX((tmp_rate_mv / NEW_MV_DISCOUNT_FACTOR), 1);
}
-#if CONFIG_EXT_INTER
tmp_rate2 = rate2_bmc_nocoeff - rate_mv_bmc + tmp_rate_mv;
-#else
- tmp_rate2 = rate2_nocoeff - rate_mv + tmp_rate_mv;
-#endif // CONFIG_EXT_INTER
#if CONFIG_DUAL_FILTER
if (!has_subpel_mv_component(xd->mi[0], xd, 0))
mbmi->interp_filter[0] = EIGHTTAP_REGULAR;
@@ -8689,10 +8472,8 @@
mbmi->interp_filter[1] = EIGHTTAP_REGULAR;
#endif // CONFIG_DUAL_FILTER
av1_build_inter_predictors_sb(cm, xd, mi_row, mi_col, orig_dst, bsize);
-#if CONFIG_EXT_INTER
} else {
av1_build_inter_predictors_sb(cm, xd, mi_row, mi_col, orig_dst, bsize);
-#endif // CONFIG_EXT_INTER
}
av1_build_obmc_inter_prediction(
cm, xd, mi_row, mi_col, args->above_pred_buf, args->above_pred_stride,
@@ -8707,10 +8488,8 @@
#if WARPED_MOTION_SORT_SAMPLES
int pts[SAMPLES_ARRAY_SIZE], pts_inref[SAMPLES_ARRAY_SIZE];
#endif // WARPED_MOTION_SORT_SAMPLES
-#if CONFIG_EXT_INTER
*mbmi = *best_bmc_mbmi;
mbmi->motion_mode = WARPED_CAUSAL;
-#endif // CONFIG_EXT_INTER
mbmi->wm_params[0].wmtype = DEFAULT_WMTYPE;
#if CONFIG_DUAL_FILTER
for (int dir = 0; dir < 4; ++dir)
@@ -8729,9 +8508,7 @@
if (mbmi->num_proj_ref[0] > 1) {
mbmi->num_proj_ref[0] = sortSamples(pts_mv0, &mbmi->mv[0].as_mv, pts,
pts_inref, mbmi->num_proj_ref[0]);
-#if CONFIG_EXT_INTER
best_bmc_mbmi->num_proj_ref[0] = mbmi->num_proj_ref[0];
-#endif // CONFIG_EXT_INTER
}
#endif // WARPED_MOTION_SORT_SAMPLES
@@ -8772,14 +8549,10 @@
refs[0])) {
tmp_rate_mv = AOMMAX((tmp_rate_mv / NEW_MV_DISCOUNT_FACTOR), 1);
}
-#if CONFIG_EXT_INTER
#if WARPED_MOTION_SORT_SAMPLES
best_bmc_mbmi->num_proj_ref[0] = mbmi->num_proj_ref[0];
#endif // WARPED_MOTION_SORT_SAMPLES
tmp_rate2 = rate2_bmc_nocoeff - rate_mv_bmc + tmp_rate_mv;
-#else
- tmp_rate2 = rate2_nocoeff - rate_mv + tmp_rate_mv;
-#endif // CONFIG_EXT_INTER
#if CONFIG_DUAL_FILTER
if (!has_subpel_mv_component(xd->mi[0], xd, 0))
mbmi->interp_filter[0] = EIGHTTAP_REGULAR;
@@ -8940,11 +8713,7 @@
}
#if CONFIG_GLOBAL_MOTION
- if (this_mode == ZEROMV
-#if CONFIG_EXT_INTER
- || this_mode == ZERO_ZEROMV
-#endif // CONFIG_EXT_INTER
- ) {
+ if (this_mode == ZEROMV || this_mode == ZERO_ZEROMV) {
if (is_nontrans_global_motion(xd)) {
rd_stats->rate -= rs;
#if CONFIG_DUAL_FILTER
@@ -9008,9 +8777,9 @@
RD_STATS *rd_stats_y, RD_STATS *rd_stats_uv,
int *disable_skip,
int_mv (*mode_mv)[TOTAL_REFS_PER_FRAME],
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
int_mv (*mode_comp_mv)[TOTAL_REFS_PER_FRAME],
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
int mi_row, int mi_col,
HandleInterModeArgs *args,
const int64_t ref_best_rd) {
@@ -9021,20 +8790,19 @@
MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
const int is_comp_pred = has_second_ref(mbmi);
const int this_mode = mbmi->mode;
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
const int is_singleref_comp_mode = is_inter_singleref_comp_mode(this_mode);
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
int_mv *frame_mv = mode_mv[this_mode];
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
// The comp mv for the compound mode in single ref
int_mv *frame_comp_mv = mode_comp_mv[this_mode];
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
int i;
int refs[2] = { mbmi->ref_frame[0],
(mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1]) };
int_mv cur_mv[2];
int rate_mv = 0;
-#if CONFIG_EXT_INTER
int pred_exists = 1;
#if CONFIG_WEDGE || CONFIG_COMPOUND_SEGMENT || CONFIG_INTERINTRA
const int bw = block_size_wide[bsize];
@@ -9046,9 +8814,6 @@
#endif // CONFIG_INTERINTRA
const int is_comp_interintra_pred = (mbmi->ref_frame[1] == INTRA_FRAME);
uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
-#else
- int_mv *const single_newmv = args->single_newmv;
-#endif // CONFIG_EXT_INTER
#if CONFIG_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint8_t, tmp_buf_[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
#else
@@ -9057,11 +8822,9 @@
uint8_t *tmp_buf;
#if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
-#if CONFIG_EXT_INTER
int rate2_bmc_nocoeff;
MB_MODE_INFO best_bmc_mbmi;
int rate_mv_bmc;
-#endif // CONFIG_EXT_INTER
#endif // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
int64_t rd = INT64_MAX;
BUFFER_SET orig_dst, tmp_dst;
@@ -9076,7 +8839,6 @@
mbmi->ncobmc_mode[1] = NO_OVERLAP;
#endif
-#if CONFIG_EXT_INTER
#if CONFIG_INTERINTRA
int compmode_interintra_cost = 0;
mbmi->use_wedge_interintra = 0;
@@ -9095,9 +8857,7 @@
assert(!is_comp_interintra_pred || (!is_comp_pred));
// is_comp_interintra_pred implies is_interintra_allowed(mbmi->sb_type)
assert(!is_comp_interintra_pred || is_interintra_allowed(mbmi));
-#endif // CONFIG_EXT_INTER
-#if CONFIG_EXT_INTER
#if CONFIG_COMPOUND_SINGLEREF
if (is_comp_pred || is_singleref_comp_mode)
#else // !CONFIG_COMPOUND_SINGLEREF
@@ -9105,7 +8865,6 @@
#endif // CONFIG_COMPOUND_SINGLEREF
mode_ctx = mbmi_ext->compound_mode_context[refs[0]];
else
-#endif // CONFIG_EXT_INTER
mode_ctx = av1_mode_context_analyzer(mbmi_ext->mode_context,
mbmi->ref_frame, bsize, -1);
@@ -9128,21 +8887,21 @@
if (frame_mv[refs[0]].as_int == INVALID_MV ||
frame_mv[refs[1]].as_int == INVALID_MV)
return INT64_MAX;
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
} else if (is_singleref_comp_mode) {
if (frame_mv[refs[0]].as_int == INVALID_MV ||
frame_comp_mv[refs[0]].as_int == INVALID_MV)
return INT64_MAX;
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
}
mbmi->motion_mode = SIMPLE_TRANSLATION;
if (have_newmv_in_inter_mode(this_mode)) {
const int64_t ret_val =
handle_newmv(cpi, x, bsize, mode_mv,
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
mode_comp_mv,
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
mi_row, mi_col, &rate_mv, single_newmv, args);
if (ret_val != 0)
return ret_val;
@@ -9157,7 +8916,7 @@
mbmi->mv[i].as_int = cur_mv[i].as_int;
}
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
if (!is_comp_pred && is_singleref_comp_mode) {
cur_mv[1] = frame_comp_mv[refs[0]];
// Clip "next_nearest" so that it does not extend to far out of image
@@ -9165,17 +8924,9 @@
if (mv_check_bounds(&x->mv_limits, &cur_mv[1].as_mv)) return INT64_MAX;
mbmi->mv[1].as_int = cur_mv[1].as_int;
}
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
-#if CONFIG_EXT_INTER
- if (this_mode == NEAREST_NEARESTMV)
-#else
- if (this_mode == NEARESTMV && is_comp_pred)
-#endif // CONFIG_EXT_INTER
- {
-#if !CONFIG_EXT_INTER
- uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
-#endif // !CONFIG_EXT_INTER
+ if (this_mode == NEAREST_NEARESTMV) {
if (mbmi_ext->ref_mv_count[ref_frame_type] > 0) {
cur_mv[0] = mbmi_ext->ref_mv_stack[ref_frame_type][0].this_mv;
cur_mv[1] = mbmi_ext->ref_mv_stack[ref_frame_type][0].comp_mv;
@@ -9188,7 +8939,6 @@
}
}
-#if CONFIG_EXT_INTER
if (mbmi_ext->ref_mv_count[ref_frame_type] > 0) {
#if CONFIG_COMPOUND_SINGLEREF
if (this_mode == NEAREST_NEWMV || // this_mode == SR_NEAREST_NEWMV ||
@@ -9268,22 +9018,6 @@
mbmi->mv[1].as_int = cur_mv[1].as_int;
}
}
-#else // !CONFIG_EXT_INTER
- if (this_mode == NEARMV && is_comp_pred) {
- uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
- if (mbmi_ext->ref_mv_count[ref_frame_type] > 1) {
- int ref_mv_idx = mbmi->ref_mv_idx + 1;
- cur_mv[0] = mbmi_ext->ref_mv_stack[ref_frame_type][ref_mv_idx].this_mv;
- cur_mv[1] = mbmi_ext->ref_mv_stack[ref_frame_type][ref_mv_idx].comp_mv;
-
- for (i = 0; i < 2; ++i) {
- clamp_mv2(&cur_mv[i].as_mv, xd);
- if (mv_check_bounds(&x->mv_limits, &cur_mv[i].as_mv)) return INT64_MAX;
- mbmi->mv[i].as_int = cur_mv[i].as_int;
- }
- }
- }
-#endif // CONFIG_EXT_INTER
// do first prediction into the destination buffer. Do the next
// prediction into a temporary buffer. Then keep track of which one
@@ -9308,25 +9042,15 @@
// initiation of a motion field.
if (discount_newmv_test(cpi, this_mode, frame_mv[refs[0]], mode_mv,
refs[0])) {
-#if CONFIG_EXT_INTER
rd_stats->rate += AOMMIN(
cost_mv_ref(x, this_mode, mode_ctx),
cost_mv_ref(x, is_comp_pred ? NEAREST_NEARESTMV : NEARESTMV, mode_ctx));
-#else
- rd_stats->rate += AOMMIN(cost_mv_ref(x, this_mode, mode_ctx),
- cost_mv_ref(x, NEARESTMV, mode_ctx));
-#endif // CONFIG_EXT_INTER
} else {
rd_stats->rate += cost_mv_ref(x, this_mode, mode_ctx);
}
if (RDCOST(x->rdmult, rd_stats->rate, 0) > ref_best_rd &&
-#if CONFIG_EXT_INTER
- mbmi->mode != NEARESTMV && mbmi->mode != NEAREST_NEARESTMV
-#else
- mbmi->mode != NEARESTMV
-#endif // CONFIG_EXT_INTER
- )
+ mbmi->mode != NEARESTMV && mbmi->mode != NEAREST_NEARESTMV)
return INT64_MAX;
int64_t ret_val = interpolation_filter_search(
@@ -9334,7 +9058,6 @@
&rd, &rs, &skip_txfm_sb, &skip_sse_sb);
if (ret_val != 0) return ret_val;
-#if CONFIG_EXT_INTER
#if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
best_bmc_mbmi = *mbmi;
rate2_bmc_nocoeff = rd_stats->rate;
@@ -9378,7 +9101,7 @@
best_compound_data.seg_mask = tmp_mask_buf;
#endif // CONFIG_COMPOUND_SEGMENT
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
// TODO(zoeliu): To further check whether the following setups are needed.
// Single ref compound mode: Prepare the 2nd ref frame predictor the same as
// the 1st one.
@@ -9387,7 +9110,7 @@
for (i = 0; i < MAX_MB_PLANE; i++)
xd->plane[i].pre[1] = xd->plane[i].pre[0];
}
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
if (masked_compound_used) {
// get inter predictors to use for masked compound modes
@@ -9671,7 +9394,6 @@
&tmp_dist, &skip_txfm_sb, &skip_sse_sb);
rd = RDCOST(x->rdmult, rs + tmp_rate, tmp_dist);
}
-#endif // CONFIG_EXT_INTER
if (!is_comp_pred)
#if CONFIG_DUAL_FILTER
@@ -9680,7 +9402,6 @@
args->single_filter[this_mode][refs[0]] = mbmi->interp_filter;
#endif // CONFIG_DUAL_FILTER
-#if CONFIG_EXT_INTER
if (args->modelled_rd != NULL) {
if (is_comp_pred) {
const int mode0 = compound_ref0_mode(this_mode);
@@ -9695,7 +9416,6 @@
args->modelled_rd[this_mode][refs[0]] = rd;
}
}
-#endif // CONFIG_EXT_INTER
if (cpi->sf.use_rd_breakout && ref_best_rd < INT64_MAX) {
// if current pred_error modeled rd is substantially more than the best
@@ -9706,7 +9426,6 @@
}
}
-#if CONFIG_EXT_INTER
#if CONFIG_INTERINTRA
rd_stats->rate += compmode_interintra_cost;
#if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
@@ -9716,18 +9435,14 @@
#if CONFIG_WEDGE || CONFIG_COMPOUND_SEGMENT
rd_stats->rate += compmode_interinter_cost;
#endif
-#endif
- ret_val = motion_mode_rd(cpi, x, bsize, rd_stats, rd_stats_y, rd_stats_uv,
- disable_skip, mode_mv, mi_row, mi_col, args,
- ref_best_rd, refs, rate_mv,
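+ // Evaluate the candidate motion modes (e.g. OBMC / warped motion where
+ // enabled) on top of the current inter mode decision.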
+ ret_val = motion_mode_rd(
+ cpi, x, bsize, rd_stats, rd_stats_y, rd_stats_uv, disable_skip, mode_mv,
+ mi_row, mi_col, args, ref_best_rd, refs, rate_mv,
#if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
- single_newmv,
-#if CONFIG_EXT_INTER
- rate2_bmc_nocoeff, &best_bmc_mbmi, rate_mv_bmc,
-#endif // CONFIG_EXT_INTER
+ single_newmv, rate2_bmc_nocoeff, &best_bmc_mbmi, rate_mv_bmc,
#endif // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
- rs, &skip_txfm_sb, &skip_sse_sb, &orig_dst);
+ rs, &skip_txfm_sb, &skip_sse_sb, &orig_dst);
if (ret_val != 0) return ret_val;
return 0; // The rate-distortion cost will be re-calculated by caller.
@@ -9755,11 +9470,8 @@
int_mv *const candidates = x->mbmi_ext->ref_mvs[ref_frame];
av1_find_mv_refs(cm, xd, mi, ref_frame, &mbmi_ext->ref_mv_count[ref_frame],
mbmi_ext->ref_mv_stack[ref_frame],
-#if CONFIG_EXT_INTER
- mbmi_ext->compound_mode_context,
-#endif // CONFIG_EXT_INTER
- candidates, mi_row, mi_col, NULL, NULL,
- mbmi_ext->mode_context);
+ mbmi_ext->compound_mode_context, candidates, mi_row, mi_col,
+ NULL, NULL, mbmi_ext->mode_context);
int_mv nearestmv, nearmv;
av1_find_best_ref_mvs(0, candidates, &nearestmv, &nearmv);
@@ -10318,15 +10030,13 @@
unsigned char segment_id = mbmi->segment_id;
int comp_pred, i, k;
int_mv frame_mv[MB_MODE_COUNT][TOTAL_REFS_PER_FRAME];
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
int_mv frame_comp_mv[MB_MODE_COUNT][TOTAL_REFS_PER_FRAME];
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
struct buf_2d yv12_mb[TOTAL_REFS_PER_FRAME][MAX_MB_PLANE];
int_mv single_newmv[TOTAL_REFS_PER_FRAME] = { { 0 } };
-#if CONFIG_EXT_INTER
int single_newmv_rate[TOTAL_REFS_PER_FRAME] = { 0 };
int64_t modelled_rd[MB_MODE_COUNT][TOTAL_REFS_PER_FRAME];
-#endif // CONFIG_EXT_INTER
static const int flag_list[TOTAL_REFS_PER_FRAME] = {
0,
AOM_LAST_FLAG,
@@ -10380,10 +10090,10 @@
int best_skip2 = 0;
uint16_t ref_frame_skip_mask[2] = { 0 };
uint32_t mode_skip_mask[TOTAL_REFS_PER_FRAME] = { 0 };
-#if CONFIG_EXT_INTER && CONFIG_INTERINTRA
+#if CONFIG_INTERINTRA
MV_REFERENCE_FRAME best_single_inter_ref = LAST_FRAME;
int64_t best_single_inter_rd = INT64_MAX;
-#endif // CONFIG_EXT_INTER && CONFIG_INTERINTRA
+#endif // CONFIG_INTERINTRA
int mode_skip_start = sf->mode_skip_start + 1;
const int *const rd_threshes = rd_opt->threshes[segment_id][bsize];
const int *const rd_thresh_freq_fact = tile_data->thresh_freq_fact[bsize];
@@ -10401,13 +10111,9 @@
{ NULL },
{ MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE },
#endif // CONFIG_MOTION_VAR
-#if CONFIG_EXT_INTER
NULL,
NULL,
NULL,
-#else // CONFIG_EXT_INTER
- NULL,
-#endif // CONFIG_EXT_INTER
{ { 0 } },
};
@@ -10478,9 +10184,7 @@
for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
x->pred_mv_sad[ref_frame] = INT_MAX;
x->mbmi_ext->mode_context[ref_frame] = 0;
-#if CONFIG_EXT_INTER
x->mbmi_ext->compound_mode_context[ref_frame] = 0;
-#endif // CONFIG_EXT_INTER
if (cpi->ref_frame_flags & flag_list[ref_frame]) {
assert(get_ref_frame_buffer(cpi, ref_frame) != NULL);
setup_buffer_inter(cpi, x, ref_frame, bsize, mi_row, mi_col,
@@ -10501,7 +10205,6 @@
#else // CONFIG_GLOBAL_MOTION
frame_mv[ZEROMV][ref_frame].as_int = 0;
#endif // CONFIG_GLOBAL_MOTION
-#if CONFIG_EXT_INTER
frame_mv[NEW_NEWMV][ref_frame].as_int = INVALID_MV;
#if CONFIG_COMPOUND_SINGLEREF
frame_mv[SR_NEW_NEWMV][ref_frame].as_int = INVALID_MV;
@@ -10521,7 +10224,6 @@
#else // CONFIG_GLOBAL_MOTION
frame_mv[ZERO_ZEROMV][ref_frame].as_int = 0;
#endif // CONFIG_GLOBAL_MOTION
-#endif // CONFIG_EXT_INTER
}
for (; ref_frame < MODE_CTX_REF_FRAMES; ++ref_frame) {
@@ -10530,11 +10232,8 @@
x->mbmi_ext->mode_context[ref_frame] = 0;
av1_find_mv_refs(cm, xd, mi, ref_frame, &mbmi_ext->ref_mv_count[ref_frame],
mbmi_ext->ref_mv_stack[ref_frame],
-#if CONFIG_EXT_INTER
- mbmi_ext->compound_mode_context,
-#endif // CONFIG_EXT_INTER
- candidates, mi_row, mi_col, NULL, NULL,
- mbmi_ext->mode_context);
+ mbmi_ext->compound_mode_context, candidates, mi_row,
+ mi_col, NULL, NULL, mbmi_ext->mode_context);
if (mbmi_ext->ref_mv_count[ref_frame] < 2) {
MV_REFERENCE_FRAME rf[2];
av1_set_ref_frame(rf, ref_frame);
@@ -10630,7 +10329,6 @@
mode_skip_mask[ALTREF_FRAME] |= (1 << NEARMV);
if (frame_mv[NEARESTMV][ALTREF_FRAME].as_int != zeromv.as_int)
mode_skip_mask[ALTREF_FRAME] |= (1 << NEARESTMV);
-#if CONFIG_EXT_INTER
if (frame_mv[NEAREST_NEARESTMV][ALTREF_FRAME].as_int != zeromv.as_int)
mode_skip_mask[ALTREF_FRAME] |= (1 << NEAREST_NEARESTMV);
if (frame_mv[NEAR_NEARMV][ALTREF_FRAME].as_int != zeromv.as_int)
@@ -10641,7 +10339,6 @@
zeromv.as_int)
mode_skip_mask[ALTREF_FRAME] |= (1 << SR_NEAREST_NEARMV);
#endif // CONFIG_COMPOUND_SINGLEREF
-#endif // CONFIG_EXT_INTER
}
}
@@ -10704,11 +10401,9 @@
#if CONFIG_PVQ
od_encode_checkpoint(&x->daala_enc, &pre_buf);
#endif // CONFIG_PVQ
-#if CONFIG_EXT_INTER
for (i = 0; i < MB_MODE_COUNT; ++i)
for (ref_frame = 0; ref_frame < TOTAL_REFS_PER_FRAME; ++ref_frame)
modelled_rd[i][ref_frame] = INT64_MAX;
-#endif // CONFIG_EXT_INTER
for (midx = 0; midx < MAX_MODES; ++midx) {
int mode_index;
@@ -10731,7 +10426,6 @@
second_ref_frame = av1_mode_order[mode_index].ref_frame[1];
mbmi->ref_mv_idx = 0;
-#if CONFIG_EXT_INTER
if (ref_frame > INTRA_FRAME && second_ref_frame == INTRA_FRAME) {
// Mode must by compatible
if (!is_interintra_allowed_mode(this_mode)) continue;
@@ -10751,7 +10445,6 @@
frame_mv[compound_ref1_mode(this_mode)][ref_frame].as_int;
#endif // CONFIG_COMPOUND_SINGLEREF
}
-#endif // CONFIG_EXT_INTER
// Look at the reference frame of the best mode so far and set the
// skip mask to look at a subset of the remaining modes.
@@ -10906,11 +10599,8 @@
#endif // CONFIG_GLOBAL_MOTION
const MV_REFERENCE_FRAME ref_frames[2] = { ref_frame, second_ref_frame };
if (!check_best_zero_mv(cpi, x, mbmi_ext->mode_context,
-#if CONFIG_EXT_INTER
- mbmi_ext->compound_mode_context,
-#endif // CONFIG_EXT_INTER
- frame_mv, this_mode, ref_frames, bsize, -1,
- mi_row, mi_col))
+ mbmi_ext->compound_mode_context, frame_mv,
+ this_mode, ref_frames, bsize, -1, mi_row, mi_col))
continue;
}
@@ -10941,18 +10631,18 @@
if (comp_pred) xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
}
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
// Single ref compound mode
if (!comp_pred && is_inter_singleref_comp_mode(mbmi->mode)) {
xd->block_refs[1] = xd->block_refs[0];
for (i = 0; i < MAX_MB_PLANE; i++)
xd->plane[i].pre[1] = xd->plane[i].pre[0];
}
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
-#if CONFIG_EXT_INTER && CONFIG_INTERINTRA
+#if CONFIG_INTERINTRA
mbmi->interintra_mode = (INTERINTRA_MODE)(II_DC_PRED - 1);
-#endif // CONFIG_EXT_INTER && CONFIG_INTERINTRA
+#endif // CONFIG_INTERINTRA
if (ref_frame == INTRA_FRAME) {
RD_STATS rd_stats_y;
@@ -11110,7 +10800,7 @@
backup_ref_mv[0] = mbmi_ext->ref_mvs[ref_frame][0];
if (comp_pred) backup_ref_mv[1] = mbmi_ext->ref_mvs[second_ref_frame][0];
-#if CONFIG_EXT_INTER && CONFIG_INTERINTRA
+#if CONFIG_INTERINTRA
if (second_ref_frame == INTRA_FRAME) {
if (best_single_inter_ref != ref_frame) continue;
mbmi->interintra_mode = intra_to_interintra_mode[best_intra_mode];
@@ -11128,11 +10818,10 @@
mbmi->filter_intra_mode_info.use_filter_intra_mode[1] = 0;
#endif // CONFIG_FILTER_INTRA
}
-#endif // CONFIG_EXT_INTER && CONFIG_INTERINTRA
+#endif // CONFIG_INTERINTRA
mbmi->ref_mv_idx = 0;
ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
-#if CONFIG_EXT_INTER
if (comp_pred) {
if (mbmi_ext->ref_mv_count[ref_frame_type] > 1) {
int ref_mv_idx = 0;
@@ -11179,7 +10868,6 @@
}
#endif // CONFIG_COMPOUND_SINGLEREF
} else {
-#endif // CONFIG_EXT_INTER
if (mbmi->mode == NEWMV && mbmi_ext->ref_mv_count[ref_frame_type] > 1) {
int ref;
for (ref = 0; ref < 1 + comp_pred; ++ref) {
@@ -11191,9 +10879,7 @@
mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0] = this_mv;
}
}
-#if CONFIG_EXT_INTER
}
-#endif // CONFIG_EXT_INTER
{
RD_STATS rd_stats, rd_stats_y, rd_stats_uv;
av1_init_rd_stats(&rd_stats);
@@ -11201,15 +10887,13 @@
// Point to variables that are maintained between loop iterations
args.single_newmv = single_newmv;
-#if CONFIG_EXT_INTER
args.single_newmv_rate = single_newmv_rate;
args.modelled_rd = modelled_rd;
-#endif // CONFIG_EXT_INTER
this_rd = handle_inter_mode(cpi, x, bsize, &rd_stats, &rd_stats_y,
&rd_stats_uv, &disable_skip, frame_mv,
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
frame_comp_mv,
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
mi_row, mi_col, &args, best_rd);
rate2 = rd_stats.rate;
@@ -11222,7 +10906,6 @@
// TODO(jingning): This needs some refactoring to improve code quality
// and reduce redundant steps.
-#if CONFIG_EXT_INTER
#if CONFIG_COMPOUND_SINGLEREF
if ((have_nearmv_in_inter_mode(mbmi->mode) &&
mbmi_ext->ref_mv_count[ref_frame_type] > 2) ||
@@ -11235,11 +10918,6 @@
((mbmi->mode == NEWMV || mbmi->mode == NEW_NEWMV) &&
mbmi_ext->ref_mv_count[ref_frame_type] > 1))
#endif // CONFIG_COMPOUND_SINGLEREF
-#else // !CONFIG_EXT_INTER
- if ((mbmi->mode == NEARMV &&
- mbmi_ext->ref_mv_count[ref_frame_type] > 2) ||
- (mbmi->mode == NEWMV && mbmi_ext->ref_mv_count[ref_frame_type] > 1))
-#endif // CONFIG_EXT_INTER
{
int_mv backup_mv = frame_mv[NEARMV][ref_frame];
MB_MODE_INFO backup_mbmi = *mbmi;
@@ -11247,12 +10925,8 @@
int64_t tmp_ref_rd = this_rd;
int ref_idx;
-// TODO(jingning): This should be deprecated shortly.
-#if CONFIG_EXT_INTER
+ // TODO(jingning): This should be deprecated shortly.
int idx_offset = have_nearmv_in_inter_mode(mbmi->mode) ? 1 : 0;
-#else
- int idx_offset = (mbmi->mode == NEARMV) ? 1 : 0;
-#endif // CONFIG_EXT_INTER
int ref_set =
AOMMIN(2, mbmi_ext->ref_mv_count[ref_frame_type] - 1 - idx_offset);
@@ -11297,7 +10971,6 @@
mbmi->ref_mv_idx = 1 + ref_idx;
-#if CONFIG_EXT_INTER
if (comp_pred) {
int ref_mv_idx = mbmi->ref_mv_idx;
// Special case: NEAR_NEWMV and NEW_NEARMV modes use
@@ -11362,7 +11035,6 @@
}
#endif // CONFIG_COMPOUND_SINGLEREF
} else {
-#endif // CONFIG_EXT_INTER
for (ref = 0; ref < 1 + comp_pred; ++ref) {
int_mv this_mv =
(ref == 0)
@@ -11374,9 +11046,7 @@
xd->n8_h << MI_SIZE_LOG2, xd);
mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0] = this_mv;
}
-#if CONFIG_EXT_INTER
}
-#endif
cur_mv =
mbmi_ext->ref_mv_stack[ref_frame][mbmi->ref_mv_idx + idx_offset]
@@ -11385,31 +11055,25 @@
if (!mv_check_bounds(&x->mv_limits, &cur_mv.as_mv)) {
int_mv dummy_single_newmv[TOTAL_REFS_PER_FRAME] = { { 0 } };
-#if CONFIG_EXT_INTER
int dummy_single_newmv_rate[TOTAL_REFS_PER_FRAME] = { 0 };
-#endif // CONFIG_EXT_INTER
frame_mv[NEARMV][ref_frame] = cur_mv;
av1_init_rd_stats(&tmp_rd_stats);
// Point to variables that are not maintained between iterations
args.single_newmv = dummy_single_newmv;
-#if CONFIG_EXT_INTER
args.single_newmv_rate = dummy_single_newmv_rate;
args.modelled_rd = NULL;
-#endif // CONFIG_EXT_INTER
tmp_alt_rd = handle_inter_mode(cpi, x, bsize, &tmp_rd_stats,
&tmp_rd_stats_y, &tmp_rd_stats_uv,
&dummy_disable_skip, frame_mv,
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
frame_comp_mv,
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
mi_row, mi_col, &args, best_rd);
// Prevent pointers from escaping local scope
args.single_newmv = NULL;
-#if CONFIG_EXT_INTER
args.single_newmv_rate = NULL;
-#endif // CONFIG_EXT_INTER
}
for (i = 0; i < mbmi->ref_mv_idx; ++i) {
@@ -11513,14 +11177,14 @@
rate2 += ref_costs_single[ref_frame];
}
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
// Add the cost to signal single/comp mode in single ref.
if (!comp_pred && cm->reference_mode != COMPOUND_REFERENCE) {
aom_prob singleref_comp_mode_p = av1_get_inter_mode_prob(cm, xd);
rate2 += av1_cost_bit(singleref_comp_mode_p,
is_inter_singleref_comp_mode(mbmi->mode));
}
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
#if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
if (ref_frame == INTRA_FRAME)
@@ -11574,13 +11238,13 @@
best_intra_rd = this_rd;
best_intra_mode = mbmi->mode;
}
-#if CONFIG_EXT_INTER && CONFIG_INTERINTRA
+#if CONFIG_INTERINTRA
} else if (second_ref_frame == NONE_FRAME) {
if (this_rd < best_single_inter_rd) {
best_single_inter_rd = this_rd;
best_single_inter_ref = mbmi->ref_frame[0];
}
-#endif // CONFIG_EXT_INTER && CONFIG_INTERINTRA
+#endif // CONFIG_INTERINTRA
}
if (!disable_skip && ref_frame == INTRA_FRAME) {
@@ -11702,14 +11366,14 @@
xd->plane[i].pre[1] = yv12_mb[mbmi->ref_frame[1]][i];
}
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
// Single ref compound mode
if (!has_second_ref(mbmi) && is_inter_singleref_comp_mode(mbmi->mode)) {
xd->block_refs[1] = xd->block_refs[0];
for (i = 0; i < MAX_MB_PLANE; i++)
xd->plane[i].pre[1] = xd->plane[i].pre[0];
}
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
if (is_inter_mode(mbmi->mode)) {
av1_build_inter_predictors_sb(cm, xd, mi_row, mi_col, NULL, bsize);
@@ -11906,15 +11570,11 @@
// Therefore, sometimes, NEWMV is chosen instead of NEARESTMV, NEARMV, and
// ZEROMV. Here, checks are added for those cases, and the mode decisions
// are corrected.
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
// NOTE: For SR_NEW_NEWMV, no need to check as the two mvs from the same ref
// are surely different from each other.
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
- if (best_mbmode.mode == NEWMV
-#if CONFIG_EXT_INTER
- || best_mbmode.mode == NEW_NEWMV
-#endif // CONFIG_EXT_INTER
- ) {
+#endif // CONFIG_COMPOUND_SINGLEREF
+ if (best_mbmode.mode == NEWMV || best_mbmode.mode == NEW_NEWMV) {
const MV_REFERENCE_FRAME refs[2] = { best_mbmode.ref_frame[0],
best_mbmode.ref_frame[1] };
int comp_pred_mode = refs[1] > INTRA_FRAME;
@@ -11967,7 +11627,6 @@
int_mv nearestmv[2];
int_mv nearmv[2];
-#if CONFIG_EXT_INTER
if (mbmi_ext->ref_mv_count[rf_type] > 1) {
nearmv[0] = mbmi_ext->ref_mv_stack[rf_type][1].this_mv;
nearmv[1] = mbmi_ext->ref_mv_stack[rf_type][1].comp_mv;
@@ -11975,22 +11634,6 @@
nearmv[0] = frame_mv[NEARMV][refs[0]];
nearmv[1] = frame_mv[NEARMV][refs[1]];
}
-#else
- int ref_set = (mbmi_ext->ref_mv_count[rf_type] >= 2)
- ? AOMMIN(2, mbmi_ext->ref_mv_count[rf_type] - 2)
- : INT_MAX;
-
- for (i = 0; i <= ref_set && ref_set != INT_MAX; ++i) {
- nearmv[0] = mbmi_ext->ref_mv_stack[rf_type][i + 1].this_mv;
- nearmv[1] = mbmi_ext->ref_mv_stack[rf_type][i + 1].comp_mv;
-
- if (nearmv[0].as_int == best_mbmode.mv[0].as_int &&
- nearmv[1].as_int == best_mbmode.mv[1].as_int) {
- best_mbmode.mode = NEARMV;
- best_mbmode.ref_mv_idx = i;
- }
- }
-#endif // CONFIG_EXT_INTER
if (mbmi_ext->ref_mv_count[rf_type] >= 1) {
nearestmv[0] = mbmi_ext->ref_mv_stack[rf_type][0].this_mv;
nearestmv[1] = mbmi_ext->ref_mv_stack[rf_type][0].comp_mv;
@@ -12000,9 +11643,7 @@
}
if (nearestmv[0].as_int == best_mbmode.mv[0].as_int &&
- nearestmv[1].as_int == best_mbmode.mv[1].as_int)
-#if CONFIG_EXT_INTER
- {
+ nearestmv[1].as_int == best_mbmode.mv[1].as_int) {
best_mbmode.mode = NEAREST_NEARESTMV;
} else {
int ref_set = (mbmi_ext->ref_mv_count[rf_type] >= 2)
@@ -12026,21 +11667,12 @@
best_mbmode.mv[1].as_int == zeromv[1].as_int)
best_mbmode.mode = ZERO_ZEROMV;
}
-#else
- {
- best_mbmode.mode = NEARESTMV;
- } else if (best_mbmode.mv[0].as_int == zeromv[0].as_int &&
- best_mbmode.mv[1].as_int == zeromv[1].as_int) {
- best_mbmode.mode = ZEROMV;
- }
-#endif // CONFIG_EXT_INTER
}
}
// Make sure that the ref_mv_idx is only nonzero when we're
// using a mode which can support ref_mv_idx
if (best_mbmode.ref_mv_idx != 0 &&
-#if CONFIG_EXT_INTER
#if CONFIG_COMPOUND_SINGLEREF
!(best_mbmode.mode == NEWMV || best_mbmode.mode == SR_NEW_NEWMV ||
best_mbmode.mode == NEW_NEWMV ||
@@ -12049,9 +11681,6 @@
!(best_mbmode.mode == NEWMV || best_mbmode.mode == NEW_NEWMV ||
have_nearmv_in_inter_mode(best_mbmode.mode)))
#endif // CONFIG_COMPOUND_SINGLEREF
-#else // !CONFIG_EXT_INTER
- !(best_mbmode.mode == NEARMV || best_mbmode.mode == NEWMV))
-#endif // CONFIG_EXT_INTER
{
best_mbmode.ref_mv_idx = 0;
}
@@ -12105,13 +11734,7 @@
#endif // CONFIG_GLOBAL_MOTION
if (best_mbmode.ref_frame[0] > INTRA_FRAME &&
best_mbmode.mv[0].as_int == zeromv[0].as_int &&
-#if CONFIG_EXT_INTER
- (best_mbmode.ref_frame[1] <= INTRA_FRAME)
-#else
- (best_mbmode.ref_frame[1] == NONE_FRAME ||
- best_mbmode.mv[1].as_int == zeromv[1].as_int)
-#endif // CONFIG_EXT_INTER
- ) {
+ (best_mbmode.ref_frame[1] <= INTRA_FRAME)) {
best_mbmode.mode = ZEROMV;
}
}
@@ -12155,11 +11778,7 @@
// Note: this section is needed since the mode may have been forced to
// ZEROMV by the all-zero mode handling of ref-mv.
#if CONFIG_GLOBAL_MOTION
- if (mbmi->mode == ZEROMV
-#if CONFIG_EXT_INTER
- || mbmi->mode == ZERO_ZEROMV
-#endif // CONFIG_EXT_INTER
- ) {
+ if (mbmi->mode == ZEROMV || mbmi->mode == ZERO_ZEROMV) {
#if CONFIG_WARPED_MOTION || CONFIG_MOTION_VAR
// Correct the motion mode for ZEROMV
const MOTION_MODE last_motion_mode_allowed =
diff --git a/av1/encoder/speed_features.c b/av1/encoder/speed_features.c
index bf82b19..b9ddecf 100644
--- a/av1/encoder/speed_features.c
+++ b/av1/encoder/speed_features.c
@@ -196,10 +196,8 @@
// Use transform domain distortion.
// Note var-tx expt always uses pixel domain distortion.
sf->use_transform_domain_distortion = 1;
-#if CONFIG_EXT_INTER
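+ // Skip wedge search for low-variance sources and use the fast wedge sign
+ // estimate at this speed level.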
sf->disable_wedge_search_var_thresh = 100;
sf->fast_wedge_sign_estimate = 1;
-#endif // CONFIG_EXT_INTER
}
if (speed >= 3) {
@@ -422,10 +420,8 @@
sf->adaptive_interp_filter_search = 0;
sf->allow_partition_search_skip = 0;
sf->use_upsampled_references = 1;
-#if CONFIG_EXT_INTER
sf->disable_wedge_search_var_thresh = 0;
sf->fast_wedge_sign_estimate = 0;
-#endif // CONFIG_EXT_INTER
for (i = 0; i < TX_SIZES; i++) {
sf->intra_y_mode_mask[i] = INTRA_ALL;
diff --git a/av1/encoder/speed_features.h b/av1/encoder/speed_features.h
index 1524c06..5cd373f 100644
--- a/av1/encoder/speed_features.h
+++ b/av1/encoder/speed_features.h
@@ -57,7 +57,6 @@
(1 << DC_PRED) | (1 << TM_PRED) | (1 << V_PRED) | (1 << H_PRED)
};
-#if CONFIG_EXT_INTER
enum {
#if CONFIG_COMPOUND_SINGLEREF
// TODO(zoeliu): To further consider following single ref comp modes:
@@ -93,17 +92,6 @@
(1 << NEW_NEARMV) | (1 << NEAR_NEWMV) |
(1 << NEAR_NEARMV),
};
-#else // !CONFIG_EXT_INTER
-enum {
- INTER_ALL = (1 << NEARESTMV) | (1 << NEARMV) | (1 << ZEROMV) | (1 << NEWMV),
- INTER_NEAREST = (1 << NEARESTMV),
- INTER_NEAREST_NEW = (1 << NEARESTMV) | (1 << NEWMV),
- INTER_NEAREST_ZERO = (1 << NEARESTMV) | (1 << ZEROMV),
- INTER_NEAREST_NEW_ZERO = (1 << NEARESTMV) | (1 << ZEROMV) | (1 << NEWMV),
- INTER_NEAREST_NEAR_NEW = (1 << NEARESTMV) | (1 << NEARMV) | (1 << NEWMV),
- INTER_NEAREST_NEAR_ZERO = (1 << NEARESTMV) | (1 << NEARMV) | (1 << ZEROMV),
-};
-#endif // CONFIG_EXT_INTER
enum {
DISABLE_ALL_INTER_SPLIT = (1 << THR_COMP_GA) | (1 << THR_COMP_LA) |
@@ -412,13 +400,11 @@
// Choose a very large value (UINT_MAX) to use 8-tap always
unsigned int disable_filter_search_var_thresh;
-#if CONFIG_EXT_INTER
// A source variance threshold below which wedge search is disabled
unsigned int disable_wedge_search_var_thresh;
// Whether fast wedge sign estimate is used
int fast_wedge_sign_estimate;
-#endif // CONFIG_EXT_INTER
// These bit masks allow you to enable or disable intra modes for each
// transform size separately.
diff --git a/av1/encoder/temporal_filter.c b/av1/encoder/temporal_filter.c
index 74732e7..8fa0362 100644
--- a/av1/encoder/temporal_filter.c
+++ b/av1/encoder/temporal_filter.c
@@ -311,10 +311,7 @@
x, &best_ref_mv1, cpi->common.allow_high_precision_mv, x->errorperbit,
&cpi->fn_ptr[BLOCK_16X16], 0, mv_sf->subpel_iters_per_step,
cond_cost_list(cpi, cost_list), NULL, NULL, &distortion, &sse, NULL,
-#if CONFIG_EXT_INTER
- NULL, 0, 0,
-#endif
- 0, 0, 0);
+ NULL, 0, 0, 0, 0, 0);
#if CONFIG_AMVR
}
#endif