EXT_INTER experiment
NEW2MV is enabled: a new motion vector mode (NEWFROMNEARMV in the code)
whose motion vector is predicted from the NEARMV candidate. It is mostly
ported from nextgen, where it was named NEW_INTER.
A few fixes are made to the sub8x8 RDO path to correct misused mv
references in the original patch.
A 'bug-fix' for encoding complexity is done, reducing the additional
encoding time from 50% to 20%. In the sub8x8 case, the old patch ran a
motion search for every interpolation filter (vp9 searches only once).
This fix also slightly improves the coding gain.
This experiment has been made compatible with REF_MV and EXT_REFS.
Coding gain (derflr/hevcmr/hevchd): 0.267%/0.542%/0.257%
Change-Id: I9a94c5f292e7454492a877f65072e8aedba087d4
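
Below is a minimal, illustrative C sketch (not part of the patch) of the
core idea: NEWMV keeps the vp9 behaviour of predicting the coded motion
vector from the first reference-MV candidate (the NEARESTMV candidate),
while NEWFROMNEARMV predicts it from the second candidate (the NEARMV
candidate), mirroring the "mv_idx = (mode == NEWFROMNEARMV) ? 1 : 0"
pattern used throughout the changes below. The helper select_mv_predictor
and the simplified enum values are hypothetical, for illustration only.

    #include <stdint.h>

    typedef struct { int16_t row, col; } MV;

    /* Simplified stand-ins for the mode ids in vp10/common/enums.h. */
    enum { NEARESTMV, NEARMV, ZEROMV, NEWMV, NEWFROMNEARMV };
    #define MAX_MV_REF_CANDIDATES 2

    /* Hypothetical helper: pick the MV predictor for a new-MV mode.
     * NEWFROMNEARMV is signalled only for single-reference blocks, via an
     * extra bit (new2mv_prob) read after the NEWMV branch. */
    static MV select_mv_predictor(int mode,
                                  const MV ref_mvs[MAX_MV_REF_CANDIDATES]) {
      const int mv_idx = (mode == NEWFROMNEARMV) ? 1 : 0;
      return ref_mvs[mv_idx];
    }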
diff --git a/vp10/common/blockd.h b/vp10/common/blockd.h
index 95084a7..8c75c97 100644
--- a/vp10/common/blockd.h
+++ b/vp10/common/blockd.h
@@ -48,9 +48,19 @@
#define MAXTXLEN 32
static INLINE int is_inter_mode(PREDICTION_MODE mode) {
+#if CONFIG_EXT_INTER
+ return mode >= NEARESTMV && mode <= NEWFROMNEARMV;
+#else
return mode >= NEARESTMV && mode <= NEWMV;
+#endif // CONFIG_EXT_INTER
}
+#if CONFIG_EXT_INTER
+static INLINE int have_newmv_in_inter_mode(PREDICTION_MODE mode) {
+ return (mode == NEWMV || mode == NEWFROMNEARMV);
+}
+#endif // CONFIG_EXT_INTER
+
/* For keyframes, intra block modes are predicted by the (already decoded)
modes for the Y blocks to the left and above us; for interframes, there
is a single probability table. */
@@ -58,6 +68,9 @@
typedef struct {
PREDICTION_MODE as_mode;
int_mv as_mv[2]; // first, second inter predictor motion vectors
+#if CONFIG_EXT_INTER
+ int_mv ref_mv[2];
+#endif // CONFIG_EXT_INTER
} b_mode_info;
// Note that the rate-distortion optimization loop, bit-stream writer, and
diff --git a/vp10/common/entropymode.c b/vp10/common/entropymode.c
index 1b4fd26..92f00c4 100644
--- a/vp10/common/entropymode.c
+++ b/vp10/common/entropymode.c
@@ -183,10 +183,24 @@
static const vpx_prob default_refmv_prob[REFMV_MODE_CONTEXTS] = {
220, 220, 200, 200, 180, 128, 30, 220, 30,
};
+
+#if CONFIG_EXT_INTER
+static const vpx_prob default_new2mv_prob = 180;
+#endif
#endif
static const vpx_prob default_inter_mode_probs[INTER_MODE_CONTEXTS]
[INTER_MODES - 1] = {
+#if CONFIG_EXT_INTER
+ // TODO(zoeliu): To adjust the initial default probs
+ {2, 173, 34, 173}, // 0 = both zero mv
+ {7, 145, 85, 145}, // 1 = one zero mv + one a predicted mv
+ {7, 166, 63, 166}, // 2 = two predicted mvs
+ {7, 94, 66, 128}, // 3 = one predicted/zero and one new mv
+ {8, 64, 46, 128}, // 4 = two new mvs
+ {17, 81, 31, 128}, // 5 = one intra neighbour + x
+ {25, 29, 30, 96}, // 6 = two intra neighbours
+#else
{2, 173, 34}, // 0 = both zero mv
{7, 145, 85}, // 1 = one zero mv + one a predicted mv
{7, 166, 63}, // 2 = two predicted mvs
@@ -194,6 +208,7 @@
{8, 64, 46}, // 4 = two new mvs
{17, 81, 31}, // 5 = one intra neighbour + x
{25, 29, 30}, // 6 = two intra neighbours
+#endif // CONFIG_EXT_INTER
};
/* Array indices are identical to previously-existing INTRAMODECONTEXTNODES. */
@@ -212,7 +227,12 @@
const vpx_tree_index vp10_inter_mode_tree[TREE_SIZE(INTER_MODES)] = {
-INTER_OFFSET(ZEROMV), 2,
-INTER_OFFSET(NEARESTMV), 4,
+#if CONFIG_EXT_INTER
+ -INTER_OFFSET(NEARMV), 6,
+ -INTER_OFFSET(NEWMV), -INTER_OFFSET(NEWFROMNEARMV)
+#else
-INTER_OFFSET(NEARMV), -INTER_OFFSET(NEWMV)
+#endif // CONFIG_EXT_INTER
};
const vpx_tree_index vp10_partition_tree[TREE_SIZE(PARTITION_TYPES)] = {
@@ -1232,7 +1252,10 @@
vp10_copy(fc->newmv_prob, default_newmv_prob);
vp10_copy(fc->zeromv_prob, default_zeromv_prob);
vp10_copy(fc->refmv_prob, default_refmv_prob);
-#endif
+#if CONFIG_EXT_INTER
+ fc->new2mv_prob = default_new2mv_prob;
+#endif // CONFIG_EXT_INTER
+#endif // CONFIG_REF_MV
vp10_copy(fc->inter_mode_probs, default_inter_mode_probs);
#if CONFIG_SUPERTX
vp10_copy(fc->supertx_prob, default_supertx_prob);
@@ -1292,6 +1315,11 @@
for (i = 0; i < REFMV_MODE_CONTEXTS; ++i)
fc->refmv_prob[i] = mode_mv_merge_probs(pre_fc->refmv_prob[i],
counts->refmv_mode[i]);
+
+#if CONFIG_EXT_INTER
+ fc->new2mv_prob = mode_mv_merge_probs(pre_fc->new2mv_prob,
+ counts->new2mv_mode);
+#endif // CONFIG_EXT_INTER
#else
for (i = 0; i < INTER_MODE_CONTEXTS; i++)
vpx_tree_merge_probs(vp10_inter_mode_tree, pre_fc->inter_mode_probs[i],
diff --git a/vp10/common/entropymode.h b/vp10/common/entropymode.h
index a1ad2c4..ffaa3df 100644
--- a/vp10/common/entropymode.h
+++ b/vp10/common/entropymode.h
@@ -65,6 +65,9 @@
vpx_prob newmv_prob[NEWMV_MODE_CONTEXTS];
vpx_prob zeromv_prob[ZEROMV_MODE_CONTEXTS];
vpx_prob refmv_prob[REFMV_MODE_CONTEXTS];
+#if CONFIG_EXT_INTER
+ vpx_prob new2mv_prob;
+#endif // CONFIG_EXT_INTER
#endif
vpx_prob inter_mode_probs[INTER_MODE_CONTEXTS][INTER_MODES - 1];
@@ -110,6 +113,9 @@
unsigned int newmv_mode[NEWMV_MODE_CONTEXTS][2];
unsigned int zeromv_mode[ZEROMV_MODE_CONTEXTS][2];
unsigned int refmv_mode[REFMV_MODE_CONTEXTS][2];
+#if CONFIG_EXT_INTER
+ unsigned int new2mv_mode[2];
+#endif // CONFIG_EXT_INTER
#endif
unsigned int inter_mode[INTER_MODE_CONTEXTS][INTER_MODES];
diff --git a/vp10/common/enums.h b/vp10/common/enums.h
index f0d1ba2..5b4d1c5 100644
--- a/vp10/common/enums.h
+++ b/vp10/common/enums.h
@@ -177,7 +177,12 @@
#define NEARMV 11
#define ZEROMV 12
#define NEWMV 13
+#if CONFIG_EXT_INTER
+#define NEWFROMNEARMV 14
+#define MB_MODE_COUNT 15
+#else
#define MB_MODE_COUNT 14
+#endif // CONFIG_EXT_INTER
typedef uint8_t PREDICTION_MODE;
#define INTRA_MODES (TM_PRED + 1)
@@ -201,7 +206,11 @@
#define DIRECTIONAL_MODES (INTRA_MODES - 2)
#endif // CONFIG_EXT_INTRA
+#if CONFIG_EXT_INTER
+#define INTER_MODES (1 + NEWFROMNEARMV - NEARESTMV)
+#else
#define INTER_MODES (1 + NEWMV - NEARESTMV)
+#endif // CONFIG_EXT_INTER
#define SKIP_CONTEXTS 3
diff --git a/vp10/common/loopfilter.c b/vp10/common/loopfilter.c
index 20d724d..875030d 100644
--- a/vp10/common/loopfilter.c
+++ b/vp10/common/loopfilter.c
@@ -207,6 +207,9 @@
static const int mode_lf_lut[MB_MODE_COUNT] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // INTRA_MODES
1, 1, 0, 1 // INTER_MODES (ZEROMV == 0)
+#if CONFIG_EXT_INTER
+ , 1 // NEWFROMNEARMV mode
+#endif // CONFIG_EXT_INTER
};
static void update_sharpness(loop_filter_info_n *lfi, int sharpness_lvl) {
diff --git a/vp10/common/mvref_common.c b/vp10/common/mvref_common.c
index e6db818..89cd8bb 100644
--- a/vp10/common/mvref_common.c
+++ b/vp10/common/mvref_common.c
@@ -613,6 +613,52 @@
mv_ref_list[i].as_int = 0;
}
+#if CONFIG_EXT_INTER
+// This function keeps a mode count for a given MB/SB
+void vp10_update_mv_context(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+ MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
+ int_mv *mv_ref_list,
+ int block, int mi_row, int mi_col,
+ int16_t *mode_context) {
+ int i, refmv_count = 0;
+ const POSITION *const mv_ref_search = mv_ref_blocks[mi->mbmi.sb_type];
+ int context_counter = 0;
+ const int bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type] << 3;
+ const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type] << 3;
+ const TileInfo *const tile = &xd->tile;
+
+ // Blank the reference vector list
+ memset(mv_ref_list, 0, sizeof(*mv_ref_list) * MAX_MV_REF_CANDIDATES);
+
+ // The nearest 2 blocks are examined only.
+ // If the size < 8x8, we get the mv from the bmi substructure;
+ for (i = 0; i < 2; ++i) {
+ const POSITION *const mv_ref = &mv_ref_search[i];
+ if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
+ const MODE_INFO *const candidate_mi =
+ xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];
+ const MB_MODE_INFO *const candidate = &candidate_mi->mbmi;
+
+ // Keep counts for entropy encoding.
+ context_counter += mode_2_counter[candidate->mode];
+
+ if (candidate->ref_frame[0] == ref_frame) {
+ ADD_MV_REF_LIST(get_sub_block_mv(candidate_mi, 0, mv_ref->col, block),
+ refmv_count, mv_ref_list, bw, bh, xd, Done);
+ } else if (candidate->ref_frame[1] == ref_frame) {
+ ADD_MV_REF_LIST(get_sub_block_mv(candidate_mi, 1, mv_ref->col, block),
+ refmv_count, mv_ref_list, bw, bh, xd, Done);
+ }
+ }
+ }
+
+ Done:
+
+ if (mode_context)
+ mode_context[ref_frame] = counter_to_context[context_counter];
+}
+#endif // CONFIG_EXT_INTER
+
void vp10_find_mv_refs(const VP10_COMMON *cm, const MACROBLOCKD *xd,
MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
#if CONFIG_REF_MV
@@ -626,8 +672,15 @@
#if CONFIG_REF_MV
int idx, all_zero = 1;
#endif
+#if CONFIG_EXT_INTER
+ vp10_update_mv_context(cm, xd, mi, ref_frame, mv_ref_list, -1,
+ mi_row, mi_col, mode_context);
+ find_mv_refs_idx(cm, xd, mi, ref_frame, mv_ref_list, -1,
+ mi_row, mi_col, sync, data, NULL);
+#else
find_mv_refs_idx(cm, xd, mi, ref_frame, mv_ref_list, -1,
mi_row, mi_col, sync, data, mode_context);
+#endif // CONFIG_EXT_INTER
#if CONFIG_REF_MV
setup_ref_mv_list(cm, xd, ref_frame, ref_mv_count, ref_mv_stack,
@@ -656,8 +709,13 @@
void vp10_append_sub8x8_mvs_for_idx(VP10_COMMON *cm, MACROBLOCKD *xd,
int block, int ref, int mi_row, int mi_col,
+#if CONFIG_EXT_INTER
+ int_mv *mv_list,
+#endif // CONFIG_EXT_INTER
int_mv *nearest_mv, int_mv *near_mv) {
+#if !CONFIG_EXT_INTER
int_mv mv_list[MAX_MV_REF_CANDIDATES];
+#endif // !CONFIG_EXT_INTER
MODE_INFO *const mi = xd->mi[0];
b_mode_info *bmi = mi->bmi;
int n;
diff --git a/vp10/common/mvref_common.h b/vp10/common/mvref_common.h
index 24bde6c..c1ddc95 100644
--- a/vp10/common/mvref_common.h
+++ b/vp10/common/mvref_common.h
@@ -55,6 +55,9 @@
0, // NEARMV
3, // ZEROMV
1, // NEWMV
+#if CONFIG_EXT_INTER
+ 1, // NEWFROMNEARMV
+#endif // CONFIG_EXT_INTER
};
// There are 3^3 different combinations of 3 counts that can be either 0,1 or
@@ -265,8 +268,20 @@
void vp10_append_sub8x8_mvs_for_idx(VP10_COMMON *cm, MACROBLOCKD *xd,
int block, int ref, int mi_row, int mi_col,
+#if CONFIG_EXT_INTER
+ int_mv *mv_list,
+#endif // CONFIG_EXT_INTER
int_mv *nearest_mv, int_mv *near_mv);
+#if CONFIG_EXT_INTER
+// This function keeps a mode count for a given MB/SB
+void vp10_update_mv_context(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+ MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
+ int_mv *mv_ref_list,
+ int block, int mi_row, int mi_col,
+ int16_t *mode_context);
+#endif // CONFIG_EXT_INTER
+
#ifdef __cplusplus
} // extern "C"
#endif
diff --git a/vp10/common/thread_common.c b/vp10/common/thread_common.c
index a1f17e9..c9cc343 100644
--- a/vp10/common/thread_common.c
+++ b/vp10/common/thread_common.c
@@ -378,6 +378,11 @@
for (i = 0; i < REFMV_MODE_CONTEXTS; ++i)
for (j = 0; j < 2; ++j)
cm->counts.refmv_mode[i][j] += counts->refmv_mode[i][j];
+
+#if CONFIG_EXT_INTER
+ for (j = 0; j < 2; ++j)
+ cm->counts.new2mv_mode[j] += counts->new2mv_mode[j];
+#endif // CONFIG_EXT_INTER
#endif
for (i = 0; i < INTER_MODE_CONTEXTS; i++)
diff --git a/vp10/decoder/decodeframe.c b/vp10/decoder/decodeframe.c
index 4d8bdb4..10fdb54 100644
--- a/vp10/decoder/decodeframe.c
+++ b/vp10/decoder/decodeframe.c
@@ -132,6 +132,9 @@
vp10_diff_update_prob(r, &fc->zeromv_prob[i]);
for (i = 0; i < REFMV_MODE_CONTEXTS; ++i)
vp10_diff_update_prob(r, &fc->refmv_prob[i]);
+#if CONFIG_EXT_INTER
+ vp10_diff_update_prob(r, &fc->new2mv_prob);
+#endif // CONFIG_EXT_INTER
#else
int j;
for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
diff --git a/vp10/decoder/decodemv.c b/vp10/decoder/decodemv.c
index c6579c6..d7c3faf 100644
--- a/vp10/decoder/decodemv.c
+++ b/vp10/decoder/decodemv.c
@@ -63,6 +63,9 @@
}
static PREDICTION_MODE read_inter_mode(VP10_COMMON *cm, MACROBLOCKD *xd,
+#if CONFIG_REF_MV && CONFIG_EXT_INTER
+ MB_MODE_INFO *mbmi,
+#endif
vpx_reader *r, int16_t ctx) {
#if CONFIG_REF_MV
FRAME_COUNTS *counts = xd->counts;
@@ -72,7 +75,25 @@
if (vpx_read(r, mode_prob) == 0) {
if (counts)
++counts->newmv_mode[mode_ctx][0];
+
+#if CONFIG_EXT_INTER
+ if (has_second_ref(mbmi)) {
+#endif // CONFIG_EXT_INTER
return NEWMV;
+#if CONFIG_EXT_INTER
+ } else {
+ mode_prob = cm->fc->new2mv_prob;
+ if (vpx_read(r, mode_prob) == 0) {
+ if (counts)
+ ++counts->new2mv_mode[0];
+ return NEWMV;
+ } else {
+ if (counts)
+ ++counts->new2mv_mode[1];
+ return NEWFROMNEARMV;
+ }
+ }
+#endif // CONFIG_EXT_INTER
}
if (counts)
++counts->newmv_mode[mode_ctx][1];
@@ -774,6 +795,9 @@
int ret = 1;
switch (mode) {
+#if CONFIG_EXT_INTER
+ case NEWFROMNEARMV:
+#endif // CONFIG_EXT_INTER
case NEWMV: {
FRAME_COUNTS *counts = xd->counts;
nmv_context_counts *const mv_counts = counts ? &counts->mv : NULL;
@@ -839,6 +863,9 @@
const int allow_hp = cm->allow_high_precision_mv;
int_mv nearestmv[2], nearmv[2];
int_mv ref_mvs[MODE_CTX_REF_FRAMES][MAX_MV_REF_CANDIDATES];
+#if CONFIG_EXT_INTER
+ int mv_idx;
+#endif // CONFIG_EXT_INTER
int ref, is_compound;
int16_t inter_mode_ctx[MODE_CTX_REF_FRAMES];
int16_t mode_ctx = 0;
@@ -885,7 +912,11 @@
}
} else {
if (bsize >= BLOCK_8X8)
- mbmi->mode = read_inter_mode(cm, xd, r, mode_ctx);
+ mbmi->mode = read_inter_mode(cm, xd,
+#if CONFIG_REF_MV && CONFIG_EXT_INTER
+ mbmi,
+#endif // CONFIG_REF_MV && CONFIG_EXT_INTER
+ r, mode_ctx);
}
if (bsize < BLOCK_8X8 || mbmi->mode != ZEROMV) {
@@ -936,6 +967,9 @@
int idx, idy;
PREDICTION_MODE b_mode;
int_mv nearest_sub8x8[2], near_sub8x8[2];
+#if CONFIG_EXT_INTER
+ int_mv ref_mv[2][2];
+#endif // CONFIG_EXT_INTER
for (idy = 0; idy < 2; idy += num_4x4_h) {
for (idx = 0; idx < 2; idx += num_4x4_w) {
int_mv block[2];
@@ -944,16 +978,49 @@
mode_ctx = vp10_mode_context_analyzer(inter_mode_ctx, mbmi->ref_frame,
bsize, j);
#endif
- b_mode = read_inter_mode(cm, xd, r, mode_ctx);
+ b_mode = read_inter_mode(cm, xd,
+#if CONFIG_REF_MV && CONFIG_EXT_INTER
+ mbmi,
+#endif // CONFIG_REF_MV && CONFIG_EXT_INTER
+ r, mode_ctx);
+#if CONFIG_EXT_INTER
+ mv_idx = (b_mode == NEWFROMNEARMV) ? 1 : 0;
+
+ if (b_mode != ZEROMV) {
+#else
if (b_mode == NEARESTMV || b_mode == NEARMV) {
+#endif // CONFIG_EXT_INTER
for (ref = 0; ref < 1 + is_compound; ++ref)
+#if CONFIG_EXT_INTER
+ {
+ int_mv mv_ref_list[MAX_MV_REF_CANDIDATES];
+ vp10_update_mv_context(cm, xd, mi, mbmi->ref_frame[ref],
+ mv_ref_list, j, mi_row, mi_col, NULL);
+#endif // CONFIG_EXT_INTER
vp10_append_sub8x8_mvs_for_idx(cm, xd, j, ref, mi_row, mi_col,
+#if CONFIG_EXT_INTER
+ mv_ref_list,
+#endif // CONFIG_EXT_INTER
&nearest_sub8x8[ref],
&near_sub8x8[ref]);
+#if CONFIG_EXT_INTER
+ if (have_newmv_in_inter_mode(b_mode)) {
+ mv_ref_list[0].as_int = nearest_sub8x8[ref].as_int;
+ mv_ref_list[1].as_int = near_sub8x8[ref].as_int;
+ vp10_find_best_ref_mvs(allow_hp, mv_ref_list,
+ &ref_mv[0][ref], &ref_mv[1][ref]);
+ }
+ }
+#endif // CONFIG_EXT_INTER
}
- if (!assign_mv(cm, xd, b_mode, block, nearestmv,
+ if (!assign_mv(cm, xd, b_mode, block,
+#if CONFIG_EXT_INTER
+ ref_mv[mv_idx],
+#else
+ nearestmv,
+#endif // CONFIG_EXT_INTER
nearest_sub8x8, near_sub8x8,
is_compound, allow_hp, r)) {
xd->corrupted |= 1;
@@ -976,7 +1043,13 @@
mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
} else {
- xd->corrupted |= !assign_mv(cm, xd, mbmi->mode, mbmi->mv, nearestmv,
+ xd->corrupted |= !assign_mv(cm, xd, mbmi->mode, mbmi->mv,
+#if CONFIG_EXT_INTER
+ mbmi->mode == NEWFROMNEARMV ?
+ nearmv : nearestmv,
+#else
+ nearestmv,
+#endif // CONFIG_EXT_INTER
nearestmv, nearmv, is_compound, allow_hp, r);
}
#if CONFIG_EXT_INTERP
diff --git a/vp10/encoder/bitstream.c b/vp10/encoder/bitstream.c
index 4e414fe..177dcc3 100644
--- a/vp10/encoder/bitstream.c
+++ b/vp10/encoder/bitstream.c
@@ -49,7 +49,11 @@
{{0, 1}, {2, 2}, {6, 3}, {7, 3}};
#if !CONFIG_REF_MV
static const struct vp10_token inter_mode_encodings[INTER_MODES] =
+#if CONFIG_EXT_INTER
+ {{2, 2}, {6, 3}, {0, 1}, {14, 4}, {15, 4}};
+#else
{{2, 2}, {6, 3}, {0, 1}, {7, 3}};
+#endif // CONFIG_EXT_INTER
#endif
static const struct vp10_token palette_size_encodings[] = {
{0, 1}, {2, 2}, {6, 3}, {14, 4}, {30, 5}, {62, 6}, {63, 6},
@@ -117,13 +121,25 @@
static void write_inter_mode(VP10_COMMON *cm,
vpx_writer *w, PREDICTION_MODE mode,
+#if CONFIG_REF_MV && CONFIG_EXT_INTER
+ int is_compound,
+#endif // CONFIG_REF_MV && CONFIG_EXT_INTER
const int16_t mode_ctx) {
#if CONFIG_REF_MV
const int16_t newmv_ctx = mode_ctx & NEWMV_CTX_MASK;
const vpx_prob newmv_prob = cm->fc->newmv_prob[newmv_ctx];
+#if CONFIG_EXT_INTER
+ vpx_write(w, mode != NEWMV && mode != NEWFROMNEARMV, newmv_prob);
+
+ if (!is_compound && (mode == NEWMV || mode == NEWFROMNEARMV))
+ vpx_write(w, mode == NEWFROMNEARMV, cm->fc->new2mv_prob);
+
+ if (mode != NEWMV && mode != NEWFROMNEARMV) {
+#else
vpx_write(w, mode != NEWMV, newmv_prob);
if (mode != NEWMV) {
+#endif // CONFIG_EXT_INTER
const int16_t zeromv_ctx = (mode_ctx >> ZEROMV_OFFSET) & ZEROMV_CTX_MASK;
const vpx_prob zeromv_prob = cm->fc->zeromv_prob[zeromv_ctx];
@@ -279,6 +295,10 @@
for (i = 0; i < REFMV_MODE_CONTEXTS; ++i)
vp10_cond_prob_diff_update(w, &cm->fc->refmv_prob[i],
counts->refmv_mode[i]);
+
+#if CONFIG_EXT_INTER
+ vp10_cond_prob_diff_update(w, &cm->fc->new2mv_prob, counts->new2mv_mode);
+#endif // CONFIG_EXT_INTER
}
#endif
@@ -906,7 +926,11 @@
// If segment skip is not enabled code the mode.
if (!segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
if (bsize >= BLOCK_8X8) {
- write_inter_mode(cm, w, mode, mode_ctx);
+ write_inter_mode(cm, w, mode,
+#if CONFIG_REF_MV && CONFIG_EXT_INTER
+ has_second_ref(mbmi),
+#endif // CONFIG_REF_MV && CONFIG_EXT_INTER
+ mode_ctx);
}
}
@@ -926,23 +950,52 @@
mode_ctx = vp10_mode_context_analyzer(mbmi_ext->mode_context,
mbmi->ref_frame, bsize, j);
#endif
- write_inter_mode(cm, w, b_mode, mode_ctx);
+ write_inter_mode(cm, w, b_mode,
+#if CONFIG_REF_MV && CONFIG_EXT_INTER
+ has_second_ref(mbmi),
+#endif // CONFIG_REF_MV && CONFIG_EXT_INTER
+ mode_ctx);
+
+#if CONFIG_EXT_INTER
+ if (b_mode == NEWMV || b_mode == NEWFROMNEARMV) {
+#else
if (b_mode == NEWMV) {
+#endif // CONFIG_EXT_INTER
for (ref = 0; ref < 1 + is_compound; ++ref)
vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
- &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
+#if CONFIG_EXT_INTER
+ &mi->bmi[j].ref_mv[ref].as_mv,
+#else
+ &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
+#endif // CONFIG_EXT_INTER
nmvc, allow_hp);
}
}
}
} else {
+#if CONFIG_EXT_INTER
+ if (mode == NEWMV || mode == NEWFROMNEARMV) {
+#else
if (mode == NEWMV) {
+#endif // CONFIG_EXT_INTER
for (ref = 0; ref < 1 + is_compound; ++ref)
+#if CONFIG_EXT_INTER
+ {
+ if (mode == NEWFROMNEARMV)
+ vp10_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
+ &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][1].as_mv,
+ nmvc, allow_hp);
+ else
+#endif // CONFIG_EXT_INTER
vp10_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
&mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv, nmvc,
allow_hp);
+#if CONFIG_EXT_INTER
+ }
+#endif // CONFIG_EXT_INTER
}
}
+
#if CONFIG_EXT_INTERP
write_switchable_interp_filter(cpi, xd, w);
#endif // CONFIG_EXT_INTERP
diff --git a/vp10/encoder/encodeframe.c b/vp10/encoder/encodeframe.c
index fbbf1f6..52926e3 100644
--- a/vp10/encoder/encodeframe.c
+++ b/vp10/encoder/encodeframe.c
@@ -1668,9 +1668,18 @@
#if CONFIG_REF_MV
static void update_inter_mode_stats(FRAME_COUNTS *counts,
PREDICTION_MODE mode,
+#if CONFIG_EXT_INTER
+ int is_compound,
+#endif // CONFIG_EXT_INTER
int16_t mode_context) {
int16_t mode_ctx = mode_context & NEWMV_CTX_MASK;
+#if CONFIG_EXT_INTER
+ if (mode == NEWMV || mode == NEWFROMNEARMV) {
+ if (!is_compound)
+ ++counts->new2mv_mode[mode == NEWFROMNEARMV];
+#else
if (mode == NEWMV) {
+#endif // CONFIG_EXT_INTER
++counts->newmv_mode[mode_ctx][0];
return;
} else {
@@ -1789,7 +1798,12 @@
#if CONFIG_REF_MV
mode_ctx = vp10_mode_context_analyzer(mbmi_ext->mode_context,
mbmi->ref_frame, bsize, -1);
- update_inter_mode_stats(counts, mode, mode_ctx);
+ update_inter_mode_stats(counts, mode,
+#if CONFIG_EXT_INTER
+ has_second_ref(mbmi),
+#endif // CONFIG_EXT_INTER
+ mode_ctx);
+
#else
++counts->inter_mode[mode_ctx][INTER_OFFSET(mode)];
#endif
@@ -1804,7 +1818,11 @@
#if CONFIG_REF_MV
mode_ctx = vp10_mode_context_analyzer(mbmi_ext->mode_context,
mbmi->ref_frame, bsize, j);
- update_inter_mode_stats(counts, b_mode, mode_ctx);
+ update_inter_mode_stats(counts, b_mode,
+#if CONFIG_EXT_INTER
+ has_second_ref(mbmi),
+#endif // CONFIG_EXT_INTER
+ mode_ctx);
#else
++counts->inter_mode[mode_ctx][INTER_OFFSET(b_mode)];
#endif
diff --git a/vp10/encoder/encodemv.c b/vp10/encoder/encodemv.c
index 623e6f6..0184bae 100644
--- a/vp10/encoder/encodemv.c
+++ b/vp10/encoder/encodemv.c
@@ -224,6 +224,41 @@
build_nmv_component_cost_table(mvcost[1], &ctx->comps[1], usehp);
}
+#if CONFIG_EXT_INTER
+static void inc_mvs(const MB_MODE_INFO *mbmi, const MB_MODE_INFO_EXT *mbmi_ext,
+ const int_mv mvs[2],
+ nmv_context_counts *counts) {
+ int i;
+ PREDICTION_MODE mode = mbmi->mode;
+ int mv_idx = (mode == NEWFROMNEARMV);
+
+ if (mode == NEWMV || mode == NEWFROMNEARMV) {
+ for (i = 0; i < 1 + has_second_ref(mbmi); ++i) {
+ const MV *ref = &mbmi_ext->ref_mvs[mbmi->ref_frame[i]][mv_idx].as_mv;
+ const MV diff = {mvs[i].as_mv.row - ref->row,
+ mvs[i].as_mv.col - ref->col};
+ vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
+ }
+ }
+}
+
+static void inc_mvs_sub8x8(const MODE_INFO *mi,
+ int block,
+ const int_mv mvs[2],
+ nmv_context_counts *counts) {
+ int i;
+ PREDICTION_MODE mode = mi->bmi[block].as_mode;
+
+ if (mode == NEWMV || mode == NEWFROMNEARMV) {
+ for (i = 0; i < 1 + has_second_ref(&mi->mbmi); ++i) {
+ const MV *ref = &mi->bmi[block].ref_mv[i].as_mv;
+ const MV diff = {mvs[i].as_mv.row - ref->row,
+ mvs[i].as_mv.col - ref->col};
+ vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
+ }
+ }
+}
+#else
static void inc_mvs(const MB_MODE_INFO *mbmi, const MB_MODE_INFO_EXT *mbmi_ext,
const int_mv mvs[2],
nmv_context_counts *counts) {
@@ -236,6 +271,7 @@
vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
}
}
+#endif // CONFIG_EXT_INTER
void vp10_update_mv_count(ThreadData *td) {
const MACROBLOCKD *xd = &td->mb.e_mbd;
@@ -251,12 +287,22 @@
for (idy = 0; idy < 2; idy += num_4x4_h) {
for (idx = 0; idx < 2; idx += num_4x4_w) {
const int i = idy * 2 + idx;
+
+#if CONFIG_EXT_INTER
+ if (have_newmv_in_inter_mode(mi->bmi[i].as_mode))
+ inc_mvs_sub8x8(mi, i, mi->bmi[i].as_mv, &td->counts->mv);
+#else
if (mi->bmi[i].as_mode == NEWMV)
inc_mvs(mbmi, mbmi_ext, mi->bmi[i].as_mv, &td->counts->mv);
+#endif // CONFIG_EXT_INTER
}
}
} else {
+#if CONFIG_EXT_INTER
+ if (have_newmv_in_inter_mode(mbmi->mode))
+#else
if (mbmi->mode == NEWMV)
+#endif // CONFIG_EXT_INTER
inc_mvs(mbmi, mbmi_ext, mbmi->mv, &td->counts->mv);
}
}
diff --git a/vp10/encoder/encoder.h b/vp10/encoder/encoder.h
index 707255d..40bc4d7 100644
--- a/vp10/encoder/encoder.h
+++ b/vp10/encoder/encoder.h
@@ -473,6 +473,9 @@
int newmv_mode_cost[NEWMV_MODE_CONTEXTS][2];
int zeromv_mode_cost[ZEROMV_MODE_CONTEXTS][2];
int refmv_mode_cost[REFMV_MODE_CONTEXTS][2];
+#if CONFIG_EXT_INTER
+ int new2mv_mode_cost[2];
+#endif // CONFIG_EXT_INTER
#endif
unsigned int inter_mode_cost[INTER_MODE_CONTEXTS][INTER_MODES];
diff --git a/vp10/encoder/rd.c b/vp10/encoder/rd.c
index ef41600..d67f7c3 100644
--- a/vp10/encoder/rd.c
+++ b/vp10/encoder/rd.c
@@ -377,6 +377,10 @@
cpi->refmv_mode_cost[i][0] = vp10_cost_bit(cm->fc->refmv_prob[i], 0);
cpi->refmv_mode_cost[i][1] = vp10_cost_bit(cm->fc->refmv_prob[i], 1);
}
+#if CONFIG_EXT_INTER
+ cpi->new2mv_mode_cost[0] = vp10_cost_bit(cm->fc->new2mv_prob, 0);
+ cpi->new2mv_mode_cost[1] = vp10_cost_bit(cm->fc->new2mv_prob, 1);
+#endif // CONFIG_EXT_INTER
#else
for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
vp10_cost_tokens((int *)cpi->inter_mode_cost[i],
@@ -686,6 +690,17 @@
rd->thresh_mult[THR_NEARA] += 1000;
rd->thresh_mult[THR_NEARG] += 1000;
+#if CONFIG_EXT_INTER
+ rd->thresh_mult[THR_NEWFROMNEARMV] += 1000;
+#if CONFIG_EXT_REFS
+ rd->thresh_mult[THR_NEWFROMNEARL2] += 1000;
+ rd->thresh_mult[THR_NEWFROMNEARL3] += 1000;
+ rd->thresh_mult[THR_NEWFROMNEARL4] += 1000;
+#endif  // CONFIG_EXT_REFS
+ rd->thresh_mult[THR_NEWFROMNEARG] += 1000;
+ rd->thresh_mult[THR_NEWFROMNEARA] += 1000;
+#endif // CONFIG_EXT_INTER
+
rd->thresh_mult[THR_ZEROMV] += 2000;
#if CONFIG_EXT_REFS
rd->thresh_mult[THR_ZEROL2] += 2000;
diff --git a/vp10/encoder/rd.h b/vp10/encoder/rd.h
index 42261ac..42d8ea1 100644
--- a/vp10/encoder/rd.h
+++ b/vp10/encoder/rd.h
@@ -34,9 +34,17 @@
#define INVALID_MV 0x80008000
#if CONFIG_EXT_REFS
+#if CONFIG_EXT_INTER
+#define MAX_MODES 60
+#else
#define MAX_MODES 54
+#endif // CONFIG_EXT_INTER
+#else
+#if CONFIG_EXT_INTER
+#define MAX_MODES 33
#else
#define MAX_MODES 30
+#endif // CONFIG_EXT_INTER
#endif // CONFIG_EXT_REFS
#if CONFIG_EXT_REFS
@@ -80,6 +88,17 @@
THR_NEARA,
THR_NEARG,
+#if CONFIG_EXT_INTER
+ THR_NEWFROMNEARMV,
+#if CONFIG_EXT_REFS
+ THR_NEWFROMNEARL2,
+ THR_NEWFROMNEARL3,
+ THR_NEWFROMNEARL4,
+#endif  // CONFIG_EXT_REFS
+ THR_NEWFROMNEARA,
+ THR_NEWFROMNEARG,
+#endif // CONFIG_EXT_INTER
+
THR_ZEROMV,
#if CONFIG_EXT_REFS
THR_ZEROL2,
diff --git a/vp10/encoder/rdopt.c b/vp10/encoder/rdopt.c
index 736c7ae..b55f53a 100644
--- a/vp10/encoder/rdopt.c
+++ b/vp10/encoder/rdopt.c
@@ -146,6 +146,17 @@
{NEARMV, {ALTREF_FRAME, NONE}},
{NEARMV, {GOLDEN_FRAME, NONE}},
+#if CONFIG_EXT_INTER
+ {NEWFROMNEARMV, {LAST_FRAME, NONE}},
+#if CONFIG_EXT_REFS
+ {NEWFROMNEARMV, {LAST2_FRAME, NONE}},
+ {NEWFROMNEARMV, {LAST3_FRAME, NONE}},
+ {NEWFROMNEARMV, {LAST4_FRAME, NONE}},
+#endif  // CONFIG_EXT_REFS
+ {NEWFROMNEARMV, {ALTREF_FRAME, NONE}},
+ {NEWFROMNEARMV, {GOLDEN_FRAME, NONE}},
+#endif // CONFIG_EXT_INTER
+
{ZEROMV, {LAST_FRAME, NONE}},
#if CONFIG_EXT_REFS
{ZEROMV, {LAST2_FRAME, NONE}},
@@ -3113,6 +3124,9 @@
}
static int cost_mv_ref(const VP10_COMP *cpi, PREDICTION_MODE mode,
+#if CONFIG_REF_MV && CONFIG_EXT_INTER
+ int is_compound,
+#endif // CONFIG_REF_MV && CONFIG_EXT_INTER
int16_t mode_context) {
#if CONFIG_REF_MV
int mode_cost = 0;
@@ -3121,8 +3135,16 @@
assert(is_inter_mode(mode));
+#if CONFIG_EXT_INTER
+ if (mode == NEWMV || mode == NEWFROMNEARMV) {
+#else
if (mode == NEWMV) {
+#endif // CONFIG_EXT_INTER
mode_cost = cpi->newmv_mode_cost[mode_ctx][0];
+#if CONFIG_EXT_INTER
+ if (!is_compound)
+ mode_cost += cpi->new2mv_mode_cost[mode == NEWFROMNEARMV];
+#endif // CONFIG_EXT_INTER
return mode_cost;
} else {
mode_cost = cpi->newmv_mode_cost[mode_ctx][1];
@@ -3160,6 +3182,9 @@
PREDICTION_MODE mode, int_mv this_mv[2],
int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
int_mv seg_mvs[MAX_REF_FRAMES],
+#if CONFIG_EXT_INTER
+ int_mv compound_seg_newmvs[2],
+#endif // CONFIG_EXT_INTER
int_mv *best_ref_mv[2], const int *mvjcost,
int *mvcost[2]) {
MODE_INFO *const mic = xd->mi[0];
@@ -3174,14 +3199,48 @@
switch (mode) {
case NEWMV:
+#if CONFIG_EXT_INTER
+ case NEWFROMNEARMV:
+ if (!is_compound) {
+#endif // CONFIG_EXT_INTER
this_mv[0].as_int = seg_mvs[mbmi->ref_frame[0]].as_int;
+#if CONFIG_EXT_INTER
+ if (!cpi->common.allow_high_precision_mv ||
+ !vp10_use_mv_hp(&best_ref_mv[0]->as_mv))
+ lower_mv_precision(&this_mv[0].as_mv, 0);
+#endif // CONFIG_EXT_INTER
thismvcost += vp10_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
mvjcost, mvcost, MV_COST_WEIGHT_SUB);
+#if CONFIG_EXT_INTER
+ } else {
+ if (compound_seg_newmvs[0].as_int == INVALID_MV ||
+ compound_seg_newmvs[1].as_int == INVALID_MV) {
+ this_mv[0].as_int = seg_mvs[mbmi->ref_frame[0]].as_int;
+ this_mv[1].as_int = seg_mvs[mbmi->ref_frame[1]].as_int;
+ } else {
+ this_mv[0].as_int = compound_seg_newmvs[0].as_int;
+ this_mv[1].as_int = compound_seg_newmvs[1].as_int;
+ }
+ if (!cpi->common.allow_high_precision_mv ||
+ !vp10_use_mv_hp(&best_ref_mv[0]->as_mv))
+ lower_mv_precision(&this_mv[0].as_mv, 0);
+ if (!cpi->common.allow_high_precision_mv ||
+ !vp10_use_mv_hp(&best_ref_mv[1]->as_mv))
+ lower_mv_precision(&this_mv[1].as_mv, 0);
+ thismvcost += vp10_mv_bit_cost(&this_mv[0].as_mv,
+ &best_ref_mv[0]->as_mv,
+ mvjcost, mvcost, MV_COST_WEIGHT_SUB);
+ thismvcost += vp10_mv_bit_cost(&this_mv[1].as_mv,
+ &best_ref_mv[1]->as_mv,
+ mvjcost, mvcost, MV_COST_WEIGHT_SUB);
+ }
+#else
if (is_compound) {
this_mv[1].as_int = seg_mvs[mbmi->ref_frame[1]].as_int;
thismvcost += vp10_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv,
mvjcost, mvcost, MV_COST_WEIGHT_SUB);
}
+#endif // CONFIG_EXT_INTER
break;
case NEARMV:
case NEARESTMV:
@@ -3212,7 +3271,11 @@
mode_ctx = vp10_mode_context_analyzer(mbmi_ext->mode_context,
mbmi->ref_frame, mbmi->sb_type, i);
#endif
+#if CONFIG_REF_MV && CONFIG_EXT_INTER
+ return cost_mv_ref(cpi, mode, is_compound, mode_ctx) + thismvcost;
+#else
return cost_mv_ref(cpi, mode, mode_ctx) + thismvcost;
+#endif // CONFIG_REF_MV && CONFIG_EXT_INTER
}
static int64_t encode_inter_mb_segment(VP10_COMP *cpi,
@@ -3341,6 +3404,9 @@
int64_t bsse;
int64_t brdcost;
int_mv mvs[2];
+#if CONFIG_EXT_INTER
+ int_mv ref_mv[2];
+#endif // CONFIG_EXT_INTER
ENTROPY_CONTEXT ta[2];
ENTROPY_CONTEXT tl[2];
} SEG_RDSTAT;
@@ -3407,9 +3473,15 @@
#else
int16_t rfc = mode_context[ref_frames[0]];
#endif
+#if CONFIG_REF_MV && CONFIG_EXT_INTER
+ int c1 = cost_mv_ref(cpi, NEARMV, ref_frames[1] > INTRA_FRAME, rfc);
+ int c2 = cost_mv_ref(cpi, NEARESTMV, ref_frames[1] > INTRA_FRAME, rfc);
+ int c3 = cost_mv_ref(cpi, ZEROMV, ref_frames[1] > INTRA_FRAME, rfc);
+#else
int c1 = cost_mv_ref(cpi, NEARMV, rfc);
int c2 = cost_mv_ref(cpi, NEARESTMV, rfc);
int c3 = cost_mv_ref(cpi, ZEROMV, rfc);
+#endif // CONFIG_REF_MV && CONFIG_EXT_INTER
#if !CONFIG_REF_MV
(void)bsize;
@@ -3442,6 +3514,9 @@
BLOCK_SIZE bsize,
int_mv *frame_mv,
int mi_row, int mi_col,
+#if CONFIG_EXT_INTER
+ int_mv* ref_mv_sub8x8[2],
+#endif
int_mv single_newmv[MAX_REF_FRAMES],
int *rate_mv) {
const VP10_COMMON *const cm = &cpi->common;
@@ -3473,6 +3548,11 @@
#endif // CONFIG_VP9_HIGHBITDEPTH
for (ref = 0; ref < 2; ++ref) {
+#if CONFIG_EXT_INTER
+ if (bsize < BLOCK_8X8 && ref_mv_sub8x8 != NULL)
+ ref_mv[ref].as_int = ref_mv_sub8x8[ref]->as_int;
+ else
+#endif // CONFIG_EXT_INTER
ref_mv[ref] = x->mbmi_ext->ref_mvs[refs[ref]][0];
if (scaled_ref_frame[ref]) {
@@ -3616,9 +3696,18 @@
xd->plane[i].pre[ref] = backup_yv12[ref][i];
}
+#if CONFIG_EXT_INTER
+ if (bsize >= BLOCK_8X8)
+#endif // CONFIG_EXT_INTER
*rate_mv += vp10_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
&x->mbmi_ext->ref_mvs[refs[ref]][0].as_mv,
x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+#if CONFIG_EXT_INTER
+ else
+ *rate_mv += vp10_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
+ &ref_mv_sub8x8[ref]->as_mv,
+ x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+#endif // CONFIG_EXT_INTER
}
}
@@ -3630,7 +3719,12 @@
int64_t *returndistortion,
int *skippable, int64_t *psse,
int mvthresh,
+#if CONFIG_EXT_INTER
+ int_mv seg_mvs[4][2][MAX_REF_FRAMES],
+ int_mv compound_seg_newmvs[4][2],
+#else
int_mv seg_mvs[4][MAX_REF_FRAMES],
+#endif // CONFIG_EXT_INTER
BEST_SEG_INFO *bsi_buf, int filter_idx,
int mi_row, int mi_col) {
int i;
@@ -3689,21 +3783,49 @@
int64_t best_rd = INT64_MAX;
const int i = idy * 2 + idx;
int ref;
+#if CONFIG_EXT_INTER
+ int mv_idx;
+ int_mv ref_mvs_sub8x8[2][2];
+#endif // CONFIG_EXT_INTER
for (ref = 0; ref < 1 + has_second_rf; ++ref) {
const MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref];
+#if CONFIG_EXT_INTER
+ int_mv mv_ref_list[MAX_MV_REF_CANDIDATES];
+ vp10_update_mv_context(cm, xd, mi, frame, mv_ref_list, i,
+ mi_row, mi_col, NULL);
+#endif // CONFIG_EXT_INTER
frame_mv[ZEROMV][frame].as_int = 0;
vp10_append_sub8x8_mvs_for_idx(cm, xd, i, ref, mi_row, mi_col,
+#if CONFIG_EXT_INTER
+ mv_ref_list,
+#endif // CONFIG_EXT_INTER
&frame_mv[NEARESTMV][frame],
&frame_mv[NEARMV][frame]);
+#if CONFIG_EXT_INTER
+ mv_ref_list[0].as_int = frame_mv[NEARESTMV][frame].as_int;
+ mv_ref_list[1].as_int = frame_mv[NEARMV][frame].as_int;
+ vp10_find_best_ref_mvs(cm->allow_high_precision_mv, mv_ref_list,
+ &ref_mvs_sub8x8[0][ref], &ref_mvs_sub8x8[1][ref]);
+#endif // CONFIG_EXT_INTER
}
// search for the best motion vector on this segment
+#if CONFIG_EXT_INTER
+ for (this_mode = NEARESTMV;
+ this_mode <= (has_second_rf ? NEWMV : NEWFROMNEARMV); ++this_mode) {
+#else
for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
+#endif // CONFIG_EXT_INTER
const struct buf_2d orig_src = x->plane[0].src;
struct buf_2d orig_pre[2];
mode_idx = INTER_OFFSET(this_mode);
+#if CONFIG_EXT_INTER
+ mv_idx = (this_mode == NEWFROMNEARMV) ? 1 : 0;
+ for (ref = 0; ref < 1 + has_second_rf; ++ref)
+ bsi->ref_mv[ref]->as_int = ref_mvs_sub8x8[mv_idx][ref].as_int;
+#endif // CONFIG_EXT_INTER
bsi->rdstat[i][mode_idx].brdcost = INT64_MAX;
if (!(inter_mode_mask & (1 << this_mode)))
continue;
@@ -3719,9 +3841,20 @@
sizeof(bsi->rdstat[i][mode_idx].tl));
// motion search for newmv (single predictor case only)
- if (!has_second_rf && this_mode == NEWMV &&
- seg_mvs[i][mbmi->ref_frame[0]].as_int == INVALID_MV) {
+ if (!has_second_rf &&
+#if CONFIG_EXT_INTER
+ have_newmv_in_inter_mode(this_mode) &&
+ seg_mvs[i][mv_idx][mbmi->ref_frame[0]].as_int == INVALID_MV
+#else
+ this_mode == NEWMV &&
+ seg_mvs[i][mbmi->ref_frame[0]].as_int == INVALID_MV
+#endif // CONFIG_EXT_INTER
+ ) {
+#if CONFIG_EXT_INTER
+ MV *const new_mv = &mode_mv[this_mode][0].as_mv;
+#else
MV *const new_mv = &mode_mv[NEWMV][0].as_mv;
+#endif // CONFIG_EXT_INTER
int step_param = 0;
int bestsme = INT_MAX;
int sadpb = x->sadperbit4;
@@ -3735,12 +3868,16 @@
break;
if (cpi->oxcf.mode != BEST) {
+#if CONFIG_EXT_INTER
+ bsi->mvp.as_int = bsi->ref_mv[0]->as_int;
+#else
// use previous block's result as next block's MV predictor.
if (i > 0) {
bsi->mvp.as_int = mi->bmi[i - 1].as_mv[0].as_int;
if (i == 2)
bsi->mvp.as_int = mi->bmi[i - 2].as_mv[0].as_int;
}
+#endif // CONFIG_EXT_INTER
}
if (i == 0)
max_mv = x->max_mv_context[mbmi->ref_frame[0]];
@@ -3795,7 +3932,11 @@
NULL, 0, 0);
// save motion search result for use in compound prediction
+#if CONFIG_EXT_INTER
+ seg_mvs[i][mv_idx][mbmi->ref_frame[0]].as_mv = *new_mv;
+#else
seg_mvs[i][mbmi->ref_frame[0]].as_mv = *new_mv;
+#endif // CONFIG_EXT_INTER
}
if (cpi->sf.adaptive_motion_search)
@@ -3806,8 +3947,13 @@
}
if (has_second_rf) {
+#if CONFIG_EXT_INTER
+ if (seg_mvs[i][mv_idx][mbmi->ref_frame[1]].as_int == INVALID_MV ||
+ seg_mvs[i][mv_idx][mbmi->ref_frame[0]].as_int == INVALID_MV)
+#else
if (seg_mvs[i][mbmi->ref_frame[1]].as_int == INVALID_MV ||
seg_mvs[i][mbmi->ref_frame[0]].as_int == INVALID_MV)
+#endif // CONFIG_EXT_INTER
continue;
}
@@ -3818,12 +3964,25 @@
if (cpi->sf.comp_inter_joint_search_thresh <= bsize) {
int rate_mv;
joint_motion_search(cpi, x, bsize, frame_mv[this_mode],
- mi_row, mi_col, seg_mvs[i],
+ mi_row, mi_col,
+#if CONFIG_EXT_INTER
+ bsi->ref_mv,
+ seg_mvs[i][mv_idx],
+#else
+ seg_mvs[i],
+#endif // CONFIG_EXT_INTER
&rate_mv);
+#if CONFIG_EXT_INTER
+ compound_seg_newmvs[i][0].as_int =
+ frame_mv[this_mode][mbmi->ref_frame[0]].as_int;
+ compound_seg_newmvs[i][1].as_int =
+ frame_mv[this_mode][mbmi->ref_frame[1]].as_int;
+#else
seg_mvs[i][mbmi->ref_frame[0]].as_int =
frame_mv[this_mode][mbmi->ref_frame[0]].as_int;
seg_mvs[i][mbmi->ref_frame[1]].as_int =
frame_mv[this_mode][mbmi->ref_frame[1]].as_int;
+#endif // CONFIG_EXT_INTER
}
// restore src pointers
mi_buf_restore(x, orig_src, orig_pre);
@@ -3831,7 +3990,14 @@
bsi->rdstat[i][mode_idx].brate =
set_and_cost_bmi_mvs(cpi, x, xd, i, this_mode, mode_mv[this_mode],
- frame_mv, seg_mvs[i], bsi->ref_mv,
+ frame_mv,
+#if CONFIG_EXT_INTER
+ seg_mvs[i][mv_idx],
+ compound_seg_newmvs[i],
+#else
+ seg_mvs[i],
+#endif // CONFIG_EXT_INTER
+ bsi->ref_mv,
x->nmvjointcost, x->mvcost);
for (ref = 0; ref < 1 + has_second_rf; ++ref) {
@@ -3843,6 +4009,16 @@
if (num_4x4_blocks_high > 1)
bsi->rdstat[i + 2][mode_idx].mvs[ref].as_int =
mode_mv[this_mode][ref].as_int;
+#if CONFIG_EXT_INTER
+ bsi->rdstat[i][mode_idx].ref_mv[ref].as_int =
+ bsi->ref_mv[ref]->as_int;
+ if (num_4x4_blocks_wide > 1)
+ bsi->rdstat[i + 1][mode_idx].ref_mv[ref].as_int =
+ bsi->ref_mv[ref]->as_int;
+ if (num_4x4_blocks_high > 1)
+ bsi->rdstat[i + 2][mode_idx].ref_mv[ref].as_int =
+ bsi->ref_mv[ref]->as_int;
+#endif // CONFIG_EXT_INTER
}
// Trap vectors that reach beyond the UMV borders
@@ -3858,6 +4034,15 @@
for (ref = 0; ref < 1 + has_second_rf; ++ref) {
subpelmv |= mv_has_subpel(&mode_mv[this_mode][ref].as_mv);
+#if CONFIG_EXT_INTER
+ if (have_newmv_in_inter_mode(this_mode))
+ have_ref &= (
+ (mode_mv[this_mode][ref].as_int ==
+ ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int) &&
+ (bsi->ref_mv[ref]->as_int ==
+ ref_bsi->rdstat[i][mode_idx].ref_mv[ref].as_int));
+ else
+#endif // CONFIG_EXT_INTER
have_ref &= mode_mv[this_mode][ref].as_int ==
ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;
}
@@ -3866,6 +4051,15 @@
ref_bsi = bsi_buf + 1;
have_ref = 1;
for (ref = 0; ref < 1 + has_second_rf; ++ref)
+#if CONFIG_EXT_INTER
+ if (have_newmv_in_inter_mode(this_mode))
+ have_ref &= (
+ (mode_mv[this_mode][ref].as_int ==
+ ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int) &&
+ (bsi->ref_mv[ref]->as_int ==
+ ref_bsi->rdstat[i][mode_idx].ref_mv[ref].as_int));
+ else
+#endif // CONFIG_EXT_INTER
have_ref &= mode_mv[this_mode][ref].as_int ==
ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;
}
@@ -3929,9 +4123,21 @@
memcpy(t_above, bsi->rdstat[i][mode_idx].ta, sizeof(t_above));
memcpy(t_left, bsi->rdstat[i][mode_idx].tl, sizeof(t_left));
+#if CONFIG_EXT_INTER
+ mv_idx = (mode_selected == NEWFROMNEARMV) ? 1 : 0;
+ bsi->ref_mv[0]->as_int = bsi->rdstat[i][mode_idx].ref_mv[0].as_int;
+ if (has_second_rf)
+ bsi->ref_mv[1]->as_int = bsi->rdstat[i][mode_idx].ref_mv[1].as_int;
+#endif // CONFIG_EXT_INTER
set_and_cost_bmi_mvs(cpi, x, xd, i, mode_selected, mode_mv[mode_selected],
- frame_mv, seg_mvs[i], bsi->ref_mv, x->nmvjointcost,
- x->mvcost);
+ frame_mv,
+#if CONFIG_EXT_INTER
+ seg_mvs[i][mv_idx],
+ compound_seg_newmvs[i],
+#else
+ seg_mvs[i],
+#endif // CONFIG_EXT_INTER
+ bsi->ref_mv, x->nmvjointcost, x->mvcost);
br += bsi->rdstat[i][mode_idx].brate;
bd += bsi->rdstat[i][mode_idx].bdist;
@@ -3968,6 +4174,11 @@
mi->bmi[i].as_mv[0].as_int = bsi->rdstat[i][mode_idx].mvs[0].as_int;
if (has_second_ref(mbmi))
mi->bmi[i].as_mv[1].as_int = bsi->rdstat[i][mode_idx].mvs[1].as_int;
+#if CONFIG_EXT_INTER
+ mi->bmi[i].ref_mv[0].as_int = bsi->rdstat[i][mode_idx].ref_mv[0].as_int;
+ if (has_second_rf)
+ mi->bmi[i].ref_mv[1].as_int = bsi->rdstat[i][mode_idx].ref_mv[1].as_int;
+#endif // CONFIG_EXT_INTER
x->plane[0].eobs[i] = bsi->rdstat[i][mode_idx].eobs;
mi->bmi[i].as_mode = bsi->modes[i];
}
@@ -4189,6 +4400,10 @@
static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
BLOCK_SIZE bsize,
int mi_row, int mi_col,
+#if CONFIG_EXT_INTER
+ int ref_idx,
+ int mv_idx,
+#endif // CONFIG_EXT_INTER
int_mv *tmp_mv, int *rate_mv) {
MACROBLOCKD *xd = &x->e_mbd;
const VP10_COMMON *cm = &cpi->common;
@@ -4198,8 +4413,13 @@
int step_param;
int sadpb = x->sadperbit16;
MV mvp_full;
+#if CONFIG_EXT_INTER
+ int ref = mbmi->ref_frame[ref_idx];
+ MV ref_mv = x->mbmi_ext->ref_mvs[ref][mv_idx].as_mv;
+#else
int ref = mbmi->ref_frame[0];
MV ref_mv = x->mbmi_ext->ref_mvs[ref][0].as_mv;
+#endif // CONFIG_EXT_INTER
int tmp_col_min = x->mv_col_min;
int tmp_col_max = x->mv_col_max;
@@ -4365,7 +4585,11 @@
int *disable_skip,
int_mv (*mode_mv)[MAX_REF_FRAMES],
int mi_row, int mi_col,
+#if CONFIG_EXT_INTER
+ int_mv single_newmvs[2][MAX_REF_FRAMES],
+#else
int_mv single_newmv[MAX_REF_FRAMES],
+#endif // CONFIG_EXT_INTER
INTERP_FILTER (*single_filter)[MAX_REF_FRAMES],
int (*single_skippable)[MAX_REF_FRAMES],
int64_t *psse,
@@ -4383,6 +4607,10 @@
int refs[2] = { mbmi->ref_frame[0],
(mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1]) };
int_mv cur_mv[2];
+#if CONFIG_EXT_INTER
+ int mv_idx = (this_mode == NEWFROMNEARMV) ? 1 : 0;
+ int_mv single_newmv[MAX_REF_FRAMES];
+#endif // CONFIG_EXT_INTER
#if CONFIG_VP9_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, tmp_buf16[MAX_MB_PLANE * 64 * 64]);
uint8_t *tmp_buf;
@@ -4430,7 +4658,11 @@
if (xd->left_available)
lf = xd->mi[-1]->mbmi.interp_filter;
+#if CONFIG_EXT_INTER
+ if ((this_mode != NEWMV && this_mode != NEWFROMNEARMV) || (af == lf))
+#else
if ((this_mode != NEWMV) || (af == lf))
+#endif // CONFIG_EXT_INTER
best_filter = af;
}
@@ -4446,16 +4678,30 @@
}
}
+#if CONFIG_EXT_INTER
+ if (have_newmv_in_inter_mode(this_mode)) {
+#else
if (this_mode == NEWMV) {
+#endif // CONFIG_EXT_INTER
int rate_mv;
if (is_comp_pred) {
+#if CONFIG_EXT_INTER
+ for (i = 0; i < 2; ++i) {
+ single_newmv[refs[i]].as_int =
+ single_newmvs[mv_idx][refs[i]].as_int;
+ }
+#endif // CONFIG_EXT_INTER
// Initialize mv using single prediction mode result.
frame_mv[refs[0]].as_int = single_newmv[refs[0]].as_int;
frame_mv[refs[1]].as_int = single_newmv[refs[1]].as_int;
if (cpi->sf.comp_inter_joint_search_thresh <= bsize) {
joint_motion_search(cpi, x, bsize, frame_mv,
- mi_row, mi_col, single_newmv, &rate_mv);
+ mi_row, mi_col,
+#if CONFIG_EXT_INTER
+ NULL,
+#endif // CONFIG_EXT_INTER
+ single_newmv, &rate_mv);
} else {
rate_mv = vp10_mv_bit_cost(&frame_mv[refs[0]].as_mv,
&x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
@@ -4468,13 +4714,20 @@
} else {
int_mv tmp_mv;
single_motion_search(cpi, x, bsize, mi_row, mi_col,
+#if CONFIG_EXT_INTER
+ 0, mv_idx,
+#endif // CONFIG_EXT_INTER
&tmp_mv, &rate_mv);
if (tmp_mv.as_int == INVALID_MV)
return INT64_MAX;
frame_mv[refs[0]].as_int =
xd->mi[0]->bmi[0].as_mv[0].as_int = tmp_mv.as_int;
+#if CONFIG_EXT_INTER
+ single_newmvs[mv_idx][refs[0]].as_int = tmp_mv.as_int;
+#else
single_newmv[refs[0]].as_int = tmp_mv.as_int;
+#endif // CONFIG_EXT_INTER
// Estimate the rate implications of a new mv but discount this
// under certain circumstances where we want to help initiate a weak
@@ -4491,7 +4744,11 @@
for (i = 0; i < is_comp_pred + 1; ++i) {
cur_mv[i] = frame_mv[refs[i]];
// Clip "next_nearest" so that it does not extend to far out of image
+#if CONFIG_EXT_INTER
+ if (this_mode != NEWMV && this_mode != NEWFROMNEARMV)
+#else
if (this_mode != NEWMV)
+#endif // CONFIG_EXT_INTER
clamp_mv2(&cur_mv[i].as_mv, xd);
if (mv_check_bounds(x, &cur_mv[i].as_mv))
@@ -4552,10 +4809,19 @@
// initiation of a motion field.
if (discount_newmv_test(cpi, this_mode, frame_mv[refs[0]],
mode_mv, refs[0])) {
+#if CONFIG_REF_MV && CONFIG_EXT_INTER
+ *rate2 += VPXMIN(cost_mv_ref(cpi, this_mode, is_comp_pred, mode_ctx),
+ cost_mv_ref(cpi, NEARESTMV, is_comp_pred, mode_ctx));
+#else
*rate2 += VPXMIN(cost_mv_ref(cpi, this_mode, mode_ctx),
cost_mv_ref(cpi, NEARESTMV, mode_ctx));
+#endif // CONFIG_REF_MV && CONFIG_EXT_INTER
} else {
+#if CONFIG_REF_MV && CONFIG_EXT_INTER
+ *rate2 += cost_mv_ref(cpi, this_mode, is_comp_pred, mode_ctx);
+#else
*rate2 += cost_mv_ref(cpi, this_mode, mode_ctx);
+#endif // CONFIG_REF_MV && CONFIG_EXT_INTER
}
if (RDCOST(x->rdmult, x->rddiv, *rate2, 0) > ref_best_rd &&
@@ -5008,7 +5274,11 @@
int comp_pred, i, k;
int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
struct buf_2d yv12_mb[MAX_REF_FRAMES][MAX_MB_PLANE];
+#if CONFIG_EXT_INTER
+ int_mv single_newmvs[2][MAX_REF_FRAMES] = { { { 0 } }, { { 0 } } };
+#else
int_mv single_newmv[MAX_REF_FRAMES] = { { 0 } };
+#endif // CONFIG_EXT_INTER
INTERP_FILTER single_inter_filter[MB_MODE_COUNT][MAX_REF_FRAMES];
int single_skippable[MB_MODE_COUNT][MAX_REF_FRAMES];
static const int flag_list[REFS_PER_FRAME + 1] = {
@@ -5052,7 +5322,11 @@
cpi->mbmode_cost[size_group_lookup[bsize]];
int best_skip2 = 0;
uint8_t ref_frame_skip_mask[2] = { 0 };
+#if CONFIG_EXT_INTER
+ uint32_t mode_skip_mask[MAX_REF_FRAMES] = { 0 };
+#else
uint16_t mode_skip_mask[MAX_REF_FRAMES] = { 0 };
+#endif // CONFIG_EXT_INTER
int mode_skip_start = sf->mode_skip_start + 1;
const int *const rd_threshes = rd_opt->threshes[segment_id][bsize];
const int *const rd_thresh_freq_fact = tile_data->thresh_freq_fact[bsize];
@@ -5105,6 +5379,9 @@
}
frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
frame_mv[ZEROMV][ref_frame].as_int = 0;
+#if CONFIG_EXT_INTER
+ frame_mv[NEWFROMNEARMV][ref_frame].as_int = INVALID_MV;
+#endif // CONFIG_EXT_INTER
}
#if CONFIG_REF_MV
@@ -5506,7 +5783,12 @@
&rate_y, &rate_uv,
&disable_skip, frame_mv,
mi_row, mi_col,
- single_newmv, single_inter_filter,
+#if CONFIG_EXT_INTER
+ single_newmvs,
+#else
+ single_newmv,
+#endif // CONFIG_EXT_INTER
+ single_inter_filter,
single_skippable, &total_sse, best_rd,
&mask_filter, filter_cache);
@@ -5708,7 +5990,11 @@
// Therefore, sometimes, NEWMV is chosen instead of NEARESTMV, NEARMV, and
// ZEROMV. Here, checks are added for those cases, and the mode decisions
// are corrected.
- if (best_mbmode.mode == NEWMV) {
+ if (best_mbmode.mode == NEWMV
+#if CONFIG_EXT_INTER
+ || best_mbmode.mode == NEWFROMNEARMV
+#endif // CONFIG_EXT_INTER
+ ) {
const MV_REFERENCE_FRAME refs[2] = {best_mbmode.ref_frame[0],
best_mbmode.ref_frame[1]};
int comp_pred_mode = refs[1] > INTRA_FRAME;
@@ -6021,7 +6307,11 @@
PREDICTION_MODE mode_uv = DC_PRED;
const int intra_cost_penalty = vp10_get_intra_cost_penalty(
cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
+#if CONFIG_EXT_INTER
+ int_mv seg_mvs[4][2][MAX_REF_FRAMES];
+#else
int_mv seg_mvs[4][MAX_REF_FRAMES];
+#endif // CONFIG_EXT_INTER
b_mode_info best_bmodes[4];
int best_skip2 = 0;
int ref_frame_skip_mask[2] = { 0 };
@@ -6048,8 +6338,16 @@
for (i = 0; i < 4; i++) {
int j;
+#if CONFIG_EXT_INTER
+ int k;
+
+ for (k = 0; k < 2; k++)
+ for (j = 0; j < MAX_REF_FRAMES; j++)
+ seg_mvs[i][k][j].as_int = INVALID_MV;
+#else
for (j = 0; j < MAX_REF_FRAMES; j++)
seg_mvs[i][j].as_int = INVALID_MV;
+#endif // CONFIG_EXT_INTER
}
estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
@@ -6077,6 +6375,9 @@
ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
}
frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
+#if CONFIG_EXT_INTER
+ frame_mv[NEWFROMNEARMV][ref_frame].as_int = INVALID_MV;
+#endif // CONFIG_EXT_INTER
frame_mv[ZEROMV][ref_frame].as_int = 0;
}
@@ -6287,6 +6588,15 @@
BEST_SEG_INFO bsi[SWITCHABLE_FILTERS];
int pred_exists = 0;
int uv_skippable;
+#if CONFIG_EXT_INTER
+ int_mv compound_seg_newmvs[4][2];
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ compound_seg_newmvs[i][0].as_int = INVALID_MV;
+ compound_seg_newmvs[i][1].as_int = INVALID_MV;
+ }
+#endif // CONFIG_EXT_INTER
this_rd_thresh = (ref_frame == LAST_FRAME) ?
rd_opt->threshes[segment_id][bsize][THR_LAST] :
@@ -6328,6 +6638,9 @@
&rate_y, &distortion,
&skippable, &total_sse,
(int) this_rd_thresh, seg_mvs,
+#if CONFIG_EXT_INTER
+ compound_seg_newmvs,
+#endif // CONFIG_EXT_INTER
bsi, switchable_filter_index,
mi_row, mi_col);
#if CONFIG_EXT_INTERP
@@ -6397,19 +6710,26 @@
&x->mbmi_ext->ref_mvs[ref_frame][0],
second_ref, best_yrd, &rate, &rate_y,
&distortion, &skippable, &total_sse,
- (int) this_rd_thresh, seg_mvs, bsi, 0,
+ (int) this_rd_thresh, seg_mvs,
+#if CONFIG_EXT_INTER
+ compound_seg_newmvs,
+#endif // CONFIG_EXT_INTER
+ bsi, 0,
mi_row, mi_col);
#if CONFIG_EXT_INTERP
if (!vp10_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE &&
mbmi->interp_filter != EIGHTTAP) {
mbmi->interp_filter = EIGHTTAP;
- tmp_rd = rd_pick_best_sub8x8_mode(
- cpi, x,
- &x->mbmi_ext->ref_mvs[ref_frame][0],
- second_ref, best_yrd, &rate, &rate_y,
- &distortion, &skippable, &total_sse,
- (int) this_rd_thresh, seg_mvs, bsi, 0,
- mi_row, mi_col);
+ tmp_rd = rd_pick_best_sub8x8_mode(cpi, x,
+ &x->mbmi_ext->ref_mvs[ref_frame][0],
+ second_ref, best_yrd, &rate, &rate_y,
+ &distortion, &skippable, &total_sse,
+ (int) this_rd_thresh, seg_mvs,
+#if CONFIG_EXT_INTER
+ compound_seg_newmvs,
+#endif // CONFIG_EXT_INTER
+ bsi, 0,
+ mi_row, mi_col);
}
#endif // CONFIG_EXT_INTERP
if (tmp_rd == INT64_MAX)
diff --git a/vp10/encoder/speed_features.h b/vp10/encoder/speed_features.h
index 3b91999..a48a76c 100644
--- a/vp10/encoder/speed_features.h
+++ b/vp10/encoder/speed_features.h
@@ -31,6 +31,22 @@
(1 << H_PRED)
};
+#if CONFIG_EXT_INTER
+enum {
+ INTER_ALL =
+ (1 << NEARESTMV) | (1 << NEARMV) | (1 << ZEROMV) |
+ (1 << NEWMV) | (1 << NEWFROMNEARMV),
+ INTER_NEAREST = (1 << NEARESTMV),
+ INTER_NEAREST_NEW = (1 << NEARESTMV) | (1 << NEWMV) | (1 << NEWFROMNEARMV),
+ INTER_NEAREST_ZERO = (1 << NEARESTMV) | (1 << ZEROMV),
+ INTER_NEAREST_NEW_ZERO =
+ (1 << NEARESTMV) | (1 << ZEROMV) | (1 << NEWMV) | (1 << NEWFROMNEARMV),
+ INTER_NEAREST_NEAR_NEW =
+ (1 << NEARESTMV) | (1 << NEARMV) | (1 << NEWMV) | (1 << NEWFROMNEARMV),
+ INTER_NEAREST_NEAR_ZERO =
+ (1 << NEARESTMV) | (1 << NEARMV) | (1 << ZEROMV)
+};
+#else
enum {
INTER_ALL = (1 << NEARESTMV) | (1 << NEARMV) | (1 << ZEROMV) | (1 << NEWMV),
INTER_NEAREST = (1 << NEARESTMV),
@@ -40,6 +56,7 @@
INTER_NEAREST_NEAR_NEW = (1 << NEARESTMV) | (1 << NEARMV) | (1 << NEWMV),
INTER_NEAREST_NEAR_ZERO = (1 << NEARESTMV) | (1 << NEARMV) | (1 << ZEROMV),
};
+#endif // CONFIG_EXT_INTER
enum {
DISABLE_ALL_INTER_SPLIT = (1 << THR_COMP_GA) |