Skip unneeded motion search
This change brings back ~5% encoding speed in speed 1. It affects
the compression performance only within the noise range (0.03%). The
overall coding performance changes due to its parent CLs in speed 1,
measured over 150 frames, are:
overall PSNR SSIM
lowres -0.85% -0.87%
midres2 -0.88% -0.83%
STATS_CHANGED
Change-Id: I96aadaf57d5cc8260006082ecff0fe81237677f4
diff --git a/av1/encoder/compound_type.c b/av1/encoder/compound_type.c
index 92a2b35..659b432 100644
--- a/av1/encoder/compound_type.c
+++ b/av1/encoder/compound_type.c
@@ -1293,7 +1293,7 @@
comp_index_ctx, masked_compound_used, masked_type_cost);
int64_t comp_model_rd_cur = INT64_MAX;
- int64_t best_rd_cur = INT64_MAX;
+ int64_t best_rd_cur = ref_best_rd;
const int mi_row = xd->mi_row;
const int mi_col = xd->mi_col;
@@ -1314,10 +1314,11 @@
comp_model_rd_cur = INT64_MAX;
tmp_rate_mv = *rate_mv;
best_rd_cur = INT64_MAX;
+ ref_best_rd = AOMMIN(ref_best_rd, *rd);
update_mbmi_for_compound_type(mbmi, cur_type);
rs2 = masked_type_cost[cur_type];
- const int64_t mode_rd = RDCOST(x->rdmult, rs2 + rd_stats->rate, 0);
+ int64_t mode_rd = RDCOST(x->rdmult, rs2 + rd_stats->rate, 0);
if (mode_rd >= ref_best_rd) continue;
// Case COMPOUND_AVERAGE and COMPOUND_DISTWTD
@@ -1349,42 +1350,54 @@
} else if (cur_type == COMPOUND_WEDGE) {
int best_mask_index = 0;
int best_wedge_sign = 0;
+ int_mv tmp_mv[2] = { mbmi->mv[0], mbmi->mv[1] };
+ int best_rate_mv = *rate_mv;
const int wedge_mask_size = get_wedge_types_lookup(bsize);
for (int wedge_mask = 0; wedge_mask < wedge_mask_size; ++wedge_mask) {
for (int wedge_sign = 0; wedge_sign < 2; ++wedge_sign) {
tmp_rate_mv = *rate_mv;
- rs2 = masked_type_cost[cur_type];
mbmi->interinter_comp.wedge_index = wedge_mask;
mbmi->interinter_comp.wedge_sign = wedge_sign;
+ rs2 = masked_type_cost[cur_type];
+ rs2 += get_interinter_compound_mask_rate(&x->mode_costs, mbmi);
+
+ mode_rd = RDCOST(x->rdmult, rs2 + rd_stats->rate, 0);
+ if (mode_rd >= ref_best_rd) continue;
+
if (have_newmv_in_inter_mode(this_mode)) {
tmp_rate_mv = av1_interinter_compound_motion_search(
cpi, x, cur_mv, bsize, this_mode);
}
- rs2 += get_interinter_compound_mask_rate(&x->mode_costs, mbmi);
+
av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, orig_dst, bsize,
AOM_PLANE_Y, AOM_PLANE_Y);
RD_STATS est_rd_stats;
- estimate_yrd_for_sb(cpi, bsize, x, INT64_MAX, &est_rd_stats);
int64_t this_rd_cur =
- RDCOST(x->rdmult, rs2 + tmp_rate_mv + est_rd_stats.rate,
- est_rd_stats.dist);
+ estimate_yrd_for_sb(cpi, bsize, x, best_rd_cur, &est_rd_stats);
+ if (this_rd_cur < INT64_MAX) {
+ this_rd_cur =
+ RDCOST(x->rdmult, rs2 + tmp_rate_mv + est_rd_stats.rate,
+ est_rd_stats.dist);
+ }
if (this_rd_cur < best_rd_cur) {
best_mask_index = wedge_mask;
best_wedge_sign = wedge_sign;
best_rd_cur = this_rd_cur;
+ tmp_mv[0] = mbmi->mv[0];
+ tmp_mv[1] = mbmi->mv[1];
+ best_rate_mv = tmp_rate_mv;
}
}
}
mbmi->interinter_comp.wedge_index = best_mask_index;
mbmi->interinter_comp.wedge_sign = best_wedge_sign;
+ mbmi->mv[0] = tmp_mv[0];
+ mbmi->mv[1] = tmp_mv[1];
+ tmp_rate_mv = best_rate_mv;
rs2 = masked_type_cost[cur_type];
- tmp_rate_mv = *rate_mv;
- if (have_newmv_in_inter_mode(this_mode)) {
- tmp_rate_mv = av1_interinter_compound_motion_search(cpi, x, cur_mv,
- bsize, this_mode);
- }
rs2 += get_interinter_compound_mask_rate(&x->mode_costs, mbmi);
+
av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, orig_dst, bsize,
AOM_PLANE_Y, AOM_PLANE_Y);
RD_STATS est_rd_stats;