Rework motion vector precision limit
This commit enables 1/8-pel luma component motion vector precision
for all motion vector cases. It improves compression performance
on the lowres test set by 0.13% and on hdres by 0.49%.
Change-Id: Iccfc85e8ee1c0154dfbd18f060344f1e3db5dc18
diff --git a/vp10/common/entropymv.c b/vp10/common/entropymv.c
index 6e8c993..097d693 100644
--- a/vp10/common/entropymv.c
+++ b/vp10/common/entropymv.c
@@ -132,14 +132,11 @@
return c;
}
+// TODO(jingning): This idle function is intentionally left as is for
+// experimental purpose.
int vp10_use_mv_hp(const MV *ref) {
-#if CONFIG_MISC_FIXES
(void) ref;
return 1;
-#else
- return (abs(ref->row) >> 3) < COMPANDED_MVREF_THRESH &&
- (abs(ref->col) >> 3) < COMPANDED_MVREF_THRESH;
-#endif
}
static void inc_mv_component(int v, nmv_component_counts *comp_counts,
@@ -160,14 +157,16 @@
if (c == MV_CLASS_0) {
comp_counts->class0[d] += incr;
comp_counts->class0_fp[d][f] += incr;
- comp_counts->class0_hp[e] += usehp * incr;
+ if (usehp)
+ comp_counts->class0_hp[e] += incr;
} else {
int i;
int b = c + CLASS0_BITS - 1; // number of bits
for (i = 0; i < b; ++i)
comp_counts->bits[i][((d >> i) & 1)] += incr;
comp_counts->fp[f] += incr;
- comp_counts->hp[e] += usehp * incr;
+ if (usehp)
+ comp_counts->hp[e] += incr;
}
}
@@ -182,15 +181,11 @@
#endif
++counts->joints[j];
- if (mv_joint_vertical(j)) {
- inc_mv_component(mv->row, &counts->comps[0], 1,
- !CONFIG_MISC_FIXES || usehp);
- }
+ if (mv_joint_vertical(j))
+ inc_mv_component(mv->row, &counts->comps[0], 1, usehp);
- if (mv_joint_horizontal(j)) {
- inc_mv_component(mv->col, &counts->comps[1], 1,
- !CONFIG_MISC_FIXES || usehp);
- }
+ if (mv_joint_horizontal(j))
+ inc_mv_component(mv->col, &counts->comps[1], 1, usehp);
}
}
diff --git a/vp10/encoder/encoder.c b/vp10/encoder/encoder.c
index 40d4c36..437b366 100644
--- a/vp10/encoder/encoder.c
+++ b/vp10/encoder/encoder.c
@@ -243,10 +243,10 @@
#else
if (cpi->common.allow_high_precision_mv) {
mb->mvcost = mb->nmvcost_hp;
- mb->mvsadcost = mb->nmvsadcost_hp;
+ mb->mvsadcost = mb->nmvcost_hp;
} else {
mb->mvcost = mb->nmvcost;
- mb->mvsadcost = mb->nmvsadcost;
+ mb->mvsadcost = mb->nmvcost;
}
#endif
}
diff --git a/vp10/encoder/mcomp.c b/vp10/encoder/mcomp.c
index 823095e..dd0c311 100644
--- a/vp10/encoder/mcomp.c
+++ b/vp10/encoder/mcomp.c
@@ -98,22 +98,12 @@
static int mvsad_err_cost(const MACROBLOCK *x, const MV *mv, const MV *ref,
int sad_per_bit) {
-#if CONFIG_REF_MV
const MV diff = { (mv->row - ref->row) * 8,
(mv->col - ref->col) * 8 };
return ROUND_POWER_OF_TWO(
(unsigned)mv_cost(&diff, x->nmvjointsadcost, x->mvsadcost) *
sad_per_bit,
VP9_PROB_COST_SHIFT);
-#else
- const MV diff = { mv->row - ref->row,
- mv->col - ref->col };
-
- return ROUND_POWER_OF_TWO(
- (unsigned)mv_cost(&diff, x->nmvjointsadcost, x->nmvsadcost) *
- sad_per_bit,
- VP9_PROB_COST_SHIFT);
-#endif
}
void vp10_init_dsmotion_compensation(search_site_config *cfg, int stride) {