Regularize temporal filter's motion search with L1 norm
Previously, the temporal filter used the entropy rate cost of the mv
during full-pixel search, which can lead to instability depending on
how the mv's cdf has adapted. This CL replaces that cost with the L1
norm of the mv.
During full pixel motion search, the cost is now
sad + \lambda * |mv|_1
where \lambda is a hard-coded value chosen by resolution class: below
480p, at least 480p, or at least 720p. Better performance could be
achieved by picking \lambda adaptively based on the frame size and
motion activity level.
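A minimal sketch of how a caller such as the temporal filter could map
the frame resolution to the new L1 cost types is shown below; the
helper name and exact thresholds are assumptions for illustration, not
part of this change.

  // Sketch only: pick the MV_COST_L1_* class for full-pixel search.
  // select_mv_cost_type() is a hypothetical helper; the 480p/720p
  // thresholds follow the description above.
  static MV_COST_TYPE select_mv_cost_type(int frame_width,
                                          int frame_height) {
    const int frame_size = frame_width * frame_height;
    if (frame_size >= 1280 * 720) return MV_COST_L1_HDRES;
    if (frame_size >= 640 * 480) return MV_COST_L1_MIDRES;
    return MV_COST_L1_LOWRES;
  }

  // Assumed caller-side usage before the full-pixel search:
  //   x->mv_cost_type = select_mv_cost_type(cm->width, cm->height);

With MV_COST_L1_MIDRES, for example, a full-pel diff of (3, -2) becomes
(24, -16) in 1/8-pel units and adds (15 * (24 + 16)) >> 3 = 75 to the
SAD.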
Performance on Speed 1:
TESTSET | AVG_PSNR | OVR_PSNR | SSIM    |
LOWRES  | -0.049%  | -0.056%  | -0.141% |
MIDRES  | -0.169%  | -0.159%  | -0.304% |
HDRES   | -0.117%  | -0.118%  | -0.183% |
STATS_CHANGED
Change-Id: I399fc0dc4836d3b16cdfe589802bb4c0b68302a9
diff --git a/av1/encoder/mcomp.c b/av1/encoder/mcomp.c
index 1a77dad..df5f1b3 100644
--- a/av1/encoder/mcomp.c
+++ b/av1/encoder/mcomp.c
@@ -35,6 +35,21 @@
// #define NEW_DIAMOND_SEARCH
+// TODO(any): Adaptively adjust the regularization strength based on image size
+// and motion activity instead of using hard-coded values. It seems like we
+// roughly halve the lambda for each increase in resolution.
+// These are the multipliers used to perform regularization in motion
+// compensation when x->mv_cost_type is set to one of the MV_COST_L1_* types.
+// LOWRES
+#define SSE_LAMBDA_LOWRES 2 // Used by mv_cost_err_fn
+#define SAD_LAMBDA_LOWRES 32 // Used by mvsad_err_cost during full pixel search
+// MIDRES
+#define SSE_LAMBDA_MIDRES 0 // Used by mv_cost_err_fn
+#define SAD_LAMBDA_MIDRES 15 // Used by mvsad_err_cost during full pixel search
+// HDRES
+#define SSE_LAMBDA_HDRES 1 // Used by mv_cost_err_fn
+#define SAD_LAMBDA_HDRES 8 // Used by mvsad_err_cost during full pixel search
+
static INLINE const uint8_t *get_buf_from_mv(const struct buf_2d *buf,
const MV *mv) {
return &buf->buf[mv->row * buf->stride + mv->col];
@@ -70,37 +85,77 @@
return sr;
}
+// Returns the rate of encoding the current motion vector based on the
+// joint_cost and comp_cost. joint_cost covers the cost of transmitting
+// JOINT_MV, and comp_cost covers the cost of transmitting the actual motion
+// vector.
static INLINE int mv_cost(const MV *mv, const int *joint_cost,
int *const comp_cost[2]) {
return joint_cost[av1_get_mv_joint(mv)] + comp_cost[0][mv->row] +
comp_cost[1][mv->col];
}
+// Returns the cost of encoding the motion vector diff := *mv - *ref. The cost
+// is defined as the rate required to encode diff * weight, rounded to the
+// nearest 2 ** 7.
+// This is NOT used during motion compensation.
int av1_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,
int *mvcost[2], int weight) {
const MV diff = { mv->row - ref->row, mv->col - ref->col };
return ROUND_POWER_OF_TWO(mv_cost(&diff, mvjcost, mvcost) * weight, 7);
}
+// Returns the cost of using the current mv during the motion search. This is
+// applied when var is used as the error metric.
#define PIXEL_TRANSFORM_ERROR_SCALE 4
static int mv_err_cost(const MV *mv, const MV *ref, const int *mvjcost,
- int *mvcost[2], int error_per_bit) {
- if (mvcost) {
- const MV diff = { mv->row - ref->row, mv->col - ref->col };
- return (int)ROUND_POWER_OF_TWO_64(
- (int64_t)mv_cost(&diff, mvjcost, mvcost) * error_per_bit,
- RDDIV_BITS + AV1_PROB_COST_SHIFT - RD_EPB_SHIFT +
- PIXEL_TRANSFORM_ERROR_SCALE);
+ int *mvcost[2], int error_per_bit,
+ MV_COST_TYPE mv_cost_type) {
+ const MV diff = { mv->row - ref->row, mv->col - ref->col };
+ const MV abs_diff = { abs(diff.row), abs(diff.col) };
+
+ switch (mv_cost_type) {
+ case MV_COST_ENTROPY:
+ if (mvcost) {
+ return (int)ROUND_POWER_OF_TWO_64(
+ (int64_t)mv_cost(&diff, mvjcost, mvcost) * error_per_bit,
+ RDDIV_BITS + AV1_PROB_COST_SHIFT - RD_EPB_SHIFT +
+ PIXEL_TRANSFORM_ERROR_SCALE);
+ }
+ return 0;
+ case MV_COST_L1_LOWRES:
+ return (SSE_LAMBDA_LOWRES * (abs_diff.row + abs_diff.col)) >> 3;
+ case MV_COST_L1_MIDRES:
+ return (SSE_LAMBDA_MIDRES * (abs_diff.row + abs_diff.col)) >> 3;
+ case MV_COST_L1_HDRES:
+ return (SSE_LAMBDA_HDRES * (abs_diff.row + abs_diff.col)) >> 3;
+ case MV_COST_NONE: return 0;
+    default: assert(0 && "Invalid mv_cost_type"); return 0;
}
- return 0;
}
+// Returns the cost of using the current mv during the motion search. This is
+// only used during full pixel motion search when sad is used as the error
+// metric.
static int mvsad_err_cost(const MACROBLOCK *x, const MV *mv, const MV *ref,
int sad_per_bit) {
const MV diff = { (mv->row - ref->row) * 8, (mv->col - ref->col) * 8 };
- return ROUND_POWER_OF_TWO(
- (unsigned)mv_cost(&diff, x->nmv_vec_cost, x->mv_cost_stack) * sad_per_bit,
- AV1_PROB_COST_SHIFT);
+ const MV_COST_TYPE mv_cost_type = x->mv_cost_type;
+ switch (mv_cost_type) {
+ case MV_COST_ENTROPY:
+ return ROUND_POWER_OF_TWO(
+ (unsigned)mv_cost(&diff, x->nmv_vec_cost, x->mv_cost_stack) *
+ sad_per_bit,
+ AV1_PROB_COST_SHIFT);
+ case MV_COST_L1_LOWRES:
+ return (SAD_LAMBDA_LOWRES * (abs(diff.row) + abs(diff.col))) >> 3;
+ case MV_COST_L1_MIDRES:
+ return (SAD_LAMBDA_MIDRES * (abs(diff.row) + abs(diff.col))) >> 3;
+ case MV_COST_L1_HDRES:
+ return (SAD_LAMBDA_HDRES * (abs(diff.row) + abs(diff.col))) >> 3;
+ case MV_COST_NONE: return 0;
+    default: assert(0 && "Invalid mv_cost_type"); return 0;
+ }
}
void av1_init_dsmotion_compensation(search_site_config *cfg, int stride) {
@@ -242,7 +297,8 @@
#define CHECK_BETTER(v, r, c) \
if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
MV this_mv = { r, c }; \
- v = mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit); \
+ v = mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit, \
+ mv_cost_type); \
if (second_pred == NULL) { \
thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), \
src_address, src_stride, &sse); \
@@ -276,7 +332,8 @@
xd, cm, mi_row, mi_col, &this_mv, vfp, src_address, src_stride, \
pre(y, y_stride, r, c), y_stride, sp(c), sp(r), second_pred, mask, \
mask_stride, invert_mask, w, h, &sse, use_accurate_subpel_search); \
- v = mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit); \
+ v = mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit, \
+ mv_cost_type); \
v += thismse; \
if (v < besterr) { \
besterr = v; \
@@ -373,6 +430,7 @@
const int y_stride = xd->plane[0].pre[0].stride; \
const int offset = bestmv->row * y_stride + bestmv->col; \
const uint8_t *const y = xd->plane[0].pre[0].buf; \
+ const MV_COST_TYPE mv_cost_type = x->mv_cost_type; \
\
int br = bestmv->row * 8; \
int bc = bestmv->col * 8; \
@@ -393,7 +451,8 @@
const uint8_t *const src, const int src_stride, const uint8_t *const y,
int y_stride, const uint8_t *second_pred, const uint8_t *mask,
int mask_stride, int invert_mask, int w, int h, int offset, int *mvjcost,
- int *mvcost[2], unsigned int *sse1, int *distortion) {
+ int *mvcost[2], unsigned int *sse1, int *distortion,
+ const MV_COST_TYPE mv_cost_type) {
unsigned int besterr;
if (second_pred != NULL) {
#if CONFIG_AV1_HIGHBITDEPTH
@@ -433,7 +492,8 @@
besterr = vfp->vf(y + offset, y_stride, src, src_stride, sse1);
}
*distortion = besterr;
- besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
+ besterr +=
+ mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit, mv_cost_type);
return besterr;
}
@@ -471,10 +531,10 @@
int mask_stride, int invert_mask, int w, int h,
int use_accurate_subpel_search, const int do_reset_fractional_mv) {
SETUP_SUBPEL_SEARCH;
- besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp,
- src_address, src_stride, y, y_stride,
- second_pred, mask, mask_stride, invert_mask, w,
- h, offset, mvjcost, mvcost, sse1, distortion);
+ besterr = setup_center_error(
+ xd, bestmv, ref_mv, error_per_bit, vfp, src_address, src_stride, y,
+ y_stride, second_pred, mask, mask_stride, invert_mask, w, h, offset,
+ mvjcost, mvcost, sse1, distortion, mv_cost_type);
(void)halfiters;
(void)quarteriters;
(void)eighthiters;
@@ -550,10 +610,10 @@
(void)mi_col;
(void)do_reset_fractional_mv;
- besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp,
- src_address, src_stride, y, y_stride,
- second_pred, mask, mask_stride, invert_mask, w,
- h, offset, mvjcost, mvcost, sse1, distortion);
+ besterr = setup_center_error(
+ xd, bestmv, ref_mv, error_per_bit, vfp, src_address, src_stride, y,
+ y_stride, second_pred, mask, mask_stride, invert_mask, w, h, offset,
+ mvjcost, mvcost, sse1, distortion, mv_cost_type);
if (cost_list && cost_list[0] != INT_MAX && cost_list[1] != INT_MAX &&
cost_list[2] != INT_MAX && cost_list[3] != INT_MAX &&
cost_list[4] != INT_MAX && is_cost_list_wellbehaved(cost_list)) {
@@ -619,10 +679,10 @@
(void)mi_col;
(void)do_reset_fractional_mv;
- besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp,
- src_address, src_stride, y, y_stride,
- second_pred, mask, mask_stride, invert_mask, w,
- h, offset, mvjcost, mvcost, sse1, distortion);
+ besterr = setup_center_error(
+ xd, bestmv, ref_mv, error_per_bit, vfp, src_address, src_stride, y,
+ y_stride, second_pred, mask, mask_stride, invert_mask, w, h, offset,
+ mvjcost, mvcost, sse1, distortion, mv_cost_type);
if (cost_list && cost_list[0] != INT_MAX && cost_list[1] != INT_MAX &&
cost_list[2] != INT_MAX && cost_list[3] != INT_MAX &&
cost_list[4] != INT_MAX) {
@@ -785,13 +845,15 @@
const int src_stride, const uint8_t *const y, int y_stride,
const uint8_t *second_pred, const uint8_t *mask, int mask_stride,
int invert_mask, int w, int h, int offset, int *mvjcost, int *mvcost[2],
- unsigned int *sse1, int *distortion, int subpel_search) {
+ unsigned int *sse1, int *distortion, int subpel_search,
+ MV_COST_TYPE mv_cost_type) {
unsigned int besterr =
upsampled_pref_error(xd, cm, mi_row, mi_col, bestmv, vfp, src, src_stride,
y + offset, y_stride, 0, 0, second_pred, mask,
mask_stride, invert_mask, w, h, sse1, subpel_search);
*distortion = besterr;
- besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
+ besterr +=
+ mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit, mv_cost_type);
return besterr;
}
@@ -831,6 +893,7 @@
MV *bestmv = &x->best_mv.as_mv;
const int offset = bestmv->row * y_stride + bestmv->col;
const uint8_t *const y = xd->plane[0].pre[0].buf;
+ const MV_COST_TYPE mv_cost_type = x->mv_cost_type;
int br = bestmv->row * 8;
int bc = bestmv->col * 8;
@@ -857,12 +920,12 @@
xd, cm, mi_row, mi_col, bestmv, ref_mv, error_per_bit, vfp, src_address,
src_stride, y, y_stride, second_pred, mask, mask_stride, invert_mask, w,
h, offset, mvjcost, mvcost, sse1, distortion,
- use_accurate_subpel_search);
+ use_accurate_subpel_search, mv_cost_type);
else
- besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp,
- src_address, src_stride, y, y_stride,
- second_pred, mask, mask_stride, invert_mask, w,
- h, offset, mvjcost, mvcost, sse1, distortion);
+ besterr = setup_center_error(
+ xd, bestmv, ref_mv, error_per_bit, vfp, src_address, src_stride, y,
+ y_stride, second_pred, mask, mask_stride, invert_mask, w, h, offset,
+ mvjcost, mvcost, sse1, distortion, mv_cost_type);
(void)cost_list; // to silence compiler warning
@@ -896,8 +959,9 @@
&sse);
}
- cost_array[idx] = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost,
- mvcost, error_per_bit);
+ cost_array[idx] =
+ thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
+ error_per_bit, mv_cost_type);
if (cost_array[idx] < besterr) {
best_idx = idx;
@@ -932,7 +996,7 @@
}
cost_array[4] = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
- error_per_bit);
+ error_per_bit, mv_cost_type);
if (cost_array[4] < besterr) {
best_idx = 4;
@@ -993,12 +1057,13 @@
unsigned int sse;
const int mi_row = xd->mi_row;
const int mi_col = xd->mi_col;
+ const MV_COST_TYPE mv_cost_type = x->mv_cost_type;
av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, NULL, bsize,
AOM_PLANE_Y, AOM_PLANE_Y);
mse = vfp->vf(dst, dst_stride, src, src_stride, &sse);
mse += mv_err_cost(this_mv, &ref_mv.as_mv, x->nmv_vec_cost, x->mv_cost_stack,
- x->errorperbit);
+ x->errorperbit, mv_cost_type);
return mse;
}
@@ -1122,6 +1187,7 @@
const MV fcenter_mv = { ref_mv->row >> 3, ref_mv->col >> 3 };
const int br = best_mv->row;
const int bc = best_mv->col;
+ const MV_COST_TYPE mv_cost_type = x->mv_cost_type;
unsigned int sse;
cost_list[0] =
@@ -1138,7 +1204,7 @@
get_buf_from_mv(in_what, &neighbor_mv), in_what->stride,
&sse) +
mv_err_cost(&neighbor_mv_subpel, ref_mv, x->nmv_vec_cost,
- x->mv_cost_stack, x->errorperbit);
+ x->mv_cost_stack, x->errorperbit, mv_cost_type);
}
} else {
for (int i = 0; i < 4; i++) {
@@ -1153,7 +1219,7 @@
get_buf_from_mv(in_what, &neighbor_mv), in_what->stride,
&sse) +
mv_err_cost(&neighbor_mv_subpel, ref_mv, x->nmv_vec_cost,
- x->mv_cost_stack, x->errorperbit);
+ x->mv_cost_stack, x->errorperbit, mv_cost_type);
}
}
}
@@ -1472,6 +1538,7 @@
const struct buf_2d *const what = &x->plane[0].src;
const struct buf_2d *const in_what = &xd->plane[0].pre[0];
const MV mv = { best_mv->row * 8, best_mv->col * 8 };
+ const MV_COST_TYPE mv_cost_type = x->mv_cost_type;
unsigned int sse, var;
var = vfp->vf(what->buf, what->stride, get_buf_from_mv(in_what, best_mv),
@@ -1480,7 +1547,7 @@
if (!use_var) var = sse;
return var + mv_err_cost(&mv, center_mv, x->nmv_vec_cost, x->mv_cost_stack,
- x->errorperbit);
+ x->errorperbit, mv_cost_type);
}
int av1_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
@@ -1491,13 +1558,15 @@
const struct buf_2d *const what = src;
const struct buf_2d *const in_what = pre;
const MV mv = { best_mv->row * 8, best_mv->col * 8 };
+ const MV_COST_TYPE mv_cost_type = x->mv_cost_type;
unsigned int unused;
return vfp->svaf(get_buf_from_mv(in_what, best_mv), in_what->stride, 0, 0,
what->buf, what->stride, &unused, second_pred) +
- (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmv_vec_cost,
- x->mv_cost_stack, x->errorperbit)
- : 0);
+ (use_mvcost
+ ? mv_err_cost(&mv, center_mv, x->nmv_vec_cost, x->mv_cost_stack,
+ x->errorperbit, mv_cost_type)
+ : 0);
}
int av1_get_mvpred_mask_var(const MACROBLOCK *x, const MV *best_mv,
@@ -1509,14 +1578,16 @@
const struct buf_2d *const what = src;
const struct buf_2d *const in_what = pre;
const MV mv = { best_mv->row * 8, best_mv->col * 8 };
+ const MV_COST_TYPE mv_cost_type = x->mv_cost_type;
unsigned int unused;
return vfp->msvf(what->buf, what->stride, 0, 0,
get_buf_from_mv(in_what, best_mv), in_what->stride,
second_pred, mask, mask_stride, invert_mask, &unused) +
- (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmv_vec_cost,
- x->mv_cost_stack, x->errorperbit)
- : 0);
+ (use_mvcost
+ ? mv_err_cost(&mv, center_mv, x->nmv_vec_cost, x->mv_cost_stack,
+ x->errorperbit, mv_cost_type)
+ : 0);
}
int av1_hex_search(MACROBLOCK *x, MV *start_mv, int search_param,
@@ -2531,7 +2602,8 @@
mask, vfp, z, pre(y, y_stride, r, c), \
y_stride, sp(c), sp(r), w, h, &sse, \
use_accurate_subpel_search); \
- v = mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit); \
+ v = mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit, \
+ mv_cost_type); \
if ((v + thismse) < besterr) { \
besterr = v + thismse; \
br = r; \
@@ -2547,11 +2619,13 @@
const int32_t *mask, const MV *bestmv, const MV *ref_mv, int error_per_bit,
const aom_variance_fn_ptr_t *vfp, const int32_t *const wsrc,
const uint8_t *const y, int y_stride, int offset, int *mvjcost,
- int *mvcost[2], unsigned int *sse1, int *distortion) {
+ int *mvcost[2], unsigned int *sse1, int *distortion,
+ MV_COST_TYPE mv_cost_type) {
unsigned int besterr;
besterr = vfp->ovf(y + offset, y_stride, wsrc, mask, sse1);
*distortion = besterr;
- besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
+ besterr +=
+ mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit, mv_cost_type);
return besterr;
}
@@ -2592,12 +2666,13 @@
const aom_variance_fn_ptr_t *vfp, const int32_t *const wsrc,
const uint8_t *const y, int y_stride, int w, int h, int offset,
int *mvjcost, int *mvcost[2], unsigned int *sse1, int *distortion,
- int subpel_search) {
+ int subpel_search, MV_COST_TYPE mv_cost_type) {
unsigned int besterr = upsampled_obmc_pref_error(
xd, cm, mi_row, mi_col, bestmv, mask, vfp, wsrc, y + offset, y_stride, 0,
0, w, h, sse1, subpel_search);
*distortion = besterr;
- besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
+ besterr +=
+ mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit, mv_cost_type);
return besterr;
}
@@ -2611,6 +2686,7 @@
const int32_t *mask = x->mask_buf;
const int *const z = wsrc;
const int *const src_address = z;
+ const MV_COST_TYPE mv_cost_type = x->mv_cost_type;
MACROBLOCKD *xd = &x->e_mbd;
struct macroblockd_plane *const pd = &xd->plane[0];
MB_MODE_INFO *mbmi = xd->mi[0];
@@ -2655,11 +2731,11 @@
besterr = upsampled_setup_obmc_center_error(
xd, cm, mi_row, mi_col, mask, bestmv, ref_mv, error_per_bit, vfp, z, y,
y_stride, w, h, offset, mvjcost, mvcost, sse1, distortion,
- use_accurate_subpel_search);
+ use_accurate_subpel_search, mv_cost_type);
else
besterr = setup_obmc_center_error(mask, bestmv, ref_mv, error_per_bit, vfp,
z, y, y_stride, offset, mvjcost, mvcost,
- sse1, distortion);
+ sse1, distortion, mv_cost_type);
for (iter = 0; iter < round; ++iter) {
// Check vertical and horizontal sub-pixel positions.
@@ -2678,8 +2754,9 @@
sp(tr), src_address, mask, &sse);
}
- cost_array[idx] = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost,
- mvcost, error_per_bit);
+ cost_array[idx] =
+ thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
+ error_per_bit, mv_cost_type);
if (cost_array[idx] < besterr) {
best_idx = idx;
besterr = cost_array[idx];
@@ -2711,7 +2788,7 @@
}
cost_array[4] = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
- error_per_bit);
+ error_per_bit, mv_cost_type);
if (cost_array[4] < besterr) {
best_idx = 4;
@@ -2770,13 +2847,15 @@
const MACROBLOCKD *const xd = &x->e_mbd;
const struct buf_2d *const in_what = &xd->plane[0].pre[is_second];
const MV mv = { best_mv->row * 8, best_mv->col * 8 };
+ const MV_COST_TYPE mv_cost_type = x->mv_cost_type;
unsigned int unused;
return vfp->ovf(get_buf_from_mv(in_what, best_mv), in_what->stride, wsrc,
mask, &unused) +
- (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmv_vec_cost,
- x->mv_cost_stack, x->errorperbit)
- : 0);
+ (use_mvcost
+ ? mv_err_cost(&mv, center_mv, x->nmv_vec_cost, x->mv_cost_stack,
+ x->errorperbit, mv_cost_type)
+ : 0);
}
static int obmc_refining_search_sad(const MACROBLOCK *x, const int32_t *wsrc,
@@ -3027,6 +3106,7 @@
(void)mi_row;
(void)mi_col;
(void)do_reset_fractional_mv;
+ (void)mv_cost_type;
bestmv->row = maxr;
bestmv->col = maxc;
@@ -3056,6 +3136,7 @@
(void)mi_row;
(void)mi_col;
(void)do_reset_fractional_mv;
+ (void)mv_cost_type;
bestmv->row = minr;
bestmv->col = minc;