Make the fullpel compound search use the new mcomp struct
BUG=aomedia:2594
Change-Id: I6d6f4bd9ddfd9341df4762b370457382afddcd91
diff --git a/av1/encoder/mcomp.c b/av1/encoder/mcomp.c
index e4d9d6a..ef505ec 100644
--- a/av1/encoder/mcomp.c
+++ b/av1/encoder/mcomp.c
@@ -48,10 +48,7 @@
ms_buffers->ref = &x->e_mbd.plane[0].pre[0];
ms_buffers->src = &x->plane[0].src;
- ms_buffers->second_pred = NULL;
- ms_buffers->mask = NULL;
- ms_buffers->mask_stride = 0;
- ms_buffers->inv_mask = 0;
+ set_ms_compound_refs(ms_buffers, NULL, NULL, 0, 0);
ms_buffers->wsrc = x->wsrc_buf;
ms_buffers->obmc_mask = x->mask_buf;
@@ -116,10 +113,7 @@
// Ref and src buffers
MSBuffers *ms_buffers = &ms_params->var_params.ms_buffers;
init_ms_buffers(ms_buffers, x);
- ms_buffers->second_pred = second_pred;
- ms_buffers->mask = mask;
- ms_buffers->mask_stride = mask_stride;
- ms_buffers->inv_mask = invert_mask;
+ set_ms_compound_refs(ms_buffers, second_pred, mask, mask_stride, invert_mask);
}
static INLINE int get_offset_from_fullmv(const FULLPEL_MV *mv, int stride) {
@@ -1341,13 +1335,8 @@
// This function is called when we do joint motion search in comp_inter_inter
// mode, or when searching for one component of an ext-inter compound mode.
-int av1_refining_search_8p_c(MACROBLOCK *x, int error_per_bit, int search_range,
- const aom_variance_fn_ptr_t *fn_ptr,
- const uint8_t *mask, int mask_stride,
- int invert_mask, const MV *ref_mv,
- const uint8_t *second_pred,
- const struct buf_2d *src,
- const struct buf_2d *pre) {
+int av1_refining_search_8p_c(const FULLPEL_MOTION_SEARCH_PARAMS *ms_params,
+ const FULLPEL_MV start_mv, FULLPEL_MV *best_mv) {
static const search_neighbors neighbors[8] = {
{ { -1, 0 }, -1 * SEARCH_GRID_STRIDE_8P + 0 },
{ { 0, -1 }, 0 * SEARCH_GRID_STRIDE_8P - 1 },
@@ -1358,41 +1347,32 @@
{ { -1, 1 }, -1 * SEARCH_GRID_STRIDE_8P + 1 },
{ { 1, 1 }, 1 * SEARCH_GRID_STRIDE_8P + 1 }
};
- const struct buf_2d *const what = src;
- const struct buf_2d *const in_what = pre;
- const FULLPEL_MV full_ref_mv = get_fullmv_from_mv(ref_mv);
- FULLPEL_MV *best_mv = &x->best_mv.as_fullmv;
- unsigned int best_sad = INT_MAX;
- int i, j;
+
uint8_t do_refine_search_grid[SEARCH_GRID_STRIDE_8P *
SEARCH_GRID_STRIDE_8P] = { 0 };
int grid_center = SEARCH_GRID_CENTER_8P;
int grid_coord = grid_center;
- clamp_fullmv(best_mv, &x->mv_limits);
- if (mask) {
- best_sad =
- fn_ptr->msdf(what->buf, what->stride,
- get_buf_from_fullmv(in_what, best_mv), in_what->stride,
- second_pred, mask, mask_stride, invert_mask) +
- mvsad_err_cost(best_mv, &full_ref_mv, x->nmv_vec_cost,
- CONVERT_TO_CONST_MVCOST(x->mv_cost_stack), error_per_bit,
- x->mv_cost_type);
- } else {
- best_sad = fn_ptr->sdaf(what->buf, what->stride,
- get_buf_from_fullmv(in_what, best_mv),
- in_what->stride, second_pred) +
- mvsad_err_cost(best_mv, &full_ref_mv, x->nmv_vec_cost,
- CONVERT_TO_CONST_MVCOST(x->mv_cost_stack),
- error_per_bit, x->mv_cost_type);
- }
+ const MV_COST_PARAMS *mv_cost_params = &ms_params->mv_cost_params;
+ const FullMvLimits *mv_limits = &ms_params->mv_limits;
+ const MSBuffers *ms_buffers = &ms_params->ms_buffers;
+ const struct buf_2d *src = ms_buffers->src;
+ const struct buf_2d *ref = ms_buffers->ref;
+ const int ref_stride = ref->stride;
+
+ *best_mv = start_mv;
+ clamp_fullmv(best_mv, mv_limits);
+
+ unsigned int best_sad = get_mvpred_compound_sad(
+ ms_params, src, get_buf_from_fullmv(ref, best_mv), ref_stride);
+ best_sad += mvsad_err_cost_(best_mv, mv_cost_params);
do_refine_search_grid[grid_coord] = 1;
- for (i = 0; i < search_range; ++i) {
+ for (int i = 0; i < SEARCH_RANGE_8P; ++i) {
int best_site = -1;
- for (j = 0; j < 8; ++j) {
+ for (int j = 0; j < 8; ++j) {
grid_coord = grid_center + neighbors[j].coord_offset;
if (do_refine_search_grid[grid_coord] == 1) {
continue;
@@ -1401,21 +1381,12 @@
best_mv->col + neighbors[j].coord.col };
do_refine_search_grid[grid_coord] = 1;
- if (av1_is_fullmv_in_range(&x->mv_limits, mv)) {
+ if (av1_is_fullmv_in_range(mv_limits, mv)) {
unsigned int sad;
- if (mask) {
- sad = fn_ptr->msdf(what->buf, what->stride,
- get_buf_from_fullmv(in_what, &mv), in_what->stride,
- second_pred, mask, mask_stride, invert_mask);
- } else {
- sad = fn_ptr->sdaf(what->buf, what->stride,
- get_buf_from_fullmv(in_what, &mv), in_what->stride,
- second_pred);
- }
+ sad = get_mvpred_compound_sad(
+ ms_params, src, get_buf_from_fullmv(ref, &mv), ref_stride);
if (sad < best_sad) {
- sad += mvsad_err_cost(&mv, &full_ref_mv, x->nmv_vec_cost,
- CONVERT_TO_CONST_MVCOST(x->mv_cost_stack),
- error_per_bit, x->mv_cost_type);
+ sad += mvsad_err_cost_(&mv, mv_cost_params);
if (sad < best_sad) {
best_sad = sad;
@@ -3369,41 +3340,52 @@
x->errorperbit, mv_cost_type);
}
-int av1_get_mvpred_av_var(const MACROBLOCK *x, const FULLPEL_MV *best_mv,
- const MV *ref_mv, const uint8_t *second_pred,
- const aom_variance_fn_ptr_t *vfp,
- const struct buf_2d *src, const struct buf_2d *pre) {
+static INLINE int get_mvpred_av_var(const MV_COST_PARAMS *mv_cost_params,
+ const FULLPEL_MV best_mv,
+ const uint8_t *second_pred,
+ const aom_variance_fn_ptr_t *vfp,
+ const struct buf_2d *src,
+ const struct buf_2d *pre) {
const struct buf_2d *const what = src;
const struct buf_2d *const in_what = pre;
- const MV mv = get_mv_from_fullmv(best_mv);
- const MV_COST_TYPE mv_cost_type = x->mv_cost_type;
+ const MV mv = get_mv_from_fullmv(&best_mv);
unsigned int unused;
- return vfp->svaf(get_buf_from_fullmv(in_what, best_mv), in_what->stride, 0, 0,
- what->buf, what->stride, &unused, second_pred) +
- mv_err_cost(&mv, ref_mv, x->nmv_vec_cost,
- CONVERT_TO_CONST_MVCOST(x->mv_cost_stack), x->errorperbit,
- mv_cost_type);
+ return vfp->svaf(get_buf_from_fullmv(in_what, &best_mv), in_what->stride, 0,
+ 0, what->buf, what->stride, &unused, second_pred) +
+ mv_err_cost_(&mv, mv_cost_params);
}
-int av1_get_mvpred_mask_var(const MACROBLOCK *x, const FULLPEL_MV *best_mv,
- const MV *ref_mv, const uint8_t *second_pred,
- const uint8_t *mask, int mask_stride,
- int invert_mask, const aom_variance_fn_ptr_t *vfp,
- const struct buf_2d *src,
- const struct buf_2d *pre) {
+static INLINE int get_mvpred_mask_var(
+ const MV_COST_PARAMS *mv_cost_params, const FULLPEL_MV best_mv,
+ const uint8_t *second_pred, const uint8_t *mask, int mask_stride,
+ int invert_mask, const aom_variance_fn_ptr_t *vfp, const struct buf_2d *src,
+ const struct buf_2d *pre) {
const struct buf_2d *const what = src;
const struct buf_2d *const in_what = pre;
- const MV mv = get_mv_from_fullmv(best_mv);
- const MV_COST_TYPE mv_cost_type = x->mv_cost_type;
+ const MV mv = get_mv_from_fullmv(&best_mv);
unsigned int unused;
return vfp->msvf(what->buf, what->stride, 0, 0,
- get_buf_from_fullmv(in_what, best_mv), in_what->stride,
+ get_buf_from_fullmv(in_what, &best_mv), in_what->stride,
second_pred, mask, mask_stride, invert_mask, &unused) +
- mv_err_cost(&mv, ref_mv, x->nmv_vec_cost,
- CONVERT_TO_CONST_MVCOST(x->mv_cost_stack), x->errorperbit,
- mv_cost_type);
+ mv_err_cost_(&mv, mv_cost_params);
+}
+
+int av1_get_mvpred_compound_var(const MV_COST_PARAMS *mv_cost_params,
+ const FULLPEL_MV best_mv,
+ const uint8_t *second_pred, const uint8_t *mask,
+ int mask_stride, int invert_mask,
+ const aom_variance_fn_ptr_t *vfp,
+ const struct buf_2d *src,
+ const struct buf_2d *pre) {
+ if (mask) {
+ return get_mvpred_mask_var(mv_cost_params, best_mv, second_pred, mask,
+ mask_stride, invert_mask, vfp, src, pre);
+ } else {
+ return get_mvpred_av_var(mv_cost_params, best_mv, second_pred, vfp, src,
+ pre);
+ }
}
unsigned int av1_compute_motion_cost(const AV1_COMP *cpi, MACROBLOCK *const x,
diff --git a/av1/encoder/mcomp.h b/av1/encoder/mcomp.h
index 4e24aeb..5358f13 100644
--- a/av1/encoder/mcomp.h
+++ b/av1/encoder/mcomp.h
@@ -77,33 +77,21 @@
const MV *ref_mv, const aom_variance_fn_ptr_t *vfp);
int av1_get_mvpred_var(const MACROBLOCK *x, const FULLPEL_MV *best_mv,
const MV *ref_mv, const aom_variance_fn_ptr_t *vfp);
-int av1_get_mvpred_av_var(const MACROBLOCK *x, const FULLPEL_MV *best_mv,
- const MV *ref_mv, const uint8_t *second_pred,
- const aom_variance_fn_ptr_t *vfp,
- const struct buf_2d *src, const struct buf_2d *pre);
-int av1_get_mvpred_mask_var(const MACROBLOCK *x, const FULLPEL_MV *best_mv,
- const MV *ref_mv, const uint8_t *second_pred,
- const uint8_t *mask, int mask_stride,
- int invert_mask, const aom_variance_fn_ptr_t *vfp,
- const struct buf_2d *src, const struct buf_2d *pre);
+int av1_get_mvpred_compound_var(const MV_COST_PARAMS *ms_params,
+ const FULLPEL_MV best_mv,
+ const uint8_t *second_pred, const uint8_t *mask,
+ int mask_stride, int invert_mask,
+ const aom_variance_fn_ptr_t *vfp,
+ const struct buf_2d *src,
+ const struct buf_2d *pre);
unsigned int av1_compute_motion_cost(const struct AV1_COMP *cpi,
MACROBLOCK *const x, BLOCK_SIZE bsize,
const MV *this_mv);
// =============================================================================
-// Fullpixel Motion Search
+// Motion Search
// =============================================================================
-enum {
- DIAMOND = 0,
- NSTEP = 1,
- HEX = 2,
- BIGDIA = 3,
- SQUARE = 4,
- FAST_HEX = 5,
- FAST_DIAMOND = 6
-} UENUM1BYTE(SEARCH_METHODS);
-
typedef struct {
// The reference buffer
const struct buf_2d *ref;
@@ -120,6 +108,29 @@
const int32_t *obmc_mask;
} MSBuffers;
+static INLINE void set_ms_compound_refs(MSBuffers *ms_buffers,
+ const uint8_t *second_pred,
+ const uint8_t *mask, int mask_stride,
+ int invert_mask) {
+ ms_buffers->second_pred = second_pred;
+ ms_buffers->mask = mask;
+ ms_buffers->mask_stride = mask_stride;
+ ms_buffers->inv_mask = invert_mask;
+}
+
+// =============================================================================
+// Fullpixel Motion Search
+// =============================================================================
+enum {
+ DIAMOND = 0,
+ NSTEP = 1,
+ HEX = 2,
+ BIGDIA = 3,
+ SQUARE = 4,
+ FAST_HEX = 5,
+ FAST_DIAMOND = 6
+} UENUM1BYTE(SEARCH_METHODS);
+
// This struct holds fullpixel motion search parameters that should be constant
// during the search
typedef struct {
@@ -205,13 +216,8 @@
int mi_row, int mi_col,
const MV *ref_mv);
-int av1_refining_search_8p_c(MACROBLOCK *x, int error_per_bit, int search_range,
- const aom_variance_fn_ptr_t *fn_ptr,
- const uint8_t *mask, int mask_stride,
- int invert_mask, const MV *ref_mv,
- const uint8_t *second_pred,
- const struct buf_2d *src,
- const struct buf_2d *pre);
+int av1_refining_search_8p_c(const FULLPEL_MOTION_SEARCH_PARAMS *ms_params,
+ const FULLPEL_MV start_mv, FULLPEL_MV *best_mv);
int av1_full_pixel_search(const FULLPEL_MV start_mv,
const FULLPEL_MOTION_SEARCH_PARAMS *ms_params,
@@ -226,11 +232,6 @@
const FULLPEL_MOTION_SEARCH_PARAMS *ms_params,
const int step_param, FULLPEL_MV *best_mv);
-unsigned int av1_refine_warped_mv(const struct AV1_COMP *cpi,
- MACROBLOCK *const x, BLOCK_SIZE bsize,
- int *pts0, int *pts_inref0,
- int total_samples);
-
static INLINE int av1_is_fullmv_in_range(const FullMvLimits *mv_limits,
FULLPEL_MV mv) {
return (mv.col >= mv_limits->col_min) && (mv.col <= mv_limits->col_max) &&
@@ -293,6 +294,11 @@
extern fractional_mv_step_fp av1_return_min_sub_pixel_mv;
extern fractional_mv_step_fp av1_find_best_obmc_sub_pixel_tree_up;
+unsigned int av1_refine_warped_mv(const struct AV1_COMP *cpi,
+ MACROBLOCK *const x, BLOCK_SIZE bsize,
+ int *pts0, int *pts_inref0,
+ int total_samples);
+
static INLINE void av1_set_fractional_mv(int_mv *fractional_best_mv) {
for (int z = 0; z < 3; z++) {
fractional_best_mv[z].as_int = INVALID_MV;
diff --git a/av1/encoder/motion_search_facade.c b/av1/encoder/motion_search_facade.c
index fe14fdb..f131ece 100644
--- a/av1/encoder/motion_search_facade.c
+++ b/av1/encoder/motion_search_facade.c
@@ -394,14 +394,11 @@
uint8_t *second_pred = get_buf_by_bd(xd, second_pred16);
int_mv *best_int_mv = &x->best_mv;
- const int search_range = SEARCH_RANGE_8P;
- const int sadpb = x->sadperbit;
// Allow joint search multiple times iteratively for each reference frame
// and break out of the search loop if it couldn't find a better mv.
for (ite = 0; ite < 4; ite++) {
struct buf_2d ref_yv12[2];
int bestsme = INT_MAX;
- FullMvLimits tmp_mv_limits = x->mv_limits;
int id = ite % 2; // Even iterations search in the first reference frame,
// odd iterations search in the second. The predictor
// found for the 'other' reference frame is factored in.
@@ -461,28 +458,27 @@
// Do full-pixel compound motion search on the current reference frame.
if (id) xd->plane[plane].pre[0] = ref_yv12[id];
- av1_set_mv_search_range(&x->mv_limits, &ref_mv[id].as_mv);
+
+ // Make motion search params
+ FULLPEL_MOTION_SEARCH_PARAMS full_ms_params;
+ av1_make_default_fullpel_ms_params(&full_ms_params, cpi, x, bsize,
+ &ref_mv[id].as_mv, NULL);
+ set_ms_compound_refs(&full_ms_params.ms_buffers, second_pred, mask,
+ mask_stride, id);
// Use the mv result from the single mode as mv predictor.
- best_int_mv->as_fullmv = get_fullmv_from_mv(&cur_mv[id].as_mv);
+ const FULLPEL_MV start_fullmv = get_fullmv_from_mv(&cur_mv[id].as_mv);
// Small-range full-pixel motion search.
- bestsme = av1_refining_search_8p_c(
- x, sadpb, search_range, &cpi->fn_ptr[bsize], mask, mask_stride, id,
- &ref_mv[id].as_mv, second_pred, &x->plane[0].src, &ref_yv12[id]);
- if (bestsme < INT_MAX) {
- if (mask)
- bestsme = av1_get_mvpred_mask_var(x, &best_int_mv->as_fullmv,
- &ref_mv[id].as_mv, second_pred, mask,
- mask_stride, id, &cpi->fn_ptr[bsize],
- &x->plane[0].src, &ref_yv12[id]);
- else
- bestsme = av1_get_mvpred_av_var(
- x, &best_int_mv->as_fullmv, &ref_mv[id].as_mv, second_pred,
- &cpi->fn_ptr[bsize], &x->plane[0].src, &ref_yv12[id]);
- }
+ bestsme = av1_refining_search_8p_c(&full_ms_params, start_fullmv,
+ &best_int_mv->as_fullmv);
- x->mv_limits = tmp_mv_limits;
+ if (bestsme < INT_MAX) {
+ bestsme = av1_get_mvpred_compound_var(
+ &full_ms_params.mv_cost_params, best_int_mv->as_fullmv, second_pred,
+ mask, mask_stride, id, &cpi->fn_ptr[bsize], &x->plane[0].src,
+ &ref_yv12[id]);
+ }
// Restore the pointer to the first (possibly scaled) prediction buffer.
if (id) xd->plane[plane].pre[0] = ref_yv12[0];
@@ -583,35 +579,28 @@
}
int bestsme = INT_MAX;
- int sadpb = x->sadperbit;
int_mv *best_int_mv = &x->best_mv;
- int search_range = SEARCH_RANGE_8P;
- FullMvLimits tmp_mv_limits = x->mv_limits;
-
- // Do compound motion search on the current reference frame.
- av1_set_mv_search_range(&x->mv_limits, &ref_mv.as_mv);
+ // Make motion search params
+ FULLPEL_MOTION_SEARCH_PARAMS full_ms_params;
+ av1_make_default_fullpel_ms_params(&full_ms_params, cpi, x, bsize,
+ &ref_mv.as_mv, NULL);
+ set_ms_compound_refs(&full_ms_params.ms_buffers, second_pred, mask,
+ mask_stride, ref_idx);
// Use the mv result from the single mode as mv predictor.
- best_int_mv->as_fullmv = get_fullmv_from_mv(this_mv);
+ const FULLPEL_MV start_fullmv = get_fullmv_from_mv(this_mv);
// Small-range full-pixel motion search.
- bestsme = av1_refining_search_8p_c(
- x, sadpb, search_range, &cpi->fn_ptr[bsize], mask, mask_stride, ref_idx,
- &ref_mv.as_mv, second_pred, &x->plane[0].src, &ref_yv12);
- if (bestsme < INT_MAX) {
- if (mask)
- bestsme = av1_get_mvpred_mask_var(
- x, &best_int_mv->as_fullmv, &ref_mv.as_mv, second_pred, mask,
- mask_stride, ref_idx, &cpi->fn_ptr[bsize], &x->plane[0].src,
- &ref_yv12);
- else
- bestsme = av1_get_mvpred_av_var(x, &best_int_mv->as_fullmv, &ref_mv.as_mv,
- second_pred, &cpi->fn_ptr[bsize],
- &x->plane[0].src, &ref_yv12);
- }
+ bestsme = av1_refining_search_8p_c(&full_ms_params, start_fullmv,
+ &best_int_mv->as_fullmv);
- x->mv_limits = tmp_mv_limits;
+ if (bestsme < INT_MAX) {
+ bestsme = av1_get_mvpred_compound_var(
+ &full_ms_params.mv_cost_params, best_int_mv->as_fullmv, second_pred,
+ mask, mask_stride, ref_idx, &cpi->fn_ptr[bsize], &x->plane[0].src,
+ &ref_yv12);
+ }
if (scaled_ref_frame) {
// Swap back the original buffers for subpel motion search.