/*
* Copyright (c) 2021, Alliance for Open Media. All rights reserved
*
* This source code is subject to the terms of the BSD 3-Clause Clear License
* and the Alliance for Open Media Patent License 1.0. If the BSD 3-Clause Clear
* License was not distributed with this source code in the LICENSE file, you
* can obtain it at aomedia.org/license/software-license/bsd-3-c-c/. If the
* Alliance for Open Media Patent License 1.0 was not distributed with this
* source code in the PATENTS file, you can obtain it at
* aomedia.org/license/patent-license/.
*/
#include <assert.h>
#include <math.h>
#include <stdbool.h>
#include "config/aom_config.h"
#include "config/aom_dsp_rtcd.h"
#include "config/av1_rtcd.h"
#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/blend.h"
#include "aom_mem/aom_mem.h"
#include "aom_ports/aom_timer.h"
#include "aom_ports/mem.h"
#include "aom_ports/system_state.h"
#include "av1/common/av1_common_int.h"
#include "av1/common/cfl.h"
#include "av1/common/common.h"
#include "av1/common/common_data.h"
#include "av1/common/entropy.h"
#include "av1/common/entropymode.h"
#include "av1/common/idct.h"
#include "av1/common/mvref_common.h"
#include "av1/common/obmc.h"
#include "av1/common/pred_common.h"
#include "av1/common/quant_common.h"
#include "av1/common/reconinter.h"
#include "av1/common/reconintra.h"
#include "av1/common/scan.h"
#include "av1/common/seg_common.h"
#include "av1/common/tip.h"
#include "av1/common/txb_common.h"
#include "av1/common/warped_motion.h"
#include "av1/encoder/aq_variance.h"
#include "av1/encoder/av1_quantize.h"
#include "av1/encoder/cost.h"
#include "av1/encoder/compound_type.h"
#include "av1/encoder/encodemb.h"
#include "av1/encoder/encodemv.h"
#include "av1/encoder/encoder.h"
#include "av1/encoder/encodetxb.h"
#include "av1/encoder/hybrid_fwd_txfm.h"
#include "av1/encoder/interp_search.h"
#include "av1/encoder/intra_mode_search.h"
#include "av1/encoder/mcomp.h"
#include "av1/encoder/ml.h"
#include "av1/encoder/mode_prune_model_weights.h"
#include "av1/encoder/model_rd.h"
#include "av1/encoder/motion_search_facade.h"
#include "av1/encoder/palette.h"
#include "av1/encoder/pustats.h"
#include "av1/encoder/random.h"
#include "av1/encoder/ratectrl.h"
#include "av1/encoder/rd.h"
#include "av1/encoder/rdopt.h"
#include "av1/encoder/reconinter_enc.h"
#include "av1/encoder/tokenize.h"
#include "av1/encoder/tpl_model.h"
#include "av1/encoder/tx_search.h"
#if CONFIG_EXT_RECUR_PARTITIONS
#include "av1/encoder/partition_strategy.h"
#endif // CONFIG_EXT_RECUR_PARTITIONS
#define LAST_NEW_MV_INDEX 6
// Mode threshold multiplication factor table for
// prune_inter_modes_if_skippable. The values are kept in Q12 format, and the
// equation used to derive them is
// (2.5 - ((float)x->qindex / MAXQ) * 1.5)
#define MODE_THRESH_QBITS 12
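// Illustrative check of the table endpoints (not used by the encoder):
// qindex = 0    -> (2.5 - 0.0 * 1.5) * (1 << MODE_THRESH_QBITS) = 10240
// qindex = MAXQ -> (2.5 - 1.0 * 1.5) * (1 << MODE_THRESH_QBITS) = 4096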
static const int mode_threshold_mul_factor[QINDEX_RANGE] = {
10240, 10216, 10192, 10168, 10144, 10120, 10095, 10071, 10047, 10023, 9999,
9975, 9951, 9927, 9903, 9879, 9854, 9830, 9806, 9782, 9758, 9734,
9710, 9686, 9662, 9638, 9614, 9589, 9565, 9541, 9517, 9493, 9469,
9445, 9421, 9397, 9373, 9349, 9324, 9300, 9276, 9252, 9228, 9204,
9180, 9156, 9132, 9108, 9083, 9059, 9035, 9011, 8987, 8963, 8939,
8915, 8891, 8867, 8843, 8818, 8794, 8770, 8746, 8722, 8698, 8674,
8650, 8626, 8602, 8578, 8553, 8529, 8505, 8481, 8457, 8433, 8409,
8385, 8361, 8337, 8312, 8288, 8264, 8240, 8216, 8192, 8168, 8144,
8120, 8096, 8072, 8047, 8023, 7999, 7975, 7951, 7927, 7903, 7879,
7855, 7831, 7806, 7782, 7758, 7734, 7710, 7686, 7662, 7638, 7614,
7590, 7566, 7541, 7517, 7493, 7469, 7445, 7421, 7397, 7373, 7349,
7325, 7301, 7276, 7252, 7228, 7204, 7180, 7156, 7132, 7108, 7084,
7060, 7035, 7011, 6987, 6963, 6939, 6915, 6891, 6867, 6843, 6819,
6795, 6770, 6746, 6722, 6698, 6674, 6650, 6626, 6602, 6578, 6554,
6530, 6505, 6481, 6457, 6433, 6409, 6385, 6361, 6337, 6313, 6289,
6264, 6240, 6216, 6192, 6168, 6144, 6120, 6096, 6072, 6048, 6024,
5999, 5975, 5951, 5927, 5903, 5879, 5855, 5831, 5807, 5783, 5758,
5734, 5710, 5686, 5662, 5638, 5614, 5590, 5566, 5542, 5518, 5493,
5469, 5445, 5421, 5397, 5373, 5349, 5325, 5301, 5277, 5253, 5228,
5204, 5180, 5156, 5132, 5108, 5084, 5060, 5036, 5012, 4987, 4963,
4939, 4915, 4891, 4867, 4843, 4819, 4795, 4771, 4747, 4722, 4698,
4674, 4650, 4626, 4602, 4578, 4554, 4530, 4506, 4482, 4457, 4433,
4409, 4385, 4361, 4337, 4313, 4289, 4265, 4241, 4216, 4192, 4168,
4144, 4120, 4096
};
/*!\cond */
typedef struct SingleInterModeState {
int64_t rd;
MV_REFERENCE_FRAME ref_frame;
int valid;
} SingleInterModeState;
typedef struct InterModeSearchState {
int64_t best_rd;
int64_t best_skip_rd[2];
MB_MODE_INFO best_mbmode;
#if CONFIG_C071_SUBBLK_WARPMV
SUBMB_INFO best_submb[MAX_MIB_SIZE * MAX_MIB_SIZE];
#endif // CONFIG_C071_SUBBLK_WARPMV
int best_rate_y;
int best_rate_uv;
int best_mode_skippable;
int best_skip2;
int num_available_refs;
int64_t dist_refs[REF_FRAMES];
int dist_order_refs[REF_FRAMES];
int64_t mode_threshold[MB_MODE_COUNT];
int64_t best_intra_rd;
unsigned int best_pred_sse;
int64_t best_pred_diff[REFERENCE_MODES];
// Save a set of single_newmv for each checked ref_mv.
int_mv single_newmv[NUM_MV_PRECISIONS][MAX_REF_MV_SEARCH][SINGLE_REF_FRAMES];
int single_newmv_rate[NUM_MV_PRECISIONS][MAX_REF_MV_SEARCH]
[SINGLE_REF_FRAMES];
int single_newmv_valid[NUM_MV_PRECISIONS][MAX_REF_MV_SEARCH]
[SINGLE_REF_FRAMES];
int64_t modelled_rd[MB_MODE_COUNT][MAX_REF_MV_SEARCH][SINGLE_REF_FRAMES];
// The rd of simple translation in single inter modes
int64_t simple_rd[MB_MODE_COUNT][MAX_REF_MV_SEARCH][SINGLE_REF_FRAMES];
int64_t best_single_rd[SINGLE_REF_FRAMES];
PREDICTION_MODE best_single_mode[SINGLE_REF_FRAMES];
// Single search results by [directions][modes][reference frames]
int single_state_cnt[2][SINGLE_INTER_MODE_NUM];
int single_state_modelled_cnt[2][SINGLE_INTER_MODE_NUM];
SingleInterModeState single_state[2][SINGLE_INTER_MODE_NUM]
[SINGLE_REF_FRAMES];
SingleInterModeState single_state_modelled[2][SINGLE_INTER_MODE_NUM]
[SINGLE_REF_FRAMES];
MV_REFERENCE_FRAME single_rd_order[2][SINGLE_INTER_MODE_NUM]
[SINGLE_REF_FRAMES];
IntraModeSearchState intra_search_state;
} InterModeSearchState;
/*!\endcond */
void av1_inter_mode_data_init(TileDataEnc *tile_data) {
for (int i = 0; i < BLOCK_SIZES_ALL; ++i) {
InterModeRdModel *md = &tile_data->inter_mode_rd_models[i];
md->ready = 0;
md->num = 0;
md->dist_sum = 0;
md->ld_sum = 0;
md->sse_sum = 0;
md->sse_sse_sum = 0;
md->sse_ld_sum = 0;
}
}
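// Estimate the rate and distortion of an inter block from its prediction SSE
// using the per-block-size linear model fitted in av1_inter_mode_data_fit():
// the SSE-to-rate slope is modelled as est_ld = a * sse + b, and the residue
// cost is then roughly (sse - dist_mean) / est_ld. Returns 1 if the model for
// this block size is ready, 0 otherwise.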
static int get_est_rate_dist(const TileDataEnc *tile_data, BLOCK_SIZE bsize,
int64_t sse, int *est_residue_cost,
int64_t *est_dist) {
aom_clear_system_state();
const InterModeRdModel *md = &tile_data->inter_mode_rd_models[bsize];
if (md->ready) {
if (sse < md->dist_mean) {
*est_residue_cost = 0;
*est_dist = sse;
} else {
*est_dist = (int64_t)round(md->dist_mean);
const double est_ld = md->a * sse + md->b;
// Clamp the estimated rate cost by INT_MAX / 2.
// TODO(angiebird@google.com): find a better solution than clamping.
if (fabs(est_ld) < 1e-2) {
*est_residue_cost = INT_MAX / 2;
} else {
double est_residue_cost_dbl = ((sse - md->dist_mean) / est_ld);
if (est_residue_cost_dbl < 0) {
*est_residue_cost = 0;
} else {
*est_residue_cost =
(int)AOMMIN((int64_t)round(est_residue_cost_dbl), INT_MAX / 2);
}
}
if (*est_residue_cost <= 0) {
*est_residue_cost = 0;
*est_dist = sse;
}
}
return 1;
}
return 0;
}
void av1_inter_mode_data_fit(TileDataEnc *tile_data, int rdmult) {
aom_clear_system_state();
for (int bsize = 0; bsize < BLOCK_SIZES_ALL; ++bsize) {
const int block_idx = inter_mode_data_block_idx(bsize);
InterModeRdModel *md = &tile_data->inter_mode_rd_models[bsize];
if (block_idx == -1) continue;
if ((md->ready == 0 && md->num < 200) || (md->ready == 1 && md->num < 64)) {
continue;
} else {
if (md->ready == 0) {
md->dist_mean = md->dist_sum / md->num;
md->ld_mean = md->ld_sum / md->num;
md->sse_mean = md->sse_sum / md->num;
md->sse_sse_mean = md->sse_sse_sum / md->num;
md->sse_ld_mean = md->sse_ld_sum / md->num;
} else {
const double factor = 3;
md->dist_mean =
(md->dist_mean * factor + (md->dist_sum / md->num)) / (factor + 1);
md->ld_mean =
(md->ld_mean * factor + (md->ld_sum / md->num)) / (factor + 1);
md->sse_mean =
(md->sse_mean * factor + (md->sse_sum / md->num)) / (factor + 1);
md->sse_sse_mean =
(md->sse_sse_mean * factor + (md->sse_sse_sum / md->num)) /
(factor + 1);
md->sse_ld_mean =
(md->sse_ld_mean * factor + (md->sse_ld_sum / md->num)) /
(factor + 1);
}
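// Fit ld ~= a * sse + b by ordinary least squares, using the running means
// as sufficient statistics:
//   a = (E[sse * ld] - E[sse] * E[ld]) / (E[sse^2] - E[sse]^2)
//   b = E[ld] - a * E[sse]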
const double my = md->ld_mean;
const double mx = md->sse_mean;
const double dx = sqrt(md->sse_sse_mean);
const double dxy = md->sse_ld_mean;
md->a = (dxy - mx * my) / (dx * dx - mx * mx);
md->b = my - md->a * mx;
md->ready = 1;
md->num = 0;
md->dist_sum = 0;
md->ld_sum = 0;
md->sse_sum = 0;
md->sse_sse_sum = 0;
md->sse_ld_sum = 0;
}
(void)rdmult;
}
}
static AOM_INLINE void inter_mode_data_push(TileDataEnc *tile_data,
BLOCK_SIZE bsize, int64_t sse,
int64_t dist, int residue_cost) {
if (residue_cost == 0 || sse == dist) return;
const int block_idx = inter_mode_data_block_idx(bsize);
if (block_idx == -1) return;
InterModeRdModel *rd_model = &tile_data->inter_mode_rd_models[bsize];
if (rd_model->num < INTER_MODE_RD_DATA_OVERALL_SIZE) {
aom_clear_system_state();
const double ld = (sse - dist) * 1. / residue_cost;
++rd_model->num;
rd_model->dist_sum += dist;
rd_model->ld_sum += ld;
rd_model->sse_sum += sse;
rd_model->sse_sse_sum += (double)sse * (double)sse;
rd_model->sse_ld_sum += sse * ld;
}
}
static AOM_INLINE void inter_modes_info_push(InterModesInfo *inter_modes_info,
int mode_rate, int64_t sse,
int64_t rd, RD_STATS *rd_cost,
RD_STATS *rd_cost_y,
RD_STATS *rd_cost_uv,
const MB_MODE_INFO *mbmi) {
const int num = inter_modes_info->num;
assert(num < MAX_INTER_MODES);
inter_modes_info->mbmi_arr[num] = *mbmi;
inter_modes_info->mode_rate_arr[num] = mode_rate;
inter_modes_info->sse_arr[num] = sse;
inter_modes_info->est_rd_arr[num] = rd;
inter_modes_info->rd_cost_arr[num] = *rd_cost;
inter_modes_info->rd_cost_y_arr[num] = *rd_cost_y;
inter_modes_info->rd_cost_uv_arr[num] = *rd_cost_uv;
++inter_modes_info->num;
}
static int compare_rd_idx_pair(const void *a, const void *b) {
const RdIdxPair *pair_a = (const RdIdxPair *)a;
const RdIdxPair *pair_b = (const RdIdxPair *)b;
if (pair_a->rd == pair_b->rd) {
// To avoid inconsistency in qsort() ordering when two elements are equal,
// use idx as the tie breaker. Refer to aomedia:2928.
if (pair_a->idx == pair_b->idx)
return 0;
else if (pair_a->idx > pair_b->idx)
return 1;
else
return -1;
} else if (pair_a->rd > pair_b->rd) {
return 1;
} else {
return -1;
}
}
static AOM_INLINE void inter_modes_info_sort(
const InterModesInfo *inter_modes_info, RdIdxPair *rd_idx_pair_arr) {
if (inter_modes_info->num == 0) {
return;
}
for (int i = 0; i < inter_modes_info->num; ++i) {
rd_idx_pair_arr[i].idx = i;
rd_idx_pair_arr[i].rd = inter_modes_info->est_rd_arr[i];
}
qsort(rd_idx_pair_arr, inter_modes_info->num, sizeof(rd_idx_pair_arr[0]),
compare_rd_idx_pair);
}
// Similar to get_horver_correlation, but also takes the first row/column into
// account when computing the horizontal/vertical correlation.
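// For each direction this is the sample Pearson correlation between a pixel x
// and its left (y) or top (z) neighbor over all valid pairs:
//   hcorr = cov(x, y) / sqrt(var(x) * var(y))
// with the first/final rows and columns added or subtracted so that both
// members of every pair lie inside the block; negative values are clamped to
// 0, and flat blocks (zero variance) default to full correlation (1.0).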
void av1_get_horver_correlation_full_c(const int16_t *diff, int stride,
int width, int height, float *hcorr,
float *vcorr) {
// The following notation is used:
// x - current pixel
// y - left neighbor pixel
// z - top neighbor pixel
int64_t x_sum = 0, x2_sum = 0, xy_sum = 0, xz_sum = 0;
int64_t x_firstrow = 0, x_finalrow = 0, x_firstcol = 0, x_finalcol = 0;
int64_t x2_firstrow = 0, x2_finalrow = 0, x2_firstcol = 0, x2_finalcol = 0;
// First, process horizontal correlation on just the first row
x_sum += diff[0];
x2_sum += diff[0] * diff[0];
x_firstrow += diff[0];
x2_firstrow += diff[0] * diff[0];
for (int j = 1; j < width; ++j) {
const int16_t x = diff[j];
const int16_t y = diff[j - 1];
x_sum += x;
x_firstrow += x;
x2_sum += x * x;
x2_firstrow += x * x;
xy_sum += x * y;
}
// Process vertical correlation in the first column
x_firstcol += diff[0];
x2_firstcol += diff[0] * diff[0];
for (int i = 1; i < height; ++i) {
const int16_t x = diff[i * stride];
const int16_t z = diff[(i - 1) * stride];
x_sum += x;
x_firstcol += x;
x2_sum += x * x;
x2_firstcol += x * x;
xz_sum += x * z;
}
// Now process horizontal and vertical correlation through the rest of the
// unit
for (int i = 1; i < height; ++i) {
for (int j = 1; j < width; ++j) {
const int16_t x = diff[i * stride + j];
const int16_t y = diff[i * stride + j - 1];
const int16_t z = diff[(i - 1) * stride + j];
x_sum += x;
x2_sum += x * x;
xy_sum += x * y;
xz_sum += x * z;
}
}
for (int j = 0; j < width; ++j) {
x_finalrow += diff[(height - 1) * stride + j];
x2_finalrow +=
diff[(height - 1) * stride + j] * diff[(height - 1) * stride + j];
}
for (int i = 0; i < height; ++i) {
x_finalcol += diff[i * stride + width - 1];
x2_finalcol += diff[i * stride + width - 1] * diff[i * stride + width - 1];
}
int64_t xhor_sum = x_sum - x_finalcol;
int64_t xver_sum = x_sum - x_finalrow;
int64_t y_sum = x_sum - x_firstcol;
int64_t z_sum = x_sum - x_firstrow;
int64_t x2hor_sum = x2_sum - x2_finalcol;
int64_t x2ver_sum = x2_sum - x2_finalrow;
int64_t y2_sum = x2_sum - x2_firstcol;
int64_t z2_sum = x2_sum - x2_firstrow;
const float num_hor = (float)(height * (width - 1));
const float num_ver = (float)((height - 1) * width);
const float xhor_var_n = x2hor_sum - (xhor_sum * xhor_sum) / num_hor;
const float xver_var_n = x2ver_sum - (xver_sum * xver_sum) / num_ver;
const float y_var_n = y2_sum - (y_sum * y_sum) / num_hor;
const float z_var_n = z2_sum - (z_sum * z_sum) / num_ver;
const float xy_var_n = xy_sum - (xhor_sum * y_sum) / num_hor;
const float xz_var_n = xz_sum - (xver_sum * z_sum) / num_ver;
if (xhor_var_n > 0 && y_var_n > 0) {
*hcorr = xy_var_n / sqrtf(xhor_var_n * y_var_n);
*hcorr = *hcorr < 0 ? 0 : *hcorr;
} else {
*hcorr = 1.0;
}
if (xver_var_n > 0 && z_var_n > 0) {
*vcorr = xz_var_n / sqrtf(xver_var_n * z_var_n);
*vcorr = *vcorr < 0 ? 0 : *vcorr;
} else {
*vcorr = 1.0;
}
}
static int64_t get_sse(const AV1_COMP *cpi, const MACROBLOCK *x,
int64_t *sse_y) {
const AV1_COMMON *cm = &cpi->common;
const int num_planes = av1_num_planes(cm);
const MACROBLOCKD *xd = &x->e_mbd;
const MB_MODE_INFO *mbmi = xd->mi[0];
int64_t total_sse = 0;
for (int plane = 0; plane < num_planes; ++plane) {
if (plane && !xd->is_chroma_ref) break;
const struct macroblock_plane *const p = &x->plane[plane];
const struct macroblockd_plane *const pd = &xd->plane[plane];
#if CONFIG_EXT_RECUR_PARTITIONS
const BLOCK_SIZE bs = get_mb_plane_block_size(
xd, mbmi, plane, pd->subsampling_x, pd->subsampling_y);
#else
const BLOCK_SIZE bs = get_plane_block_size(
mbmi->sb_type[plane > 0], pd->subsampling_x, pd->subsampling_y);
#endif // CONFIG_EXT_RECUR_PARTITIONS
unsigned int sse;
cpi->fn_ptr[bs].vf(p->src.buf, p->src.stride, pd->dst.buf, pd->dst.stride,
&sse);
total_sse += sse;
if (!plane && sse_y) *sse_y = sse;
}
total_sse <<= 4;
return total_sse;
}
int64_t av1_highbd_block_error_c(const tran_low_t *coeff,
const tran_low_t *dqcoeff, intptr_t block_size,
int64_t *ssz, int bd) {
int i;
int64_t error = 0, sqcoeff = 0;
int shift = 2 * (bd - 8);
int rounding = shift > 0 ? 1 << (shift - 1) : 0;
for (i = 0; i < block_size; i++) {
const int64_t diff = coeff[i] - dqcoeff[i];
error += diff * diff;
sqcoeff += (int64_t)coeff[i] * (int64_t)coeff[i];
}
assert(error >= 0 && sqcoeff >= 0);
error = (error + rounding) >> shift;
sqcoeff = (sqcoeff + rounding) >> shift;
*ssz = sqcoeff;
return error;
}
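// Example of the bit-depth normalization above (illustrative): for bd = 10,
// shift = 4 and rounding = 8, so a raw SSE of 1000 in the 10-bit domain maps
// to (1000 + 8) >> 4 = 63 on the common 8-bit error scale.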
static int cost_mv_ref(const ModeCosts *const mode_costs, PREDICTION_MODE mode,
#if CONFIG_OPTFLOW_REFINEMENT || CONFIG_EXTENDED_WARP_PREDICTION
const AV1_COMMON *cm, const MB_MODE_INFO *const mbmi,
#endif // CONFIG_OPTFLOW_REFINEMENT || CONFIG_EXTENDED_WARP_PREDICTION
#if CONFIG_EXTENDED_WARP_PREDICTION
const MACROBLOCKD *xd,
#endif // CONFIG_EXTENDED_WARP_PREDICTION
int16_t mode_context) {
if (is_inter_compound_mode(mode)) {
#if CONFIG_OPTFLOW_REFINEMENT
int use_optical_flow_cost = 0;
const int comp_mode_idx = opfl_get_comp_idx(mode);
if (cm->features.opfl_refine_type == REFINE_SWITCHABLE &&
opfl_allowed_for_cur_refs(cm, mbmi)) {
const int use_optical_flow = mode >= NEAR_NEARMV_OPTFLOW;
#if CONFIG_AFFINE_REFINEMENT
const int allow_translational = is_translational_refinement_allowed(
cm, comp_idx_to_opfl_mode[comp_mode_idx]);
const int allow_affine = is_affine_refinement_allowed(
cm, xd, comp_idx_to_opfl_mode[comp_mode_idx]);
if (use_optical_flow) {
assert(IMPLIES(allow_translational,
mbmi->comp_refine_type > COMP_REFINE_NONE));
assert(IMPLIES(allow_affine,
mbmi->comp_refine_type >= COMP_AFFINE_REFINE_START));
}
if (allow_affine || allow_translational)
#endif // CONFIG_AFFINE_REFINEMENT
use_optical_flow_cost +=
mode_costs->use_optflow_cost[mode_context][use_optical_flow];
}
return use_optical_flow_cost +
mode_costs->inter_compound_mode_cost[mode_context][comp_mode_idx];
#else
return mode_costs
->inter_compound_mode_cost[mode_context][INTER_COMPOUND_OFFSET(mode)];
#endif // CONFIG_OPTFLOW_REFINEMENT
}
assert(is_inter_mode(mode));
const int16_t ismode_ctx = inter_single_mode_ctx(mode_context);
#if CONFIG_EXTENDED_WARP_PREDICTION
int warp_mode_cost = 0;
if (is_warpmv_mode_allowed(cm, mbmi, mbmi->sb_type[PLANE_TYPE_Y])) {
const int16_t iswarpmvmode_ctx = inter_warpmv_mode_ctx(cm, xd, mbmi);
warp_mode_cost =
mode_costs->inter_warp_mode_cost[iswarpmvmode_ctx][mode == WARPMV];
if (mode == WARPMV) return warp_mode_cost;
}
#endif // CONFIG_EXTENDED_WARP_PREDICTION
return (mode_costs->inter_single_mode_cost[ismode_ctx]
[mode - SINGLE_INTER_MODE_START]
#if CONFIG_EXTENDED_WARP_PREDICTION
+ warp_mode_cost
#endif // CONFIG_EXTENDED_WARP_PREDICTION
);
}
static int cost_mv_precision(const ModeCosts *const mode_costs,
MvSubpelPrecision max_mv_precision,
MvSubpelPrecision pb_mv_precision,
const int down_ctx,
MvSubpelPrecision most_probable_pb_mv_precision,
const int mpp_flag_context,
const MB_MODE_INFO *mbmi) {
int flex_mv_cost = 0;
const int mpp_flag = (pb_mv_precision == most_probable_pb_mv_precision);
flex_mv_cost +=
(mode_costs->pb_block_mv_mpp_flag_costs[mpp_flag_context][mpp_flag]);
if (!mpp_flag) {
int down = av1_get_pb_mv_precision_index(mbmi);
assert(down >= 0);
flex_mv_cost +=
(mode_costs->pb_block_mv_precision_costs[down_ctx]
[max_mv_precision -
MV_PRECISION_HALF_PEL][down]);
}
return flex_mv_cost;
}
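// Decompose a compound mode into the single-reference mode for the given ref
// index; e.g., NEW_NEARMV maps to NEWMV for ref_idx == 0 and to NEARMV for
// ref_idx == 1.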
static INLINE PREDICTION_MODE get_single_mode(PREDICTION_MODE this_mode,
int ref_idx) {
return ref_idx ? compound_ref1_mode(this_mode)
: compound_ref0_mode(this_mode);
}
static AOM_INLINE void estimate_ref_frame_costs(
const AV1_COMMON *cm, const MACROBLOCKD *xd, const ModeCosts *mode_costs,
int segment_id, unsigned int *ref_costs_single,
unsigned int (*ref_costs_comp)[REF_FRAMES]) {
(void)segment_id;
int seg_ref_active = 0;
if (seg_ref_active) {
memset(ref_costs_single, 0, SINGLE_REF_FRAMES * sizeof(*ref_costs_single));
int ref_frame;
for (ref_frame = 0; ref_frame < REF_FRAMES; ++ref_frame)
memset(ref_costs_comp[ref_frame], 0,
REF_FRAMES * sizeof((*ref_costs_comp)[0]));
} else {
int intra_inter_ctx = av1_get_intra_inter_context(xd);
#if CONFIG_CONTEXT_DERIVATION && !CONFIG_SKIP_TXFM_OPT
const int skip_txfm = xd->mi[0]->skip_txfm[xd->tree_type == CHROMA_PART];
ref_costs_single[INTRA_FRAME_INDEX] =
mode_costs->intra_inter_cost[skip_txfm][intra_inter_ctx][0];
unsigned int base_cost =
mode_costs->intra_inter_cost[skip_txfm][intra_inter_ctx][1];
#else
ref_costs_single[INTRA_FRAME_INDEX] =
mode_costs->intra_inter_cost[intra_inter_ctx][0];
unsigned int base_cost = mode_costs->intra_inter_cost[intra_inter_ctx][1];
#endif // CONFIG_CONTEXT_DERIVATION && !CONFIG_SKIP_TXFM_OPT
if (cm->features.tip_frame_mode) {
const int tip_ctx = get_tip_ctx(xd);
ref_costs_single[TIP_FRAME_INDEX] =
base_cost + mode_costs->tip_cost[tip_ctx][1];
base_cost += mode_costs->tip_cost[tip_ctx][0];
}
for (int i = 0; i < INTER_REFS_PER_FRAME; ++i)
ref_costs_single[i] = base_cost;
const int n_refs = cm->ref_frames_info.num_total_refs;
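// Each single reference i is coded as a truncated-unary style sequence of
// binary decisions "is the reference equal to j?" for increasing j; the
// sequence stops at the first 1 or implicitly at j = n_refs - 2, so the last
// reference needs no terminating bit.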
for (int i = 0; i < n_refs; i++) {
for (int j = 0; j <= AOMMIN(i, n_refs - 2); j++) {
aom_cdf_prob ctx = av1_get_ref_pred_context(xd, j, n_refs);
const int bit = i == j;
ref_costs_single[i] += mode_costs->single_ref_cost[ctx][j][bit];
}
}
for (int i = n_refs; i < INTER_REFS_PER_FRAME; i++)
ref_costs_single[i] = INT_MAX;
if (cm->current_frame.reference_mode != SINGLE_REFERENCE) {
for (int i = 0; i < REF_FRAMES; i++) {
for (int j = 0; j < REF_FRAMES; j++) ref_costs_comp[i][j] = INT_MAX;
}
#if CONFIG_ALLOW_SAME_REF_COMPOUND
int use_same_ref_comp = cm->ref_frames_info.num_same_ref_compound > 0;
for (int i = 0; i < n_refs + use_same_ref_comp - 1; i++) {
if (i >= RANKED_REF0_TO_PRUNE) break;
if (i == n_refs - 1 && i >= cm->ref_frames_info.num_same_ref_compound)
break;
int prev_cost = base_cost;
for (int j = 0; j < n_refs; j++) {
#if CONFIG_IMPROVED_SAME_REF_COMPOUND
int implicit_ref0_bit =
j >= RANKED_REF0_TO_PRUNE - 1 ||
(i == j && i < cm->ref_frames_info.num_same_ref_compound &&
i + 1 >= cm->ref_frames_info.num_same_ref_compound &&
i >= n_refs - 2);
int implicit_ref0_ref1_bits =
j >= n_refs - 2 && j >= cm->ref_frames_info.num_same_ref_compound;
#endif // CONFIG_IMPROVED_SAME_REF_COMPOUND
if (j <= i) {
// Keep track of the cost to encode the first reference
aom_cdf_prob ctx = av1_get_ref_pred_context(xd, j, n_refs);
const int bit = i == j;
#if CONFIG_IMPROVED_SAME_REF_COMPOUND
if (!implicit_ref0_bit && !implicit_ref0_ref1_bits)
prev_cost += mode_costs->comp_ref0_cost[ctx][j][bit];
#else
if (j < n_refs - 1 && j < RANKED_REF0_TO_PRUNE - 1)
prev_cost += mode_costs->comp_ref0_cost[ctx][j][bit];
#endif  // CONFIG_IMPROVED_SAME_REF_COMPOUND
}
if (j > i ||
(j == i && i < cm->ref_frames_info.num_same_ref_compound)) {
// Assign the cost of signaling both references
ref_costs_comp[i][j] = prev_cost;
if (j < n_refs - 1) {
aom_cdf_prob ctx = av1_get_ref_pred_context(xd, j, n_refs);
const int bit_type =
av1_get_compound_ref_bit_type(&cm->ref_frames_info, i, j);
ref_costs_comp[i][j] +=
mode_costs->comp_ref1_cost[ctx][bit_type][j][1];
// Maintain the cost of sending a 0 bit for the 2nd reference to
// be used in the next iteration.
prev_cost += mode_costs->comp_ref1_cost[ctx][bit_type][j][0];
}
}
}
}
#else
for (int i = 0; i < n_refs - 1; i++) {
if (i >= RANKED_REF0_TO_PRUNE) break;
int prev_cost = base_cost;
for (int j = 0; j < n_refs; j++) {
if (j <= i) {
if (n_refs == 2) continue; // No bits need to be sent in this case
// Keep track of the cost to encode the first reference
aom_cdf_prob ctx = av1_get_ref_pred_context(xd, j, n_refs);
const int bit = i == j;
if (j < n_refs - 2 && j < RANKED_REF0_TO_PRUNE - 1)
prev_cost += mode_costs->comp_ref0_cost[ctx][j][bit];
} else {
// Assign the cost of signaling both references
ref_costs_comp[i][j] = prev_cost;
if (j < n_refs - 1) {
aom_cdf_prob ctx = av1_get_ref_pred_context(xd, j, n_refs);
const int bit_type =
av1_get_compound_ref_bit_type(&cm->ref_frames_info, i, j);
ref_costs_comp[i][j] +=
mode_costs->comp_ref1_cost[ctx][bit_type][j - 1][1];
// Maintain the cost of sending a 0 bit for the 2nd reference to
// be used in the next iteration.
prev_cost += mode_costs->comp_ref1_cost[ctx][bit_type][j - 1][0];
}
}
}
}
#endif // CONFIG_ALLOW_SAME_REF_COMPOUND
#ifndef NDEBUG
for (int i = 0; i < n_refs - 1; i++) {
for (int j = i + 1; j < n_refs; j++) {
if (i < RANKED_REF0_TO_PRUNE) assert(ref_costs_comp[i][j] != INT_MAX);
}
}
#endif // NDEBUG
} else {
for (int ref0 = 0; ref0 < REF_FRAMES; ++ref0) {
for (int ref1 = ref0 + 1; ref1 < REF_FRAMES; ++ref1) {
ref_costs_comp[ref0][ref1] = 512;
ref_costs_comp[ref1][ref0] = 512;
}
}
}
}
}
#if CONFIG_C071_SUBBLK_WARPMV
void store_submi(const MACROBLOCKD *const xd, const AV1_COMMON *cm,
SUBMB_INFO *dst_submi, BLOCK_SIZE bsize) {
const int bw = mi_size_wide[bsize];
const int bh = mi_size_high[bsize];
const int mi_row = xd->mi_row;
const int mi_col = xd->mi_col;
const int x_inside_boundary = AOMMIN(bw, cm->mi_params.mi_cols - mi_col);
const int y_inside_boundary = AOMMIN(bh, cm->mi_params.mi_rows - mi_row);
const int dst_stride = bw;
const int src_stride = cm->mi_params.mi_stride;
for (int y = 0; y < y_inside_boundary; y++) {
for (int x = 0; x < x_inside_boundary; x++) {
dst_submi[y * dst_stride + x] = *xd->submi[y * src_stride + x];
}
}
}
void update_submi(MACROBLOCKD *const xd, const AV1_COMMON *cm,
const SUBMB_INFO *src_submi, BLOCK_SIZE bsize) {
const int bw = mi_size_wide[bsize];
const int bh = mi_size_high[bsize];
const int mi_row = xd->mi_row;
const int mi_col = xd->mi_col;
const int x_inside_boundary = AOMMIN(bw, cm->mi_params.mi_cols - mi_col);
const int y_inside_boundary = AOMMIN(bh, cm->mi_params.mi_rows - mi_row);
const int src_stride = bw;
const int dst_stride = cm->mi_params.mi_stride;
for (int y = 0; y < y_inside_boundary; y++) {
for (int x = 0; x < x_inside_boundary; x++) {
*xd->submi[y * dst_stride + x] = src_submi[y * src_stride + x];
}
}
}
#endif // CONFIG_C071_SUBBLK_WARPMV
static AOM_INLINE void store_coding_context(
MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
int64_t comp_pred_diff[REFERENCE_MODES], int skippable
#if CONFIG_C071_SUBBLK_WARPMV
,
const AV1_COMMON *cm
#endif // CONFIG_C071_SUBBLK_WARPMV
) {
MACROBLOCKD *const xd = &x->e_mbd;
// Take a snapshot of the coding context so it can be
// restored if we decide to encode this way
ctx->rd_stats.skip_txfm = x->txfm_search_info.skip_txfm;
ctx->skippable = skippable;
ctx->mic = *xd->mi[0];
#if CONFIG_C071_SUBBLK_WARPMV
if (is_warp_mode(xd->mi[0]->motion_mode)) {
store_submi(xd, cm, ctx->submic, xd->mi[0]->sb_type[PLANE_TYPE_Y]);
}
#endif // CONFIG_C071_SUBBLK_WARPMV
if (xd->tree_type != CHROMA_PART)
av1_copy_mbmi_ext_to_mbmi_ext_frame(
&ctx->mbmi_ext_best, x->mbmi_ext,
#if CONFIG_SEP_COMP_DRL
xd->mi[0],
#endif // CONFIG_SEP_COMP_DRL
#if CONFIG_SKIP_MODE_ENHANCEMENT
xd->mi[0]->skip_mode,
#endif // CONFIG_SKIP_MODE_ENHANCEMENT
av1_ref_frame_type(xd->mi[0]->ref_frame));
ctx->single_pred_diff = (int)comp_pred_diff[SINGLE_REFERENCE];
ctx->comp_pred_diff = (int)comp_pred_diff[COMPOUND_REFERENCE];
ctx->hybrid_pred_diff = (int)comp_pred_diff[REFERENCE_MODE_SELECT];
}
static AOM_INLINE void setup_buffer_ref_mvs_inter(
const AV1_COMP *const cpi, MACROBLOCK *x, MV_REFERENCE_FRAME ref_frame,
BLOCK_SIZE block_size,
struct buf_2d yv12_mb[SINGLE_REF_FRAMES][MAX_MB_PLANE]) {
const AV1_COMMON *cm = &cpi->common;
const int num_planes = av1_num_planes(cm);
const YV12_BUFFER_CONFIG *scaled_ref_frame =
av1_get_scaled_ref_frame(cpi, ref_frame);
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = xd->mi[0];
MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
const struct scale_factors *const sf =
get_ref_scale_factors_const(cm, ref_frame);
const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_yv12_buf(cm, ref_frame);
assert(yv12 != NULL);
const int ref_frame_idx = COMPACT_INDEX0_NRS(ref_frame);
if (scaled_ref_frame) {
// Set up the pred block based on the scaled reference, because av1_mv_pred()
// doesn't support scaling.
av1_setup_pred_block(xd, yv12_mb[ref_frame_idx], scaled_ref_frame, NULL,
NULL, num_planes);
} else {
av1_setup_pred_block(xd, yv12_mb[ref_frame_idx], yv12, sf, sf, num_planes);
}
#if CONFIG_SKIP_MODE_ENHANCEMENT
if (mbmi->skip_mode) return;
#endif // CONFIG_SKIP_MODE_ENHANCEMENT
// Gets an initial list of candidate vectors from neighbours and orders them
av1_find_mv_refs(
cm, xd, mbmi, ref_frame, mbmi_ext->ref_mv_count, xd->ref_mv_stack,
xd->weight, NULL, mbmi_ext->global_mvs
#if !CONFIG_C076_INTER_MOD_CTX
,
mbmi_ext->mode_context
#endif // !CONFIG_C076_INTER_MOD_CTX
#if CONFIG_EXTENDED_WARP_PREDICTION
,
xd->warp_param_stack,
ref_frame < INTER_REFS_PER_FRAME ? MAX_WARP_REF_CANDIDATES : 0,
xd->valid_num_warp_candidates
#endif // CONFIG_EXTENDED_WARP_PREDICTION
);
#if CONFIG_C076_INTER_MOD_CTX
av1_find_mode_ctx(cm, xd, mbmi_ext->mode_context, ref_frame);
#endif // CONFIG_C076_INTER_MOD_CTX
// TODO(Ravi): Populate mbmi_ext->ref_mv_stack[ref_frame][4] and
// mbmi_ext->weight[ref_frame][4] inside av1_find_mv_refs.
av1_copy_usable_ref_mv_stack_and_weight(xd, mbmi_ext, ref_frame);
// Encoder-side-only refinement: test the top few candidates in full and
// choose the best as the center point for subsequent searches. The current
// implementation doesn't support scaling.
av1_mv_pred(cpi, x, yv12_mb[ref_frame_idx][0].buf,
yv12_mb[ref_frame_idx][0].stride, ref_frame, block_size);
// Go back to unscaled reference.
if (scaled_ref_frame) {
// We had temporarily set up the pred block based on the scaled reference
// above. Go back to the unscaled reference now, for subsequent use.
av1_setup_pred_block(xd, yv12_mb[ref_frame_idx], yv12, sf, sf, num_planes);
}
}
#define LEFT_TOP_MARGIN ((AOM_BORDER_IN_PIXELS - AOM_INTERP_EXTEND) << 3)
#define RIGHT_BOTTOM_MARGIN ((AOM_BORDER_IN_PIXELS - AOM_INTERP_EXTEND) << 3)
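// The margins are expressed in 1/8-pel units (hence the << 3), matching the
// precision in which motion vectors are stored.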
// TODO(jingning): this mv clamping function should be block size dependent.
static INLINE void clamp_mv2(MV *mv, const MACROBLOCKD *xd) {
const SubpelMvLimits mv_limits = { xd->mb_to_left_edge - LEFT_TOP_MARGIN,
xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN,
xd->mb_to_top_edge - LEFT_TOP_MARGIN,
xd->mb_to_bottom_edge +
RIGHT_BOTTOM_MARGIN };
clamp_mv(mv, &mv_limits);
}
/* If the current mode yields the same MV as a previously evaluated mode but
 * at a higher mode cost, skip it and reuse that mode's modelled RD. */
static int skip_repeated_mv(const AV1_COMMON *const cm,
const MACROBLOCK *const x,
PREDICTION_MODE this_mode,
const MV_REFERENCE_FRAME ref_frames[2],
InterModeSearchState *search_state) {
if (is_tip_ref_frame(ref_frames[0])) return 0;
const int is_comp_pred = is_inter_ref_frame(ref_frames[1]);
if (is_comp_pred) {
return 0;
}
if (!(this_mode == GLOBALMV || this_mode == NEARMV)) {
return 0;
}
const uint8_t ref_frame_type = av1_ref_frame_type(ref_frames);
const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
const int ref_mv_count = mbmi_ext->ref_mv_count[ref_frame_type];
if (ref_mv_count > 1) {
return 0;
}
PREDICTION_MODE compare_mode = MB_MODE_COUNT;
if (this_mode == NEARMV && ref_mv_count == 1 &&
cm->global_motion[ref_frames[0]].wmtype <= TRANSLATION) {
compare_mode = GLOBALMV;
}
if (this_mode == GLOBALMV && ref_mv_count == 0 &&
cm->global_motion[ref_frames[0]].wmtype <= TRANSLATION) {
compare_mode = NEARMV;
}
if (this_mode == GLOBALMV && ref_mv_count == 1) {
compare_mode = NEARMV;
}
if (compare_mode == MB_MODE_COUNT) {
return 0;
}
const MV_REFERENCE_FRAME ref_frame0 = COMPACT_INDEX0_NRS(ref_frames[0]);
if (search_state->modelled_rd[compare_mode][0][ref_frame0] == INT64_MAX) {
return 0;
}
const int16_t mode_ctx =
av1_mode_context_analyzer(mbmi_ext->mode_context, ref_frames);
#if CONFIG_OPTFLOW_REFINEMENT || CONFIG_EXTENDED_WARP_PREDICTION
const MB_MODE_INFO *const mbmi = x->e_mbd.mi[0];
const int compare_cost = cost_mv_ref(&x->mode_costs, compare_mode, cm, mbmi,
#if CONFIG_EXTENDED_WARP_PREDICTION
&x->e_mbd,
#endif // CONFIG_EXTENDED_WARP_PREDICTION
mode_ctx);
const int this_cost = cost_mv_ref(&x->mode_costs, this_mode, cm, mbmi,
#if CONFIG_EXTENDED_WARP_PREDICTION
&x->e_mbd,
#endif // CONFIG_EXTENDED_WARP_PREDICTION
mode_ctx);
#else
const int compare_cost = cost_mv_ref(&x->mode_costs, compare_mode, mode_ctx);
const int this_cost = cost_mv_ref(&x->mode_costs, this_mode, mode_ctx);
#endif // CONFIG_OPTFLOW_REFINEMENT || CONFIG_EXTENDED_WARP_PREDICTION
// Only skip if this mode's cost is larger than the compare mode's cost
if (this_cost > compare_cost) {
search_state->modelled_rd[this_mode][0][ref_frame0] =
search_state->modelled_rd[compare_mode][0][ref_frame0];
return 1;
}
return 0;
}
static INLINE int clamp_and_check_mv(int_mv *out_mv, int_mv in_mv,
const AV1_COMMON *cm,
const MACROBLOCK *x) {
#if CONFIG_C071_SUBBLK_WARPMV
(void)cm;
#endif // CONFIG_C071_SUBBLK_WARPMV
const MACROBLOCKD *const xd = &x->e_mbd;
*out_mv = in_mv;
clamp_mv2(&out_mv->as_mv, xd);
return av1_is_fullmv_in_range(&x->mv_limits,
get_fullmv_from_mv(&out_mv->as_mv),
cm->features.fr_mv_precision);
}
// To use a single newmv directly for compound modes, the MV needs to be
// clamped to the valid MV range. Without this, the encoder can generate
// out-of-range MVs, as observed in 8K encoding.
static INLINE void clamp_mv_in_range(MACROBLOCK *const x, int_mv *mv,
int ref_idx,
MvSubpelPrecision pb_mv_precision) {
const int_mv ref_mv = av1_get_ref_mv(x, ref_idx);
SubpelMvLimits mv_limits;
av1_set_subpel_mv_search_range(&mv_limits, &x->mv_limits, &ref_mv.as_mv,
pb_mv_precision);
clamp_mv(&mv->as_mv, &mv_limits);
}
static int64_t handle_newmv(const AV1_COMP *const cpi, MACROBLOCK *const x,
const BLOCK_SIZE bsize, int_mv *cur_mv,
int *const rate_mv, HandleInterModeArgs *const args,
inter_mode_info *mode_info) {
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = xd->mi[0];
const AV1_COMMON *const cm = &cpi->common;
const int is_comp_pred = has_second_ref(mbmi);
const PREDICTION_MODE this_mode = mbmi->mode;
const MV_REFERENCE_FRAME refs[2] = { COMPACT_INDEX0_NRS(mbmi->ref_frame[0]),
COMPACT_INDEX1_NRS(mbmi->ref_frame[1]) };
#if !CONFIG_SEP_COMP_DRL
const int ref_mv_idx = mbmi->ref_mv_idx;
#endif // !CONFIG_SEP_COMP_DRL
const MvSubpelPrecision pb_mv_precision = mbmi->pb_mv_precision;
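// For compound modes, first try to reuse a cached single-reference NEWMV
// result: scan the results saved at the current block MV precision and above,
// take the first valid one, then lower it to pb_mv_precision and clamp it
// before use.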
if (is_comp_pred) {
int valid_mv0_found = 0;
int valid_precision_mv0 = NUM_MV_PRECISIONS;
for (int prev_mv_precision = pb_mv_precision;
prev_mv_precision <= mbmi->max_mv_precision; prev_mv_precision++) {
#if CONFIG_SEP_COMP_DRL
if (args->single_newmv_valid[prev_mv_precision][get_ref_mv_idx(mbmi, 0)]
[refs[0]]) {
#else
if (args->single_newmv_valid[prev_mv_precision][ref_mv_idx][refs[0]]) {
#endif // CONFIG_SEP_COMP_DRL
valid_mv0_found = 1;
valid_precision_mv0 = prev_mv_precision;
break;
}
}
int valid_mv1_found = 0;
int valid_precision_mv1 = NUM_MV_PRECISIONS;
for (int prev_mv_precision = pb_mv_precision;
prev_mv_precision <= mbmi->max_mv_precision; prev_mv_precision++) {
#if CONFIG_SEP_COMP_DRL
if (args->single_newmv_valid[prev_mv_precision][get_ref_mv_idx(mbmi, 1)]
[refs[1]]) {
#else
if (args->single_newmv_valid[prev_mv_precision][ref_mv_idx][refs[1]]) {
#endif // CONFIG_SEP_COMP_DRL
valid_mv1_found = 1;
valid_precision_mv1 = prev_mv_precision;
break;
}
}
const int valid_mv0 = valid_mv0_found;
const int valid_mv1 = valid_mv1_found;
#if CONFIG_OPTFLOW_REFINEMENT
if (this_mode == NEW_NEWMV || this_mode == NEW_NEWMV_OPTFLOW) {
#if CONFIG_SKIP_ME_FOR_OPFL_MODES
if (this_mode == NEW_NEWMV_OPTFLOW &&
args->comp_newmv_valid[av1_ref_frame_type(mbmi->ref_frame)]
#if CONFIG_SEP_COMP_DRL
[av1_ref_mv_idx_type(mbmi, mbmi->ref_mv_idx)]
#else
[mbmi->ref_mv_idx]
#endif  // CONFIG_SEP_COMP_DRL
[pb_mv_precision]) {
cur_mv[0].as_int =
args->comp_newmv[av1_ref_frame_type(mbmi->ref_frame)]
#if CONFIG_SEP_COMP_DRL
[av1_ref_mv_idx_type(mbmi, mbmi->ref_mv_idx)]
#else
[mbmi->ref_mv_idx]
#endif  // CONFIG_SEP_COMP_DRL
[pb_mv_precision][0]
.as_int;
cur_mv[1].as_int =
args->comp_newmv[av1_ref_frame_type(mbmi->ref_frame)]
#if CONFIG_SEP_COMP_DRL
[av1_ref_mv_idx_type(mbmi, mbmi->ref_mv_idx)]
#else
[mbmi->ref_mv_idx]
#endif
[pb_mv_precision][1]
.as_int;
*rate_mv = 0;
for (int i = 0; i < 2; ++i) {
const int_mv ref_mv = av1_get_ref_mv(x, i);
*rate_mv +=
av1_mv_bit_cost(&cur_mv[i].as_mv, &ref_mv.as_mv, pb_mv_precision,
&x->mv_costs, MV_COST_WEIGHT, 0);
}
} else {
#endif // CONFIG_SKIP_ME_FOR_OPFL_MODES
#else
if (this_mode == NEW_NEWMV) {
#endif // CONFIG_OPTFLOW_REFINEMENT
if (valid_mv0) {
cur_mv[0].as_int =
#if CONFIG_SEP_COMP_DRL
args->single_newmv[valid_precision_mv0][get_ref_mv_idx(mbmi, 0)]
[refs[0]]
.as_int;
#else
args->single_newmv[valid_precision_mv0][ref_mv_idx][refs[0]].as_int;
#endif // CONFIG_SEP_COMP_DRL
lower_mv_precision(&cur_mv[0].as_mv, pb_mv_precision);
clamp_mv_in_range(x, &cur_mv[0], 0, pb_mv_precision);
}
if (valid_mv1) {
cur_mv[1].as_int =
#if CONFIG_SEP_COMP_DRL
args->single_newmv[valid_precision_mv1][get_ref_mv_idx(mbmi, 1)]
[refs[1]]
.as_int;
#else
args->single_newmv[valid_precision_mv1][ref_mv_idx][refs[1]].as_int;
#endif // CONFIG_SEP_COMP_DRL
lower_mv_precision(&cur_mv[1].as_mv, pb_mv_precision);
clamp_mv_in_range(x, &cur_mv[1], 1, pb_mv_precision);
}
// aomenc1
if (cpi->sf.inter_sf.comp_inter_joint_search_thresh <= bsize ||
!valid_mv0 || !valid_mv1) {
av1_joint_motion_search(cpi, x, bsize, cur_mv, NULL, 0, rate_mv);
} else {
*rate_mv = 0;
for (int i = 0; i < 2; ++i) {
const int_mv ref_mv = av1_get_ref_mv(x, i);
#if CONFIG_C071_SUBBLK_WARPMV
update_mv_precision(ref_mv.as_mv, pb_mv_precision,
&cur_mv[i].as_mv);
#endif // CONFIG_C071_SUBBLK_WARPMV
*rate_mv += av1_mv_bit_cost(&cur_mv[i].as_mv, &ref_mv.as_mv,
pb_mv_precision, &x->mv_costs,
MV_COST_WEIGHT, 0);
}
}
#if CONFIG_SKIP_ME_FOR_OPFL_MODES
if (this_mode == NEW_NEWMV) {
args->comp_newmv_valid[av1_ref_frame_type(mbmi->ref_frame)]
#if CONFIG_SEP_COMP_DRL
[av1_ref_mv_idx_type(mbmi, mbmi->ref_mv_idx)]
#else
[mbmi->ref_mv_idx]
#endif  // CONFIG_SEP_COMP_DRL
[pb_mv_precision] = 1;
args->comp_newmv[av1_ref_frame_type(mbmi->ref_frame)]
#if CONFIG_SEP_COMP_DRL
[av1_ref_mv_idx_type(mbmi, mbmi->ref_mv_idx)]
#else
[mbmi->ref_mv_idx]
#endif  // CONFIG_SEP_COMP_DRL
[pb_mv_precision][0]
.as_int = cur_mv[0].as_int;
args->comp_newmv[av1_ref_frame_type(mbmi->ref_frame)]
#if CONFIG_SEP_COMP_DRL
[av1_ref_mv_idx_type(mbmi, mbmi->ref_mv_idx)]
#else
[mbmi->ref_mv_idx]
#endif  // CONFIG_SEP_COMP_DRL
[pb_mv_precision][1]
.as_int = cur_mv[1].as_int;
}
}
#endif // CONFIG_SKIP_ME_FOR_OPFL_MODES
#if CONFIG_OPTFLOW_REFINEMENT
} else if (this_mode == NEAR_NEWMV || this_mode == NEAR_NEWMV_OPTFLOW) {
#else
} else if (this_mode == NEAR_NEWMV) {
#endif // CONFIG_OPTFLOW_REFINEMENT
if (valid_mv1) {
cur_mv[1].as_int =
#if CONFIG_SEP_COMP_DRL
args->single_newmv[valid_precision_mv1][get_ref_mv_idx(mbmi, 1)]
[refs[1]]
.as_int;
#else
args->single_newmv[valid_precision_mv1][ref_mv_idx][refs[1]].as_int;
#endif // CONFIG_SEP_COMP_DRL
lower_mv_precision(&cur_mv[1].as_mv, pb_mv_precision);
clamp_mv_in_range(x, &cur_mv[1], 1, pb_mv_precision);
}
if (cm->seq_params.enable_adaptive_mvd) {
assert(mbmi->pb_mv_precision == mbmi->max_mv_precision);
av1_compound_single_motion_search_interinter(cpi, x, bsize, cur_mv,
NULL, 0, rate_mv, 1);
#if CONFIG_VQ_MVD_CODING
if (cur_mv->as_int == INVALID_MV) return INT64_MAX;
#endif  // CONFIG_VQ_MVD_CODING
} else {
// aomenc2
if (cpi->sf.inter_sf.comp_inter_joint_search_thresh <= bsize ||
!valid_mv1) {
av1_compound_single_motion_search_interinter(cpi, x, bsize, cur_mv,
NULL, 0, rate_mv, 1);
} else {
const int_mv ref_mv = av1_get_ref_mv(x, 1);
#if CONFIG_C071_SUBBLK_WARPMV
update_mv_precision(ref_mv.as_mv, pb_mv_precision,
&cur_mv[1].as_mv);
#endif // CONFIG_C071_SUBBLK_WARPMV
*rate_mv =
av1_mv_bit_cost(&cur_mv[1].as_mv, &ref_mv.as_mv, pb_mv_precision,
&x->mv_costs, MV_COST_WEIGHT, 0);
}
}
} else if (is_joint_mvd_coding_mode(this_mode)) {
if (!cm->seq_params.enable_joint_mvd) return INT64_MAX;
const int same_side = is_ref_frame_same_side(cm, mbmi);
// Skip JOINT_NEWMV mode when the two reference frames are on the same side.
if (same_side) return INT64_MAX;
const int first_ref_dist =
cm->ref_frame_relative_dist[mbmi->ref_frame[0]];
const int sec_ref_dist = cm->ref_frame_relative_dist[mbmi->ref_frame[1]];
if (first_ref_dist != sec_ref_dist) return INT64_MAX;
const int jmvd_base_ref_list = get_joint_mvd_base_ref_list(cm, mbmi);
const int valid_mv_base = (!jmvd_base_ref_list && valid_mv0) ||
(jmvd_base_ref_list && valid_mv1);
if (valid_mv_base && !is_joint_amvd_coding_mode(mbmi->mode)) {
cur_mv[jmvd_base_ref_list].as_int =
args->single_newmv[jmvd_base_ref_list == 0 ? valid_precision_mv0
: valid_precision_mv1]
#if CONFIG_SEP_COMP_DRL
[get_ref_mv_idx(mbmi, 1)]
#else
[ref_mv_idx]
#endif // CONFIG_SEP_COMP_DRL
[refs[jmvd_base_ref_list]]
.as_int;
lower_mv_precision(&cur_mv[jmvd_base_ref_list].as_mv, pb_mv_precision);
clamp_mv_in_range(x, &cur_mv[jmvd_base_ref_list], jmvd_base_ref_list,
pb_mv_precision);
}
av1_compound_single_motion_search_interinter(
cpi, x, bsize, cur_mv, NULL, 0, rate_mv, jmvd_base_ref_list);
#if CONFIG_VQ_MVD_CODING
if (cur_mv->as_int == INVALID_MV) return INT64_MAX;
#endif // CONFIG_VQ_MVD_CODING
} else {
#if CONFIG_OPTFLOW_REFINEMENT
assert(this_mode == NEW_NEARMV || this_mode == NEW_NEARMV_OPTFLOW);
#else
assert(this_mode == NEW_NEARMV);
#endif // CONFIG_OPTFLOW_REFINEMENT
if (valid_mv0) {
cur_mv[0].as_int =
#if CONFIG_SEP_COMP_DRL
args->single_newmv[valid_precision_mv0][get_ref_mv_idx(mbmi, 0)]
[refs[0]]
.as_int;
#else
args->single_newmv[valid_precision_mv0][ref_mv_idx][refs[0]].as_int;
#endif // CONFIG_SEP_COMP_DRL
lower_mv_precision(&cur_mv[0].as_mv, pb_mv_precision);
clamp_mv_in_range(x, &cur_mv[0], 0, pb_mv_precision);
}
if (cm->seq_params.enable_adaptive_mvd) {
assert(mbmi->pb_mv_precision == mbmi->max_mv_precision);
av1_compound_single_motion_search_interinter(cpi, x, bsize, cur_mv,
NULL, 0, rate_mv, 0);
#if CONFIG_VQ_MVD_CODING
if (cur_mv->as_int == INVALID_MV) return INT64_MAX;
#endif // CONFIG_VQ_MVD_CODING
} else {
// aomenc3
if (cpi->sf.inter_sf.comp_inter_joint_search_thresh <= bsize ||
!valid_mv0) {
av1_compound_single_motion_search_interinter(cpi, x, bsize, cur_mv,
NULL, 0, rate_mv, 0);
} else {
const int_mv ref_mv = av1_get_ref_mv(x, 0);
#if CONFIG_C071_SUBBLK_WARPMV
update_mv_precision(ref_mv.as_mv, pb_mv_precision, &cur_mv[0].as_mv);
#endif // CONFIG_C071_SUBBLK_WARPMV
*rate_mv =
av1_mv_bit_cost(&cur_mv[0].as_mv, &ref_mv.as_mv, pb_mv_precision,
&x->mv_costs, MV_COST_WEIGHT, 0);
}
}
}
} else if (this_mode == AMVDNEWMV) {
const int ref_idx = 0;
int_mv best_mv;
assert(mbmi->pb_mv_precision == mbmi->max_mv_precision);
av1_amvd_single_motion_search(cpi, x, bsize, &best_mv.as_mv, rate_mv,
ref_idx);
if (best_mv.as_int == INVALID_MV) return INT64_MAX;
cur_mv[0].as_int = best_mv.as_int;
} else {
// Single ref case.
const int ref_idx = 0;
int_mv best_mv;
int valid_precision_mv0 = NUM_MV_PRECISIONS;
int do_refine_ms = (cpi->sf.flexmv_sf.fast_motion_search_low_precision &&
pb_mv_precision < mbmi->max_mv_precision) &&
is_pb_mv_precision_active(&cpi->common, mbmi, bsize);
if (do_refine_ms) {
int valid_mv0_found = 0;
for (int prev_mv_precision = pb_mv_precision;
prev_mv_precision <= mbmi->max_mv_precision; prev_mv_precision++) {
#if CONFIG_SEP_COMP_DRL
assert(get_ref_mv_idx(mbmi, 1) == get_ref_mv_idx(mbmi, 0));
if (args->single_newmv_valid[prev_mv_precision][get_ref_mv_idx(mbmi, 0)]
[refs[0]]) {
#else
if (args->single_newmv_valid[prev_mv_precision][ref_mv_idx][refs[0]]) {
#endif // CONFIG_SEP_COMP_DRL
valid_mv0_found = 1;
valid_precision_mv0 = prev_mv_precision;
break;
}
}
do_refine_ms &= valid_mv0_found;
}
if (do_refine_ms) {
int_mv start_mv;
assert(valid_precision_mv0 > pb_mv_precision &&
valid_precision_mv0 < NUM_MV_PRECISIONS);
start_mv.as_int =
#if CONFIG_SEP_COMP_DRL
args->single_newmv[valid_precision_mv0][get_ref_mv_idx(mbmi, 0)]
[refs[0]]
.as_int;
#else
args->single_newmv[valid_precision_mv0][ref_mv_idx][refs[0]].as_int;
#endif // CONFIG_SEP_COMP_DRL
lower_mv_precision(&start_mv.as_mv, pb_mv_precision);
clamp_mv_in_range(x, &start_mv, 0, pb_mv_precision);
av1_single_motion_search_high_precision(cpi, x, bsize, ref_idx, rate_mv,
mode_info, &start_mv, &best_mv);
} else {
int search_range = INT_MAX;
#if CONFIG_SEP_COMP_DRL
if (cpi->sf.mv_sf.reduce_search_range && mbmi->ref_mv_idx[0] > 0) {
#else
if (cpi->sf.mv_sf.reduce_search_range && mbmi->ref_mv_idx > 0) {
#endif // CONFIG_SEP_COMP_DRL
const MV ref_mv = av1_get_ref_mv(x, ref_idx).as_mv;
int min_mv_diff = INT_MAX;
int best_match = -1;
MV best_mv1 = { 0 };
#if CONFIG_SEP_COMP_DRL
assert(ref_idx == 0);
for (int idx = 0; idx < mbmi->ref_mv_idx[ref_idx]; ++idx) {
MV prev_ref_mv = av1_get_ref_mv_from_stack(ref_idx, mbmi->ref_frame,
idx, x->mbmi_ext, mbmi)
.as_mv;
#else
for (int idx = 0; idx < mbmi->ref_mv_idx; ++idx) {
MV prev_ref_mv = av1_get_ref_mv_from_stack(ref_idx, mbmi->ref_frame,
idx, x->mbmi_ext)
.as_mv;
#endif // CONFIG_SEP_COMP_DRL
const int ref_mv_diff = AOMMAX(abs(ref_mv.row - prev_ref_mv.row),
abs(ref_mv.col - prev_ref_mv.col));
if (min_mv_diff > ref_mv_diff) {
min_mv_diff = ref_mv_diff;
best_match = idx;
best_mv1 = prev_ref_mv;
}
}
if (min_mv_diff < (16 << 3)) {
if (args->single_newmv_valid[pb_mv_precision][best_match][refs[0]]) {
search_range = min_mv_diff;
search_range += AOMMAX(
abs(args->single_newmv[pb_mv_precision][best_match][refs[0]]
.as_mv.row -
best_mv1.row),
abs(args->single_newmv[pb_mv_precision][best_match][refs[0]]
.as_mv.col -
best_mv1.col));
// Get full pixel search range.
search_range = (search_range + 4) >> 3;
}
}
}
av1_single_motion_search(cpi, x, bsize, ref_idx, rate_mv, search_range,
mode_info, &best_mv
#if CONFIG_EXTENDED_WARP_PREDICTION
,
NULL
#endif // CONFIG_EXTENDED_WARP_PREDICTION
);
}
if (best_mv.as_int == INVALID_MV) return INT64_MAX;
#if CONFIG_SEP_COMP_DRL
args->single_newmv[pb_mv_precision][get_ref_mv_idx(mbmi, 0)][refs[0]] =
best_mv;
args->single_newmv_rate[pb_mv_precision][get_ref_mv_idx(mbmi, 0)][refs[0]] =
*rate_mv;
args->single_newmv_valid[pb_mv_precision][get_ref_mv_idx(mbmi, 0)]
[refs[0]] = 1;
#else
args->single_newmv[pb_mv_precision][ref_mv_idx][refs[0]] = best_mv;
args->single_newmv_rate[pb_mv_precision][ref_mv_idx][refs[0]] = *rate_mv;
args->single_newmv_valid[pb_mv_precision][ref_mv_idx][refs[0]] = 1;
#endif // CONFIG_SEP_COMP_DRL
cur_mv[0].as_int = best_mv.as_int;
}
return 0;
}
#if CONFIG_EXTENDED_WARP_PREDICTION
static int cost_warp_delta_param(int index, int value,
const ModeCosts *mode_costs) {
assert(2 <= index && index <= 5);
int index_type = (index == 2 || index == 5) ? 0 : 1;
int coded_value = (value / WARP_DELTA_STEP) + WARP_DELTA_CODED_MAX;
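// E.g., a zero delta codes the center symbol WARP_DELTA_CODED_MAX, and a
// delta of k * WARP_DELTA_STEP codes the symbol k + WARP_DELTA_CODED_MAX.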
assert(0 <= coded_value && coded_value < WARP_DELTA_NUM_SYMBOLS);
return mode_costs->warp_delta_param_cost[index_type][coded_value];
}
int av1_cost_warp_delta(const AV1_COMMON *cm, const MACROBLOCKD *xd,
const MB_MODE_INFO *mbmi,
const MB_MODE_INFO_EXT *mbmi_ext,
const ModeCosts *mode_costs) {
(void)xd;
if (!allow_warp_parameter_signaling(cm, mbmi)) {
return 0;
}
const WarpedMotionParams *params = &mbmi->wm_params[0];
WarpedMotionParams base_params;
av1_get_warp_base_params(
cm, mbmi, &base_params, NULL,
mbmi_ext->warp_param_stack[av1_ref_frame_type(mbmi->ref_frame)]);
// The RDO stage should not give us a model which is not warpable.
// Such models can still be signalled, but are effectively useless
// as we'll just fall back to translational motion
assert(!params->invalid);
int rate = 0;
// TODO(rachelbarker): Allow signaling warp type?
rate += cost_warp_delta_param(2, params->wmmat[2] - base_params.wmmat[2],
mode_costs);
rate += cost_warp_delta_param(3, params->wmmat[3] - base_params.wmmat[3],
mode_costs);
return rate;
}
static INLINE int select_modes_to_search(const AV1_COMP *const cpi,
int allowed_motion_modes,
int eval_motion_mode,
int skip_motion_mode) {
int modes_to_search = allowed_motion_modes;
// Modify the set of motion modes to consider according to speed features.
// For example, if SIMPLE_TRANSLATION has already been searched according to
// the motion_mode_for_winner_cand speed feature, avoid searching it again.
if (cpi->sf.winner_mode_sf.motion_mode_for_winner_cand) {
if (!eval_motion_mode) {
modes_to_search = (1 << SIMPLE_TRANSLATION);
} else {
// Skip translation, as it will have already been evaluated.
modes_to_search &= ~(1 << SIMPLE_TRANSLATION);
}
}
if (skip_motion_mode) {
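// Mask with (rather than clear) the SIMPLE_TRANSLATION bit: when the motion
// mode search is skipped, only simple translation remains.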
modes_to_search &= (1 << SIMPLE_TRANSLATION);
}
return modes_to_search;
}
// Find the bit cost of signaling the warp_ref_idx
static INLINE int get_warp_ref_idx_cost(const MB_MODE_INFO *mbmi,
const MACROBLOCK *x) {
if (mbmi->max_num_warp_candidates <= 1) {
assert(mbmi->warp_ref_idx == 0);
return 0;
}
int cost = 0;
const ModeCosts *mode_costs = &x->mode_costs;
int max_idx_bits = mbmi->max_num_warp_candidates - 1;
for (int bit_idx = 0; bit_idx < max_idx_bits; ++bit_idx) {
int warp_ctx = 0;
int bit_ctx = bit_idx < 2 ? bit_idx : 2;
int codec_bit = (mbmi->warp_ref_idx != bit_idx);
cost += mode_costs->warp_ref_idx_cost[bit_ctx][warp_ctx][codec_bit];
if (mbmi->warp_ref_idx == bit_idx) break;
}
return cost;
}
#else
static INLINE void update_mode_start_end_index(const AV1_COMP *const cpi,
int *mode_index_start,
int *mode_index_end,
int last_motion_mode_allowed,
int interintra_allowed,
int eval_motion_mode) {
*mode_index_start = (int)SIMPLE_TRANSLATION;
*mode_index_end = (int)last_motion_mode_allowed + interintra_allowed;
if (cpi->sf.winner_mode_sf.motion_mode_for_winner_cand) {
if (!eval_motion_mode) {
*mode_index_end = (int)SIMPLE_TRANSLATION;
} else {
// Set the start index appropriately to process motion modes other than
// simple translation
*mode_index_start = 1;
}
}
}
#endif // CONFIG_EXTENDED_WARP_PREDICTION
#if CONFIG_DERIVED_MVD_SIGN
#define NUMBER_OF_ITER_PER_COMP 4
// Get the other non-signaled MVD for joint MVD mode
static int get_othermv_for_jointmv_mode(
const AV1_COMP *const cpi, BLOCK_SIZE bsize, MACROBLOCK *x,
MB_MODE_INFO *mbmi, MV this_mv, MV *other_mv, MvSubpelPrecision precision,
int is_adaptive_mvd, int jmvd_base_ref_list) {
const AV1_COMMON *cm = &cpi->common;
const int same_side = is_ref_frame_same_side(cm, mbmi);
assert(jmvd_base_ref_list == get_joint_mvd_base_ref_list(cm, mbmi));
assert(is_joint_mvd_coding_mode(mbmi->mode));
const int_mv ref_mvs[2] = { av1_get_ref_mv(x, 0), av1_get_ref_mv(x, 1) };
int first_ref_dist =
cm->ref_frame_relative_dist[mbmi->ref_frame[jmvd_base_ref_list]];
int sec_ref_dist =
cm->ref_frame_relative_dist[mbmi->ref_frame[1 - jmvd_base_ref_list]];
assert(first_ref_dist >= sec_ref_dist);
sec_ref_dist = same_side ? sec_ref_dist : -sec_ref_dist;
MV other_mvd = { 0, 0 };
MV diff = { 0, 0 };
MV low_prec_refmv = ref_mvs[jmvd_base_ref_list].as_mv;
#if BUGFIX_AMVD_AMVR
if (!is_adaptive_mvd)
#endif // BUGFIX_AMVD_AMVR
#if CONFIG_C071_SUBBLK_WARPMV
if (precision < MV_PRECISION_HALF_PEL)
#endif // CONFIG_C071_SUBBLK_WARPMV
lower_mv_precision(&low_prec_refmv, precision);
diff.row = this_mv.row - low_prec_refmv.row;
diff.col = this_mv.col - low_prec_refmv.col;
get_mv_projection(&other_mvd, diff, sec_ref_dist, first_ref_dist);
scale_other_mvd(&other_mvd, mbmi->jmvd_scale_mode, mbmi->mode);
#if !CONFIG_C071_SUBBLK_WARPMV
// TODO(Mohammed): Do we need to apply block level lower mv precision?
lower_mv_precision(&other_mvd, cm->features.fr_mv_precision);
#endif  // !CONFIG_C071_SUBBLK_WARPMV
other_mv->row =
(int)(ref_mvs[1 - jmvd_base_ref_list].as_mv.row + other_mvd.row);
other_mv->col =
(int)(ref_mvs[1 - jmvd_base_ref_list].as_mv.col + other_mvd.col);
SUBPEL_MOTION_SEARCH_PARAMS ms_params;
av1_make_default_subpel_ms_params(&ms_params, cpi, x, bsize,
&ref_mvs[1 - jmvd_base_ref_list].as_mv,
mbmi->pb_mv_precision, NULL);
const SubpelMvLimits *other_mv_limits = &ms_params.mv_limits;
return av1_is_subpelmv_in_range(other_mv_limits, *other_mv);
}
// Cost of signaling sign of last non-zero MVD component
static int get_last_sign_cost(MACROBLOCK *x, int is_adaptive_mvd, MV mv_diff[2],
int start_signaled_mv_ref_idx,
int num_signaled_mvd) {
int last_sign = -1;
int last_comp = -1;
for (int ref_idx = start_signaled_mv_ref_idx;
ref_idx < start_signaled_mv_ref_idx + num_signaled_mvd; ++ref_idx) {
for (int comp = 0; comp < 2; comp++) {
int16_t this_mvd_comp =
comp == 0 ? mv_diff[ref_idx].row : mv_diff[ref_idx].col;
if (this_mvd_comp) {
last_sign = (this_mvd_comp < 0);
last_comp = comp;
}
}
}
assert(last_sign == 0 || last_sign == 1);
return (av1_mv_sign_cost(last_sign, last_comp, &x->mv_costs, MV_COST_WEIGHT,
7, is_adaptive_mvd));
}
// Generate the prediction and compute model RD for a given MV
static void av1_get_model_rd(const AV1_COMP *const cpi, MACROBLOCKD *xd,
MACROBLOCK *x, BLOCK_SIZE bsize,
const BUFFER_SET *orig_dst, MV this_mvs[2],
MV ref_mvs[2], int num_signaled_mvd, int *rate_sum,
int64_t *dist_sum, int *mv_rate,
int signaled_mv_ref_idx) {
const AV1_COMMON *cm = &cpi->common;
MB_MODE_INFO *mbmi = xd->mi[0];
const int is_adaptive_mvd = enable_adaptive_mvd_resolution(cm, mbmi);
int is_compound = has_second_ref(mbmi);
int tmp_skip_txfm_sb;
int64_t tmp_skip_sse_sb;
int plane_from = AOM_PLANE_Y;
int plane_to = AOM_PLANE_Y;
// build the predictor
av1_enc_build_inter_predictor(cm, xd, xd->mi_row, xd->mi_col, orig_dst, bsize,
plane_from, plane_to);
// Compute the MV costs for all signaled MVDs
int this_mv_rate = av1_mv_bit_cost(
&this_mvs[signaled_mv_ref_idx], &ref_mvs[signaled_mv_ref_idx],
mbmi->pb_mv_precision, &x->mv_costs, MV_COST_WEIGHT, is_adaptive_mvd);
if (num_signaled_mvd == 2) {
this_mv_rate += av1_mv_bit_cost(
&this_mvs[!signaled_mv_ref_idx], &ref_mvs[!signaled_mv_ref_idx],
mbmi->pb_mv_precision, &x->mv_costs, MV_COST_WEIGHT, is_adaptive_mvd);
}
if (is_compound) {
model_rd_sb_fn[MODELRD_TYPE_MASKED_COMPOUND](
cpi, bsize, x, xd, plane_from, plane_to, rate_sum, dist_sum,
&tmp_skip_txfm_sb, &tmp_skip_sse_sb, NULL, NULL, NULL);
} else {
if (mbmi->motion_mode == INTERINTRA) {
model_rd_sb_fn[MODELRD_TYPE_INTERINTRA](cpi, bsize, x, xd, plane_from,
plane_to, rate_sum, dist_sum,
NULL, NULL, NULL, NULL, NULL);
} else {
model_rd_sb_fn[MODELRD_CURVFIT](cpi, bsize, x, xd, plane_from, plane_to,
rate_sum, dist_sum, NULL, NULL, NULL,
NULL, NULL);
}
}
*mv_rate = this_mv_rate;
}
// Check if this MVD is valid for sign derivation
static INLINE int is_this_mvds_valid_for_derivesign(
const MV mvd[2], const MvSubpelPrecision precision,
const int is_adaptive_mvd, const int start_signaled_mv_ref_idx,
const int num_signaled_mvd, int *modified_last_sign,
int *modified_last_comp, int *modified_num_non_zero_comp,
int th_for_num_nonzero) {
(void)is_adaptive_mvd;
int num_nonzero_mvd_comp = 0;
int precision_shift = MV_PRECISION_ONE_EIGHTH_PEL - precision;
int last_sign = -1;
int sum_mvd = 0;
int last_comp = -1;
for (int ref_idx = start_signaled_mv_ref_idx;
ref_idx < start_signaled_mv_ref_idx + num_signaled_mvd; ++ref_idx) {
for (int comp = 0; comp < 2; comp++) {
int this_mvd_comp = comp == 0 ? mvd[ref_idx].row : mvd[ref_idx].col;
if (this_mvd_comp) {
last_sign = (this_mvd_comp < 0);
num_nonzero_mvd_comp++;
last_comp = comp;
sum_mvd += (abs(this_mvd_comp) >> precision_shift);
}
}
}
if (modified_last_sign) *modified_last_sign = last_sign;
if (modified_last_comp) *modified_last_comp = last_comp;
if (modified_num_non_zero_comp)
*modified_num_non_zero_comp = num_nonzero_mvd_comp;
if (num_nonzero_mvd_comp < th_for_num_nonzero) return 1;
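// The sign of the last nonzero component must match the parity of the summed
// scaled magnitudes. E.g., MVDs of (+6, -4) with precision_shift = 1 give
// magnitudes 3 and 2, so sum_mvd = 5 (odd parity, 1); the last nonzero
// component (-4) is negative (sign 1), so this MVD pair is valid.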
return (last_sign == (sum_mvd & 0x1));
}
// Motion search for sign derivation if only one MVD is signaled
static int av1_adjust_mvs_for_derive_sign_single_mvd(
const AV1_COMP *const cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
const BUFFER_SET *orig_dst, int signaled_mv_ref_idx, int num_signaled_mvd,
MV mv_diff[2], MV ref_mvs[2], int rate2_nocoeff, int rate_mv0,
int *tmp_rate_mv) {
const AV1_COMMON *cm = &cpi->common;
MACROBLOCKD *xd = &x->e_mbd;
MB_MODE_INFO *mbmi = xd->mi[0];
int is_compound = has_second_ref(mbmi);
const int is_adaptive_mvd = enable_adaptive_mvd_resolution(cm, mbmi);
int th_for_num_nonzero = get_derive_sign_nzero_th(mbmi);
const int joint_mvd_mode = is_joint_mvd_coding_mode(mbmi->mode);
assert(!is_adaptive_mvd);
assert(num_signaled_mvd == 1);
(void)num_signaled_mvd;
int rate_sum;
int64_t dist_sum;
int rate_without_mv = rate2_nocoeff - rate_mv0;
int best_mv_rate = rate_mv0;
SubpelMvLimits *mv_limits[2] = { NULL, NULL };
assert(
IMPLIES(is_adaptive_mvd, mbmi->pb_mv_precision == MV_PRECISION_QTR_PEL));
assert(
IMPLIES(!is_compound, signaled_mv_ref_idx == 0 && num_signaled_mvd == 1));
const int mv_delta =
1 << (MV_PRECISION_ONE_EIGHTH_PEL - mbmi->pb_mv_precision);
assert(!is_valid_sign_mvd_single(mv_diff[signaled_mv_ref_idx],
mbmi->pb_mv_precision, is_adaptive_mvd,
th_for_num_nonzero));
// Get the MV limits for both references
SUBPEL_MOTION_SEARCH_PARAMS ms_params[2];
for (int ref_idx = 0; ref_idx < 1 + is_compound; ref_idx++) {
av1_make_default_subpel_ms_params(&ms_params[ref_idx], cpi, x, bsize,
&ref_mvs[ref_idx], mbmi->pb_mv_precision,
NULL);
mv_limits[ref_idx] = &ms_params[ref_idx].mv_limits;
}
int64_t best_model_rd = INT64_MAX;
const MV initial_mvs[2] = { mbmi->mv[0].as_mv, mbmi->mv[1].as_mv };
MV best_mvs[2] = { mbmi->mv[0].as_mv, mbmi->mv[1].as_mv };
const MV initial_mvd[2] = { mv_diff[0], mv_diff[1] };
int search_range = 1;
for (int row_mvd_idx = -search_range; row_mvd_idx <= search_range;
row_mvd_idx++) {
for (int col_mvd_idx = -search_range; col_mvd_idx <= search_range;
col_mvd_idx++) {
const MV this_mvd = {
initial_mvd[signaled_mv_ref_idx].row + row_mvd_idx * mv_delta,
initial_mvd[signaled_mv_ref_idx].col + col_mvd_idx * mv_delta
};
if (!is_valid_sign_mvd_single(this_mvd, mbmi->pb_mv_precision,
is_adaptive_mvd, th_for_num_nonzero))
continue;
// Get the last sign
int last_nonzero_sign = -1;
int last_comp = -1;
int num_nonzero_mvd_comp = (this_mvd.row != 0) + (this_mvd.col != 0);
if (this_mvd.col) {
last_nonzero_sign = this_mvd.col < 0;
last_comp = 1;
} else if (this_mvd.row) {
last_nonzero_sign = this_mvd.row < 0;
last_comp = 0;
}
MV this_mvs[2] = { initial_mvs[0], initial_mvs[1] };
update_mv_component_from_mvd(this_mvd.row, ref_mvs[signaled_mv_ref_idx],
0, is_adaptive_mvd, mbmi->pb_mv_precision,
&this_mvs[signaled_mv_ref_idx]);
update_mv_component_from_mvd(this_mvd.col, ref_mvs[signaled_mv_ref_idx],
1, is_adaptive_mvd, mbmi->pb_mv_precision,
&this_mvs[signaled_mv_ref_idx]);
if (av1_is_subpelmv_in_range(mv_limits[signaled_mv_ref_idx],
this_mvs[signaled_mv_ref_idx])) {
mbmi->mv[signaled_mv_ref_idx].as_mv = this_mvs[signaled_mv_ref_idx];
if (is_compound) {
if (joint_mvd_mode) {
MV other_mv;
int valid = get_othermv_for_jointmv_mode(
cpi, bsize, x, mbmi, this_mvs[signaled_mv_ref_idx], &other_mv,
mbmi->pb_mv_precision, is_adaptive_mvd, signaled_mv_ref_idx);
if (!valid) continue;
mbmi->mv[!signaled_mv_ref_idx].as_mv = other_mv;
} else {
mbmi->mv[!signaled_mv_ref_idx].as_mv =
this_mvs[!signaled_mv_ref_idx];
}
}
int this_mv_rate;
MV this_mv[2] = { mbmi->mv[0].as_mv, mbmi->mv[1].as_mv };
av1_get_model_rd(cpi, xd, x, bsize, orig_dst, this_mv, ref_mvs,
num_signaled_mvd, &rate_sum, &dist_sum, &this_mv_rate,
signaled_mv_ref_idx);
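        // The sign of the last nonzero MVD component is derived rather than
        // signaled, so remove its cost from the MV rate.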
if (num_nonzero_mvd_comp >= th_for_num_nonzero) {
assert(last_comp != -1);
assert(last_nonzero_sign != -1);
int last_sign_cost =
av1_mv_sign_cost(last_nonzero_sign, last_comp, &x->mv_costs,
MV_COST_WEIGHT, 7, is_adaptive_mvd);
this_mv_rate -= last_sign_cost;
}
int64_t comp_model_rd_cur = RDCOST(
x->rdmult, rate_without_mv + this_mv_rate + rate_sum, dist_sum);
if (comp_model_rd_cur < best_model_rd) {
best_model_rd = comp_model_rd_cur;
best_mvs[signaled_mv_ref_idx] = mbmi->mv[signaled_mv_ref_idx].as_mv;
if (is_compound)
best_mvs[!signaled_mv_ref_idx] =
mbmi->mv[!signaled_mv_ref_idx].as_mv;
best_mv_rate = this_mv_rate;
assert(this_mvs[signaled_mv_ref_idx].row ==
mbmi->mv[signaled_mv_ref_idx].as_mv.row);
assert(this_mvs[signaled_mv_ref_idx].col ==
mbmi->mv[signaled_mv_ref_idx].as_mv.col);
if (is_compound && !joint_mvd_mode) {
assert(this_mvs[!signaled_mv_ref_idx].row ==
mbmi->mv[!signaled_mv_ref_idx].as_mv.row);
assert(this_mvs[!signaled_mv_ref_idx].col ==
mbmi->mv[!signaled_mv_ref_idx].as_mv.col);
}
}
}
}
}
mbmi->mv[0].as_mv = best_mvs[0];
if (is_compound) mbmi->mv[1].as_mv = best_mvs[1];
*tmp_rate_mv = best_mv_rate;
return (best_model_rd != INT64_MAX);
}
// Perform refinement for sign derivation
static int av1_adjust_mvs_for_derive_sign(const AV1_COMP *const cpi,
MACROBLOCK *x, BLOCK_SIZE bsize,
const BUFFER_SET *orig_dst,
int start_signaled_mv_ref_idx,
int num_signaled_mvd, MV mv_diff[2],
MV ref_mvs[2], int rate2_nocoeff,
int rate_mv0, int *tmp_rate_mv) {
const AV1_COMMON *cm = &cpi->common;
MACROBLOCKD *xd = &x->e_mbd;
MB_MODE_INFO *mbmi = xd->mi[0];
int is_compound = has_second_ref(mbmi);
const int is_adaptive_mvd = enable_adaptive_mvd_resolution(cm, mbmi);
int th_for_num_nonzero = get_derive_sign_nzero_th(mbmi);
assert(is_adaptive_mvd == 0);
const int joint_mvd_mode = is_joint_mvd_coding_mode(mbmi->mode);
  int rate_sum;
  int64_t dist_sum;
int rate_without_mv = rate2_nocoeff - rate_mv0;
int best_mv_rate = rate_mv0;
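  // With a single signaled MVD the candidate space is a small 2-D grid, so
  // handle that case with the dedicated single-MVD search above.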
if (num_signaled_mvd < 2) {
return av1_adjust_mvs_for_derive_sign_single_mvd(
cpi, x, bsize, orig_dst, start_signaled_mv_ref_idx, num_signaled_mvd,
mv_diff, ref_mvs, rate2_nocoeff, rate_mv0, tmp_rate_mv);
}
SubpelMvLimits *mv_limits[2] = { NULL, NULL };
assert(
IMPLIES(is_adaptive_mvd, mbmi->pb_mv_precision == MV_PRECISION_QTR_PEL));
const int mv_delta =
1 << (MV_PRECISION_ONE_EIGHTH_PEL - mbmi->pb_mv_precision);
assert(IMPLIES(!is_compound,
start_signaled_mv_ref_idx == 0 && num_signaled_mvd == 1));
SUBPEL_MOTION_SEARCH_PARAMS ms_params[2];
for (int ref_idx = start_signaled_mv_ref_idx;
ref_idx < start_signaled_mv_ref_idx + num_signaled_mvd; ++ref_idx) {
av1_make_default_subpel_ms_params(&ms_params[ref_idx], cpi, x, bsize,
&ref_mvs[ref_idx], mbmi->pb_mv_precision,
NULL);
mv_limits[ref_idx] = &ms_params[ref_idx].mv_limits;
}
int64_t best_model_rd = INT64_MAX;
const MV initial_mvs[2] = { mbmi->mv[0].as_mv, mbmi->mv[1].as_mv };
MV best_mvs[2] = { mbmi->mv[0].as_mv, mbmi->mv[1].as_mv };
// Get the position of the last non-zero mv component
int initial_last_non_zero_pos = 0;
int curr_pos = 0;
int initial_num_nonzero_mvd_comp = 0;
int initial_last_sign = -1;
for (int ref_idx = start_signaled_mv_ref_idx;
ref_idx < start_signaled_mv_ref_idx + num_signaled_mvd; ++ref_idx) {
for (int comp = 0; comp < 2; comp++) {
int16_t this_mvd_comp =
comp == 0 ? mv_diff[ref_idx].row : mv_diff[ref_idx].col;
if (this_mvd_comp) {
initial_last_non_zero_pos = curr_pos;
initial_last_sign = (this_mvd_comp < 0);
initial_num_nonzero_mvd_comp++;
}
curr_pos++;
}
}
curr_pos = 0;
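  // Per-component refinement offsets, in units of mv_delta: try +/-1 first,
  // then +/-3.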
int offsets[NUMBER_OF_ITER_PER_COMP] = { 1, -1, 3, -3 };
assert(initial_num_nonzero_mvd_comp >= th_for_num_nonzero);
for (int ref_idx = start_signaled_mv_ref_idx;
ref_idx < start_signaled_mv_ref_idx + num_signaled_mvd; ++ref_idx) {
for (int comp = 0; comp < 2; comp++) {
int16_t this_mvd_comp =
comp == 0 ? mv_diff[ref_idx].row : mv_diff[ref_idx].col;
assert(IMPLIES(curr_pos > initial_last_non_zero_pos, this_mvd_comp == 0));
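      // Only refine components up to the last initially-nonzero position,
      // unless no valid candidate has been found yet.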
if (curr_pos <= initial_last_non_zero_pos || best_model_rd == INT64_MAX) {
const int max_num_iterations = 2;
for (int iteration = 0; iteration < max_num_iterations; iteration++) {
int16_t modified_mvd_comp =
this_mvd_comp + mv_delta * offsets[iteration];
if (abs(modified_mvd_comp) > MV_MAX) continue;
MV modified_mvds[2] = { mv_diff[0], mv_diff[1] };
if (comp == 0) {
modified_mvds[ref_idx].row = modified_mvd_comp;
} else {
modified_mvds[ref_idx].col = modified_mvd_comp;
}
int modified_last_sign = -1;
int modified_last_comp = -1;
int modified_num_non_zero_comp = 0;
if (!is_this_mvds_valid_for_derivesign(
modified_mvds, mbmi->pb_mv_precision, is_adaptive_mvd,
start_signaled_mv_ref_idx, num_signaled_mvd,
&modified_last_sign, &modified_last_comp,
&modified_num_non_zero_comp, th_for_num_nonzero))
continue;
          // Modifying the last non-zero MVD component to 0 is not allowed.
          // Zeroing any component is also not allowed when the number of
          // nonzero components equals get_derive_sign_nzero_th(mbmi).
if (modified_mvd_comp == 0 &&
(initial_num_nonzero_mvd_comp == th_for_num_nonzero ||
curr_pos == initial_last_non_zero_pos))
continue;
// Not allowed to change the sign of the last component
if (curr_pos == initial_last_non_zero_pos &&
((initial_last_sign != (modified_mvd_comp < 0))))
continue;
MV this_mvs[2] = { initial_mvs[0], initial_mvs[1] };
update_mv_component_from_mvd(
modified_mvd_comp, ref_mvs[ref_idx], comp,
enable_adaptive_mvd_resolution(cm, mbmi), mbmi->pb_mv_precision,
&this_mvs[ref_idx]);
if (av1_is_subpelmv_in_range(mv_limits[ref_idx], this_mvs[ref_idx])) {
mbmi->mv[ref_idx].as_mv = this_mvs[ref_idx];
if (is_compound) {
if (joint_mvd_mode) {
MV other_mv;
int valid = get_othermv_for_jointmv_mode(
cpi, bsize, x, mbmi, this_mvs[ref_idx], &other_mv,
mbmi->pb_mv_precision, is_adaptive_mvd,
start_signaled_mv_ref_idx);
if (!valid) continue;
mbmi->mv[!ref_idx].as_mv = other_mv;
} else {
                // this_mvs[!ref_idx] is unchanged here and is assumed to
                // remain within mv_limits[!ref_idx].
mbmi->mv[!ref_idx].as_mv = this_mvs[!ref_idx];
}
}
int this_mv_rate;
MV this_mv[2] = { mbmi->mv[0].as_mv, mbmi->mv[1].as_mv };
av1_get_model_rd(cpi, xd, x, bsize, orig_dst, this_mv, ref_mvs,
num_signaled_mvd, &rate_sum, &dist_sum,
&this_mv_rate, start_signaled_mv_ref_idx);
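            // The derived sign of the last nonzero component is not
            // signaled; discount its cost from the MV rate.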
if (modified_num_non_zero_comp >= th_for_num_nonzero) {
this_mv_rate -= av1_mv_sign_cost(
modified_last_sign, modified_last_comp, &x->mv_costs,
MV_COST_WEIGHT, 7, is_adaptive_mvd);
}
int64_t comp_model_rd_cur = RDCOST(
x->rdmult, rate_without_mv + this_mv_rate + rate_sum, dist_sum);
if (comp_model_rd_cur < best_model_rd) {
best_model_rd = comp_model_rd_cur;
best_mvs[ref_idx] = mbmi->mv[ref_idx].as_mv;
if (is_compound) best_mvs[!ref_idx] = mbmi->mv[!ref_idx].as_mv;
best_mv_rate = this_mv_rate;
assert(this_mvs[ref_idx].row == mbmi->mv[ref_idx].as_mv.row);
assert(this_mvs[ref_idx].col == mbmi->mv[ref_idx].as_mv.col);
if (is_compound && !joint_mvd_mode) {
assert(this_mvs[!ref_idx].row == mbmi->mv[!ref_idx].as_mv.row);
assert(this_mvs[!ref_idx].col == mbmi->mv[!ref_idx].as_mv.col);
}
}
}
}
}
curr_pos++;
}
}
mbmi->mv[0].as_mv = best_mvs[0];
if (is_compound) mbmi->mv[1].as_mv = best_mvs[1];
*tmp_rate_mv = best_mv_rate;
assert(IMPLIES(best_model_rd != INT64_MAX,
!(mbmi->mv[0].as_mv.row == initial_mvs[0].row &&
mbmi->mv[0].as_mv.col == initial_mvs[0].col &&
mbmi->mv[1].as_mv.row == initial_mvs[1].row &&
mbmi->mv[1].as_mv.col == initial_mvs[1].col)));
return (best_model_rd != INT64_MAX);
}
#endif  // CONFIG_DERIVED_MVD_SIGN
/*!\brief AV1 motion mode search
*
* \ingroup inter_mode_search
 * Function to search over and determine the motion mode. It updates
 * mbmi->motion_mode and determines any necessary side information for the
 * selected motion mode. It also performs the full transform search, unless
 * the input parameter do_tx_search indicates that only an estimate of the
 * RD should be computed rather than the RD of a full transform search. It
 * returns the RD for the final motion_mode. As part of the RD search for
 * the given inter mode, it computes all information relevant to the input
 * mode: the best MV, the compound parameters (if the mode is a compound
 * mode), and the interpolation filter parameters.
*
* \param[in] cpi Top-level encoder structure.
* \param[in] tile_data Pointer to struct holding adaptive
* data/contexts/models for the tile during
* encoding.
* \param[in] x Pointer to struct holding all the data for
* the current macroblock.
* \param[in] bsize Current block size.
* \param[in,out] rd_stats Struct to keep track of the overall RD
* information.
* \param[in,out] rd_stats_y Struct to keep track of the RD information
* for only the Y plane.
* \param[in,out] rd_stats_uv Struct to keep track of the RD information
* for only the UV planes.
* \param[in] args HandleInterModeArgs struct holding
* miscellaneous arguments for inter mode
* search. See the documentation for this
* struct for a description of each member.
* \param[in] ref_best_rd Best RD found so far for this block.
* It is used for early termination of this
* search if the RD exceeds this value.
 * \param[in,out]  ref_skip_rd      A length 2 array, where ref_skip_rd[0] is
 *                                  the best total RD for a skip mode so far,
 *                                  and ref_skip_rd[1] is the best RD for a
 *                                  skip mode so far in luma. This is used as
 *                                  a speed feature to skip the transform
 *                                  search if the computed skip RD for the
 *                                  current mode is not better than the best
 *                                  skip RD so far.
* \param[in,out] rate_mv The rate associated with the motion vectors.
* This will be modified if a motion search is
* done in the motion mode search.
* \param[in,out] orig_dst A prediction buffer to hold a computed
* prediction. This will eventually hold the
* final prediction, and the tmp_dst info will
* be copied here.
* \param[in,out] best_est_rd Estimated RD for motion mode search if
* do_tx_search (see below) is 0.
 * \param[in]      do_tx_search     Parameter to indicate whether or not to do
 *                                  a full transform search. If 0, an estimated
 *                                  RD is computed for the modes without the
 *                                  transform search, and the full transform
 *                                  search is performed later on the best
 *                                  candidates.
* \param[in] inter_modes_info InterModesInfo struct to hold inter mode
* information to perform a full transform
* search only on winning candidates searched
* with an estimate for transform coding RD.
 * \param[in]      eval_motion_mode Boolean indicating whether to evaluate
 *                                  motion modes other than
 *                                  SIMPLE_TRANSLATION.
* \return Returns INT64_MAX if the determined motion mode is invalid and the
* current motion mode being tested should be skipped. It returns 0 if the
* motion mode search is a success.
*/
static int64_t motion_mode_rd(
const AV1_COMP *const cpi, TileDataEnc *tile_data, MACROBLOCK *const x,
BLOCK_SIZE bsize, RD_STATS *rd_stats, RD_STATS *rd_stats_y,
RD_STATS *rd_stats_uv, HandleInterModeArgs *const args, int64_t ref_best_rd,
int64_t *ref_skip_rd, int *rate_mv, const BUFFER_SET *orig_dst,
int64_t *best_est_rd, int do_tx_search, InterModesInfo *inter_modes_info,
int eval_motion_mode) {
const AV1_COMMON *const cm = &cpi->common;
const FeatureFlags *const features = &cm->features;
TxfmSearchInfo *txfm_info = &x->txfm_search_info;
const int num_planes = av1_num_planes(cm);
MACROBLOCKD *xd = &x->e_mbd;
MB_MODE_INFO *mbmi = xd->mi[0];
#if CONFIG_EXTENDED_WARP_PREDICTION
MB_MODE_INFO_EXT *mbmi_ext = x->mbmi_ext;
#endif // CONFIG_EXTENDED_WARP_PREDICTION
const int is_comp_pred = has_second_ref(mbmi);
const PREDICTION_MODE this_mode = mbmi->mode;
const int rate2_nocoeff = rd_stats->rate;
int best_xskip_txfm = 0;
RD_STATS best_rd_stats, best_rd_stats_y, best_rd_stats_uv;
uint8_t best_blk_skip[MAX_MIB_SIZE * MAX_MIB_SIZE];
TX_TYPE best_tx_type_map[MAX_MIB_SIZE * MAX_MIB_SIZE];
CctxType best_cctx_type_map[MAX_MIB_SIZE * MAX_MIB_SIZE];
const int rate_mv0 =
#if CONFIG_EXTENDED_WARP_PREDICTION
this_mode == WARPMV ? 0 :
#endif // CONFIG_EXTENDED_WARP_PREDICTION
*rate_mv;
#if !CONFIG_EXTENDED_WARP_PREDICTION
const int interintra_allowed =
cm->seq_params.enable_interintra_compound && is_interintra_allowed(mbmi);
#endif // !CONFIG_EXTENDED_WARP_PREDICTION
int pts0[SAMPLES_ARRAY_SIZE], pts_inref0[SAMPLES_ARRAY_SIZE];
#if CONFIG_COMPOUND_WARP_CAUSAL
int pts1[SAMPLES_ARRAY_SIZE], pts_inref1[SAMPLES_ARRAY_SIZE];
#endif // CONFIG_COMPOUND_WARP_CAUSAL
#if CONFIG_EXTENDED_WARP_PREDICTION
assert(IMPLIES(mbmi->mode == WARPMV, (rate_mv0 == 0)));
#endif // CONFIG_EXTENDED_WARP_PREDICTION
assert(mbmi->ref_frame[1] != INTRA_FRAME);
const MV_REFERENCE_FRAME ref_frame_1 = mbmi->ref_frame[1];
(void)tile_data;
av1_invalid_rd_stats(&best_rd_stats);
aom_clear_system_state();
#if CONFIG_COMPOUND_WARP_CAUSAL
mbmi->num_proj_ref[0] = 1; // assume num_proj_ref >=1
mbmi->num_proj_ref[1] = 1; // assume num_proj_ref >=1
mbmi->wm_params[0].invalid = 1;
mbmi->wm_params[1].invalid = 1;
#else
mbmi->num_proj_ref = 1; // assume num_proj_ref >=1
#endif // CONFIG_COMPOUND_WARP_CAUSAL
#if CONFIG_EXTENDED_WARP_PREDICTION
mbmi->warp_ref_idx = 0;
mbmi->max_num_warp_candidates = 0;
mbmi->warpmv_with_mvd_flag = 0;
int allowed_motion_modes = motion_mode_allowed(
cm, xd, mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]], mbmi);
if ((allowed_motion_modes & (1 << WARPED_CAUSAL))) {
// Collect projection samples used in least squares approximation of
// the warped motion parameters if WARPED_CAUSAL is going to be searched.
#if CONFIG_COMPOUND_WARP_CAUSAL
mbmi->num_proj_ref[0] = av1_findSamples(cm, xd, pts0, pts_inref0, 0);
if (has_second_ref(mbmi))
mbmi->num_proj_ref[1] = av1_findSamples(cm, xd, pts1, pts_inref1, 1);
else
mbmi->num_proj_ref[1] = 0;
#else
mbmi->num_proj_ref = av1_findSamples(cm, xd, pts0, pts_inref0);
#endif // CONFIG_COMPOUND_WARP_CAUSAL
}
#if CONFIG_COMPOUND_WARP_CAUSAL
const int total_samples0 = mbmi->num_proj_ref[0];
const int total_samples1 = mbmi->num_proj_ref[1];
  if (total_samples0 == 0 && total_samples1 == 0) {
#else
const int total_samples = mbmi->num_proj_ref;
if (total_samples == 0) {
#endif // CONFIG_COMPOUND_WARP_CAUSAL
// Do not search WARPED_CAUSAL if there are no samples to use to determine
// warped parameters.
allowed_motion_modes &= ~(1 << WARPED_CAUSAL);
}
#else
MOTION_MODE last_motion_mode_allowed = motion_mode_allowed(cm, xd, mbmi);
if (last_motion_mode_allowed == WARPED_CAUSAL) {
// Collect projection samples used in least squares approximation of
// the warped motion parameters if WARPED_CAUSAL is going to be searched.
mbmi->num_proj_ref = av1_findSamples(cm, xd, pts0, pts_inref0);
}
const int total_samples = mbmi->num_proj_ref;
if (total_samples == 0) {
// Do not search WARPED_CAUSAL if there are no samples to use to determine
// warped parameters.
last_motion_mode_allowed = OBMC_CAUSAL;
}
#endif // CONFIG_EXTENDED_WARP_PREDICTION
#if CONFIG_EXTENDED_WARP_PREDICTION
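  // Cache the motion search result for each warp reference candidate so the
  // WARPMV-with-MVD search runs at most once per candidate.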
int_mv previous_mvs[MAX_WARP_REF_CANDIDATES];
for (int w_ref_idx = 0; w_ref_idx < MAX_WARP_REF_CANDIDATES; w_ref_idx++) {
previous_mvs[w_ref_idx].as_int = INVALID_MV;
}
#endif // CONFIG_EXTENDED_WARP_PREDICTION
#if CONFIG_COMPOUND_WARP_CAUSAL
  // Reset the per-reference projection sample counts; the totals saved above
  // are restored when WARPED_CAUSAL is actually evaluated.
  mbmi->num_proj_ref[0] = 0;
  mbmi->num_proj_ref[1] = 0;
#endif // CONFIG_COMPOUND_WARP_CAUSAL
int num_rd_check = 0;
const MB_MODE_INFO base_mbmi = *mbmi;
MB_MODE_INFO best_mbmi;
#if CONFIG_C071_SUBBLK_WARPMV
SUBMB_INFO best_submi[MAX_MIB_SIZE * MAX_MIB_SIZE];
SUBMB_INFO base_submi[MAX_MIB_SIZE * MAX_MIB_SIZE];
store_submi(xd, cm, base_submi, bsize);
#endif // CONFIG_C071_SUBBLK_WARPMV
const int interp_filter = features->interp_filter;
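  // Rate of signaling the interpolation filter, counted only when the filter
  // is actually signaled for this block.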
const int switchable_rate =
av1_is_interp_needed(cm, xd)
? av1_get_switchable_rate(x, xd, interp_filter)
: 0;
int64_t best_rd = INT64_MAX;
int best_rate_mv = rate_mv0;
const int mi_row = xd->mi_row;
const int mi_col = xd->mi_col;
#if CONFIG_EXTENDED_WARP_PREDICTION
int modes_to_search =
(base_mbmi.mode == WARPMV)
? allowed_motion_modes
: select_modes_to_search(cpi, allowed_motion_modes, eval_motion_mode,
args->skip_motion_mode);
#else
int mode_index_start, mode_index_end;
// Modify the start and end index according to speed features. For example,
// if SIMPLE_TRANSLATION has already been searched according to
// the motion_mode_for_winner_cand speed feature, update the mode_index_start
// to avoid searching it again.
update_mode_start_end_index(cpi, &mode_index_start, &mode_index_end,
last_motion_mode_allowed, interintra_allowed,
eval_motion_mode);
#endif // CONFIG_EXTENDED_WARP_PREDICTION
// Main function loop. This loops over all of the possible motion modes and
// computes RD to determine the best one. This process includes computing
// any necessary side information for the motion mode and performing the
// transform search.
#if CONFIG_EXTENDED_WARP_PREDICTION
for (int mode_index = SIMPLE_TRANSLATION; mode_index < MOTION_MODES;
mode_index++) {
if ((modes_to_search & (1 << mode_index)) == 0) continue;
#else
for (int mode_index = mode_index_start; mode_index <= mode_index_end;
mode_index++) {
if (args->skip_motion_mode && mode_index) continue;
const int is_interintra_mode = mode_index > (int)last_motion_mode_allowed;
#endif // CONFIG_EXTENDED_WARP_PREDICTION
#if CONFIG_EXTENDED_WARP_PREDICTION
int is_warpmv_warp_causal =
(mode_index == WARPED_CAUSAL) && (base_mbmi.mode == WARPMV);
int max_warp_ref_idx = 1;
uint8_t valid_num_candidates = 0;
if (mode_index == WARP_DELTA || is_warpmv_warp_causal) {
max_warp_ref_idx =
(base_mbmi.mode == GLOBALMV || base_mbmi.mode == NEARMV ||
base_mbmi.mode == AMVDNEWMV)
? 1
: MAX_WARP_REF_CANDIDATES;
if (is_warpmv_warp_causal) {
max_warp_ref_idx = MAX_WARP_REF_CANDIDATES;
}
av1_find_warp_delta_base_candidates(
xd, &base_mbmi,
mbmi_ext->warp_param_stack[av1_ref_frame_type(base_mbmi.ref_frame)],
xd->warp_param_stack[av1_ref_frame_type(base_mbmi.ref_frame)],
xd->valid_num_warp_candidates[av1_ref_frame_type(
base_mbmi.ref_frame)],
&valid_num_candidates);
if (is_warpmv_warp_causal) {
if (valid_num_candidates > max_warp_ref_idx)
valid_num_candidates = max_warp_ref_idx;
}
}
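    // Evaluate every warp reference candidate; for WARPMV, additionally try
    // coding an MVD on top of the warp-derived MV (warpmv_with_mvd_flag).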
for (int warp_ref_idx = 0; warp_ref_idx < max_warp_ref_idx;
warp_ref_idx++) {
if (mode_index == WARP_DELTA && warp_ref_idx >= valid_num_candidates)
continue;
if (is_warpmv_warp_causal && warp_ref_idx >= valid_num_candidates)
continue;
for (int warpmv_with_mvd_flag = 0;
warpmv_with_mvd_flag < (1 + (base_mbmi.mode == WARPMV));
warpmv_with_mvd_flag++) {
#endif // CONFIG_EXTENDED_WARP_PREDICTION
int tmp_rate2 = rate2_nocoeff;
int tmp_rate_mv = rate_mv0;
*mbmi = base_mbmi;
#if CONFIG_C071_SUBBLK_WARPMV
update_submi(xd, cm, base_submi, bsize);
#endif // CONFIG_C071_SUBBLK_WARPMV
#if CONFIG_EXTENDED_WARP_PREDICTION
mbmi->warp_ref_idx = warp_ref_idx;
mbmi->max_num_warp_candidates =
(mode_index == WARP_DELTA || is_warpmv_warp_causal)
? max_warp_ref_idx
: 0;
assert(valid_num_candidates <= mbmi->max_num_warp_candidates);
mbmi->motion_mode = (MOTION_MODE)mode_index;
if (mbmi->motion_mode != INTERINTRA) {
assert(mbmi->ref_frame[1] != INTRA_FRAME);
}
#else
if (is_interintra_mode) {
// Only use SIMPLE_TRANSLATION for interintra
mbmi->motion_mode = SIMPLE_TRANSLATION;
} else {
mbmi->motion_mode = (MOTION_MODE)mode_index;
assert(mbmi->ref_frame[1] != INTRA_FRAME);
}
#endif // CONFIG_EXTENDED_WARP_PREDICTION
#if CONFIG_EXTENDED_WARP_PREDICTION
if (warpmv_with_mvd_flag && !allow_warpmv_with_mvd_coding(cm, mbmi))
continue;
mbmi->warpmv_with_mvd_flag = warpmv_with_mvd_flag;
// Only WARP_DELTA and WARPED_CAUSAL are supported for WARPMV mode
assert(IMPLIES(mbmi->mode == WARPMV, mbmi->motion_mode == WARP_DELTA ||
is_warpmv_warp_causal));
#endif // CONFIG_EXTENDED_WARP_PREDICTION
// Do not search OBMC if the probability of selecting it is below a
// predetermined threshold for this update_type and block size.
const FRAME_UPDATE_TYPE update_type =
get_frame_update_type(&cpi->gf_group);
const int prune_obmc = cpi->frame_probs.obmc_probs[update_type][bsize] <
cpi->sf.inter_sf.prune_obmc_prob_thresh;
#if CONFIG_EXTENDED_WARP_PREDICTION
bool enable_obmc =
(cm->features.enabled_motion_modes & (1 << OBMC_CAUSAL)) != 0;
#else
bool enable_obmc = cpi->oxcf.motion_mode_cfg.enable_obmc;
#endif // CONFIG_EXTENDED_WARP_PREDICTION
if ((!enable_obmc || cpi->sf.inter_sf.disable_obmc || prune_obmc) &&
mbmi->motion_mode == OBMC_CAUSAL)
continue;
if (is_warp_mode(mbmi->motion_mode)) {
mbmi->interp_fltr = av1_unswitchable_filter(interp_filter);
}
#if CONFIG_EXTENDED_WARP_PREDICTION
if (mbmi->motion_mode == SIMPLE_TRANSLATION) {
#else
if (mbmi->motion_mode == SIMPLE_TRANSLATION && !is_interintra_mode) {
#endif // CONFIG_EXTENDED_WARP_PREDICTION
// SIMPLE_TRANSLATION mode: no need to recalculate.
// The prediction is calculated before motion_mode_rd() is called in
// handle_inter_mode()
#if CONFIG_DERIVED_MVD_SIGN
if (is_mvd_sign_derive_allowed(cm, xd, mbmi)) {
MV mv_diff[2] = { kZeroMv, kZeroMv };
MV ref_mvs[2] = { kZeroMv, kZeroMv };
int num_signaled_mvd = 0;
int start_signaled_mvd_idx = 0;
int num_nonzero_mvd = 0;
int th_for_num_nonzero = get_derive_sign_nzero_th(mbmi);
if (need_mv_adjustment(xd, cm, x, mbmi, bsize, mv_diff, ref_mvs,
mbmi->pb_mv_precision, &num_signaled_mvd,
&start_signaled_mvd_idx, &num_nonzero_mvd)) {
if (!av1_adjust_mvs_for_derive_sign(
cpi, x, bsize, orig_dst, start_signaled_mvd_idx,
num_signaled_mvd, mv_diff, ref_mvs, rate2_nocoeff,
rate_mv0, &tmp_rate_mv))
continue;
tmp_rate2 = rate2_nocoeff - rate_mv0 + tmp_rate_mv;
assert(!need_mv_adjustment(
xd, cm, x, mbmi, bsize, mv_diff, ref_mvs,
mbmi->pb_mv_precision, &num_signaled_mvd,
&start_signaled_mvd_idx, &num_nonzero_mvd));
// Rebuild the predictor with updated MV
av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, orig_dst,
bsize, 0, av1_num_planes(cm) - 1);
} else if (num_nonzero_mvd >= th_for_num_nonzero) {
int last_sign_cost = get_last_sign_cost(
x, enable_adaptive_mvd_resolution(cm, mbmi), mv_diff,
start_signaled_mvd_idx, num_signaled_mvd);
tmp_rate_mv = rate_mv0 - last_sign_cost;
tmp_rate2 = rate2_nocoeff - last_sign_cost;
assert(tmp_rate_mv >= 0);
}
} // if (is_mvd_sign_derive_allowed(cm, xd, mbmi))
#endif  // CONFIG_DERIVED_MVD_SIGN
} else if (mbmi->motion_mode == OBMC_CAUSAL) {
// OBMC_CAUSAL not allowed for compound prediction
assert(!is_comp_pred);
if (this_mode == NEWMV) {
av1_single_motion_search(cpi, x, bsize, 0, &tmp_rate_mv, INT_MAX,
NULL, &mbmi->mv[0]
#if CONFIG_EXTENDED_WARP_PREDICTION
,
NULL
#endif // CONFIG_EXTENDED_WARP_PREDICTION
);
tmp_rate2 = rate2_nocoeff - rate_mv0 + tmp_rate_mv;
}
// Build the inter predictor by blending the predictor
// corresponding to this MV, and the neighboring blocks using the
// OBMC model
av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, orig_dst, bsize,
0, av1_num_planes(cm) - 1);
av1_build_obmc_inter_prediction(
cm, xd, args->above_pred_buf, args->above_pred_stride,
args->left_pred_buf, args->left_pred_stride);
} else if (mbmi->motion_mode == WARPED_CAUSAL) {
int pts[SAMPLES_ARRAY_SIZE], pts_inref[SAMPLES_ARRAY_SIZE];
#if CONFIG_COMPOUND_WARP_CAUSAL
mbmi->wm_params[0].wmtype = DEFAULT_WMTYPE;
mbmi->wm_params[1].wmtype = DEFAULT_WMTYPE;
#else
#if CONFIG_EXTENDED_WARP_PREDICTION
mbmi->wm_params[0].wmtype = DEFAULT_WMTYPE;
#else
mbmi->wm_params.wmtype = DEFAULT_WMTYPE;
#endif // CONFIG_EXTENDED_WARP_PREDICTION
#endif // CONFIG_COMPOUND_WARP_CAUSAL
#if CONFIG_EXTENDED_WARP_PREDICTION
int_mv warp_ref_mv = mbmi->mv[0];
// Build the motion vector of the WARPMV mode
if (mbmi->mode == WARPMV) {
WarpedMotionParams ref_model =
mbmi_ext
->warp_param_stack[av1_ref_frame_type(mbmi->ref_frame)]
[mbmi->warp_ref_idx]
.wm_params;
mbmi->mv[0] = get_mv_from_wrl(xd, &ref_model,
mbmi->warpmv_with_mvd_flag
? mbmi->pb_mv_precision
: MV_PRECISION_ONE_EIGHTH_PEL,
bsize, xd->mi_col, xd->mi_row);
if (!is_warp_candidate_inside_of_frame(cm, xd, mbmi->mv[0]))
continue;
assert(mbmi->pb_mv_precision == mbmi->max_mv_precision);
warp_ref_mv.as_int = mbmi->mv[0].as_int;
          // Search the MVD when mbmi->warpmv_with_mvd_flag is set.
if (mbmi->warpmv_with_mvd_flag) {
if (previous_mvs[mbmi->warp_ref_idx].as_int == INVALID_MV) {
int tmp_trans_ratemv = 0;
av1_single_motion_search(cpi, x, bsize, 0, &tmp_trans_ratemv,
16, NULL, &mbmi->mv[0], &warp_ref_mv);
previous_mvs[mbmi->warp_ref_idx].as_int = mbmi->mv[0].as_int;
} else {
mbmi->mv[0].as_int = previous_mvs[mbmi->warp_ref_idx].as_int;
}
}
}
#endif // CONFIG_EXTENDED_WARP_PREDICTION
#if CONFIG_COMPOUND_WARP_CAUSAL
int l0_invalid = 1, l1_invalid = 1;
mbmi->num_proj_ref[0] = total_samples0;
mbmi->num_proj_ref[1] = total_samples1;
memcpy(pts, pts0, total_samples0 * 2 * sizeof(*pts0));
memcpy(pts_inref, pts_inref0,
total_samples0 * 2 * sizeof(*pts_inref0));
// Select the samples according to motion vector difference
if (mbmi->num_proj_ref[0] > 1) {
mbmi->num_proj_ref[0] =
av1_selectSamples(&mbmi->mv[0].as_mv, pts, pts_inref,
mbmi->num_proj_ref[0], bsize);
}
// Compute the warped motion parameters with a least squares fit
// using the collected samples
mbmi->wm_params[0].invalid = l0_invalid = av1_find_projection(
mbmi->num_proj_ref[0], pts, pts_inref, bsize, mbmi->mv[0].as_mv,
&mbmi->wm_params[0], mi_row, mi_col);
if (has_second_ref(mbmi)) {
memcpy(pts, pts1, total_samples1 * 2 * sizeof(*pts1));
memcpy(pts_inref, pts_inref1,
total_samples1 * 2 * sizeof(*pts_inref1));
// Select the samples according to motion vector difference
if (mbmi->num_proj_ref[1] > 1) {
mbmi->num_proj_ref[1] =
av1_selectSamples(&mbmi->mv[1].as_mv, pts, pts_inref,
mbmi->num_proj_ref[1], bsize);
}
// Compute the warped motion parameters with a least squares fit
// using the collected samples
mbmi->wm_params[1].invalid = l1_invalid = av1_find_projection(
mbmi->num_proj_ref[1], pts, pts_inref, bsize, mbmi->mv[1].as_mv,
&mbmi->wm_params[1], mi_row, mi_col);
}
if (!l0_invalid && (!has_second_ref(mbmi) || !l1_invalid)) {
#else
memcpy(pts, pts0, total_samples * 2 * sizeof(*pts0));
memcpy(pts_inref, pts_inref0, total_samples * 2 * sizeof(*pts_inref0));
// Select the samples according to motion vector difference
if (mbmi->num_proj_ref > 1) {
mbmi->num_proj_ref = av1_selectSamples(
&mbmi->mv[0].as_mv, pts, pts_inref, mbmi->num_proj_ref, bsize);
}
// Compute the warped motion parameters with a least squares fit
// using the collected samples
#if CONFIG_EXTENDED_WARP_PREDICTION
if (!av1_find_projection(mbmi->num_proj_ref, pts, pts_inref, bsize,
mbmi->mv[0].as_mv, &mbmi->wm_params[0], mi_row,
mi_col)) {
#else
if (!av1_find_projection(mbmi->num_proj_ref, pts, pts_inref, bsize,
mbmi->mv[0].as_mv, &mbmi->wm_params, mi_row,
mi_col)) {
#endif // CONFIG_EXTENDED_WARP_PREDICTION
assert(!is_comp_pred);
#endif // CONFIG_COMPOUND_WARP_CAUSAL
#if CONFIG_COMPOUND_WARP_CAUSAL
if ((((this_mode == NEWMV || this_mode == NEW_NEWMV) && !l0_invalid)
#else
if ((this_mode == NEWMV
#endif // CONFIG_COMPOUND_WARP_CAUSAL
&& (mbmi->pb_mv_precision >= MV_PRECISION_ONE_PEL))
#if CONFIG_EXTENDED_WARP_PREDICTION
|| mbmi->warpmv_with_mvd_flag
#endif // CONFIG_EXTENDED_WARP_PREDICTION
) {
// Refine MV for NEWMV mode
const int_mv mv0 =
#if CONFIG_EXTENDED_WARP_PREDICTION
mbmi->mode == WARPMV ? warp_ref_mv :
#endif // CONFIG_EXTENDED_WARP_PREDICTION
mbmi->mv[0];
const int_mv ref_mv =
#if CONFIG_EXTENDED_WARP_PREDICTION
mbmi->warpmv_with_mvd_flag ? warp_ref_mv :
#endif // CONFIG_EXTENDED_WARP_PREDICTION
av1_get_ref_mv(x, 0);
const MvSubpelPrecision pb_mv_precision = mbmi->pb_mv_precision;
SUBPEL_MOTION_SEARCH_PARAMS ms_params;
av1_make_default_subpel_ms_params(&ms_params, cpi, x, bsize,
&ref_mv.as_mv, pb_mv_precision,
NULL);
// Refine MV in a small range.
av1_refine_warped_mv(xd, cm, &ms_params, bsize, pts0, pts_inref0,
#if CONFIG_COMPOUND_WARP_CAUSAL
total_samples0, 0,
#else
total_samples,
#endif // CONFIG_COMPOUND_WARP_CAUSAL
cpi->sf.mv_sf.warp_search_method,
cpi->sf.mv_sf.warp_search_iters);
if (mv0.as_int != mbmi->mv[0].as_int
#if CONFIG_EXTENDED_WARP_PREDICTION
|| mbmi->warpmv_with_mvd_flag
#endif // CONFIG_EXTENDED_WARP_PREDICTION
) {
// Keep the refined MV and WM parameters.
#if CONFIG_COMPOUND_WARP_CAUSAL
if (mbmi->mode == NEW_NEWMV) {
int tmp_rate_mv0 = av1_mv_bit_cost(
&mv0.as_mv, &ref_mv.as_mv, pb_mv_precision, &x->mv_costs,
MV_COST_WEIGHT, ms_params.mv_cost_params.is_adaptive_mvd);
tmp_rate_mv = av1_mv_bit_cost(
&mbmi->mv[0].as_mv, &ref_mv.as_mv, pb_mv_precision,
&x->mv_costs, MV_COST_WEIGHT,
ms_params.mv_cost_params.is_adaptive_mvd);
tmp_rate2 = rate2_nocoeff - tmp_rate_mv0 + tmp_rate_mv;
} else {
tmp_rate_mv = av1_mv_bit_cost(
&mbmi->mv[0].as_mv, &ref_mv.as_mv, pb_mv_precision,
&x->mv_costs, MV_COST_WEIGHT,
ms_params.mv_cost_params.is_adaptive_mvd);
tmp_rate2 = rate2_nocoeff - rate_mv0 + tmp_rate_mv;
}
#else
tmp_rate_mv =
av1_mv_bit_cost(&mbmi->mv[0].as_mv, &ref_mv.as_mv,
pb_mv_precision, &x->mv_costs, MV_COST_WEIGHT,
ms_params.mv_cost_params.is_adaptive_mvd);
tmp_rate2 = rate2_nocoeff - rate_mv0 + tmp_rate_mv;
#if CONFIG_EXTENDED_WARP_PREDICTION
assert(IMPLIES(mbmi->mode == WARPMV, mbmi->warpmv_with_mvd_flag));
#endif // CONFIG_EXTENDED_WARP_PREDICTION
#endif // CONFIG_COMPOUND_WARP_CAUSAL
}
}
#if CONFIG_COMPOUND_WARP_CAUSAL
if (!l1_invalid && this_mode == NEW_NEWMV) {
// Refine MV for NEWMV mode
const int_mv mv1 = mbmi->mv[1];
const int_mv ref_mv = av1_get_ref_mv(x, 1);
const MvSubpelPrecision pb_mv_precision = mbmi->pb_mv_precision;
SUBPEL_MOTION_SEARCH_PARAMS ms_params;
av1_make_default_subpel_ms_params(&ms_params, cpi, x, bsize,
&ref_mv.as_mv,
pb_mv_precision,
NULL);
// Refine MV in a small range.
av1_refine_warped_mv(xd, cm, &ms_params, bsize, pts1, pts_inref1,
total_samples1, 1,
cpi->sf.mv_sf.warp_search_method,