/*
* Copyright (c) 2016, Alliance for Open Media. All rights reserved.
*
* This source code is subject to the terms of the BSD 2 Clause License and
* the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
* was not distributed with this source code in the LICENSE file, you can
* obtain it at www.aomedia.org/license/software. If the Alliance for Open
* Media Patent License 1.0 was not distributed with this source code in the
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
#include <assert.h>
#include <math.h>
#include <stdbool.h>
#include "config/aom_config.h"
#include "config/aom_dsp_rtcd.h"
#include "config/av1_rtcd.h"
#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/blend.h"
#include "aom_mem/aom_mem.h"
#include "aom_ports/aom_timer.h"
#include "aom_ports/mem.h"
#include "av1/common/av1_common_int.h"
#include "av1/common/cfl.h"
#include "av1/common/blockd.h"
#include "av1/common/common.h"
#include "av1/common/common_data.h"
#include "av1/common/entropy.h"
#include "av1/common/entropymode.h"
#include "av1/common/idct.h"
#include "av1/common/mvref_common.h"
#include "av1/common/obmc.h"
#include "av1/common/pred_common.h"
#include "av1/common/quant_common.h"
#include "av1/common/reconinter.h"
#include "av1/common/reconintra.h"
#include "av1/common/scan.h"
#include "av1/common/seg_common.h"
#include "av1/common/txb_common.h"
#include "av1/common/warped_motion.h"
#include "av1/encoder/aq_variance.h"
#include "av1/encoder/av1_quantize.h"
#include "av1/encoder/cost.h"
#include "av1/encoder/compound_type.h"
#include "av1/encoder/encodemb.h"
#include "av1/encoder/encodemv.h"
#include "av1/encoder/encoder.h"
#include "av1/encoder/encodetxb.h"
#include "av1/encoder/hybrid_fwd_txfm.h"
#include "av1/encoder/interp_search.h"
#include "av1/encoder/intra_mode_search.h"
#include "av1/encoder/intra_mode_search_utils.h"
#include "av1/encoder/mcomp.h"
#include "av1/encoder/ml.h"
#include "av1/encoder/mode_prune_model_weights.h"
#include "av1/encoder/model_rd.h"
#include "av1/encoder/motion_search_facade.h"
#include "av1/encoder/palette.h"
#include "av1/encoder/pustats.h"
#include "av1/encoder/random.h"
#include "av1/encoder/ratectrl.h"
#include "av1/encoder/rd.h"
#include "av1/encoder/rdopt.h"
#include "av1/encoder/reconinter_enc.h"
#include "av1/encoder/tokenize.h"
#include "av1/encoder/tpl_model.h"
#include "av1/encoder/tx_search.h"
#include "av1/encoder/var_based_part.h"
#define LAST_NEW_MV_INDEX 6
// Mode_threshold multiplication factor table for
// prune_inter_modes_if_skippable. The values are kept in Q12 format and the
// equation used to derive them is
// (2.5 - ((float)x->qindex / MAXQ) * 1.5)
#define MODE_THRESH_QBITS 12
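// For example, at qindex 0 the factor is 2.5 in Q12 format, i.e.
// 2.5 * 4096 = 10240 (the first table entry), and at qindex MAXQ it is
// 1.0 * 4096 = 4096 (the last entry).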
static const int mode_threshold_mul_factor[QINDEX_RANGE] = {
10240, 10216, 10192, 10168, 10144, 10120, 10095, 10071, 10047, 10023, 9999,
9975, 9951, 9927, 9903, 9879, 9854, 9830, 9806, 9782, 9758, 9734,
9710, 9686, 9662, 9638, 9614, 9589, 9565, 9541, 9517, 9493, 9469,
9445, 9421, 9397, 9373, 9349, 9324, 9300, 9276, 9252, 9228, 9204,
9180, 9156, 9132, 9108, 9083, 9059, 9035, 9011, 8987, 8963, 8939,
8915, 8891, 8867, 8843, 8818, 8794, 8770, 8746, 8722, 8698, 8674,
8650, 8626, 8602, 8578, 8553, 8529, 8505, 8481, 8457, 8433, 8409,
8385, 8361, 8337, 8312, 8288, 8264, 8240, 8216, 8192, 8168, 8144,
8120, 8096, 8072, 8047, 8023, 7999, 7975, 7951, 7927, 7903, 7879,
7855, 7831, 7806, 7782, 7758, 7734, 7710, 7686, 7662, 7638, 7614,
7590, 7566, 7541, 7517, 7493, 7469, 7445, 7421, 7397, 7373, 7349,
7325, 7301, 7276, 7252, 7228, 7204, 7180, 7156, 7132, 7108, 7084,
7060, 7035, 7011, 6987, 6963, 6939, 6915, 6891, 6867, 6843, 6819,
6795, 6770, 6746, 6722, 6698, 6674, 6650, 6626, 6602, 6578, 6554,
6530, 6505, 6481, 6457, 6433, 6409, 6385, 6361, 6337, 6313, 6289,
6264, 6240, 6216, 6192, 6168, 6144, 6120, 6096, 6072, 6048, 6024,
5999, 5975, 5951, 5927, 5903, 5879, 5855, 5831, 5807, 5783, 5758,
5734, 5710, 5686, 5662, 5638, 5614, 5590, 5566, 5542, 5518, 5493,
5469, 5445, 5421, 5397, 5373, 5349, 5325, 5301, 5277, 5253, 5228,
5204, 5180, 5156, 5132, 5108, 5084, 5060, 5036, 5012, 4987, 4963,
4939, 4915, 4891, 4867, 4843, 4819, 4795, 4771, 4747, 4722, 4698,
4674, 4650, 4626, 4602, 4578, 4554, 4530, 4506, 4482, 4457, 4433,
4409, 4385, 4361, 4337, 4313, 4289, 4265, 4241, 4216, 4192, 4168,
4144, 4120, 4096
};
static const THR_MODES av1_default_mode_order[MAX_MODES] = {
THR_NEARESTMV,
THR_NEARESTL2,
THR_NEARESTL3,
THR_NEARESTB,
THR_NEARESTA2,
THR_NEARESTA,
THR_NEARESTG,
THR_NEWMV,
THR_NEWL2,
THR_NEWL3,
THR_NEWB,
THR_NEWA2,
THR_NEWA,
THR_NEWG,
THR_NEARMV,
THR_NEARL2,
THR_NEARL3,
THR_NEARB,
THR_NEARA2,
THR_NEARA,
THR_NEARG,
THR_GLOBALMV,
THR_GLOBALL2,
THR_GLOBALL3,
THR_GLOBALB,
THR_GLOBALA2,
THR_GLOBALA,
THR_GLOBALG,
THR_COMP_NEAREST_NEARESTLA,
THR_COMP_NEAREST_NEARESTL2A,
THR_COMP_NEAREST_NEARESTL3A,
THR_COMP_NEAREST_NEARESTGA,
THR_COMP_NEAREST_NEARESTLB,
THR_COMP_NEAREST_NEARESTL2B,
THR_COMP_NEAREST_NEARESTL3B,
THR_COMP_NEAREST_NEARESTGB,
THR_COMP_NEAREST_NEARESTLA2,
THR_COMP_NEAREST_NEARESTL2A2,
THR_COMP_NEAREST_NEARESTL3A2,
THR_COMP_NEAREST_NEARESTGA2,
THR_COMP_NEAREST_NEARESTLL2,
THR_COMP_NEAREST_NEARESTLL3,
THR_COMP_NEAREST_NEARESTLG,
THR_COMP_NEAREST_NEARESTBA,
THR_COMP_NEAR_NEARLB,
THR_COMP_NEW_NEWLB,
THR_COMP_NEW_NEARESTLB,
THR_COMP_NEAREST_NEWLB,
THR_COMP_NEW_NEARLB,
THR_COMP_NEAR_NEWLB,
THR_COMP_GLOBAL_GLOBALLB,
THR_COMP_NEAR_NEARLA,
THR_COMP_NEW_NEWLA,
THR_COMP_NEW_NEARESTLA,
THR_COMP_NEAREST_NEWLA,
THR_COMP_NEW_NEARLA,
THR_COMP_NEAR_NEWLA,
THR_COMP_GLOBAL_GLOBALLA,
THR_COMP_NEAR_NEARL2A,
THR_COMP_NEW_NEWL2A,
THR_COMP_NEW_NEARESTL2A,
THR_COMP_NEAREST_NEWL2A,
THR_COMP_NEW_NEARL2A,
THR_COMP_NEAR_NEWL2A,
THR_COMP_GLOBAL_GLOBALL2A,
THR_COMP_NEAR_NEARL3A,
THR_COMP_NEW_NEWL3A,
THR_COMP_NEW_NEARESTL3A,
THR_COMP_NEAREST_NEWL3A,
THR_COMP_NEW_NEARL3A,
THR_COMP_NEAR_NEWL3A,
THR_COMP_GLOBAL_GLOBALL3A,
THR_COMP_NEAR_NEARGA,
THR_COMP_NEW_NEWGA,
THR_COMP_NEW_NEARESTGA,
THR_COMP_NEAREST_NEWGA,
THR_COMP_NEW_NEARGA,
THR_COMP_NEAR_NEWGA,
THR_COMP_GLOBAL_GLOBALGA,
THR_COMP_NEAR_NEARL2B,
THR_COMP_NEW_NEWL2B,
THR_COMP_NEW_NEARESTL2B,
THR_COMP_NEAREST_NEWL2B,
THR_COMP_NEW_NEARL2B,
THR_COMP_NEAR_NEWL2B,
THR_COMP_GLOBAL_GLOBALL2B,
THR_COMP_NEAR_NEARL3B,
THR_COMP_NEW_NEWL3B,
THR_COMP_NEW_NEARESTL3B,
THR_COMP_NEAREST_NEWL3B,
THR_COMP_NEW_NEARL3B,
THR_COMP_NEAR_NEWL3B,
THR_COMP_GLOBAL_GLOBALL3B,
THR_COMP_NEAR_NEARGB,
THR_COMP_NEW_NEWGB,
THR_COMP_NEW_NEARESTGB,
THR_COMP_NEAREST_NEWGB,
THR_COMP_NEW_NEARGB,
THR_COMP_NEAR_NEWGB,
THR_COMP_GLOBAL_GLOBALGB,
THR_COMP_NEAR_NEARLA2,
THR_COMP_NEW_NEWLA2,
THR_COMP_NEW_NEARESTLA2,
THR_COMP_NEAREST_NEWLA2,
THR_COMP_NEW_NEARLA2,
THR_COMP_NEAR_NEWLA2,
THR_COMP_GLOBAL_GLOBALLA2,
THR_COMP_NEAR_NEARL2A2,
THR_COMP_NEW_NEWL2A2,
THR_COMP_NEW_NEARESTL2A2,
THR_COMP_NEAREST_NEWL2A2,
THR_COMP_NEW_NEARL2A2,
THR_COMP_NEAR_NEWL2A2,
THR_COMP_GLOBAL_GLOBALL2A2,
THR_COMP_NEAR_NEARL3A2,
THR_COMP_NEW_NEWL3A2,
THR_COMP_NEW_NEARESTL3A2,
THR_COMP_NEAREST_NEWL3A2,
THR_COMP_NEW_NEARL3A2,
THR_COMP_NEAR_NEWL3A2,
THR_COMP_GLOBAL_GLOBALL3A2,
THR_COMP_NEAR_NEARGA2,
THR_COMP_NEW_NEWGA2,
THR_COMP_NEW_NEARESTGA2,
THR_COMP_NEAREST_NEWGA2,
THR_COMP_NEW_NEARGA2,
THR_COMP_NEAR_NEWGA2,
THR_COMP_GLOBAL_GLOBALGA2,
THR_COMP_NEAR_NEARLL2,
THR_COMP_NEW_NEWLL2,
THR_COMP_NEW_NEARESTLL2,
THR_COMP_NEAREST_NEWLL2,
THR_COMP_NEW_NEARLL2,
THR_COMP_NEAR_NEWLL2,
THR_COMP_GLOBAL_GLOBALLL2,
THR_COMP_NEAR_NEARLL3,
THR_COMP_NEW_NEWLL3,
THR_COMP_NEW_NEARESTLL3,
THR_COMP_NEAREST_NEWLL3,
THR_COMP_NEW_NEARLL3,
THR_COMP_NEAR_NEWLL3,
THR_COMP_GLOBAL_GLOBALLL3,
THR_COMP_NEAR_NEARLG,
THR_COMP_NEW_NEWLG,
THR_COMP_NEW_NEARESTLG,
THR_COMP_NEAREST_NEWLG,
THR_COMP_NEW_NEARLG,
THR_COMP_NEAR_NEWLG,
THR_COMP_GLOBAL_GLOBALLG,
THR_COMP_NEAR_NEARBA,
THR_COMP_NEW_NEWBA,
THR_COMP_NEW_NEARESTBA,
THR_COMP_NEAREST_NEWBA,
THR_COMP_NEW_NEARBA,
THR_COMP_NEAR_NEWBA,
THR_COMP_GLOBAL_GLOBALBA,
THR_DC,
THR_PAETH,
THR_SMOOTH,
THR_SMOOTH_V,
THR_SMOOTH_H,
THR_H_PRED,
THR_V_PRED,
THR_D135_PRED,
THR_D203_PRED,
THR_D157_PRED,
THR_D67_PRED,
THR_D113_PRED,
THR_D45_PRED,
};
/*!\cond */
typedef struct SingleInterModeState {
int64_t rd;
MV_REFERENCE_FRAME ref_frame;
int valid;
} SingleInterModeState;
typedef struct InterModeSearchState {
int64_t best_rd;
int64_t best_skip_rd[2];
MB_MODE_INFO best_mbmode;
int best_rate_y;
int best_rate_uv;
int best_mode_skippable;
int best_skip2;
THR_MODES best_mode_index;
int num_available_refs;
int64_t dist_refs[REF_FRAMES];
int dist_order_refs[REF_FRAMES];
int64_t mode_threshold[MAX_MODES];
int64_t best_intra_rd;
unsigned int best_pred_sse;
/*!
* \brief Keep track of best intra rd for use in compound mode.
*/
int64_t best_pred_rd[REFERENCE_MODES];
// Save a set of single_newmv for each checked ref_mv.
int_mv single_newmv[MAX_REF_MV_SEARCH][REF_FRAMES];
int single_newmv_rate[MAX_REF_MV_SEARCH][REF_FRAMES];
int single_newmv_valid[MAX_REF_MV_SEARCH][REF_FRAMES];
int64_t modelled_rd[MB_MODE_COUNT][MAX_REF_MV_SEARCH][REF_FRAMES];
// The rd of simple translation in single inter modes
int64_t simple_rd[MB_MODE_COUNT][MAX_REF_MV_SEARCH][REF_FRAMES];
int64_t best_single_rd[REF_FRAMES];
PREDICTION_MODE best_single_mode[REF_FRAMES];
// Single search results by [directions][modes][reference frames]
SingleInterModeState single_state[2][SINGLE_INTER_MODE_NUM][FWD_REFS];
int single_state_cnt[2][SINGLE_INTER_MODE_NUM];
SingleInterModeState single_state_modelled[2][SINGLE_INTER_MODE_NUM]
[FWD_REFS];
int single_state_modelled_cnt[2][SINGLE_INTER_MODE_NUM];
MV_REFERENCE_FRAME single_rd_order[2][SINGLE_INTER_MODE_NUM][FWD_REFS];
IntraModeSearchState intra_search_state;
RD_STATS best_y_rdcost;
} InterModeSearchState;
/*!\endcond */
void av1_inter_mode_data_init(TileDataEnc *tile_data) {
for (int i = 0; i < BLOCK_SIZES_ALL; ++i) {
InterModeRdModel *md = &tile_data->inter_mode_rd_models[i];
md->ready = 0;
md->num = 0;
md->dist_sum = 0;
md->ld_sum = 0;
md->sse_sum = 0;
md->sse_sse_sum = 0;
md->sse_ld_sum = 0;
}
}
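// Estimate the rate and distortion of a block from its SSE using a linear
// model ld = a * sse + b fitted per block size in av1_inter_mode_data_fit(),
// where ld is the (sse - dist) / rate slope observed on previously coded
// blocks.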
static int get_est_rate_dist(const TileDataEnc *tile_data, BLOCK_SIZE bsize,
int64_t sse, int *est_residue_cost,
int64_t *est_dist) {
const InterModeRdModel *md = &tile_data->inter_mode_rd_models[bsize];
if (md->ready) {
if (sse < md->dist_mean) {
*est_residue_cost = 0;
*est_dist = sse;
} else {
*est_dist = (int64_t)round(md->dist_mean);
const double est_ld = md->a * sse + md->b;
// Clamp the estimated rate cost to INT_MAX / 2.
// TODO(angiebird@google.com): find a better solution than clamping.
if (fabs(est_ld) < 1e-2) {
*est_residue_cost = INT_MAX / 2;
} else {
double est_residue_cost_dbl = ((sse - md->dist_mean) / est_ld);
if (est_residue_cost_dbl < 0) {
*est_residue_cost = 0;
} else {
*est_residue_cost =
(int)AOMMIN((int64_t)round(est_residue_cost_dbl), INT_MAX / 2);
}
}
if (*est_residue_cost <= 0) {
*est_residue_cost = 0;
*est_dist = sse;
}
}
return 1;
}
return 0;
}
void av1_inter_mode_data_fit(TileDataEnc *tile_data, int rdmult) {
for (int bsize = 0; bsize < BLOCK_SIZES_ALL; ++bsize) {
const int block_idx = inter_mode_data_block_idx(bsize);
InterModeRdModel *md = &tile_data->inter_mode_rd_models[bsize];
if (block_idx == -1) continue;
if ((md->ready == 0 && md->num < 200) || (md->ready == 1 && md->num < 64)) {
continue;
} else {
if (md->ready == 0) {
md->dist_mean = md->dist_sum / md->num;
md->ld_mean = md->ld_sum / md->num;
md->sse_mean = md->sse_sum / md->num;
md->sse_sse_mean = md->sse_sse_sum / md->num;
md->sse_ld_mean = md->sse_ld_sum / md->num;
} else {
const double factor = 3;
md->dist_mean =
(md->dist_mean * factor + (md->dist_sum / md->num)) / (factor + 1);
md->ld_mean =
(md->ld_mean * factor + (md->ld_sum / md->num)) / (factor + 1);
md->sse_mean =
(md->sse_mean * factor + (md->sse_sum / md->num)) / (factor + 1);
md->sse_sse_mean =
(md->sse_sse_mean * factor + (md->sse_sse_sum / md->num)) /
(factor + 1);
md->sse_ld_mean =
(md->sse_ld_mean * factor + (md->sse_ld_sum / md->num)) /
(factor + 1);
}
const double my = md->ld_mean;
const double mx = md->sse_mean;
const double dx = sqrt(md->sse_sse_mean);
const double dxy = md->sse_ld_mean;
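// Ordinary least-squares fit of ld against sse over the accumulated samples:
// slope a = (E[sse * ld] - E[sse] * E[ld]) / (E[sse^2] - E[sse]^2) and
// intercept b = E[ld] - a * E[sse].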
md->a = (dxy - mx * my) / (dx * dx - mx * mx);
md->b = my - md->a * mx;
md->ready = 1;
md->num = 0;
md->dist_sum = 0;
md->ld_sum = 0;
md->sse_sum = 0;
md->sse_sse_sum = 0;
md->sse_ld_sum = 0;
}
(void)rdmult;
}
}
static inline void inter_mode_data_push(TileDataEnc *tile_data,
BLOCK_SIZE bsize, int64_t sse,
int64_t dist, int residue_cost) {
if (residue_cost == 0 || sse == dist) return;
const int block_idx = inter_mode_data_block_idx(bsize);
if (block_idx == -1) return;
InterModeRdModel *rd_model = &tile_data->inter_mode_rd_models[bsize];
if (rd_model->num < INTER_MODE_RD_DATA_OVERALL_SIZE) {
const double ld = (sse - dist) * 1. / residue_cost;
++rd_model->num;
rd_model->dist_sum += dist;
rd_model->ld_sum += ld;
rd_model->sse_sum += sse;
rd_model->sse_sse_sum += (double)sse * (double)sse;
rd_model->sse_ld_sum += sse * ld;
}
}
static inline void inter_modes_info_push(InterModesInfo *inter_modes_info,
int mode_rate, int64_t sse, int64_t rd,
RD_STATS *rd_cost, RD_STATS *rd_cost_y,
RD_STATS *rd_cost_uv,
const MB_MODE_INFO *mbmi) {
const int num = inter_modes_info->num;
assert(num < MAX_INTER_MODES);
inter_modes_info->mbmi_arr[num] = *mbmi;
inter_modes_info->mode_rate_arr[num] = mode_rate;
inter_modes_info->sse_arr[num] = sse;
inter_modes_info->est_rd_arr[num] = rd;
inter_modes_info->rd_cost_arr[num] = *rd_cost;
inter_modes_info->rd_cost_y_arr[num] = *rd_cost_y;
inter_modes_info->rd_cost_uv_arr[num] = *rd_cost_uv;
++inter_modes_info->num;
}
static int compare_rd_idx_pair(const void *a, const void *b) {
if (((const RdIdxPair *)a)->rd == ((const RdIdxPair *)b)->rd) {
// To avoid inconsistency in qsort() ordering when two elements are equal,
// use idx as the tie-breaker. Refer to aomedia:2928
if (((const RdIdxPair *)a)->idx == ((const RdIdxPair *)b)->idx)
return 0;
else if (((const RdIdxPair *)a)->idx > ((const RdIdxPair *)b)->idx)
return 1;
else
return -1;
} else if (((const RdIdxPair *)a)->rd > ((const RdIdxPair *)b)->rd) {
return 1;
} else {
return -1;
}
}
static inline void inter_modes_info_sort(const InterModesInfo *inter_modes_info,
RdIdxPair *rd_idx_pair_arr) {
if (inter_modes_info->num == 0) {
return;
}
for (int i = 0; i < inter_modes_info->num; ++i) {
rd_idx_pair_arr[i].idx = i;
rd_idx_pair_arr[i].rd = inter_modes_info->est_rd_arr[i];
}
qsort(rd_idx_pair_arr, inter_modes_info->num, sizeof(rd_idx_pair_arr[0]),
compare_rd_idx_pair);
}
// Similar to get_horver_correlation, but also takes the first row/column into
// account when computing horizontal/vertical correlation.
void av1_get_horver_correlation_full_c(const int16_t *diff, int stride,
int width, int height, float *hcorr,
float *vcorr) {
// The following notation is used:
// x - current pixel
// y - left neighbor pixel
// z - top neighbor pixel
int64_t x_sum = 0, x2_sum = 0, xy_sum = 0, xz_sum = 0;
int64_t x_firstrow = 0, x_finalrow = 0, x_firstcol = 0, x_finalcol = 0;
int64_t x2_firstrow = 0, x2_finalrow = 0, x2_firstcol = 0, x2_finalcol = 0;
// First, process horizontal correlation on just the first row
x_sum += diff[0];
x2_sum += diff[0] * diff[0];
x_firstrow += diff[0];
x2_firstrow += diff[0] * diff[0];
for (int j = 1; j < width; ++j) {
const int16_t x = diff[j];
const int16_t y = diff[j - 1];
x_sum += x;
x_firstrow += x;
x2_sum += x * x;
x2_firstrow += x * x;
xy_sum += x * y;
}
// Process vertical correlation in the first column
x_firstcol += diff[0];
x2_firstcol += diff[0] * diff[0];
for (int i = 1; i < height; ++i) {
const int16_t x = diff[i * stride];
const int16_t z = diff[(i - 1) * stride];
x_sum += x;
x_firstcol += x;
x2_sum += x * x;
x2_firstcol += x * x;
xz_sum += x * z;
}
// Now process horizontal and vertical correlation through the rest of the
// block
for (int i = 1; i < height; ++i) {
for (int j = 1; j < width; ++j) {
const int16_t x = diff[i * stride + j];
const int16_t y = diff[i * stride + j - 1];
const int16_t z = diff[(i - 1) * stride + j];
x_sum += x;
x2_sum += x * x;
xy_sum += x * y;
xz_sum += x * z;
}
}
for (int j = 0; j < width; ++j) {
x_finalrow += diff[(height - 1) * stride + j];
x2_finalrow +=
diff[(height - 1) * stride + j] * diff[(height - 1) * stride + j];
}
for (int i = 0; i < height; ++i) {
x_finalcol += diff[i * stride + width - 1];
x2_finalcol += diff[i * stride + width - 1] * diff[i * stride + width - 1];
}
int64_t xhor_sum = x_sum - x_finalcol;
int64_t xver_sum = x_sum - x_finalrow;
int64_t y_sum = x_sum - x_firstcol;
int64_t z_sum = x_sum - x_firstrow;
int64_t x2hor_sum = x2_sum - x2_finalcol;
int64_t x2ver_sum = x2_sum - x2_finalrow;
int64_t y2_sum = x2_sum - x2_firstcol;
int64_t z2_sum = x2_sum - x2_firstrow;
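// Restrict the marginal sums to the pixels that actually participate in
// horizontal pairs (all but one column) and in vertical pairs (all but one
// row) before forming the correlation terms below.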
const float num_hor = (float)(height * (width - 1));
const float num_ver = (float)((height - 1) * width);
const float xhor_var_n = x2hor_sum - (xhor_sum * xhor_sum) / num_hor;
const float xver_var_n = x2ver_sum - (xver_sum * xver_sum) / num_ver;
const float y_var_n = y2_sum - (y_sum * y_sum) / num_hor;
const float z_var_n = z2_sum - (z_sum * z_sum) / num_ver;
const float xy_var_n = xy_sum - (xhor_sum * y_sum) / num_hor;
const float xz_var_n = xz_sum - (xver_sum * z_sum) / num_ver;
if (xhor_var_n > 0 && y_var_n > 0) {
*hcorr = xy_var_n / sqrtf(xhor_var_n * y_var_n);
*hcorr = *hcorr < 0 ? 0 : *hcorr;
} else {
*hcorr = 1.0;
}
if (xver_var_n > 0 && z_var_n > 0) {
*vcorr = xz_var_n / sqrtf(xver_var_n * z_var_n);
*vcorr = *vcorr < 0 ? 0 : *vcorr;
} else {
*vcorr = 1.0;
}
}
static int64_t get_sse(const AV1_COMP *cpi, const MACROBLOCK *x,
int64_t *sse_y) {
const AV1_COMMON *cm = &cpi->common;
const int num_planes = av1_num_planes(cm);
const MACROBLOCKD *xd = &x->e_mbd;
const MB_MODE_INFO *mbmi = xd->mi[0];
int64_t total_sse = 0;
for (int plane = 0; plane < num_planes; ++plane) {
if (plane && !xd->is_chroma_ref) break;
const struct macroblock_plane *const p = &x->plane[plane];
const struct macroblockd_plane *const pd = &xd->plane[plane];
const BLOCK_SIZE bs =
get_plane_block_size(mbmi->bsize, pd->subsampling_x, pd->subsampling_y);
unsigned int sse;
cpi->ppi->fn_ptr[bs].vf(p->src.buf, p->src.stride, pd->dst.buf,
pd->dst.stride, &sse);
total_sse += sse;
if (!plane && sse_y) *sse_y = sse;
}
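// Scale the SSE by 16 to match the distortion precision used with RDCOST.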
total_sse <<= 4;
return total_sse;
}
int64_t av1_block_error_c(const tran_low_t *coeff, const tran_low_t *dqcoeff,
intptr_t block_size, int64_t *ssz) {
int i;
int64_t error = 0, sqcoeff = 0;
for (i = 0; i < block_size; i++) {
const int diff = coeff[i] - dqcoeff[i];
error += diff * diff;
sqcoeff += coeff[i] * coeff[i];
}
*ssz = sqcoeff;
return error;
}
int64_t av1_block_error_lp_c(const int16_t *coeff, const int16_t *dqcoeff,
intptr_t block_size) {
int64_t error = 0;
for (int i = 0; i < block_size; i++) {
const int diff = coeff[i] - dqcoeff[i];
error += diff * diff;
}
return error;
}
#if CONFIG_AV1_HIGHBITDEPTH
int64_t av1_highbd_block_error_c(const tran_low_t *coeff,
const tran_low_t *dqcoeff, intptr_t block_size,
int64_t *ssz, int bd) {
int i;
int64_t error = 0, sqcoeff = 0;
int shift = 2 * (bd - 8);
int rounding = shift > 0 ? 1 << (shift - 1) : 0;
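// Normalize the error back to the 8-bit scale: coefficients at bitdepth bd
// carry 2 * (bd - 8) extra bits of energy.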
for (i = 0; i < block_size; i++) {
const int64_t diff = coeff[i] - dqcoeff[i];
error += diff * diff;
sqcoeff += (int64_t)coeff[i] * (int64_t)coeff[i];
}
assert(error >= 0 && sqcoeff >= 0);
error = (error + rounding) >> shift;
sqcoeff = (sqcoeff + rounding) >> shift;
*ssz = sqcoeff;
return error;
}
#endif
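// Prune a fine-grained directional intra mode when neither of the two coarse
// directional modes adjacent to it was selected as the best intra mode so
// far.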
static int conditional_skipintra(PREDICTION_MODE mode,
PREDICTION_MODE best_intra_mode) {
if (mode == D113_PRED && best_intra_mode != V_PRED &&
best_intra_mode != D135_PRED)
return 1;
if (mode == D67_PRED && best_intra_mode != V_PRED &&
best_intra_mode != D45_PRED)
return 1;
if (mode == D203_PRED && best_intra_mode != H_PRED &&
best_intra_mode != D45_PRED)
return 1;
if (mode == D157_PRED && best_intra_mode != H_PRED &&
best_intra_mode != D135_PRED)
return 1;
return 0;
}
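// Return the cost of signaling this_mode. Single-reference inter modes are
// coded as a sequence of binary decisions, each with its own context: NEWMV
// vs. not, then GLOBALMV vs. not, then NEARESTMV vs. NEARMV.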
static int cost_mv_ref(const ModeCosts *const mode_costs, PREDICTION_MODE mode,
int16_t mode_context) {
if (is_inter_compound_mode(mode)) {
return mode_costs
->inter_compound_mode_cost[mode_context][INTER_COMPOUND_OFFSET(mode)];
}
int mode_cost = 0;
int16_t mode_ctx = mode_context & NEWMV_CTX_MASK;
assert(is_inter_mode(mode));
if (mode == NEWMV) {
mode_cost = mode_costs->newmv_mode_cost[mode_ctx][0];
return mode_cost;
} else {
mode_cost = mode_costs->newmv_mode_cost[mode_ctx][1];
mode_ctx = (mode_context >> GLOBALMV_OFFSET) & GLOBALMV_CTX_MASK;
if (mode == GLOBALMV) {
mode_cost += mode_costs->zeromv_mode_cost[mode_ctx][0];
return mode_cost;
} else {
mode_cost += mode_costs->zeromv_mode_cost[mode_ctx][1];
mode_ctx = (mode_context >> REFMV_OFFSET) & REFMV_CTX_MASK;
mode_cost += mode_costs->refmv_mode_cost[mode_ctx][mode != NEARESTMV];
return mode_cost;
}
}
}
static inline PREDICTION_MODE get_single_mode(PREDICTION_MODE this_mode,
int ref_idx) {
return ref_idx ? compound_ref1_mode(this_mode)
: compound_ref0_mode(this_mode);
}
static inline void estimate_ref_frame_costs(
const AV1_COMMON *cm, const MACROBLOCKD *xd, const ModeCosts *mode_costs,
int segment_id, unsigned int *ref_costs_single,
unsigned int (*ref_costs_comp)[REF_FRAMES]) {
int seg_ref_active =
segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME);
if (seg_ref_active) {
memset(ref_costs_single, 0, REF_FRAMES * sizeof(*ref_costs_single));
int ref_frame;
for (ref_frame = 0; ref_frame < REF_FRAMES; ++ref_frame)
memset(ref_costs_comp[ref_frame], 0,
REF_FRAMES * sizeof((*ref_costs_comp)[0]));
} else {
int intra_inter_ctx = av1_get_intra_inter_context(xd);
ref_costs_single[INTRA_FRAME] =
mode_costs->intra_inter_cost[intra_inter_ctx][0];
unsigned int base_cost = mode_costs->intra_inter_cost[intra_inter_ctx][1];
for (int i = LAST_FRAME; i <= ALTREF_FRAME; ++i)
ref_costs_single[i] = base_cost;
const int ctx_p1 = av1_get_pred_context_single_ref_p1(xd);
const int ctx_p2 = av1_get_pred_context_single_ref_p2(xd);
const int ctx_p3 = av1_get_pred_context_single_ref_p3(xd);
const int ctx_p4 = av1_get_pred_context_single_ref_p4(xd);
const int ctx_p5 = av1_get_pred_context_single_ref_p5(xd);
const int ctx_p6 = av1_get_pred_context_single_ref_p6(xd);
// Determine cost of a single ref frame, where frame types are represented
// by a tree:
// Level 0: add cost whether this ref is a forward or backward ref
ref_costs_single[LAST_FRAME] += mode_costs->single_ref_cost[ctx_p1][0][0];
ref_costs_single[LAST2_FRAME] += mode_costs->single_ref_cost[ctx_p1][0][0];
ref_costs_single[LAST3_FRAME] += mode_costs->single_ref_cost[ctx_p1][0][0];
ref_costs_single[GOLDEN_FRAME] += mode_costs->single_ref_cost[ctx_p1][0][0];
ref_costs_single[BWDREF_FRAME] += mode_costs->single_ref_cost[ctx_p1][0][1];
ref_costs_single[ALTREF2_FRAME] +=
mode_costs->single_ref_cost[ctx_p1][0][1];
ref_costs_single[ALTREF_FRAME] += mode_costs->single_ref_cost[ctx_p1][0][1];
// Level 1: if this ref is forward ref,
// add cost whether it is last/last2 or last3/golden
ref_costs_single[LAST_FRAME] += mode_costs->single_ref_cost[ctx_p3][2][0];
ref_costs_single[LAST2_FRAME] += mode_costs->single_ref_cost[ctx_p3][2][0];
ref_costs_single[LAST3_FRAME] += mode_costs->single_ref_cost[ctx_p3][2][1];
ref_costs_single[GOLDEN_FRAME] += mode_costs->single_ref_cost[ctx_p3][2][1];
// Level 1: if this ref is backward ref
// then add cost whether this ref is altref or backward ref
ref_costs_single[BWDREF_FRAME] += mode_costs->single_ref_cost[ctx_p2][1][0];
ref_costs_single[ALTREF2_FRAME] +=
mode_costs->single_ref_cost[ctx_p2][1][0];
ref_costs_single[ALTREF_FRAME] += mode_costs->single_ref_cost[ctx_p2][1][1];
// Level 2: further add cost whether this ref is last or last2
ref_costs_single[LAST_FRAME] += mode_costs->single_ref_cost[ctx_p4][3][0];
ref_costs_single[LAST2_FRAME] += mode_costs->single_ref_cost[ctx_p4][3][1];
// Level 2: last3 or golden
ref_costs_single[LAST3_FRAME] += mode_costs->single_ref_cost[ctx_p5][4][0];
ref_costs_single[GOLDEN_FRAME] += mode_costs->single_ref_cost[ctx_p5][4][1];
// Level 2: bwdref or altref2
ref_costs_single[BWDREF_FRAME] += mode_costs->single_ref_cost[ctx_p6][5][0];
ref_costs_single[ALTREF2_FRAME] +=
mode_costs->single_ref_cost[ctx_p6][5][1];
if (cm->current_frame.reference_mode != SINGLE_REFERENCE) {
// Similar to single ref, determine cost of compound ref frames.
// cost_compound_refs = cost_first_ref + cost_second_ref
const int bwdref_comp_ctx_p = av1_get_pred_context_comp_bwdref_p(xd);
const int bwdref_comp_ctx_p1 = av1_get_pred_context_comp_bwdref_p1(xd);
const int ref_comp_ctx_p = av1_get_pred_context_comp_ref_p(xd);
const int ref_comp_ctx_p1 = av1_get_pred_context_comp_ref_p1(xd);
const int ref_comp_ctx_p2 = av1_get_pred_context_comp_ref_p2(xd);
const int comp_ref_type_ctx = av1_get_comp_reference_type_context(xd);
unsigned int ref_bicomp_costs[REF_FRAMES] = { 0 };
ref_bicomp_costs[LAST_FRAME] = ref_bicomp_costs[LAST2_FRAME] =
ref_bicomp_costs[LAST3_FRAME] = ref_bicomp_costs[GOLDEN_FRAME] =
base_cost + mode_costs->comp_ref_type_cost[comp_ref_type_ctx][1];
ref_bicomp_costs[BWDREF_FRAME] = ref_bicomp_costs[ALTREF2_FRAME] = 0;
ref_bicomp_costs[ALTREF_FRAME] = 0;
// cost of first ref frame
ref_bicomp_costs[LAST_FRAME] +=
mode_costs->comp_ref_cost[ref_comp_ctx_p][0][0];
ref_bicomp_costs[LAST2_FRAME] +=
mode_costs->comp_ref_cost[ref_comp_ctx_p][0][0];
ref_bicomp_costs[LAST3_FRAME] +=
mode_costs->comp_ref_cost[ref_comp_ctx_p][0][1];
ref_bicomp_costs[GOLDEN_FRAME] +=
mode_costs->comp_ref_cost[ref_comp_ctx_p][0][1];
ref_bicomp_costs[LAST_FRAME] +=
mode_costs->comp_ref_cost[ref_comp_ctx_p1][1][0];
ref_bicomp_costs[LAST2_FRAME] +=
mode_costs->comp_ref_cost[ref_comp_ctx_p1][1][1];
ref_bicomp_costs[LAST3_FRAME] +=
mode_costs->comp_ref_cost[ref_comp_ctx_p2][2][0];
ref_bicomp_costs[GOLDEN_FRAME] +=
mode_costs->comp_ref_cost[ref_comp_ctx_p2][2][1];
// cost of second ref frame
ref_bicomp_costs[BWDREF_FRAME] +=
mode_costs->comp_bwdref_cost[bwdref_comp_ctx_p][0][0];
ref_bicomp_costs[ALTREF2_FRAME] +=
mode_costs->comp_bwdref_cost[bwdref_comp_ctx_p][0][0];
ref_bicomp_costs[ALTREF_FRAME] +=
mode_costs->comp_bwdref_cost[bwdref_comp_ctx_p][0][1];
ref_bicomp_costs[BWDREF_FRAME] +=
mode_costs->comp_bwdref_cost[bwdref_comp_ctx_p1][1][0];
ref_bicomp_costs[ALTREF2_FRAME] +=
mode_costs->comp_bwdref_cost[bwdref_comp_ctx_p1][1][1];
// cost: if one ref frame is forward ref, the other ref is backward ref
int ref0, ref1;
for (ref0 = LAST_FRAME; ref0 <= GOLDEN_FRAME; ++ref0) {
for (ref1 = BWDREF_FRAME; ref1 <= ALTREF_FRAME; ++ref1) {
ref_costs_comp[ref0][ref1] =
ref_bicomp_costs[ref0] + ref_bicomp_costs[ref1];
}
}
// cost: if both ref frames are the same side.
const int uni_comp_ref_ctx_p = av1_get_pred_context_uni_comp_ref_p(xd);
const int uni_comp_ref_ctx_p1 = av1_get_pred_context_uni_comp_ref_p1(xd);
const int uni_comp_ref_ctx_p2 = av1_get_pred_context_uni_comp_ref_p2(xd);
ref_costs_comp[LAST_FRAME][LAST2_FRAME] =
base_cost + mode_costs->comp_ref_type_cost[comp_ref_type_ctx][0] +
mode_costs->uni_comp_ref_cost[uni_comp_ref_ctx_p][0][0] +
mode_costs->uni_comp_ref_cost[uni_comp_ref_ctx_p1][1][0];
ref_costs_comp[LAST_FRAME][LAST3_FRAME] =
base_cost + mode_costs->comp_ref_type_cost[comp_ref_type_ctx][0] +
mode_costs->uni_comp_ref_cost[uni_comp_ref_ctx_p][0][0] +
mode_costs->uni_comp_ref_cost[uni_comp_ref_ctx_p1][1][1] +
mode_costs->uni_comp_ref_cost[uni_comp_ref_ctx_p2][2][0];
ref_costs_comp[LAST_FRAME][GOLDEN_FRAME] =
base_cost + mode_costs->comp_ref_type_cost[comp_ref_type_ctx][0] +
mode_costs->uni_comp_ref_cost[uni_comp_ref_ctx_p][0][0] +
mode_costs->uni_comp_ref_cost[uni_comp_ref_ctx_p1][1][1] +
mode_costs->uni_comp_ref_cost[uni_comp_ref_ctx_p2][2][1];
ref_costs_comp[BWDREF_FRAME][ALTREF_FRAME] =
base_cost + mode_costs->comp_ref_type_cost[comp_ref_type_ctx][0] +
mode_costs->uni_comp_ref_cost[uni_comp_ref_ctx_p][0][1];
} else {
int ref0, ref1;
for (ref0 = LAST_FRAME; ref0 <= GOLDEN_FRAME; ++ref0) {
for (ref1 = BWDREF_FRAME; ref1 <= ALTREF_FRAME; ++ref1)
ref_costs_comp[ref0][ref1] = 512;
}
ref_costs_comp[LAST_FRAME][LAST2_FRAME] = 512;
ref_costs_comp[LAST_FRAME][LAST3_FRAME] = 512;
ref_costs_comp[LAST_FRAME][GOLDEN_FRAME] = 512;
ref_costs_comp[BWDREF_FRAME][ALTREF_FRAME] = 512;
}
}
}
static inline void store_coding_context(
#if CONFIG_INTERNAL_STATS
MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, int mode_index,
#else
MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
#endif // CONFIG_INTERNAL_STATS
int skippable) {
MACROBLOCKD *const xd = &x->e_mbd;
// Take a snapshot of the coding context so it can be
// restored if we decide to encode this way
ctx->rd_stats.skip_txfm = x->txfm_search_info.skip_txfm;
ctx->skippable = skippable;
#if CONFIG_INTERNAL_STATS
ctx->best_mode_index = mode_index;
#endif // CONFIG_INTERNAL_STATS
ctx->mic = *xd->mi[0];
av1_copy_mbmi_ext_to_mbmi_ext_frame(&ctx->mbmi_ext_best, &x->mbmi_ext,
av1_ref_frame_type(xd->mi[0]->ref_frame));
}
static inline void setup_buffer_ref_mvs_inter(
const AV1_COMP *const cpi, MACROBLOCK *x, MV_REFERENCE_FRAME ref_frame,
BLOCK_SIZE block_size, struct buf_2d yv12_mb[REF_FRAMES][MAX_MB_PLANE]) {
const AV1_COMMON *cm = &cpi->common;
const int num_planes = av1_num_planes(cm);
const YV12_BUFFER_CONFIG *scaled_ref_frame =
av1_get_scaled_ref_frame(cpi, ref_frame);
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = xd->mi[0];
MB_MODE_INFO_EXT *const mbmi_ext = &x->mbmi_ext;
const struct scale_factors *const sf =
get_ref_scale_factors_const(cm, ref_frame);
const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_yv12_buf(cm, ref_frame);
assert(yv12 != NULL);
if (scaled_ref_frame) {
// Set up the pred block based on the scaled reference, because av1_mv_pred()
// doesn't support scaling.
av1_setup_pred_block(xd, yv12_mb[ref_frame], scaled_ref_frame, NULL, NULL,
num_planes);
} else {
av1_setup_pred_block(xd, yv12_mb[ref_frame], yv12, sf, sf, num_planes);
}
// Gets an initial list of candidate vectors from neighbours and orders them
av1_find_mv_refs(cm, xd, mbmi, ref_frame, mbmi_ext->ref_mv_count,
xd->ref_mv_stack, xd->weight, NULL, mbmi_ext->global_mvs,
mbmi_ext->mode_context);
// TODO(Ravi): Populate mbmi_ext->ref_mv_stack[ref_frame][4] and
// mbmi_ext->weight[ref_frame][4] inside av1_find_mv_refs.
av1_copy_usable_ref_mv_stack_and_weight(xd, mbmi_ext, ref_frame);
// Further refinement that is encode side only to test the top few candidates
// in full and choose the best as the center point for subsequent searches.
// The current implementation doesn't support scaling.
av1_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12_mb[ref_frame][0].stride,
ref_frame, block_size);
// Go back to unscaled reference.
if (scaled_ref_frame) {
// We had temporarily set up the pred block based on the scaled reference
// above. Go back to the unscaled reference now, for subsequent use.
av1_setup_pred_block(xd, yv12_mb[ref_frame], yv12, sf, sf, num_planes);
}
}
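// MV clamping margins, in 1/8-pel units (hence the << 3). MVs may point up to
// AOM_BORDER_IN_PIXELS - AOM_INTERP_EXTEND pixels outside the frame, which
// leaves enough border for the interpolation filter taps.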
#define LEFT_TOP_MARGIN ((AOM_BORDER_IN_PIXELS - AOM_INTERP_EXTEND) << 3)
#define RIGHT_BOTTOM_MARGIN ((AOM_BORDER_IN_PIXELS - AOM_INTERP_EXTEND) << 3)
// TODO(jingning): this mv clamping function should be block size dependent.
static inline void clamp_mv2(MV *mv, const MACROBLOCKD *xd) {
const SubpelMvLimits mv_limits = { xd->mb_to_left_edge - LEFT_TOP_MARGIN,
xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN,
xd->mb_to_top_edge - LEFT_TOP_MARGIN,
xd->mb_to_bottom_edge +
RIGHT_BOTTOM_MARGIN };
clamp_mv(mv, &mv_limits);
}
/* If the current mode shares the same mv as a previously searched mode with
 * lower cost, skip the current mode. */
static int skip_repeated_mv(const AV1_COMMON *const cm,
const MACROBLOCK *const x,
PREDICTION_MODE this_mode,
const MV_REFERENCE_FRAME ref_frames[2],
InterModeSearchState *search_state) {
const int is_comp_pred = ref_frames[1] > INTRA_FRAME;
const uint8_t ref_frame_type = av1_ref_frame_type(ref_frames);
const MB_MODE_INFO_EXT *const mbmi_ext = &x->mbmi_ext;
const int ref_mv_count = mbmi_ext->ref_mv_count[ref_frame_type];
PREDICTION_MODE compare_mode = MB_MODE_COUNT;
if (!is_comp_pred) {
if (this_mode == NEARMV) {
if (ref_mv_count == 0) {
// NEARMV has the same motion vector as NEARESTMV
compare_mode = NEARESTMV;
}
if (ref_mv_count == 1 &&
cm->global_motion[ref_frames[0]].wmtype <= TRANSLATION) {
// NEARMV has the same motion vector as GLOBALMV
compare_mode = GLOBALMV;
}
}
if (this_mode == GLOBALMV) {
if (ref_mv_count == 0 &&
cm->global_motion[ref_frames[0]].wmtype <= TRANSLATION) {
// GLOBALMV has the same motion vector as NEARESTMV
compare_mode = NEARESTMV;
}
if (ref_mv_count == 1) {
// GLOBALMV has the same motion vector as NEARMV
compare_mode = NEARMV;
}
}
if (compare_mode != MB_MODE_COUNT) {
// Use modelled_rd to check whether compare mode was searched
if (search_state->modelled_rd[compare_mode][0][ref_frames[0]] !=
INT64_MAX) {
const int16_t mode_ctx =
av1_mode_context_analyzer(mbmi_ext->mode_context, ref_frames);
const int compare_cost =
cost_mv_ref(&x->mode_costs, compare_mode, mode_ctx);
const int this_cost = cost_mv_ref(&x->mode_costs, this_mode, mode_ctx);
// Only skip if the mode cost is larger than compare mode cost
if (this_cost > compare_cost) {
search_state->modelled_rd[this_mode][0][ref_frames[0]] =
search_state->modelled_rd[compare_mode][0][ref_frames[0]];
return 1;
}
}
}
}
return 0;
}
static inline int clamp_and_check_mv(int_mv *out_mv, int_mv in_mv,
const AV1_COMMON *cm,
const MACROBLOCK *x) {
const MACROBLOCKD *const xd = &x->e_mbd;
*out_mv = in_mv;
lower_mv_precision(&out_mv->as_mv, cm->features.allow_high_precision_mv,
cm->features.cur_frame_force_integer_mv);
clamp_mv2(&out_mv->as_mv, xd);
return av1_is_fullmv_in_range(&x->mv_limits,
get_fullmv_from_mv(&out_mv->as_mv));
}
// To use a single-reference NEWMV directly for compound modes, the mv needs to
// be clamped to the valid mv range. Without this, the encoder would generate
// out-of-range mvs, as has been seen in 8k encoding.
static inline void clamp_mv_in_range(MACROBLOCK *const x, int_mv *mv,
int ref_idx) {
const int_mv ref_mv = av1_get_ref_mv(x, ref_idx);
SubpelMvLimits mv_limits;
av1_set_subpel_mv_search_range(&mv_limits, &x->mv_limits, &ref_mv.as_mv);
clamp_mv(&mv->as_mv, &mv_limits);
}
static int64_t handle_newmv(const AV1_COMP *const cpi, MACROBLOCK *const x,
const BLOCK_SIZE bsize, int_mv *cur_mv,
int *const rate_mv, HandleInterModeArgs *const args,
inter_mode_info *mode_info) {
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = xd->mi[0];
const int is_comp_pred = has_second_ref(mbmi);
const PREDICTION_MODE this_mode = mbmi->mode;
const int refs[2] = { mbmi->ref_frame[0],
mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1] };
const int ref_mv_idx = mbmi->ref_mv_idx;
if (is_comp_pred) {
const int valid_mv0 = args->single_newmv_valid[ref_mv_idx][refs[0]];
const int valid_mv1 = args->single_newmv_valid[ref_mv_idx][refs[1]];
if (this_mode == NEW_NEWMV) {
if (valid_mv0) {
cur_mv[0].as_int = args->single_newmv[ref_mv_idx][refs[0]].as_int;
clamp_mv_in_range(x, &cur_mv[0], 0);
}
if (valid_mv1) {
cur_mv[1].as_int = args->single_newmv[ref_mv_idx][refs[1]].as_int;
clamp_mv_in_range(x, &cur_mv[1], 1);
}
*rate_mv = 0;
for (int i = 0; i < 2; ++i) {
const int_mv ref_mv = av1_get_ref_mv(x, i);
*rate_mv += av1_mv_bit_cost(&cur_mv[i].as_mv, &ref_mv.as_mv,
x->mv_costs->nmv_joint_cost,
x->mv_costs->mv_cost_stack, MV_COST_WEIGHT);
}
} else if (this_mode == NEAREST_NEWMV || this_mode == NEAR_NEWMV) {
if (valid_mv1) {
cur_mv[1].as_int = args->single_newmv[ref_mv_idx][refs[1]].as_int;
clamp_mv_in_range(x, &cur_mv[1], 1);
}
const int_mv ref_mv = av1_get_ref_mv(x, 1);
*rate_mv = av1_mv_bit_cost(&cur_mv[1].as_mv, &ref_mv.as_mv,
x->mv_costs->nmv_joint_cost,
x->mv_costs->mv_cost_stack, MV_COST_WEIGHT);
} else {
assert(this_mode == NEW_NEARESTMV || this_mode == NEW_NEARMV);
if (valid_mv0) {
cur_mv[0].as_int = args->single_newmv[ref_mv_idx][refs[0]].as_int;
clamp_mv_in_range(x, &cur_mv[0], 0);
}
const int_mv ref_mv = av1_get_ref_mv(x, 0);
*rate_mv = av1_mv_bit_cost(&cur_mv[0].as_mv, &ref_mv.as_mv,
x->mv_costs->nmv_joint_cost,
x->mv_costs->mv_cost_stack, MV_COST_WEIGHT);
}
} else {
// Single ref case.
const int ref_idx = 0;
int search_range = INT_MAX;
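// Speed feature: if a previously searched ref_mv_idx has a reference MV close
// to the current one, restrict the full-pel motion search range around the
// earlier result instead of searching with an unrestricted range.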
if (cpi->sf.mv_sf.reduce_search_range && mbmi->ref_mv_idx > 0) {
const MV ref_mv = av1_get_ref_mv(x, ref_idx).as_mv;
int min_mv_diff = INT_MAX;
int best_match = -1;
MV prev_ref_mv[2] = { { 0 } };
for (int idx = 0; idx < mbmi->ref_mv_idx; ++idx) {
prev_ref_mv[idx] = av1_get_ref_mv_from_stack(ref_idx, mbmi->ref_frame,
idx, &x->mbmi_ext)
.as_mv;
const int ref_mv_diff = AOMMAX(abs(ref_mv.row - prev_ref_mv[idx].row),
abs(ref_mv.col - prev_ref_mv[idx].col));
if (min_mv_diff > ref_mv_diff) {
min_mv_diff = ref_mv_diff;
best_match = idx;
}
}
if (min_mv_diff < (16 << 3)) {
if (args->single_newmv_valid[best_match][refs[0]]) {
search_range = min_mv_diff;
search_range +=
AOMMAX(abs(args->single_newmv[best_match][refs[0]].as_mv.row -
prev_ref_mv[best_match].row),
abs(args->single_newmv[best_match][refs[0]].as_mv.col -
prev_ref_mv[best_match].col));
// Get full pixel search range.
search_range = (search_range + 4) >> 3;
}
}
}
int_mv best_mv;
av1_single_motion_search(cpi, x, bsize, ref_idx, rate_mv, search_range,
mode_info, &best_mv, args);
if (best_mv.as_int == INVALID_MV) return INT64_MAX;
args->single_newmv[ref_mv_idx][refs[0]] = best_mv;
args->single_newmv_rate[ref_mv_idx][refs[0]] = *rate_mv;
args->single_newmv_valid[ref_mv_idx][refs[0]] = 1;
cur_mv[0].as_int = best_mv.as_int;
// Return after single_newmv is set.
if (mode_info[mbmi->ref_mv_idx].skip) return INT64_MAX;
}
return 0;
}
static inline void update_mode_start_end_index(
const AV1_COMP *const cpi, const MB_MODE_INFO *const mbmi,
int *mode_index_start, int *mode_index_end, int last_motion_mode_allowed,
int interintra_allowed, int eval_motion_mode) {
*mode_index_start = (int)SIMPLE_TRANSLATION;
*mode_index_end = (int)last_motion_mode_allowed + interintra_allowed;
if (cpi->sf.winner_mode_sf.motion_mode_for_winner_cand) {
if (!eval_motion_mode) {
*mode_index_end = (int)SIMPLE_TRANSLATION;
} else {
// Set the start index appropriately to process motion modes other than
// simple translation
*mode_index_start = 1;
}
}
if (cpi->sf.inter_sf.extra_prune_warped && mbmi->bsize > BLOCK_16X16)
*mode_index_end = SIMPLE_TRANSLATION;
}
/*!\brief AV1 motion mode search
*
* \ingroup inter_mode_search
* Function to search over and determine the motion mode. It will update
* mbmi->motion_mode to one of SIMPLE_TRANSLATION, OBMC_CAUSAL, or
* WARPED_CAUSAL and determine any necessary side information for the selected
* motion mode. It will also perform the full transform search, unless the
* input parameter do_tx_search indicates to do an estimation of the RD rather
* than an RD corresponding to a full transform search. It will return the
* RD for the final motion_mode.
*
* \param[in] cpi Top-level encoder structure.
* \param[in] tile_data Pointer to struct holding adaptive
* data/contexts/models for the tile during
* encoding.
* \param[in] x Pointer to struct holding all the data for
* the current macroblock.
* \param[in] bsize Current block size.
* \param[in,out] rd_stats Struct to keep track of the overall RD
* information.
* \param[in,out] rd_stats_y Struct to keep track of the RD information
* for only the Y plane.
* \param[in,out] rd_stats_uv Struct to keep track of the RD information
* for only the UV planes.
* \param[in] args HandleInterModeArgs struct holding
* miscellaneous arguments for inter mode
* search. See the documentation for this
* struct for a description of each member.
* \param[in] ref_best_rd Best RD found so far for this block.
* It is used for early termination of this
* search if the RD exceeds this value.
* \param[in,out] ref_skip_rd A length 2 array, where skip_rd[0] is the
* best total RD for a skip mode so far, and
* skip_rd[1] is the best RD for a skip mode so
* far in luma. This is used as a speed feature
* to skip the transform search if the computed
* skip RD for the current mode is not better
* than the best skip_rd so far.
* \param[in,out] rate_mv The rate associated with the motion vectors.
* This will be modified if a motion search is
* done in the motion mode search.
* \param[in,out] orig_dst A prediction buffer to hold a computed
* prediction. This will eventually hold the
* final prediction, and the tmp_dst info will
* be copied here.
* \param[in,out] best_est_rd Estimated RD for motion mode search if
* do_tx_search (see below) is 0.
* \param[in] do_tx_search Parameter to indicate whether or not to do
* a full transform search. This will compute
* an estimated RD for the modes without the
* transform search and later perform the full
* transform search on the best candidates.
* \param[in] inter_modes_info InterModesInfo struct to hold inter mode
* information to perform a full transform
* search only on winning candidates searched
* with an estimate for transform coding RD.
* \param[in] eval_motion_mode Boolean whether or not to evaluate motion
*                                  modes other than SIMPLE_TRANSLATION.
* \param[out] yrd Stores the rdcost corresponding to encoding
* the luma plane.
* \return Returns INT64_MAX if the determined motion mode is invalid and the
* current motion mode being tested should be skipped. It returns 0 if the
* motion mode search is a success.
*/
static int64_t motion_mode_rd(
const AV1_COMP *const cpi, TileDataEnc *tile_data, MACROBLOCK *const x,
BLOCK_SIZE bsize, RD_STATS *rd_stats, RD_STATS *rd_stats_y,
RD_STATS *rd_stats_uv, HandleInterModeArgs *const args, int64_t ref_best_rd,
int64_t *ref_skip_rd, int *rate_mv, const BUFFER_SET *orig_dst,
int64_t *best_est_rd, int do_tx_search, InterModesInfo *inter_modes_info,
int eval_motion_mode, int64_t *yrd) {
const AV1_COMMON *const cm = &cpi->common;
const FeatureFlags *const features = &cm->features;
TxfmSearchInfo *txfm_info = &x->txfm_search_info;
const int num_planes = av1_num_planes(cm);
MACROBLOCKD *xd = &x->e_mbd;
MB_MODE_INFO *mbmi = xd->mi[0];
const int is_comp_pred = has_second_ref(mbmi);
const PREDICTION_MODE this_mode = mbmi->mode;
const int rate2_nocoeff = rd_stats->rate;
int best_xskip_txfm = 0;
RD_STATS best_rd_stats, best_rd_stats_y, best_rd_stats_uv;
uint8_t best_blk_skip[MAX_MIB_SIZE * MAX_MIB_SIZE];
uint8_t best_tx_type_map[MAX_MIB_SIZE * MAX_MIB_SIZE];
const int rate_mv0 = *rate_mv;
const int interintra_allowed = cm->seq_params->enable_interintra_compound &&
is_interintra_allowed(mbmi) &&
mbmi->compound_idx;
WARP_SAMPLE_INFO *const warp_sample_info =
&x->warp_sample_info[mbmi->ref_frame[0]];
int *pts0 = warp_sample_info->pts;
int *pts_inref0 = warp_sample_info->pts_inref;
assert(mbmi->ref_frame[1] != INTRA_FRAME);
const MV_REFERENCE_FRAME ref_frame_1 = mbmi->ref_frame[1];
av1_invalid_rd_stats(&best_rd_stats);
mbmi->num_proj_ref = 1; // assume num_proj_ref >= 1
MOTION_MODE last_motion_mode_allowed = SIMPLE_TRANSLATION;
*yrd = INT64_MAX;
if (features->switchable_motion_mode) {
// Determine which motion modes to search if more than SIMPLE_TRANSLATION
// is allowed.
last_motion_mode_allowed = motion_mode_allowed(
xd->global_motion, xd, mbmi, features->allow_warped_motion);
}
if (last_motion_mode_allowed == WARPED_CAUSAL) {
// Collect projection samples used in least squares approximation of
// the warped motion parameters if WARPED_CAUSAL is going to be searched.
if (warp_sample_info->num < 0) {
warp_sample_info->num = av1_findSamples(cm, xd, pts0, pts_inref0);
}
mbmi->num_proj_ref = warp_sample_info->num;
}
const int total_samples = mbmi->num_proj_ref;
if (total_samples == 0) {
// Do not search WARPED_CAUSAL if there are no samples to use to determine
// warped parameters.
last_motion_mode_allowed = OBMC_CAUSAL;
}
const MB_MODE_INFO base_mbmi = *mbmi;
MB_MODE_INFO best_mbmi;
const int interp_filter = features->interp_filter;
const int switchable_rate =
av1_is_interp_needed(xd)
? av1_get_switchable_rate(x, xd, interp_filter,
cm->seq_params->enable_dual_filter)
: 0;
int64_t best_rd = INT64_MAX;
int best_rate_mv = rate_mv0;
const int mi_row = xd->mi_row;
const int mi_col = xd->mi_col;
int mode_index_start, mode_index_end;
const int txfm_rd_gate_level =
get_txfm_rd_gate_level(cm->seq_params->enable_masked_compound,
cpi->sf.inter_sf.txfm_rd_gate_level, bsize,
TX_SEARCH_MOTION_MODE, eval_motion_mode);
// Modify the start and end index according to speed features. For example,
// if SIMPLE_TRANSLATION has already been searched according to
// the motion_mode_for_winner_cand speed feature, update the mode_index_start
// to avoid searching it again.
update_mode_start_end_index(cpi, mbmi, &mode_index_start, &mode_index_end,
last_motion_mode_allowed, interintra_allowed,
eval_motion_mode);
// Main function loop. This loops over all of the possible motion modes and
// computes RD to determine the best one. This process includes computing
// any necessary side information for the motion mode and performing the
// transform search.
for (int mode_index = mode_index_start; mode_index <= mode_index_end;
mode_index++) {
if (args->skip_motion_mode && mode_index) continue;
int tmp_rate2 = rate2_nocoeff;
const int is_interintra_mode = mode_index > (int)last_motion_mode_allowed;
int tmp_rate_mv = rate_mv0;
*mbmi = base_mbmi;
if (is_interintra_mode) {
// Only use SIMPLE_TRANSLATION for interintra
mbmi->motion_mode = SIMPLE_TRANSLATION;
} else {
mbmi->motion_mode = (MOTION_MODE)mode_index;
assert(mbmi->ref_frame[1] != INTRA_FRAME);
}
// Do not search OBMC if the probability of selecting it is below a
// predetermined threshold for this update_type and block size.
const FRAME_UPDATE_TYPE update_type =
get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index);
int use_actual_frame_probs = 1;
int prune_obmc;
#if CONFIG_FPMT_TEST
use_actual_frame_probs =
(cpi->ppi->fpmt_unit_test_cfg == PARALLEL_SIMULATION_ENCODE) ? 0 : 1;
if (!use_actual_frame_probs) {
prune_obmc = cpi->ppi->temp_frame_probs.obmc_probs[update_type][bsize] <
cpi->sf.inter_sf.prune_obmc_prob_thresh;
}
#endif
if (use_actual_frame_probs) {
prune_obmc = cpi->ppi->frame_probs.obmc_probs[update_type][bsize] <
cpi->sf.inter_sf.prune_obmc_prob_thresh;
}
if ((!cpi->oxcf.motion_mode_cfg.enable_obmc || prune_obmc) &&
mbmi->motion_mode == OBMC_CAUSAL)
continue;
if (mbmi->motion_mode == SIMPLE_TRANSLATION && !is_interintra_mode) {
// SIMPLE_TRANSLATION mode: no need to recalculate.
// The prediction is calculated before motion_mode_rd() is called in
// handle_inter_mode()
} else if (mbmi->motion_mode == OBMC_CAUSAL) {
const uint32_t cur_mv = mbmi->mv[0].as_int;
// OBMC_CAUSAL not allowed for compound prediction
assert(!is_comp_pred);
if (have_newmv_in_inter_mode(this_mode)) {
av1_single_motion_search(cpi, x, bsize, 0, &tmp_rate_mv, INT_MAX, NULL,
&mbmi->mv[0], NULL);
tmp_rate2 = rate2_nocoeff - rate_mv0 + tmp_rate_mv;
}
if ((mbmi->mv[0].as_int != cur_mv) || eval_motion_mode) {
// Build the predictor according to the current motion vector if it has
// not already been built
av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, orig_dst, bsize,
0, av1_num_planes(cm) - 1);
}
// Build the inter predictor by blending the predictor corresponding to
// this MV, and the neighboring blocks using the OBMC model
av1_build_obmc_inter_prediction(
cm, xd, args->above_pred_buf, args->above_pred_stride,
args->left_pred_buf, args->left_pred_stride);
#if !CONFIG_REALTIME_ONLY
} else if (mbmi->motion_mode == WARPED_CAUSAL) {
int pts[SAMPLES_ARRAY_SIZE], pts_inref[SAMPLES_ARRAY_SIZE];
mbmi->motion_mode = WARPED_CAUSAL;
mbmi->wm_params.wmtype = DEFAULT_WMTYPE;
mbmi->interp_filters =
av1_broadcast_interp_filter(av1_unswitchable_filter(interp_filter));
memcpy(pts, pts0, total_samples * 2 * sizeof(*pts0));
memcpy(pts_inref, pts_inref0, total_samples * 2 * sizeof(*pts_inref0));
// Select the samples according to motion vector difference
if (mbmi->num_proj_ref > 1) {
mbmi->num_proj_ref = av1_selectSamples(
&mbmi->mv[0].as_mv, pts, pts_inref, mbmi->num_proj_ref, bsize);
}
// Compute the warped motion parameters with a least squares fit
// using the collected samples
if (!av1_find_projection(mbmi->num_proj_ref, pts, pts_inref, bsize,
mbmi->mv[0].as_mv.row, mbmi->mv[0].as_mv.col,
&mbmi->wm_params, mi_row, mi_col)) {
assert(!is_comp_pred);
if (have_newmv_in_inter_mode(this_mode)) {
// Refine MV for NEWMV mode
const int_mv mv0 = mbmi->mv[0];
const WarpedMotionParams wm_params0 = mbmi->wm_params;
const int num_proj_ref0 = mbmi->num_proj_ref;
const int_mv ref_mv = av1_get_ref_mv(x, 0);
SUBPEL_MOTION_SEARCH_PARAMS ms_params;
av1_make_default_subpel_ms_params(&ms_params, cpi, x, bsize,
&ref_mv.as_mv, NULL);
// Refine MV in a small range.
av1_refine_warped_mv(xd, cm, &ms_params, bsize, pts0, pts_inref0,
total_samples, cpi->sf.mv_sf.warp_search_method,
cpi->sf.mv_sf.warp_search_iters);
if (mv0.as_int != mbmi->mv[0].as_int) {
// Keep the refined MV and WM parameters.
tmp_rate_mv = av1_mv_bit_cost(
&mbmi->mv[0].as_mv, &ref_mv.as_mv, x->mv_costs->nmv_joint_cost,
x->mv_costs->mv_cost_stack, MV_COST_WEIGHT);
tmp_rate2 = rate2_nocoeff - rate_mv0 + tmp_rate_mv;
} else {
// Restore the old MV and WM parameters.
mbmi->mv[0] = mv0;
mbmi->wm_params = wm_params0;
mbmi->num_proj_ref = num_proj_ref0;
}
}
// Build the warped predictor
av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, NULL, bsize, 0,
av1_num_planes(cm) - 1);
} else {
continue;
}
#endif // !CONFIG_REALTIME_ONLY
} else if (is_interintra_mode) {
const int ret =
av1_handle_inter_intra_mode(cpi, x, bsize, mbmi, args, ref_best_rd,
&tmp_rate_mv, &tmp_rate2, orig_dst);
if (ret < 0) continue;
}
// If we are searching newmv and the mv is the same as refmv, skip the
// current mode
if (!av1_check_newmv_joint_nonzero(cm, x)) continue;
// Update rd_stats for the current motion mode
txfm_info->skip_txfm = 0;
rd_stats->dist = 0;
rd_stats->sse = 0;
rd_stats->skip_txfm = 1;
rd_stats->rate = tmp_rate2;
const ModeCosts *mode_costs = &x->mode_costs;
if (mbmi->motion_mode != WARPED_CAUSAL) rd_stats->rate += switchable_rate;
if (interintra_allowed) {
rd_stats->rate +=
mode_costs->interintra_cost[size_group_lookup[bsize]]
[mbmi->ref_frame[1] == INTRA_FRAME];
}
if ((last_motion_mode_allowed > SIMPLE_TRANSLATION) &&
(mbmi->ref_frame[1] != INTRA_FRAME)) {
if (last_motion_mode_allowed == WARPED_CAUSAL) {
rd_stats->rate +=
mode_costs->motion_mode_cost[bsize][mbmi->motion_mode];
} else {
rd_stats->rate +=
mode_costs->motion_mode_cost1[bsize][mbmi->motion_mode];
}
}
int64_t this_yrd = INT64_MAX;
if (!do_tx_search) {
// Avoid doing a transform search here to speed up the overall mode
// search. It will be done later in the mode search if the current
// motion mode seems promising.
int64_t curr_sse = -1;
int64_t sse_y = -1;
int est_residue_cost = 0;
int64_t est_dist = 0;
int64_t est_rd = 0;
if (cpi->sf.inter_sf.inter_mode_rd_model_estimation == 1) {
curr_sse = get_sse(cpi, x, &sse_y);
const int has_est_rd = get_est_rate_dist(tile_data, bsize, curr_sse,
&est_residue_cost, &est_dist);
(void)has_est_rd;
assert(has_est_rd);
} else if (cpi->sf.inter_sf.inter_mode_rd_model_estimation == 2 ||
cpi->sf.rt_sf.use_nonrd_pick_mode) {
model_rd_sb_fn[MODELRD_TYPE_MOTION_MODE_RD](
cpi, bsize, x, xd, 0, num_planes - 1, &est_residue_cost, &est_dist,
NULL, &curr_sse, NULL, NULL, NULL);
sse_y = x->pred_sse[xd->mi[0]->ref_frame[0]];
}
est_rd = RDCOST(x->rdmult, rd_stats->rate + est_residue_cost, est_dist);
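// Skip this motion mode if its estimated RD is more than 25% worse than the
// best estimate so far.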
if (est_rd * 0.80 > *best_est_rd) {
mbmi->ref_frame[1] = ref_frame_1;
continue;
}
const int mode_rate = rd_stats->rate;
rd_stats->rate += est_residue_cost;
rd_stats->dist = est_dist;
rd_stats->rdcost = est_rd;
if (rd_stats->rdcost < *best_est_rd) {
*best_est_rd = rd_stats->rdcost;
assert(sse_y >= 0);
ref_skip_rd[1] = txfm_rd_gate_level
? RDCOST(x->rdmult, mode_rate, (sse_y << 4))
: INT64_MAX;
}
if (cm->current_frame.reference_mode == SINGLE_REFERENCE) {
if (!is_comp_pred) {
assert(curr_sse >= 0);
inter_modes_info_push(inter_modes_info, mode_rate, curr_sse,
rd_stats->rdcost, rd_stats, rd_stats_y,
rd_stats_uv, mbmi);
}
} else {
assert(curr_sse >= 0);
inter_modes_info_push(inter_modes_info, mode_rate, curr_sse,
rd_stats->rdcost, rd_stats, rd_stats_y,
rd_stats_uv, mbmi);
}
mbmi->skip_txfm = 0;
} else {
// Perform full transform search
int64_t skip_rd = INT64_MAX;
int64_t skip_rdy = INT64_MAX;
if (txfm_rd_gate_level) {
// Check if the mode is good enough based on skip RD
int64_t sse_y = INT64_MAX;
int64_t curr_sse = get_sse(cpi, x, &sse_y);
skip_rd = RDCOST(x->rdmult, rd_stats->rate, curr_sse);
skip_rdy = RDCOST(x->rdmult, rd_stats->rate, (sse_y << 4));
int eval_txfm = check_txfm_eval(x, bsize, ref_skip_rd[0], skip_rd,
txfm_rd_gate_level, 0);
if (!eval_txfm) continue;
}
// Do transform search
const int mode_rate = rd_stats->rate;
if (!av1_txfm_search(cpi, x, bsize, rd_stats, rd_stats_y, rd_stats_uv,
rd_stats->rate, ref_best_rd)) {
if (rd_stats_y->rate == INT_MAX && mode_index == 0) {
return INT64_MAX;
}
continue;
}
const int skip_ctx = av1_get_skip_txfm_context(xd);
const int y_rate =
rd_stats->skip_txfm
? x->mode_costs.skip_txfm_cost[skip_ctx][1]
: (rd_stats_y->rate + x->mode_costs.skip_txfm_cost[skip_ctx][0]);
this_yrd = RDCOST(x->rdmult, y_rate + mode_rate, rd_stats_y->dist);
const int64_t curr_rd = RDCOST(x->rdmult, rd_stats->rate, rd_stats->dist);
if (curr_rd < ref_best_rd) {
ref_best_rd = curr_rd;
ref_skip_rd[0] = skip_rd;
ref_skip_rd[1] = skip_rdy;
}
if (cpi->sf.inter_sf.inter_mode_rd_model_estimation == 1) {
inter_mode_data_push(
tile_data, mbmi->bsize, rd_stats->sse, rd_stats->dist,
rd_stats_y->rate + rd_stats_uv->rate +
mode_costs->skip_txfm_cost[skip_ctx][mbmi->skip_txfm]);
}
}
if (this_mode == GLOBALMV || this_mode == GLOBAL_GLOBALMV) {
if (is_nontrans_global_motion(xd, xd->mi[0])) {
mbmi->interp_filters =
av1_broadcast_interp_filter(av1_unswitchable_filter(interp_filter));
}
}
const int64_t tmp_rd = RDCOST(x->rdmult, rd_stats->rate, rd_stats->dist);
if (mode_index == 0) {
args->simple_rd[this_mode][mbmi->ref_mv_idx][mbmi->ref_frame[0]] = tmp_rd;
}
if (mode_index == 0 || tmp_rd < best_rd) {
// Update best_rd data if this is the best motion mode so far
best_mbmi = *mbmi;
best_rd = tmp_rd;
best_rd_stats = *rd_stats;
best_rd_stats_y = *rd_stats_y;
best_rate_mv = tmp_rate_mv;
*yrd = this_yrd;
if (num_planes > 1) best_rd_stats_uv = *rd_stats_uv;
memcpy(best_blk_skip, txfm_info->blk_skip,
sizeof(txfm_info->blk_skip[0]) * xd->height * xd->width);
av1_copy_array(best_tx_type_map, xd->tx_type_map, xd->height * xd->width);
best_xskip_txfm = mbmi->skip_txfm;
}
}
// Update RD and mbmi stats for selected motion mode
mbmi->ref_frame[1] = ref_frame_1;
*rate_mv = best_rate_mv;
if (best_rd == INT64_MAX || !av1_check_newmv_joint_nonzero(cm, x)) {
av1_invalid_rd_stats(rd_stats);
restore_dst_buf(xd, *orig_dst, num_planes);
return INT64_MAX;
}
*mbmi = best_mbmi;
*rd_stats = best_rd_stats;
*rd_stats_y = best_rd_stats_y;
if (num_planes > 1) *rd_stats_uv = best_rd_stats_uv;
memcpy(txfm_info->blk_skip, best_blk_skip,
sizeof(txfm_info->blk_skip[0]) * xd->height * xd->width);
av1_copy_array(xd->tx_type_map, best_tx_type_map, xd->height * xd->width);
txfm_info->skip_txfm = best_xskip_txfm;
restore_dst_buf(xd, *orig_dst, num_planes);
return 0;
}
static int64_t skip_mode_rd(RD_STATS *rd_stats, const AV1_COMP *const cpi,
MACROBLOCK *const x, BLOCK_SIZE bsize,
const BUFFER_SET *const orig_dst, int64_t best_rd) {
assert(bsize < BLOCK_SIZES_ALL);
const AV1_COMMON *cm = &cpi->common;
const int num_planes = av1_num_planes(cm);
MACROBLOCKD *const xd = &x->e_mbd;
const int mi_row = xd->mi_row;
const int mi_col = xd->mi_col;
int64_t total_sse = 0;
int64_t this_rd = INT64_MAX;
const int skip_mode_ctx = av1_get_skip_mode_context(xd);
rd_stats->rate = x->mode_costs.skip_mode_cost[skip_mode_ctx][1];
for (int plane = 0; plane < num_planes; ++plane) {
// Call av1_enc_build_inter_predictor() for one plane at a time.
av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, orig_dst, bsize,
plane, plane);
const struct macroblockd_plane *const pd = &xd->plane[plane];
const BLOCK_SIZE plane_bsize =
get_plane_block_size(bsize, pd->subsampling_x, pd->subsampling_y);
av1_subtract_plane(x, plane_bsize, plane);
int64_t sse =
av1_pixel_diff_dist(x, plane, 0, 0, plane_bsize, plane_bsize, NULL);
if (is_cur_buf_hbd(xd)) sse = ROUND_POWER_OF_TWO(sse, (xd->bd - 8) * 2);
sse <<= 4;
total_sse += sse;
// When current rd cost is more than the best rd, skip evaluation of
// remaining planes.
this_rd = RDCOST(x->rdmult, rd_stats->rate, total_sse);
if (this_rd > best_rd) break;
}
rd_stats->dist = rd_stats->sse = total_sse;
rd_stats->rdcost = this_rd;
restore_dst_buf(xd, *orig_dst, num_planes);
return 0;
}
// Check NEARESTMV, NEARMV and GLOBALMV ref mvs for duplicates and skip the
// relevant mode
// Note(rachelbarker): This speed feature currently does not interact correctly
// with global motion. The issue is that, when global motion is used, GLOBALMV
// produces a different prediction to NEARESTMV/NEARMV even if the motion
// vectors are the same. Thus GLOBALMV should not be pruned in this case.
static inline int check_repeat_ref_mv(const MB_MODE_INFO_EXT *mbmi_ext,
int ref_idx,
const MV_REFERENCE_FRAME *ref_frame,
PREDICTION_MODE single_mode) {
const uint8_t ref_frame_type = av1_ref_frame_type(ref_frame);
const int ref_mv_count = mbmi_ext->ref_mv_count[ref_frame_type];
assert(single_mode != NEWMV);
if (single_mode == NEARESTMV) {
return 0;
} else if (single_mode == NEARMV) {
    // When ref_mv_count == 0, NEARESTMV and NEARMV are the same as GLOBALMV.
    // When ref_mv_count == 1, NEARMV is the same as GLOBALMV.
if (ref_mv_count < 2) return 1;
} else if (single_mode == GLOBALMV) {
    // When ref_mv_count == 0, GLOBALMV is the same as NEARESTMV.
    if (ref_mv_count == 0) return 1;
    // When ref_mv_count == 1, NEARMV duplicates GLOBALMV and is pruned
    // instead (see the NEARMV case above), so keep GLOBALMV.
else if (ref_mv_count == 1)
return 0;
int stack_size = AOMMIN(USABLE_REF_MV_STACK_SIZE, ref_mv_count);
    // Check whether the global MV matches any MV in the ref_mv_stack.
for (int ref_mv_idx = 0; ref_mv_idx < stack_size; ref_mv_idx++) {
int_mv this_mv;
if (ref_idx == 0)
this_mv = mbmi_ext->ref_mv_stack[ref_frame_type][ref_mv_idx].this_mv;
else
this_mv = mbmi_ext->ref_mv_stack[ref_frame_type][ref_mv_idx].comp_mv;
if (this_mv.as_int == mbmi_ext->global_mvs[ref_frame[ref_idx]].as_int)
return 1;
}
}
return 0;
}
static inline int get_this_mv(int_mv *this_mv, PREDICTION_MODE this_mode,
int ref_idx, int ref_mv_idx,
int skip_repeated_ref_mv,
const MV_REFERENCE_FRAME *ref_frame,
const MB_MODE_INFO_EXT *mbmi_ext) {
const PREDICTION_MODE single_mode = get_single_mode(this_mode, ref_idx);
assert(is_inter_singleref_mode(single_mode));
if (single_mode == NEWMV) {
this_mv->as_int = INVALID_MV;
} else if (single_mode == GLOBALMV) {
if (skip_repeated_ref_mv &&
check_repeat_ref_mv(mbmi_ext, ref_idx, ref_frame, single_mode))
return 0;
*this_mv = mbmi_ext->global_mvs[ref_frame[ref_idx]];
} else {
assert(single_mode == NEARMV || single_mode == NEARESTMV);
const uint8_t ref_frame_type = av1_ref_frame_type(ref_frame);
const int ref_mv_offset = single_mode == NEARESTMV ? 0 : ref_mv_idx + 1;
if (ref_mv_offset < mbmi_ext->ref_mv_count[ref_frame_type]) {
assert(ref_mv_offset >= 0);
if (ref_idx == 0) {
*this_mv =
mbmi_ext->ref_mv_stack[ref_frame_type][ref_mv_offset].this_mv;
} else {
*this_mv =
mbmi_ext->ref_mv_stack[ref_frame_type][ref_mv_offset].comp_mv;
}
} else {
if (skip_repeated_ref_mv &&
check_repeat_ref_mv(mbmi_ext, ref_idx, ref_frame, single_mode))
return 0;
*this_mv = mbmi_ext->global_mvs[ref_frame[ref_idx]];
}
}
return 1;
}
// Skip NEARESTMV and NEARMV modes based on the ref MV weights computed
// during ref MV list population
static inline int skip_nearest_near_mv_using_refmv_weight(
const MACROBLOCK *const x, const PREDICTION_MODE this_mode,
const int8_t ref_frame_type, PREDICTION_MODE best_mode) {
if (this_mode != NEARESTMV && this_mode != NEARMV) return 0;
// Do not skip the mode if the current block has not yet obtained a valid
// inter mode.
if (!is_inter_mode(best_mode)) return 0;
const MACROBLOCKD *xd = &x->e_mbd;
  // Do not skip the mode unless both the top and left neighboring blocks
  // are available.
if (!xd->left_available || !xd->up_available) return 0;
const MB_MODE_INFO_EXT *const mbmi_ext = &x->mbmi_ext;
const uint16_t *const ref_mv_weight = mbmi_ext->weight[ref_frame_type];
const int ref_mv_count =
AOMMIN(MAX_REF_MV_SEARCH, mbmi_ext->ref_mv_count[ref_frame_type]);
if (ref_mv_count == 0) return 0;
  // If the ref MV list has at least one nearest candidate, do not prune
  // NEARESTMV.
if (this_mode == NEARESTMV && ref_mv_weight[0] >= REF_CAT_LEVEL) return 0;
// Count number of ref mvs populated from nearest candidates
int nearest_refmv_count = 0;
for (int ref_mv_idx = 0; ref_mv_idx < ref_mv_count; ref_mv_idx++) {
if (ref_mv_weight[ref_mv_idx] >= REF_CAT_LEVEL) nearest_refmv_count++;
}
  // nearest_refmv_count indicates how closely the block's motion matches that
  // of its spatial neighbors. A small nearest_refmv_count relative to
  // ref_mv_count means low correlation with the spatial neighbors, and hence
  // a low chance of NEARESTMV or NEARMV becoming the best mode, since these
  // modes work well for blocks that share motion characteristics with their
  // neighbors. Thus, NEARMV is pruned when nearest_refmv_count is relatively
  // small compared to ref_mv_count, and NEARESTMV is pruned if none of the
  // ref MVs come from nearest candidates.
const int prune_thresh = 1 + (ref_mv_count >= 2);
if (nearest_refmv_count < prune_thresh) return 1;
return 0;
}
// This function updates the non-NEWMV MVs for the current prediction mode
static inline int build_cur_mv(int_mv *cur_mv, PREDICTION_MODE this_mode,
const AV1_COMMON *cm, const MACROBLOCK *x,
int skip_repeated_ref_mv) {
const MACROBLOCKD *xd = &x->e_mbd;
const MB_MODE_INFO *mbmi = xd->mi[0];
const int is_comp_pred = has_second_ref(mbmi);
int ret = 1;
for (int i = 0; i < is_comp_pred + 1; ++i) {
int_mv this_mv;
this_mv.as_int = INVALID_MV;
ret = get_this_mv(&this_mv, this_mode, i, mbmi->ref_mv_idx,
skip_repeated_ref_mv, mbmi->ref_frame, &x->mbmi_ext);
if (!ret) return 0;
const PREDICTION_MODE single_mode = get_single_mode(this_mode, i);
if (single_mode == NEWMV) {
const uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
cur_mv[i] =
(i == 0) ? x->mbmi_ext.ref_mv_stack[ref_frame_type][mbmi->ref_mv_idx]
.this_mv
: x->mbmi_ext.ref_mv_stack[ref_frame_type][mbmi->ref_mv_idx]
.comp_mv;
} else {
ret &= clamp_and_check_mv(cur_mv + i, this_mv, cm, x);
}
}
return ret;
}
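// Computes the rate cost of signaling the DRL (dynamic reference list) index.
// For NEWMV-style modes the index is coded over ref MV stack entries starting
// at 0; for modes containing NEARMV it is coded over entries starting at 1,
// since entry 0 corresponds to NEARESTMV.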
static inline int get_drl_cost(const MB_MODE_INFO *mbmi,
const MB_MODE_INFO_EXT *mbmi_ext,
const int (*const drl_mode_cost0)[2],
int8_t ref_frame_type) {
int cost = 0;
if (mbmi->mode == NEWMV || mbmi->mode == NEW_NEWMV) {
for (int idx = 0; idx < 2; ++idx) {
if (mbmi_ext->ref_mv_count[ref_frame_type] > idx + 1) {
uint8_t drl_ctx = av1_drl_ctx(mbmi_ext->weight[ref_frame_type], idx);
cost += drl_mode_cost0[drl_ctx][mbmi->ref_mv_idx != idx];
if (mbmi->ref_mv_idx == idx) return cost;
}
}
return cost;
}
if (have_nearmv_in_inter_mode(mbmi->mode)) {
for (int idx = 1; idx < 3; ++idx) {
if (mbmi_ext->ref_mv_count[ref_frame_type] > idx + 1) {
uint8_t drl_ctx = av1_drl_ctx(mbmi_ext->weight[ref_frame_type], idx);
cost += drl_mode_cost0[drl_ctx][mbmi->ref_mv_idx != (idx - 1)];
if (mbmi->ref_mv_idx == (idx - 1)) return cost;
}
}
return cost;
}
return cost;
}
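// For modes with a NEWMV component, checks that the earlier single-reference
// NEWMV search produced a valid MV for each such reference.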
static inline int is_single_newmv_valid(const HandleInterModeArgs *const args,
const MB_MODE_INFO *const mbmi,
PREDICTION_MODE this_mode) {
for (int ref_idx = 0; ref_idx < 2; ++ref_idx) {
const PREDICTION_MODE single_mode = get_single_mode(this_mode, ref_idx);
const MV_REFERENCE_FRAME ref = mbmi->ref_frame[ref_idx];
if (single_mode == NEWMV &&
args->single_newmv_valid[mbmi->ref_mv_idx][ref] == 0) {
return 0;
}
}
return 1;
}
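// Returns the number of ref_mv_idx candidates to evaluate for the given mode:
// 1 when there is no DRL index to search, otherwise the number of usable ref
// MV stack entries capped at MAX_REF_MV_SEARCH. Modes containing NEARMV skip
// stack entry 0, which is covered by NEARESTMV.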
static int get_drl_refmv_count(const MACROBLOCK *const x,
const MV_REFERENCE_FRAME *ref_frame,
PREDICTION_MODE mode) {
const MB_MODE_INFO_EXT *const mbmi_ext = &x->mbmi_ext;
const int8_t ref_frame_type = av1_ref_frame_type(ref_frame);
const int has_nearmv = have_nearmv_in_inter_mode(mode) ? 1 : 0;
const int ref_mv_count = mbmi_ext->ref_mv_count[ref_frame_type];
const int only_newmv = (mode == NEWMV || mode == NEW_NEWMV);
const int has_drl =
(has_nearmv && ref_mv_count > 2) || (only_newmv && ref_mv_count > 1);
const int ref_set =
has_drl ? AOMMIN(MAX_REF_MV_SEARCH, ref_mv_count - has_nearmv) : 1;
return ref_set;
}
// Checks if a particular ref_mv_idx should be pruned.
static int prune_ref_mv_idx_using_qindex(const int reduce_inter_modes,
const int qindex,
const int ref_mv_idx) {
if (reduce_inter_modes >= 3) return 1;
// Q-index logic based pruning is enabled only for
// reduce_inter_modes = 2.
assert(reduce_inter_modes == 2);
// When reduce_inter_modes=2, pruning happens as below based on q index.
// For q index range between 0 and 85: prune if ref_mv_idx >= 1.
// For q index range between 86 and 170: prune if ref_mv_idx == 2.
// For q index range between 171 and 255: no pruning.
const int min_prune_ref_mv_idx = (qindex * 3 / QINDEX_RANGE) + 1;
return (ref_mv_idx >= min_prune_ref_mv_idx);
}
// Whether this reference motion vector can be skipped, based on initial
// heuristics.
static bool ref_mv_idx_early_breakout(
const SPEED_FEATURES *const sf,
const RefFrameDistanceInfo *const ref_frame_dist_info, MACROBLOCK *x,
const HandleInterModeArgs *const args, int64_t ref_best_rd,
int ref_mv_idx) {
MACROBLOCKD *xd = &x->e_mbd;
MB_MODE_INFO *mbmi = xd->mi[0];
const MB_MODE_INFO_EXT *const mbmi_ext = &x->mbmi_ext;
const int8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
const int is_comp_pred = has_second_ref(mbmi);
if (sf->inter_sf.reduce_inter_modes && ref_mv_idx > 0) {
if (mbmi->ref_frame[0] == LAST2_FRAME ||
mbmi->ref_frame[0] == LAST3_FRAME ||
mbmi->ref_frame[1] == LAST2_FRAME ||
mbmi->ref_frame[1] == LAST3_FRAME) {
const int has_nearmv = have_nearmv_in_inter_mode(mbmi->mode) ? 1 : 0;
if (mbmi_ext->weight[ref_frame_type][ref_mv_idx + has_nearmv] <
REF_CAT_LEVEL) {
return true;
}
}
// TODO(any): Experiment with reduce_inter_modes for compound prediction
if (sf->inter_sf.reduce_inter_modes >= 2 && !is_comp_pred &&
have_newmv_in_inter_mode(mbmi->mode)) {
if (mbmi->ref_frame[0] != ref_frame_dist_info->nearest_past_ref &&
mbmi->ref_frame[0] != ref_frame_dist_info->nearest_future_ref) {
const int has_nearmv = have_nearmv_in_inter_mode(mbmi->mode) ? 1 : 0;
const int do_prune = prune_ref_mv_idx_using_qindex(
sf->inter_sf.reduce_inter_modes, x->qindex, ref_mv_idx);
if (do_prune &&
(mbmi_ext->weight[ref_frame_type][ref_mv_idx + has_nearmv] <
REF_CAT_LEVEL)) {
return true;
}
}
}
}
mbmi->ref_mv_idx = ref_mv_idx;
if (is_comp_pred && (!is_single_newmv_valid(args, mbmi, mbmi->mode))) {
return true;
}
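  // Estimate a lower bound on the rate for this candidate: reference frame
  // and single/compound signaling cost plus the DRL index cost.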
size_t est_rd_rate = args->ref_frame_cost + args->single_comp_cost;
const int drl_cost = get_drl_cost(
mbmi, mbmi_ext, x->mode_costs.drl_mode_cost0, ref_frame_type);
est_rd_rate += drl_cost;
if (RDCOST(x->rdmult, est_rd_rate, 0) > ref_best_rd &&
mbmi->mode != NEARESTMV && mbmi->mode != NEAREST_NEARESTMV) {
return true;
}
return false;
}
// Compute the estimated RD cost for the motion vector with simple translation.
static int64_t simple_translation_pred_rd(AV1_COMP *const cpi, MACROBLOCK *x,
RD_STATS *rd_stats,
HandleInterModeArgs *args,
int ref_mv_idx, int64_t ref_best_rd,
BLOCK_SIZE bsize) {
MACROBLOCKD *xd = &x->e_mbd;
MB_MODE_INFO *mbmi = xd->mi[0];
MB_MODE_INFO_EXT *const mbmi_ext = &x->mbmi_ext;
const int8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
const AV1_COMMON *cm = &cpi->common;
const int is_comp_pred = has_second_ref(mbmi);
const ModeCosts *mode_costs = &x->mode_costs;
struct macroblockd_plane *p = xd->plane;
const BUFFER_SET orig_dst = {
{ p[0].dst.buf, p[1].dst.buf, p[2].dst.buf },
{ p[0].dst.stride, p[1].dst.stride, p[2].dst.stride },
};
av1_init_rd_stats(rd_stats);
mbmi->interinter_comp.type = COMPOUND_AVERAGE;
mbmi->comp_group_idx = 0;
mbmi->compound_idx = 1;
if (mbmi->ref_frame[1] == INTRA_FRAME) {
mbmi->ref_frame[1] = NONE_FRAME;
}
int16_t mode_ctx =
av1_mode_context_analyzer(mbmi_ext->mode_context, mbmi->ref_frame);
mbmi->num_proj_ref = 0;
mbmi->motion_mode = SIMPLE_TRANSLATION;
mbmi->ref_mv_idx = ref_mv_idx;
rd_stats->rate += args->ref_frame_cost + args->single_comp_cost;
const int drl_cost =
get_drl_cost(mbmi, mbmi_ext, mode_costs->drl_mode_cost0, ref_frame_type);
rd_stats->rate += drl_cost;
int_mv cur_mv[2];
if (!build_cur_mv(cur_mv, mbmi->mode, cm, x, 0)) {
return INT64_MAX;
}
assert(have_nearmv_in_inter_mode(mbmi->mode));
for (int i = 0; i < is_comp_pred + 1; ++i) {
mbmi->mv[i].as_int = cur_mv[i].as_int;
}
const int ref_mv_cost = cost_mv_ref(mode_costs, mbmi->mode, mode_ctx);
rd_stats->rate += ref_mv_cost;
if (RDCOST(x->rdmult, rd_stats->rate, 0) > ref_best_rd) {
return INT64_MAX;
}
mbmi->motion_mode = SIMPLE_TRANSLATION;
mbmi->num_proj_ref = 0;
if (is_comp_pred) {
// Only compound_average
mbmi->interinter_comp.type = COMPOUND_AVERAGE;
mbmi->comp_group_idx = 0;
mbmi->compound_idx = 1;
}
set_default_interp_filters(mbmi, cm->features.interp_filter);
const int mi_row = xd->mi_row;
const int mi_col = xd->mi_col;
av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, &orig_dst, bsize,
AOM_PLANE_Y, AOM_PLANE_Y);
int est_rate;
int64_t est_dist;
model_rd_sb_fn[MODELRD_CURVFIT](cpi, bsize, x, xd, 0, 0, &est_rate, &est_dist,
NULL, NULL, NULL, NULL, NULL);
return RDCOST(x->rdmult, rd_stats->rate + est_rate, est_dist);
}
// Represents a set of integers, from 0 to sizeof(int) * 8 - 1, as bits in
// an integer. 0 for the i-th bit means that integer is excluded, 1 means
// it is included.
static inline void mask_set_bit(int *mask, int index) { *mask |= (1 << index); }
static inline bool mask_check_bit(int mask, int index) {
return (mask >> index) & 0x1;
}
// Before performing the full MV search in handle_inter_mode, do a simple
// translation search and see if we can eliminate any motion vectors.
// Returns an integer where, if the i-th bit is set, it means that the i-th
// motion vector should be searched. Pruning is only applied for NEARMV.
static int ref_mv_idx_to_search(AV1_COMP *const cpi, MACROBLOCK *x,
RD_STATS *rd_stats,
HandleInterModeArgs *const args,
int64_t ref_best_rd, BLOCK_SIZE bsize,
const int ref_set) {
  // If there is only one ref MV candidate, do not prune it. It is better to
  // evaluate the candidate than to prune it.
if (ref_set == 1) return 1;
AV1_COMMON *const cm = &cpi->common;
const MACROBLOCKD *const xd = &x->e_mbd;
const MB_MODE_INFO *const mbmi = xd->mi[0];
const PREDICTION_MODE this_mode = mbmi->mode;
// Only search indices if they have some chance of being good.
int good_indices = 0;
for (int i = 0; i < ref_set; ++i) {
if (ref_mv_idx_early_breakout(&cpi->sf, &cpi->ref_frame_dist_info, x, args,
ref_best_rd, i)) {
continue;
}
mask_set_bit(&good_indices, i);
}
// Only prune in NEARMV mode, if the speed feature is set, and the block size
// is large enough. If these conditions are not met, return all good indices
// found so far.
if (!cpi->sf.inter_sf.prune_mode_search_simple_translation)
return good_indices;
if (!have_nearmv_in_inter_mode(this_mode)) return good_indices;
if (num_pels_log2_lookup[bsize] <= 6) return good_indices;
// Do not prune when there is internal resizing. TODO(elliottk) fix this
// so b/2384 can be resolved.
if (av1_is_scaled(get_ref_scale_factors(cm, mbmi->ref_frame[0])) ||
(mbmi->ref_frame[1] > 0 &&
av1_is_scaled(get_ref_scale_factors(cm, mbmi->ref_frame[1])))) {
return good_indices;
}
// Calculate the RD cost for the motion vectors using simple translation.
int64_t idx_rdcost[] = { INT64_MAX, INT64_MAX, INT64_MAX };
for (int ref_mv_idx = 0; ref_mv_idx < ref_set; ++ref_mv_idx) {
// If this index is bad, ignore it.
if (!mask_check_bit(good_indices, ref_mv_idx)) {
continue;
}
idx_rdcost[ref_mv_idx] = simple_translation_pred_rd(
cpi, x, rd_stats, args, ref_mv_idx, ref_best_rd, bsize);
}
// Find the index with the best RD cost.
int best_idx = 0;
for (int i = 1; i < MAX_REF_MV_SEARCH; ++i) {
if (idx_rdcost[i] < idx_rdcost[best_idx]) {
best_idx = i;
}
}
  // Only include indices that are good and within a small percentage of the
  // best.
  const double dth = has_second_ref(mbmi) ? 1.05 : 1.001;
  // If the simple-translation cost is not within this multiple of the best
  // RD found so far (ref_best_rd), skip the index. Note that the cutoff is
  // derived experimentally.
  const double ref_dth = 5;
int result = 0;
for (int i = 0; i < ref_set; ++i) {
if (mask_check_bit(good_indices, i) &&
(1.0 * idx_rdcost[i]) / idx_rdcost[best_idx] < dth &&
(1.0 * idx_rdcost[i]) / ref_best_rd < ref_dth) {
mask_set_bit(&result, i);
}
}
return result;
}
/*!\brief Motion mode information for inter mode search speedup.
*
* Used in a speed feature to search motion modes other than
* SIMPLE_TRANSLATION only on winning candidates.
*/
typedef struct motion_mode_candidate {
/*!
* Mode info for the motion mode candidate.
*/
MB_MODE_INFO mbmi;
/*!
* Rate describing the cost of the motion vectors for this candidate.
*/
int rate_mv;
/*!
* Rate before motion mode search and transform coding is applied.
*/
int rate2_nocoeff;
/*!
* An integer value 0 or 1 which indicates whether or not to skip the motion
* mode search and default to SIMPLE_TRANSLATION as a speed feature for this
* candidate.
*/
int skip_motion_mode;
/*!
* Total RD cost for this candidate.
*/
int64_t rd_cost;
} motion_mode_candidate;
/*!\cond */
typedef struct motion_mode_best_st_candidate {
motion_mode_candidate motion_mode_cand[MAX_WINNER_MOTION_MODES];
int num_motion_mode_cand;
} motion_mode_best_st_candidate;
// Checks if the current block's reference frames match either of a
// neighbouring (top/left) block's reference frames
static inline int ref_match_found_in_nb_blocks(MB_MODE_INFO *cur_mbmi,
MB_MODE_INFO *nb_mbmi) {
MV_REFERENCE_FRAME nb_ref_frames[2] = { nb_mbmi->ref_frame[0],
nb_mbmi->ref_frame[1] };
MV_REFERENCE_FRAME cur_ref_frames[2] = { cur_mbmi->ref_frame[0],
cur_mbmi->ref_frame[1] };
const int is_cur_comp_pred = has_second_ref(cur_mbmi);
int match_found = 0;
for (int i = 0; i < (is_cur_comp_pred + 1); i++) {
if ((cur_ref_frames[i] == nb_ref_frames[0]) ||
(cur_ref_frames[i] == nb_ref_frames[1]))
match_found = 1;
}
return match_found;
}
static inline int find_ref_match_in_above_nbs(const int total_mi_cols,
MACROBLOCKD *xd) {
if (!xd->up_available) return 1;
const int mi_col = xd->mi_col;
MB_MODE_INFO **cur_mbmi = xd->mi;
// prev_row_mi points into the mi array, starting at the beginning of the
// previous row.
MB_MODE_INFO **prev_row_mi = xd->mi - mi_col - 1 * xd->mi_stride;
const int end_col = AOMMIN(mi_col + xd->width, total_mi_cols);
uint8_t mi_step;
for (int above_mi_col = mi_col; above_mi_col < end_col;
above_mi_col += mi_step) {
MB_MODE_INFO **above_mi = prev_row_mi + above_mi_col;
mi_step = mi_size_wide[above_mi[0]->bsize];
int match_found = 0;
if (is_inter_block(*above_mi))
match_found = ref_match_found_in_nb_blocks(*cur_mbmi, *above_mi);
if (match_found) return 1;
}
return 0;
}
static inline int find_ref_match_in_left_nbs(const int total_mi_rows,
MACROBLOCKD *xd) {
if (!xd->left_available) return 1;
const int mi_row = xd->mi_row;
MB_MODE_INFO **cur_mbmi = xd->mi;
// prev_col_mi points into the mi array, starting at the top of the
// previous column
MB_MODE_INFO **prev_col_mi = xd->mi - 1 - mi_row * xd->mi_stride;
const int end_row = AOMMIN(mi_row + xd->height, total_mi_rows);
uint8_t mi_step;
for (int left_mi_row = mi_row; left_mi_row < end_row;
left_mi_row += mi_step) {
MB_MODE_INFO **left_mi = prev_col_mi + left_mi_row * xd->mi_stride;
mi_step = mi_size_high[left_mi[0]->bsize];
int match_found = 0;
if (is_inter_block(*left_mi))
match_found = ref_match_found_in_nb_blocks(*cur_mbmi, *left_mi);
if (match_found) return 1;
}
return 0;
}
/*!\endcond */
/*! \brief Struct used to hold TPL data to
* narrow down parts of the inter mode search.
*/
typedef struct {
/*!
* The best inter cost out of all of the reference frames.
*/
int64_t best_inter_cost;
/*!
* The inter cost for each reference frame.
*/
int64_t ref_inter_cost[INTER_REFS_PER_FRAME];
} PruneInfoFromTpl;
#if !CONFIG_REALTIME_ONLY
// TODO(Remya): Check if get_tpl_stats_b() can be reused
static inline void get_block_level_tpl_stats(
AV1_COMP *cpi, BLOCK_SIZE bsize, int mi_row, int mi_col, int *valid_refs,
PruneInfoFromTpl *inter_cost_info_from_tpl) {
AV1_COMMON *const cm = &cpi->common;
assert(IMPLIES(cpi->ppi->gf_group.size > 0,
cpi->gf_frame_index < cpi->ppi->gf_group.size));
const int tpl_idx = cpi->gf_frame_index;
TplParams *const tpl_data = &cpi->ppi->tpl_data;
if (!av1_tpl_stats_ready(tpl_data, tpl_idx)) return;
const TplDepFrame *tpl_frame = &tpl_data->tpl_frame[tpl_idx];
const TplDepStats *tpl_stats = tpl_frame->tpl_stats_ptr;
const int mi_wide = mi_size_wide[bsize];
const int mi_high = mi_size_high[bsize];
const int tpl_stride = tpl_frame->stride;
const int step = 1 << tpl_data->tpl_stats_block_mis_log2;
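  // The TPL stats are stored in the superres-upscaled domain, so convert the
  // coded-domain mi columns before indexing into the stats buffer.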
const int mi_col_sr =
coded_to_superres_mi(mi_col, cm->superres_scale_denominator);
const int mi_col_end_sr =
coded_to_superres_mi(mi_col + mi_wide, cm->superres_scale_denominator);
const int mi_cols_sr = av1_pixels_to_mi(cm->superres_upscaled_width);
const int row_step = step;
const int col_step_sr =
coded_to_superres_mi(step, cm->superres_scale_denominator);
for (int row = mi_row; row < AOMMIN(mi_row + mi_high, cm->mi_params.mi_rows);
row += row_step) {
for (int col = mi_col_sr; col < AOMMIN(mi_col_end_sr, mi_cols_sr);
col += col_step_sr) {
const TplDepStats *this_stats = &tpl_stats[av1_tpl_ptr_pos(
row, col, tpl_stride, tpl_data->tpl_stats_block_mis_log2)];
// Sums up the inter cost of corresponding ref frames
for (int ref_idx = 0; ref_idx < INTER_REFS_PER_FRAME; ref_idx++) {
inter_cost_info_from_tpl->ref_inter_cost[ref_idx] +=
this_stats->pred_error[ref_idx];
}
}
}
// Computes the best inter cost (minimum inter_cost)
int64_t best_inter_cost = INT64_MAX;
for (int ref_idx = 0; ref_idx < INTER_REFS_PER_FRAME; ref_idx++) {
const int64_t cur_inter_cost =
inter_cost_info_from_tpl->ref_inter_cost[ref_idx];
    // Invalid ref frames have cur_inter_cost == 0, which must be excluded
    // while calculating the minimum inter_cost.
if (cur_inter_cost != 0 && (cur_inter_cost < best_inter_cost) &&
valid_refs[ref_idx])
best_inter_cost = cur_inter_cost;
}
inter_cost_info_from_tpl->best_inter_cost = best_inter_cost;
}
#endif  // !CONFIG_REALTIME_ONLY
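// Prunes the given mode if its accumulated TPL inter cost (the worse of the
// two references in the compound case) is large relative to the best inter
// cost across all references. Returns 1 if the mode should be pruned.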
static inline int prune_modes_based_on_tpl_stats(
PruneInfoFromTpl *inter_cost_info_from_tpl, const int *refs, int ref_mv_idx,
const PREDICTION_MODE this_mode, int prune_mode_level) {
const int have_newmv = have_newmv_in_inter_mode(this_mode);
if ((prune_mode_level < 2) && have_newmv) return 0;
const int64_t best_inter_cost = inter_cost_info_from_tpl->best_inter_cost;
if (best_inter_cost == INT64_MAX) return 0;
const int prune_level = prune_mode_level - 1;
int64_t cur_inter_cost;
const int is_globalmv =
(this_mode == GLOBALMV) || (this_mode == GLOBAL_GLOBALMV);
const int prune_index = is_globalmv ? MAX_REF_MV_SEARCH : ref_mv_idx;
  // Thresholds used for pruning: a lower value indicates more aggressive
  // pruning, a higher value more conservative pruning. The threshold is
  // selected based on ref_mv_idx and the speed feature level.
  // prune_index 0, 1, 2 corresponds to ref_mv indices 0, 1 and 2;
  // prune_index 3 corresponds to GLOBALMV/GLOBAL_GLOBALMV.
static const int tpl_inter_mode_prune_mul_factor[3][MAX_REF_MV_SEARCH + 1] = {
{ 6, 6, 6, 4 }, { 6, 4, 4, 4 }, { 5, 4, 4, 4 }
};
const int is_comp_pred = (refs[1] > INTRA_FRAME);
if (!is_comp_pred) {
cur_inter_cost = inter_cost_info_from_tpl->ref_inter_cost[refs[0] - 1];
} else {
const int64_t inter_cost_ref0 =
inter_cost_info_from_tpl->ref_inter_cost[refs[0] - 1];
const int64_t inter_cost_ref1 =
inter_cost_info_from_tpl->ref_inter_cost[refs[1] - 1];
// Choose maximum inter_cost among inter_cost_ref0 and inter_cost_ref1 for
// more aggressive pruning
cur_inter_cost = AOMMAX(inter_cost_ref0, inter_cost_ref1);
}
  // Prune the mode if cur_inter_cost is greater than the multiplication
  // factor (in Q2 format, hence the >> 2) times best_inter_cost.
if (cur_inter_cost >
((tpl_inter_mode_prune_mul_factor[prune_level][prune_index] *
best_inter_cost) >>
2))
return 1;
return 0;
}
/*!\brief High level function to select parameters for compound mode.
*
* \ingroup inter_mode_search
* The main search functionality is done in the call to av1_compound_type_rd().
*
* \param[in] cpi Top-level encoder structure.
* \param[in] x Pointer to struct holding all the data for
* the current macroblock.
* \param[in] args HandleInterModeArgs struct holding
* miscellaneous arguments for inter mode
* search. See the documentation for this
* struct for a description of each member.
* \param[in] ref_best_rd Best RD found so far for this block.
* It is used for early termination of this
* search if the RD exceeds this value.
* \param[in,out] cur_mv Current motion vector.
* \param[in] bsize Current block size.
* \param[in,out] compmode_interinter_cost RD of the selected interinter
 *                                         compound mode.
* \param[in,out] rd_buffers CompoundTypeRdBuffers struct to hold all
* allocated buffers for the compound
* predictors and masks in the compound type
* search.
* \param[in,out] orig_dst A prediction buffer to hold a computed
* prediction. This will eventually hold the
* final prediction, and the tmp_dst info will
* be copied here.
* \param[in] tmp_dst A temporary prediction buffer to hold a
* computed prediction.
* \param[in,out] rate_mv The rate associated with the motion vectors.
* This will be modified if a motion search is
* done in the motion mode search.
* \param[in,out] rd_stats Struct to keep track of the overall RD
* information.
* \param[in,out] skip_rd An array of length 2 where skip_rd[0] is the
* best total RD for a skip mode so far, and
* skip_rd[1] is the best RD for a skip mode so
* far in luma. This is used as a speed feature
* to skip the transform search if the computed
* skip RD for the current mode is not better
* than the best skip_rd so far.
* \param[in,out] skip_build_pred Indicates whether or not to build the inter
* predictor. If this is 0, the inter predictor
* has already been built and thus we can avoid
* repeating computation.
* \return Returns 1 if this mode is worse than one already seen and 0 if it is
* a viable candidate.
*/
static int process_compound_inter_mode(
AV1_COMP *const cpi, MACROBLOCK *x, HandleInterModeArgs *args,
int64_t ref_best_rd, int_mv *cur_mv, BLOCK_SIZE bsize,
int *compmode_interinter_cost, const CompoundTypeRdBuffers *rd_buffers,
const BUFFER_SET *orig_dst, const BUFFER_SET *tmp_dst, int *rate_mv,
RD_STATS *rd_stats, int64_t *skip_rd, int *skip_build_pred) {
MACROBLOCKD *xd = &x->e_mbd;
MB_MODE_INFO *mbmi = xd->mi[0];
const AV1_COMMON *cm = &cpi->common;
const int masked_compound_used = is_any_masked_compound_used(bsize) &&
cm->seq_params->enable_masked_compound;
int mode_search_mask = (1 << COMPOUND_AVERAGE) | (1 << COMPOUND_DISTWTD) |
(1 << COMPOUND_WEDGE) | (1 << COMPOUND_DIFFWTD);
const int num_planes = av1_num_planes(cm);
const int mi_row = xd->mi_row;
const int mi_col = xd->mi_col;
int is_luma_interp_done = 0;
set_default_interp_filters(mbmi, cm->features.interp_filter);
int64_t best_rd_compound;
int64_t rd_thresh;
const int comp_type_rd_shift = COMP_TYPE_RD_THRESH_SHIFT;
const int comp_type_rd_scale = COMP_TYPE_RD_THRESH_SCALE;
rd_thresh = get_rd_thresh_from_best_rd(ref_best_rd, (1 << comp_type_rd_shift),
comp_type_rd_scale);
// Select compound type and any parameters related to that type
// (for example, the mask parameters if it is a masked mode) and compute
// the RD
*compmode_interinter_cost = av1_compound_type_rd(
cpi, x, args, bsize, cur_mv, mode_search_mask, masked_compound_used,
orig_dst, tmp_dst, rd_buffers, rate_mv, &best_rd_compound, rd_stats,
ref_best_rd, skip_rd[1], &is_luma_interp_done, rd_thresh);
if (ref_best_rd < INT64_MAX &&
(best_rd_compound >> comp_type_rd_shift) * comp_type_rd_scale >
ref_best_rd) {
restore_dst_buf(xd, *orig_dst, num_planes);
return 1;
}
// Build only uv predictor for COMPOUND_AVERAGE.
// Note there is no need to call av1_enc_build_inter_predictor
// for luma if COMPOUND_AVERAGE is selected because it is the first
// candidate in av1_compound_type_rd, which means it used the dst_buf
// rather than the tmp_buf.
if (mbmi->interinter_comp.type == COMPOUND_AVERAGE && is_luma_interp_done) {
if (num_planes > 1) {
av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, orig_dst, bsize,
AOM_PLANE_U, num_planes - 1);
}
*skip_build_pred = 1;
}
return 0;
}
// Speed feature to prune out MVs that are similar to previously evaluated
// MVs if they do not achieve the best RD advantage.
static int prune_ref_mv_idx_search(int ref_mv_idx, int best_ref_mv_idx,
int_mv save_mv[MAX_REF_MV_SEARCH - 1][2],
MB_MODE_INFO *mbmi, int pruning_factor) {
int i;
const int is_comp_pred = has_second_ref(mbmi);
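  // MV difference threshold (in 1/8-pel units) below which the current MVs
  // are treated as a repeat of a previously evaluated candidate. The
  // threshold scales with the number of MVs compared and the pruning factor.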
const int thr = (1 + is_comp_pred) << (pruning_factor + 1);
// Skip the evaluation if an MV match is found.
if (ref_mv_idx > 0) {
for (int idx = 0; idx < ref_mv_idx; ++idx) {
if (save_mv[idx][0].as_int == INVALID_MV) continue;
int mv_diff = 0;
for (i = 0; i < 1 + is_comp_pred; ++i) {
mv_diff += abs(save_mv[idx][i].as_mv.row - mbmi->mv[i].as_mv.row) +
abs(save_mv[idx][i].as_mv.col - mbmi->mv[i].as_mv.col);
}
// If this mode is not the best one, and current MV is similar to
// previous stored MV, terminate this ref_mv_idx evaluation.
if (best_ref_mv_idx == -1 && mv_diff <= thr) return 1;
}
}
if (ref_mv_idx < MAX_REF_MV_SEARCH - 1) {
for (i = 0; i < is_comp_pred + 1; ++i)
save_mv[ref_mv_idx][i].as_int = mbmi->mv[i].as_int;
}
return 0;
}
/*!\brief Prunes ZeroMV Search Using Best NEWMV's SSE
*
* \ingroup inter_mode_search
*
* Compares the sse of zero mv and the best sse found in single new_mv. If the
* sse of the zero_mv is higher, returns 1 to signal zero_mv can be skipped.
* Else returns 0.
*
 * Note that the SSE here comes from single_motion_search, so it is
 * interpolated with the filter used in motion search, not the actual
 * interpolation filter used in encoding.
*
* \param[in] fn_ptr A table of function pointers to compute SSE.
* \param[in] x Pointer to struct holding all the data for
* the current macroblock.
* \param[in] bsize The current block_size.
* \param[in] args The args to handle_inter_mode, used to track
* the best SSE.
 * \param[in]     prune_zero_mv_with_sse The value of the speed feature
 *                                       prune_zero_mv_with_sse.
* \return Returns 1 if zero_mv is pruned, 0 otherwise.
*/
static inline int prune_zero_mv_with_sse(const aom_variance_fn_ptr_t *fn_ptr,
const MACROBLOCK *x, BLOCK_SIZE bsize,
const HandleInterModeArgs *args,
int prune_zero_mv_with_sse) {
const MACROBLOCKD *xd = &x->e_mbd;
const MB_MODE_INFO *mbmi = xd->mi[0];
const int is_comp_pred = has_second_ref(mbmi);
const MV_REFERENCE_FRAME *refs = mbmi->ref_frame;
for (int idx = 0; idx < 1 + is_comp_pred; idx++) {
if (xd->global_motion[refs[idx]].wmtype != IDENTITY) {
// Pruning logic only works for IDENTITY type models
// Note: In theory we could apply similar logic for TRANSLATION
// type models, but we do not code these due to a spec bug
// (see comments in gm_get_motion_vector() in av1/common/mv.h)
assert(xd->global_motion[refs[idx]].wmtype != TRANSLATION);
return 0;
}
// Don't prune if we have invalid data
assert(mbmi->mv[idx].as_int == 0);
if (args->best_single_sse_in_refs[refs[idx]] == INT32_MAX) {
return 0;
}
}
// Sum up the sse of ZEROMV and best NEWMV
unsigned int this_sse_sum = 0;
unsigned int best_sse_sum = 0;
for (int idx = 0; idx < 1 + is_comp_pred; idx++) {
const struct macroblock_plane *const p = &x->plane[AOM_PLANE_Y];
const struct macroblockd_plane *pd = xd->plane;
const struct buf_2d *src_buf = &p->src;
const struct buf_2d *ref_buf = &pd->pre[idx];
const uint8_t *src = src_buf->buf;
const uint8_t *ref = ref_buf->buf;
const int src_stride = src_buf->stride;
const int ref_stride = ref_buf->stride;
unsigned int this_sse;
fn_ptr[bsize].vf(ref, ref_stride, src, src_stride, &this_sse);
this_sse_sum += this_sse;
const unsigned int best_sse = args->best_single_sse_in_refs[refs[idx]];
best_sse_sum += best_sse;
}
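  // Allow 25% slack over the best NEWMV SSE before pruning ZEROMV, or no
  // slack at the more aggressive setting of the speed feature.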
const double mul = prune_zero_mv_with_sse > 1 ? 1.00 : 1.25;
if ((double)this_sse_sum > (mul * (double)best_sse_sum)) {
return 1;
}
return 0;
}
/*!\brief Searches for interpolation filter in realtime mode during winner eval
*
* \ingroup inter_mode_search
*
 * Does a simple interpolation filter search during winner mode evaluation.
 * This is currently only used by realtime mode, as \ref
 * av1_interpolation_filter_search is not called during realtime encoding.
 *
 * This function only searches over two possible filters. EIGHTTAP_REGULAR is
 * always searched. For low-res clips (<= 240p), MULTITAP_SHARP is also
 * searched, while for higher-res clips (> 240p), EIGHTTAP_SMOOTH is searched
 * instead.
 *
* \param[in] cpi Pointer to the compressor. Used for feature
* flags.
* \param[in,out] x Pointer to macroblock. This is primarily
* used to access the buffers.
* \param[in] mi_row The current row in mi unit (4X4 pixels).
* \param[in] mi_col The current col in mi unit (4X4 pixels).
* \param[in] bsize The current block_size.
* \return Returns true if a predictor is built in xd->dst, false otherwise.
*/
static inline bool fast_interp_search(const AV1_COMP *cpi, MACROBLOCK *x,
int mi_row, int mi_col,
BLOCK_SIZE bsize) {
static const InterpFilters filters_ref_set[3] = {
{ EIGHTTAP_REGULAR, EIGHTTAP_REGULAR },
{ EIGHTTAP_SMOOTH, EIGHTTAP_SMOOTH },
{ MULTITAP_SHARP, MULTITAP_SHARP }
};
const AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mi = xd->mi[0];
int64_t best_cost = INT64_MAX;
int best_filter_index = -1;
  // dst_bufs[0] stores the new predictor, and dst_bufs[1] stores the best.
const int num_planes = av1_num_planes(cm);
const int is_240p_or_lesser = AOMMIN(cm->width, cm->height) <= 240;
assert(is_inter_mode(mi->mode));
assert(mi->motion_mode == SIMPLE_TRANSLATION);
assert(!is_inter_compound_mode(mi->mode));
if (!av1_is_interp_needed(xd)) {
return false;
}
struct macroblockd_plane *pd = xd->plane;
const BUFFER_SET orig_dst = {
{ pd[0].dst.buf, pd[1].dst.buf, pd[2].dst.buf },
{ pd[0].dst.stride, pd[1].dst.stride, pd[2].dst.stride },
};
uint8_t *const tmp_buf = get_buf_by_bd(xd, x->tmp_pred_bufs[0]);
const BUFFER_SET tmp_dst = { { tmp_buf, tmp_buf + 1 * MAX_SB_SQUARE,
tmp_buf + 2 * MAX_SB_SQUARE },
{ MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE } };
const BUFFER_SET *dst_bufs[2] = { &orig_dst, &tmp_dst };
for (int i = 0; i < 3; ++i) {
if (is_240p_or_lesser) {
if (filters_ref_set[i].x_filter == EIGHTTAP_SMOOTH) {
continue;
}
} else