CWG-D027 (compound weighted prediction)
This contribution proposes an extension to the COMPOUND_AVERAGE mode that enables adaptive weighting of the two prediction signals in compound prediction mode. Specifically, when the two reference frames are from different directions, five weighting factors {8, 10, 6, 12, 4} are supported. In contrast, when the reference frames are from the same direction, five different weighting factors {8, 12, 4, 20, -4} are supported.
STATS_CHANGED
diff --git a/aom/aom_encoder.h b/aom/aom_encoder.h
index 32b82a4..a1bd401 100644
--- a/aom/aom_encoder.h
+++ b/aom/aom_encoder.h
@@ -309,6 +309,12 @@
*/
unsigned int enable_bawp;
#endif // CONFIG_BAWP
+#if CONFIG_CWP
+ /*!\brief enable compound weighted prediction
+ *
+ */
+ unsigned int enable_cwp;
+#endif  // CONFIG_CWP
/*!\brief enable Forward skip coding
*
*/
diff --git a/apps/aomenc.c b/apps/aomenc.c
index cc80bff..a3e5929 100644
--- a/apps/aomenc.c
+++ b/apps/aomenc.c
@@ -443,6 +443,9 @@
#if CONFIG_BAWP
&g_av1_codec_arg_defs.enable_bawp,
#endif // CONFIG_BAWP
+#if CONFIG_CWP
+ &g_av1_codec_arg_defs.enable_cwp,
+#endif // CONFIG_CWP
&g_av1_codec_arg_defs.enable_fsc,
#if CONFIG_ORIP
&g_av1_codec_arg_defs.enable_orip,
@@ -641,6 +644,9 @@
#if CONFIG_BAWP
config->enable_bawp = 1;
#endif // CONFIG_BAWP
+#if CONFIG_CWP
+ config->enable_cwp = 1;
+#endif  // CONFIG_CWP
config->enable_fsc = 1;
#if CONFIG_ORIP
config->enable_orip = 1;
@@ -1544,6 +1550,10 @@
fprintf(stdout, " : BAWP (%d)\n",
encoder_cfg->enable_bawp);
#endif // CONFIG_BAWP
+#if CONFIG_CWP
+ fprintf(stdout, " : CWP (%d)\n",
+ encoder_cfg->enable_cwp);
+#endif // CONFIG_CWP
fprintf(stdout,
" : GlobalMotion (%d), "
diff --git a/av1/arg_defs.c b/av1/arg_defs.c
index d6b37da..baf23a6 100644
--- a/av1/arg_defs.c
+++ b/av1/arg_defs.c
@@ -410,6 +410,11 @@
"Enable block adaptive weighted prediction (BAWP)"
"(0: false, 1: true (default))"),
#endif // CONFIG_BAWP
+#if CONFIG_CWP
+ .enable_cwp = ARG_DEF(NULL, "enable-cwp", 1,
+ "Enable compound weighted prediction (CWP)"
+ "(0: false, 1: true (default))"),
+#endif // CONFIG_CWP
.enable_fsc = ARG_DEF(NULL, "enable-fsc", 1,
"Enable forward skip coding"
"(0: false, 1: true (default))"),
diff --git a/av1/arg_defs.h b/av1/arg_defs.h
index ecc6d6b..f68fe96 100644
--- a/av1/arg_defs.h
+++ b/av1/arg_defs.h
@@ -163,6 +163,9 @@
#if CONFIG_BAWP
arg_def_t enable_bawp;
#endif // CONFIG_BAWP
+#if CONFIG_CWP
+ arg_def_t enable_cwp;
+#endif // CONFIG_CWP
arg_def_t enable_fsc;
#if CONFIG_ORIP
arg_def_t enable_orip;
diff --git a/av1/av1_cx_iface.c b/av1/av1_cx_iface.c
index 7999ef1..46aaf30 100644
--- a/av1/av1_cx_iface.c
+++ b/av1/av1_cx_iface.c
@@ -137,7 +137,10 @@
#if CONFIG_BAWP
int enable_bawp; // enable block adaptive weighted prediction
#endif // CONFIG_BAWP
- int enable_fsc; // enable forward skip coding
+#if CONFIG_CWP
+ int enable_cwp; // enable compound weighted prediction
+#endif // CONFIG_CWP
+ int enable_fsc; // enable forward skip coding
#if CONFIG_ORIP
int enable_orip; // enable ORIP
#endif // CONFIG_ORIP
@@ -468,6 +471,9 @@
#if CONFIG_BAWP
1, // enable block adaptive weighted prediction (BAWP)
#endif // CONFIG_BAWP
+#if CONFIG_CWP
+ 1, // enable compound weighted prediction (CWP)
+#endif // CONFIG_CWP
1, // enable forward skip coding
#if CONFIG_ORIP
1, // enable ORIP
@@ -960,6 +966,9 @@
#if CONFIG_BAWP
cfg->enable_bawp = extra_cfg->enable_bawp;
#endif // CONFIG_BAWP
+#if CONFIG_CWP
+ cfg->enable_cwp = extra_cfg->enable_cwp;
+#endif // CONFIG_CWP
cfg->enable_fsc = extra_cfg->enable_fsc;
#if CONFIG_ORIP
cfg->enable_orip = extra_cfg->enable_orip;
@@ -1074,6 +1083,9 @@
#if CONFIG_BAWP
extra_cfg->enable_bawp = cfg->enable_bawp;
#endif // CONFIG_BAWP
+#if CONFIG_CWP
+ extra_cfg->enable_cwp = cfg->enable_cwp;
+#endif // CONFIG_CWP
extra_cfg->enable_fsc = cfg->enable_fsc;
#if CONFIG_ORIP
extra_cfg->enable_orip = cfg->enable_orip;
@@ -1391,6 +1403,9 @@
#if CONFIG_BAWP
tool_cfg->enable_bawp = extra_cfg->enable_bawp;
#endif // CONFIG_BAWP
+#if CONFIG_CWP
+ tool_cfg->enable_cwp = extra_cfg->enable_cwp;
+#endif // CONFIG_CWP
tool_cfg->force_video_mode = extra_cfg->force_video_mode;
tool_cfg->enable_palette = extra_cfg->enable_palette;
// FIXME(debargha): Should this be:
@@ -3804,6 +3819,11 @@
err_string)) {
extra_cfg.enable_bawp = arg_parse_int_helper(&arg, err_string);
#endif // CONFIG_BAWP
+#if CONFIG_CWP
+ } else if (arg_match_helper(&arg, &g_av1_codec_arg_defs.enable_cwp, argv,
+ err_string)) {
+ extra_cfg.enable_cwp = arg_parse_int_helper(&arg, err_string);
+#endif // CONFIG_CWP
} else if (arg_match_helper(&arg, &g_av1_codec_arg_defs.enable_fsc, argv,
err_string)) {
extra_cfg.enable_fsc = arg_parse_int_helper(&arg, err_string);
@@ -4289,6 +4309,9 @@
#if CONFIG_BAWP
1,
#endif // CONFIG_BAWP
+#if CONFIG_CWP
+ 1,
+#endif // CONFIG_CWP
1,
#if CONFIG_ORIP
1,
diff --git a/av1/common/av1_common_int.h b/av1/common/av1_common_int.h
index 1f527c2..ac7781a 100644
--- a/av1/common/av1_common_int.h
+++ b/av1/common/av1_common_int.h
@@ -435,8 +435,11 @@
#if CONFIG_BAWP
uint8_t enable_bawp; // enables/disables block adaptive weighted prediction
#endif // CONFIG_BAWP
- uint8_t enable_fsc; // enables/disables forward skip coding
- uint8_t enable_filter_intra; // enables/disables filterintra
+#if CONFIG_CWP
+ uint8_t enable_cwp; // enables/disables compound weighted prediction
+#endif // CONFIG_CWP
+ uint8_t enable_fsc; // enables/disables forward skip coding
+ uint8_t enable_filter_intra; // enables/disables filterintra
uint8_t enable_intra_edge_filter; // enables/disables edge upsampling
#if CONFIG_ORIP
@@ -715,6 +718,12 @@
*/
bool enable_bawp;
#endif // CONFIG_BAWP
+#if CONFIG_CWP
+ /*!
+ * Enables/disables compound weighted prediction
+ */
+ bool enable_cwp;
+#endif // CONFIG_CWP
#if CONFIG_EXTENDED_WARP_PREDICTION
/*!
* Bit mask of enabled motion modes for this frame
diff --git a/av1/common/blockd.h b/av1/common/blockd.h
index 531c8aa..60fa8bb 100644
--- a/av1/common/blockd.h
+++ b/av1/common/blockd.h
@@ -487,6 +487,11 @@
/*! \brief The bawp parameters offset*/
int32_t bawp_beta[3][2]; //[yuv][ref0/1], current only [0][0] is used.
#endif // CONFIG_BAWP
+
+#if CONFIG_CWP
+ //! Index for compound weighted prediction parameters.
+ int8_t cwp_idx;
+#endif // CONFIG_CWP
/**@}*/
/*****************************************************************************
@@ -3335,6 +3340,25 @@
}
#endif // CONFIG_EXT_RECUR_PARTITIONS
+#if CONFIG_CWP
+// check whether compound weighted prediction can be allowed
+static INLINE int is_cwp_allowed(const MB_MODE_INFO *mbmi) {
+ if (mbmi->skip_mode) return 1;
+ int use_cwp = has_second_ref(mbmi) && mbmi->mode < NEAR_NEARMV_OPTFLOW &&
+ mbmi->interinter_comp.type == COMPOUND_AVERAGE &&
+ mbmi->motion_mode == SIMPLE_TRANSLATION;
+ use_cwp &=
+ (mbmi->mode == NEAR_NEARMV || is_joint_mvd_coding_mode(mbmi->mode));
+ use_cwp &= (mbmi->jmvd_scale_mode == 0);
+ return use_cwp;
+}
+// Return the index for compound weighted prediction
+static INLINE int8_t get_cwp_idx(const MB_MODE_INFO *mbmi) {
+ assert(mbmi->cwp_idx <= CWP_MAX && mbmi->cwp_idx >= CWP_MIN);
+ return mbmi->cwp_idx;
+}
+#endif  // CONFIG_CWP
+
/*!\endcond */
#ifdef __cplusplus
diff --git a/av1/common/common_data.h b/av1/common/common_data.h
index 7c9b96b..4a96ef3 100644
--- a/av1/common/common_data.h
+++ b/av1/common/common_data.h
@@ -77,6 +77,14 @@
4, 5, 5, 6, 7, 7, 8, 9, 9, 10, 11, 11, 12, 13, 13, 14, 6, 6, 8, 8, 10, 10
};
+#if CONFIG_CWP
+// Supported weighting factor for compound weighted prediction
+static const int8_t cwp_weighting_factor[2][MAX_CWP_NUM] = {
+ { 8, 12, 4, 10, 6 },
+ { 8, 12, 4, 20, -4 },
+};
+#endif // CONFIG_CWP
+
#if CONFIG_EXT_RECUR_PARTITIONS
/* clang-format off */
// This table covers all square blocks and 1:2/2:1 rectangular blocks
diff --git a/av1/common/entropy.c b/av1/common/entropy.c
index 7a19310..402178f 100644
--- a/av1/common/entropy.c
+++ b/av1/common/entropy.c
@@ -170,6 +170,10 @@
#else
RESET_CDF_COUNTER(fc->inter_compound_mode_cdf, INTER_COMPOUND_MODES);
#endif // CONFIG_OPTFLOW_REFINEMENT
+
+#if CONFIG_CWP
+ RESET_CDF_COUNTER(fc->cwp_idx_cdf, 2);
+#endif  // CONFIG_CWP
#if CONFIG_IMPROVED_JMVD
RESET_CDF_COUNTER(fc->jmvd_scale_mode_cdf, JOINT_NEWMV_SCALE_FACTOR_CNT);
RESET_CDF_COUNTER(fc->jmvd_amvd_scale_mode_cdf, JOINT_AMVD_SCALE_FACTOR_CNT);
diff --git a/av1/common/entropymode.c b/av1/common/entropymode.c
index 24a9fe2..9572d82 100644
--- a/av1/common/entropymode.c
+++ b/av1/common/entropymode.c
@@ -1336,6 +1336,21 @@
{ AOM_CDF2(16618) }, { AOM_CDF2(14980) }, { AOM_CDF2(15963) }
};
#endif // CONFIG_C076_INTER_MOD_CTX
+
+#if CONFIG_CWP
+static const aom_cdf_prob default_cwp_idx_cdf[MAX_CWP_CONTEXTS][MAX_CWP_NUM - 1]
+ [CDF_SIZE(2)] = {
+ { { AOM_CDF2(16384) },
+ { AOM_CDF2(16384) },
+ { AOM_CDF2(16384) },
+ { AOM_CDF2(16384) } },
+ { { AOM_CDF2(16384) },
+ { AOM_CDF2(16384) },
+ { AOM_CDF2(16384) },
+ { AOM_CDF2(16384) } },
+ };
+#endif // CONFIG_CWP
+
#if CONFIG_IMPROVED_JMVD
static const aom_cdf_prob
default_jmvd_scale_mode_cdf[CDF_SIZE(JOINT_NEWMV_SCALE_FACTOR_CNT)] = {
@@ -2941,6 +2956,10 @@
#if CONFIG_OPTFLOW_REFINEMENT
av1_copy(fc->use_optflow_cdf, default_use_optflow_cdf);
#endif // CONFIG_OPTFLOW_REFINEMENT
+
+#if CONFIG_CWP
+ av1_copy(fc->cwp_idx_cdf, default_cwp_idx_cdf);
+#endif // CONFIG_CWP
#if CONFIG_IMPROVED_JMVD
av1_copy(fc->jmvd_scale_mode_cdf, default_jmvd_scale_mode_cdf);
av1_copy(fc->jmvd_amvd_scale_mode_cdf, default_jmvd_amvd_scale_mode_cdf);
diff --git a/av1/common/entropymode.h b/av1/common/entropymode.h
index f270393..a38b5f6 100644
--- a/av1/common/entropymode.h
+++ b/av1/common/entropymode.h
@@ -200,6 +200,10 @@
aom_cdf_prob inter_compound_mode_cdf[INTER_COMPOUND_MODE_CONTEXTS]
[CDF_SIZE(INTER_COMPOUND_MODES)];
#endif // CONFIG_OPTFLOW_REFINEMENT
+
+#if CONFIG_CWP
+ aom_cdf_prob cwp_idx_cdf[MAX_CWP_CONTEXTS][MAX_CWP_NUM - 1][CDF_SIZE(2)];
+#endif // CONFIG_CWP
#if CONFIG_IMPROVED_JMVD
aom_cdf_prob jmvd_scale_mode_cdf[CDF_SIZE(JOINT_NEWMV_SCALE_FACTOR_CNT)];
aom_cdf_prob jmvd_amvd_scale_mode_cdf[CDF_SIZE(JOINT_AMVD_SCALE_FACTOR_CNT)];
diff --git a/av1/common/enums.h b/av1/common/enums.h
index 9c251a0..d194d6f 100644
--- a/av1/common/enums.h
+++ b/av1/common/enums.h
@@ -912,6 +912,19 @@
#define INTER_COMPOUND_MODE_CONTEXTS 8
#endif // CONFIG_C076_INTER_MOD_CTX
+#if CONFIG_CWP
+// Number of supported factors for compound weighted prediction
+#define MAX_CWP_NUM 5
+// maximum value for the supported factors
+#define CWP_MAX 20
+// minimum value for the supported factors
+#define CWP_MIN -4
+// Weighting factor for simple average prediction
+#define CWP_EQUAL 8
+#define CWP_WEIGHT_BITS 4
+#define MAX_CWP_CONTEXTS 2
+#endif  // CONFIG_CWP
+
#define DELTA_Q_SMALL 3
#define DELTA_Q_PROBS (DELTA_Q_SMALL)
#define DEFAULT_DELTA_Q_RES_PERCEPTUAL 4
diff --git a/av1/common/mv.h b/av1/common/mv.h
index 42cac87..e537bfd 100644
--- a/av1/common/mv.h
+++ b/av1/common/mv.h
@@ -533,6 +533,10 @@
// candidate, and so does not allow WARP_EXTEND
int row_offset;
int col_offset;
+#if CONFIG_CWP
+ // Record the cwp index of the neighboring blocks
+ int8_t cwp_idx;
+#endif // CONFIG_CWP
#endif // CONFIG_EXTENDED_WARP_PREDICTION
} CANDIDATE_MV;
diff --git a/av1/common/mvref_common.c b/av1/common/mvref_common.c
index 36da9e0..fccd5a0 100644
--- a/av1/common/mvref_common.c
+++ b/av1/common/mvref_common.c
@@ -222,6 +222,9 @@
ref_mv_stack[index].row_offset = OFFSET_NONSPATIAL;
ref_mv_stack[index].col_offset = OFFSET_NONSPATIAL;
#endif // CONFIG_EXTENDED_WARP_PREDICTION
+#if CONFIG_CWP
+ ref_mv_stack[index].cwp_idx = derived_mv_stack[derived_idx].cwp_idx;
+#endif // CONFIG_CWP
ref_mv_weight[index] = REF_CAT_LEVEL;
++(*refmv_count);
}
@@ -249,6 +252,9 @@
ref_mv_stack[index].row_offset = OFFSET_NONSPATIAL;
ref_mv_stack[index].col_offset = OFFSET_NONSPATIAL;
#endif // CONFIG_EXTENDED_WARP_PREDICTION
+#if CONFIG_CWP
+ ref_mv_stack[index].cwp_idx = derived_mv_stack[derived_idx].cwp_idx;
+#endif // CONFIG_CWP
#if CONFIG_SKIP_MODE_DRL_WITH_REF_IDX
if (mbmi->skip_mode) {
ref_frame_idx0[index] = rf[0];
@@ -323,6 +329,9 @@
ref_mv_stack[index].row_offset = OFFSET_NONSPATIAL;
ref_mv_stack[index].col_offset = OFFSET_NONSPATIAL;
#endif // CONFIG_EXTENDED_WARP_PREDICTION
+#if CONFIG_CWP
+ ref_mv_stack[index].cwp_idx = candidate->cwp_idx;
+#endif // CONFIG_CWP
++(*refmv_count);
}
if (have_newmv_in_inter_mode(candidate->mode)) ++*newmv_count;
@@ -464,6 +473,9 @@
ref_mv_stack[index].comp_mv = this_refmv[1];
ref_frame_idx0[index] = candidate->ref_frame[0];
ref_frame_idx1[index] = candidate->ref_frame[1];
+#if CONFIG_CWP
+ ref_mv_stack[index].cwp_idx = candidate->cwp_idx;
+#endif // CONFIG_CWP
ref_mv_weight[index] = weight;
++(*refmv_count);
}
@@ -523,6 +535,9 @@
ref_mv_stack[index].row_offset = row_offset;
ref_mv_stack[index].col_offset = col_offset;
#endif // CONFIG_EXTENDED_WARP_PREDICTION
+#if CONFIG_CWP
+ ref_mv_stack[index].cwp_idx = candidate->cwp_idx;
+#endif // CONFIG_CWP
ref_mv_weight[index] = weight;
++(*refmv_count);
}
@@ -586,6 +601,9 @@
*derived_mv_count < MAX_REF_MV_STACK_SIZE) {
derived_mv_stack[index].this_mv = this_refmv;
derived_mv_weight[index] = weight;
+#if CONFIG_CWP
+ derived_mv_stack[index].cwp_idx = candidate->cwp_idx;
+#endif // CONFIG_CWP
++(*derived_mv_count);
}
}
@@ -640,6 +658,9 @@
ref_mv_stack[index].row_offset = OFFSET_NONSPATIAL;
ref_mv_stack[index].col_offset = OFFSET_NONSPATIAL;
#endif // CONFIG_EXTENDED_WARP_PREDICTION
+#if CONFIG_CWP
+ ref_mv_stack[index].cwp_idx = candidate->cwp_idx;
+#endif // CONFIG_CWP
++(*refmv_count);
}
if (have_newmv_in_inter_mode(candidate->mode)) ++*newmv_count;
@@ -700,6 +721,9 @@
derived_mv_stack[index].this_mv = this_refmv[0];
derived_mv_stack[index].comp_mv = this_refmv[1];
derived_mv_weight[index] = weight;
+#if CONFIG_CWP
+ derived_mv_stack[index].cwp_idx = candidate->cwp_idx;
+#endif // CONFIG_CWP
++(*derived_mv_count);
}
}
@@ -1520,6 +1544,9 @@
ref_mv_stack[idx].row_offset = OFFSET_NONSPATIAL;
ref_mv_stack[idx].col_offset = OFFSET_NONSPATIAL;
#endif // CONFIG_EXTENDED_WARP_PREDICTION
+#if CONFIG_CWP
+ ref_mv_stack[idx].cwp_idx = CWP_EQUAL;
+#endif // CONFIG_CWP
ref_mv_weight[idx] = 2 * weight_unit;
++(*refmv_count);
#if CONFIG_C063_TMVP_IMPROVEMENT
@@ -1568,6 +1595,9 @@
if (idx == *refmv_count && *refmv_count < MAX_REF_MV_STACK_SIZE) {
ref_mv_stack[idx].this_mv.as_int = this_refmv.as_int;
ref_mv_stack[idx].comp_mv.as_int = comp_refmv.as_int;
+#if CONFIG_CWP
+ ref_mv_stack[idx].cwp_idx = CWP_EQUAL;
+#endif // CONFIG_CWP
ref_frame_idx0[idx] = rf[0];
ref_frame_idx1[idx] = rf[1];
ref_mv_weight[idx] = 2 * weight_unit;
@@ -1594,6 +1624,9 @@
ref_mv_stack[idx].row_offset = OFFSET_NONSPATIAL;
ref_mv_stack[idx].col_offset = OFFSET_NONSPATIAL;
#endif // CONFIG_EXTENDED_WARP_PREDICTION
+#if CONFIG_CWP
+ ref_mv_stack[idx].cwp_idx = CWP_EQUAL;
+#endif // CONFIG_CWP
ref_mv_weight[idx] = 2 * weight_unit;
++(*refmv_count);
#if CONFIG_C063_TMVP_IMPROVEMENT
@@ -1691,6 +1724,9 @@
ref_mv_stack[stack_idx].row_offset = OFFSET_NONSPATIAL;
ref_mv_stack[stack_idx].col_offset = OFFSET_NONSPATIAL;
#endif // CONFIG_EXTENDED_WARP_PREDICTION
+#if CONFIG_CWP
+ ref_mv_stack[stack_idx].cwp_idx = candidate->cwp_idx;
+#endif // CONFIG_CWP
// TODO(jingning): Set an arbitrary small number here. The weight
// doesn't matter as long as it is properly initialized.
@@ -1736,6 +1772,9 @@
ref_mv_stack[*refmv_count].row_offset = OFFSET_NONSPATIAL;
ref_mv_stack[*refmv_count].col_offset = OFFSET_NONSPATIAL;
#endif // CONFIG_EXTENDED_WARP_PREDICTION
+#if CONFIG_CWP
+ ref_mv_stack[*refmv_count].cwp_idx = cand_mv.cwp_idx;
+#endif // CONFIG_CWP
++*refmv_count;
return true;
@@ -1750,6 +1789,9 @@
uint8_t *refmv_count) {
ref_mv_stack[*refmv_count] = cand_mv;
ref_mv_weight[*refmv_count] = REF_CAT_LEVEL;
+#if CONFIG_CWP
+ ref_mv_stack[*refmv_count].cwp_idx = cand_mv.cwp_idx;
+#endif // CONFIG_CWP
++*refmv_count;
return true;
@@ -1812,6 +1854,9 @@
for (int k = 0; k < MAX_REF_MV_STACK_SIZE; k++) {
ref_mv_stack[k].row_offset = OFFSET_NONSPATIAL;
ref_mv_stack[k].col_offset = OFFSET_NONSPATIAL;
+#if CONFIG_CWP
+ ref_mv_stack[k].cwp_idx = CWP_EQUAL;
+#endif // CONFIG_CWP
}
#endif
@@ -2495,6 +2540,9 @@
ref_mv_stack[*refmv_count].row_offset = OFFSET_NONSPATIAL;
ref_mv_stack[*refmv_count].col_offset = OFFSET_NONSPATIAL;
#endif // CONFIG_EXTENDED_WARP_PREDICTION
+#if CONFIG_CWP
+ ref_mv_stack[*refmv_count].cwp_idx = CWP_EQUAL;
+#endif // CONFIG_CWP
} else {
ref_mv_stack[*refmv_count].this_mv = comp_list[0][0];
ref_mv_stack[*refmv_count].comp_mv = comp_list[0][1];
@@ -2502,6 +2550,9 @@
ref_mv_stack[*refmv_count].row_offset = OFFSET_NONSPATIAL;
ref_mv_stack[*refmv_count].col_offset = OFFSET_NONSPATIAL;
#endif // CONFIG_EXTENDED_WARP_PREDICTION
+#if CONFIG_CWP
+ ref_mv_stack[*refmv_count].cwp_idx = CWP_EQUAL;
+#endif // CONFIG_CWP
}
#if CONFIG_SKIP_MODE_DRL_WITH_REF_IDX
if (xd->mi[0]->skip_mode) {
@@ -2519,6 +2570,9 @@
ref_mv_stack[*refmv_count].row_offset = OFFSET_NONSPATIAL;
ref_mv_stack[*refmv_count].col_offset = OFFSET_NONSPATIAL;
#endif // CONFIG_EXTENDED_WARP_PREDICTION
+#if CONFIG_CWP
+ ref_mv_stack[*refmv_count].cwp_idx = CWP_EQUAL;
+#endif // CONFIG_CWP
#if CONFIG_SKIP_MODE_DRL_WITH_REF_IDX
if (xd->mi[0]->skip_mode) {
ref_frame_idx0[*refmv_count] = rf[0];
@@ -2613,6 +2667,9 @@
ref_mv_stack[*refmv_count].row_offset = OFFSET_NONSPATIAL;
ref_mv_stack[*refmv_count].col_offset = OFFSET_NONSPATIAL;
#endif // CONFIG_EXTENDED_WARP_PREDICTION
+#if CONFIG_CWP
+ ref_mv_stack[*refmv_count].cwp_idx = CWP_EQUAL;
+#endif // CONFIG_CWP
ref_mv_weight[*refmv_count] = REF_CAT_LEVEL;
(*refmv_count)++;
}
@@ -4052,6 +4109,9 @@
const int idx = (start_idx + count) % REF_MV_BANK_SIZE;
queue[idx].this_mv = mbmi->mv[0];
if (is_comp) queue[idx].comp_mv = mbmi->mv[1];
+#if CONFIG_CWP
+ queue[idx].cwp_idx = mbmi->cwp_idx;
+#endif // CONFIG_CWP
if (count < REF_MV_BANK_SIZE) {
++ref_mv_bank->rmb_count[ref_frame];
} else {
diff --git a/av1/common/reconinter.c b/av1/common/reconinter.c
index 824fe0a..bcf2e92 100644
--- a/av1/common/reconinter.c
+++ b/av1/common/reconinter.c
@@ -270,6 +270,10 @@
smooth_interintra_mask_buf[INTERINTRA_MODES][BLOCK_SIZES_ALL]
[MAX_WEDGE_SQUARE]);
+#if CONFIG_CWP
+DECLARE_ALIGNED(16, static int8_t, cwp_mask[2][MAX_CWP_NUM][MAX_SB_SQUARE]);
+#endif // CONFIG_CWP
+
static wedge_masks_type wedge_masks[BLOCK_SIZES_ALL][2];
#if CONFIG_WEDGE_MOD_EXT
@@ -394,6 +398,34 @@
};
#endif
+#if CONFIG_CWP
+// Init the cwp masks, called by init_cwp_masks
+static AOM_INLINE void build_cwp_mask(int8_t *mask, int stride,
+ BLOCK_SIZE plane_bsize, int8_t w) {
+ const int bw = block_size_wide[plane_bsize];
+ const int bh = block_size_high[plane_bsize];
+ for (int i = 0; i < bh; ++i) {
+ for (int j = 0; j < bw; ++j) mask[j] = w;
+ mask += stride;
+ }
+}
+// Init the cwp masks
+void init_cwp_masks() {
+ const int bs = BLOCK_128X128;
+ const int bw = block_size_wide[bs];
+ for (int list_idx = 0; list_idx < 2; ++list_idx) {
+ for (int idx = 0; idx < MAX_CWP_NUM; ++idx) {
+ int8_t weight = cwp_weighting_factor[list_idx][idx] * 4;
+ build_cwp_mask(cwp_mask[list_idx][idx], bw, bs, weight);
+ }
+ }
+}
+// Return the associated cwp mask
+const int8_t *av1_get_cwp_mask(int list_idx, int idx) {
+ return cwp_mask[list_idx][idx];
+}
+#endif // CONFIG_CWP
+
static const uint8_t *get_wedge_mask_inplace(int wedge_index, int neg,
BLOCK_SIZE sb_type) {
const uint8_t *master;
@@ -1793,6 +1825,9 @@
(mi->mode >= NEAR_NEARMV_OPTFLOW ||
(cm->features.opfl_refine_type == REFINE_ALL &&
mi->mode != GLOBAL_GLOBALMV &&
+#if CONFIG_CWP
+ mi->cwp_idx == CWP_EQUAL &&
+#endif // CONFIG_CWP
mi->interinter_comp.type == COMPOUND_AVERAGE)) &&
is_compound && is_opfl_refine_allowed(cm, mi);
assert(IMPLIES(use_optflow_refinement,
@@ -1881,6 +1916,18 @@
inter_pred_params.mask_comp.seg_mask = xd->seg_mask;
}
+#if CONFIG_CWP
+ if (ref == 1 && inter_pred_params.conv_params.do_average == 1) {
+ if (get_cwp_idx(mi) != CWP_EQUAL) {
+ int8_t weight = get_cwp_idx(mi);
+ assert(mi->cwp_idx >= CWP_MIN && mi->cwp_idx <= CWP_MAX);
+ inter_pred_params.conv_params.fwd_offset = weight;
+ inter_pred_params.conv_params.bck_offset =
+ (1 << CWP_WEIGHT_BITS) - weight;
+ }
+ }
+#endif // CONFIG_CWP
+
#if CONFIG_OPTFLOW_REFINEMENT
if (use_optflow_refinement && plane == 0) {
int n = opfl_get_subblock_size(bw, bh, plane
diff --git a/av1/common/reconinter.h b/av1/common/reconinter.h
index 18ea840..6e5b39b 100644
--- a/av1/common/reconinter.h
+++ b/av1/common/reconinter.h
@@ -309,6 +309,33 @@
}
#endif // CONFIG_WARP_REF_LIST
+#if CONFIG_CWP
+// Map the index to weighting factor for compound weighted prediction
+static INLINE int get_cwp_coding_idx(int val, int encode,
+ const AV1_COMMON *const cm,
+ const MB_MODE_INFO *const mbmi) {
+ int is_same_side = 0;
+ int cur_ref_side = 0;
+ int other_ref_side = 0;
+ if (has_second_ref(mbmi)) {
+ cur_ref_side = cm->ref_frame_side[mbmi->ref_frame[0]];
+ other_ref_side = cm->ref_frame_side[mbmi->ref_frame[1]];
+
+ is_same_side = (cur_ref_side > 0 && other_ref_side > 0) ||
+ (cur_ref_side == 0 && other_ref_side == 0);
+ }
+
+ if (encode) {
+ for (int i = 0; i < MAX_CWP_NUM; i++) {
+ if (cwp_weighting_factor[is_same_side][i] == val) return i;
+ }
+ return 0;
+ } else {
+ return cwp_weighting_factor[is_same_side][val];
+ }
+}
+#endif // CONFIG_CWP
+
#if CONFIG_ADAPTIVE_MVD
static INLINE int enable_adaptive_mvd_resolution(const AV1_COMMON *const cm,
const MB_MODE_INFO *mbmi) {
@@ -790,6 +817,13 @@
const uint8_t *av1_get_compound_type_mask(
const INTERINTER_COMPOUND_DATA *const comp_data, BLOCK_SIZE sb_type);
+#if CONFIG_CWP
+// Init the masks for compound weighted prediction
+void init_cwp_masks();
+// Get the mask for compound weighted prediction
+const int8_t *av1_get_cwp_mask(int list_idx, int idx);
+#endif // CONFIG_CWP
+
// build interintra_predictors for one plane
void av1_build_interintra_predictor(const AV1_COMMON *cm, MACROBLOCKD *xd,
uint16_t *pred, int stride,
diff --git a/av1/decoder/decodeframe.c b/av1/decoder/decodeframe.c
index 0b4284c..be60ddb 100644
--- a/av1/decoder/decodeframe.c
+++ b/av1/decoder/decodeframe.c
@@ -5998,6 +5998,9 @@
#if CONFIG_BAWP
seq_params->enable_bawp = aom_rb_read_bit(rb);
#endif // CONFIG_BAWP
+#if CONFIG_CWP
+ seq_params->enable_cwp = aom_rb_read_bit(rb);
+#endif // CONFIG_CWP
seq_params->enable_fsc = aom_rb_read_bit(rb);
#if CONFIG_CCSO
seq_params->enable_ccso = aom_rb_read_bit(rb);
@@ -7173,6 +7176,10 @@
features->enable_bawp = 0;
#endif // CONFIG_BAWP
+#if CONFIG_CWP
+ features->enable_cwp = seq_params->enable_cwp;
+#endif // CONFIG_CWP
+
features->reduced_tx_set_used = aom_rb_read_bit(rb);
if (features->allow_ref_frame_mvs && !frame_might_allow_ref_frame_mvs(cm)) {
diff --git a/av1/decoder/decodemv.c b/av1/decoder/decodemv.c
index 62302df..80a8846 100644
--- a/av1/decoder/decodemv.c
+++ b/av1/decoder/decodemv.c
@@ -607,6 +607,27 @@
}
#endif // CONFIG_IMPROVED_JMVD && CONFIG_JOINT_MVD
+#if CONFIG_CWP
+// Read index for the weighting factor of compound weighted prediction
+static int read_cwp_idx(MACROBLOCKD *xd, aom_reader *r, const AV1_COMMON *cm,
+ MB_MODE_INFO *const mbmi) {
+ int8_t cwp_idx = 0;
+ int bit_cnt = 0;
+ const int ctx = 0;
+ for (int idx = 0; idx < MAX_CWP_NUM - 1; ++idx) {
+ const int tmp_idx = aom_read_symbol(
+ r, xd->tile_ctx->cwp_idx_cdf[ctx][bit_cnt], 2, ACCT_STR);
+ cwp_idx = idx + tmp_idx;
+ if (!tmp_idx) break;
+ ++bit_cnt;
+ }
+ assert(cwp_idx <= CWP_MAX);
+
+ // convert index to weight
+ return get_cwp_coding_idx(cwp_idx, 0, cm, mbmi);
+}
+#endif // CONFIG_CWP
+
static PREDICTION_MODE read_inter_compound_mode(MACROBLOCKD *xd, aom_reader *r,
#if CONFIG_OPTFLOW_REFINEMENT
const AV1_COMMON *cm,
@@ -1384,6 +1405,9 @@
xd->ref_mv_stack[INTRA_FRAME][i].row_offset = OFFSET_NONSPATIAL;
xd->ref_mv_stack[INTRA_FRAME][i].col_offset = OFFSET_NONSPATIAL;
#endif // CONFIG_EXTENDED_WARP_PREDICTION
+#if CONFIG_CWP
+ xd->ref_mv_stack[INTRA_FRAME][i].cwp_idx = CWP_EQUAL;
+#endif // CONFIG_CWP
}
#endif // CONFIG_BVP_IMPROVEMENT
@@ -2637,6 +2661,14 @@
#endif // CONFIG_C076_INTER_MOD_CTX
mbmi->ref_mv_idx = 0;
+
+#if CONFIG_CWP
+ mbmi->cwp_idx = CWP_EQUAL;
+#endif // CONFIG_CWP
+#if CONFIG_IMPROVED_JMVD && CONFIG_JOINT_MVD
+ mbmi->jmvd_scale_mode = 0;
+#endif // CONFIG_IMPROVED_JMVD && CONFIG_JOINT_MVD
+
#if CONFIG_WARP_REF_LIST
mbmi->warp_ref_idx = 0;
mbmi->max_num_warp_candidates = 0;
@@ -2650,8 +2682,12 @@
if (mbmi->skip_mode) {
assert(is_compound);
#if CONFIG_SKIP_MODE_ENHANCEMENT && CONFIG_OPTFLOW_REFINEMENT
- mbmi->mode =
- (cm->features.opfl_refine_type ? NEAR_NEARMV_OPTFLOW : NEAR_NEARMV);
+ mbmi->mode = (cm->features.opfl_refine_type
+#if CONFIG_CWP
+ && !cm->features.enable_cwp
+#endif // CONFIG_CWP
+ ? NEAR_NEARMV_OPTFLOW
+ : NEAR_NEARMV);
#else
mbmi->mode = NEAR_NEARMV;
#endif // CONFIG_SKIP_MODE_ENHANCEMENT && CONFIG_OPTFLOW_REFINEMENT
@@ -2787,8 +2823,15 @@
if (mbmi->skip_mode) {
#if CONFIG_SKIP_MODE_ENHANCEMENT && CONFIG_OPTFLOW_REFINEMENT
+#if CONFIG_CWP
+ assert(mbmi->mode ==
+ (cm->features.opfl_refine_type && !cm->features.enable_cwp
+ ? NEAR_NEARMV_OPTFLOW
+ : NEAR_NEARMV));
+#else // CONFIG_CWP
assert(mbmi->mode ==
(cm->features.opfl_refine_type ? NEAR_NEARMV_OPTFLOW : NEAR_NEARMV));
+#endif // CONFIG_CWP
#else
assert(mbmi->mode == NEAR_NEARMV);
#endif // CONFIG_SKIP_MODE_ENHANCEMENT && CONFIG_OPTFLOW_REFINEMENT
@@ -2959,6 +3002,16 @@
}
}
}
+#if CONFIG_CWP
+ mbmi->cwp_idx = CWP_EQUAL;
+ if (cm->features.enable_cwp) {
+ if (is_cwp_allowed(mbmi) && !mbmi->skip_mode)
+ mbmi->cwp_idx = read_cwp_idx(xd, r, cm, mbmi);
+ if (is_cwp_allowed(mbmi) && mbmi->skip_mode)
+ mbmi->cwp_idx =
+ xd->skip_mvp_candidate_list.ref_mv_stack[mbmi->ref_mv_idx].cwp_idx;
+ }
+#endif // CONFIG_CWP
read_mb_interp_filter(xd, features->interp_filter, cm, mbmi, r);
@@ -3094,6 +3147,11 @@
mbmi->fsc_mode[PLANE_TYPE_Y] = 0;
mbmi->fsc_mode[PLANE_TYPE_UV] = 0;
+
+#if CONFIG_CWP
+ mbmi->cwp_idx = CWP_EQUAL;
+#endif // CONFIG_CWP
+
#if CONFIG_WARP_REF_LIST
mbmi->warp_ref_idx = 0;
mbmi->max_num_warp_candidates = 0;
diff --git a/av1/encoder/bitstream.c b/av1/encoder/bitstream.c
index 25d3ca5..9ccd20c 100644
--- a/av1/encoder/bitstream.c
+++ b/av1/encoder/bitstream.c
@@ -207,6 +207,24 @@
}
#endif // CONFIG_IMPROVED_JMVD && CONFIG_JOINT_MVD
+#if CONFIG_CWP
+// Write the index for the weighting factor of compound weighted prediction
+static AOM_INLINE void write_cwp_idx(MACROBLOCKD *xd, aom_writer *w,
+ const AV1_COMMON *const cm,
+ const MB_MODE_INFO *const mbmi) {
+ const int8_t final_idx = get_cwp_coding_idx(mbmi->cwp_idx, 1, cm, mbmi);
+
+ int bit_cnt = 0;
+ const int ctx = 0;
+ for (int idx = 0; idx < MAX_CWP_NUM - 1; ++idx) {
+ aom_write_symbol(w, final_idx != idx,
+ xd->tile_ctx->cwp_idx_cdf[ctx][bit_cnt], 2);
+ if (final_idx == idx) break;
+ ++bit_cnt;
+ }
+}
+#endif // CONFIG_CWP
+
static AOM_INLINE void write_inter_compound_mode(MACROBLOCKD *xd, aom_writer *w,
PREDICTION_MODE mode,
#if CONFIG_OPTFLOW_REFINEMENT
@@ -2197,6 +2215,10 @@
}
}
}
+#if CONFIG_CWP
+ if (cm->features.enable_cwp && is_cwp_allowed(mbmi) && !mbmi->skip_mode)
+ write_cwp_idx(xd, w, cm, mbmi);
+#endif // CONFIG_CWP
write_mb_interp_filter(cm, xd, w);
}
}
@@ -4441,6 +4463,9 @@
#if CONFIG_BAWP
aom_wb_write_bit(wb, seq_params->enable_bawp);
#endif // CONFIG_BAWP
+#if CONFIG_CWP
+ aom_wb_write_bit(wb, seq_params->enable_cwp);
+#endif // CONFIG_CWP
aom_wb_write_bit(wb, seq_params->enable_fsc);
#if CONFIG_CCSO
aom_wb_write_bit(wb, seq_params->enable_ccso);
diff --git a/av1/encoder/block.h b/av1/encoder/block.h
index 24ecf79..1ffe255 100644
--- a/av1/encoder/block.h
+++ b/av1/encoder/block.h
@@ -418,6 +418,10 @@
int is_global[2];
//! Current parameters for interinter mode.
INTERINTER_COMPOUND_DATA interinter_comp;
+#if CONFIG_CWP
+ //! Index for compound weighted prediction parameters.
+ int cwp_idx;
+#endif // CONFIG_CWP
} COMP_RD_STATS;
/*! \brief Contains buffers used to speed up rdopt for obmc.
@@ -939,6 +943,10 @@
int inter_compound_mode_cost[INTER_COMPOUND_MODE_CONTEXTS]
[INTER_COMPOUND_MODES];
#endif // CONFIG_OPTFLOW_REFINEMENT
+#if CONFIG_CWP
+ //! cwp_idx_cost for compound weighted prediction
+ int cwp_idx_cost[MAX_CWP_CONTEXTS][MAX_CWP_NUM - 1][2];
+#endif // CONFIG_CWP
#if CONFIG_IMPROVED_JMVD && CONFIG_JOINT_MVD
//! jmvd_scale_mode_cost for JOINT_NEWMV
int jmvd_scale_mode_cost[JOINT_NEWMV_SCALE_FACTOR_CNT];
diff --git a/av1/encoder/compound_type.c b/av1/encoder/compound_type.c
index c80874a..931de36 100644
--- a/av1/encoder/compound_type.c
+++ b/av1/encoder/compound_type.c
@@ -38,6 +38,10 @@
// Check if interp filter matches with previous case
if (st->interp_fltr != mi->interp_fltr) return 0;
+#if CONFIG_CWP
+ if (st->cwp_idx != mi->cwp_idx) return 0;
+#endif // CONFIG_CWP
+
const MACROBLOCKD *const xd = &x->e_mbd;
// Match MV and reference indices
for (int i = 0; i < 2; ++i) {
@@ -87,6 +91,10 @@
int32_t *comp_model_rate,
int64_t *comp_model_dist, int *comp_rs2,
int *match_index) {
+#if CONFIG_CWP
+ if (mbmi->cwp_idx != CWP_EQUAL) return 0;
+#endif // CONFIG_CWP
+
for (int j = 0; j < x->comp_rd_stats_idx; ++j) {
if (is_comp_rd_match(cpi, x, &x->comp_rd_stats[j], mbmi, comp_rate,
comp_dist, comp_model_rate, comp_model_dist,
@@ -974,6 +982,9 @@
COMPOUND_TYPE cur_type) {
mbmi->interinter_comp.type = cur_type;
mbmi->comp_group_idx = (cur_type >= COMPOUND_WEDGE);
+#if CONFIG_CWP
+ mbmi->cwp_idx = (cur_type == COMPOUND_AVERAGE) ? mbmi->cwp_idx : CWP_EQUAL;
+#endif // CONFIG_CWP
}
// When match is found, populate the compound type data
@@ -1008,6 +1019,9 @@
best_type_stats->comp_best_model_rd = comp_model_rd_cur;
best_type_stats->best_compound_data = mbmi->interinter_comp;
best_type_stats->best_compmode_interinter_cost = rs2;
+#if CONFIG_CWP
+ best_type_stats->cwp_idx = mbmi->cwp_idx;
+#endif // CONFIG_CWP
}
// Updates best_mv for masked compound types
@@ -1032,6 +1046,9 @@
MACROBLOCK *x, const MB_MODE_INFO *const mbmi, const int32_t *comp_rate,
const int64_t *comp_dist, const int32_t *comp_model_rate,
const int64_t *comp_model_dist, const int_mv *cur_mv, const int *comp_rs2) {
+#if CONFIG_CWP
+ if (mbmi->cwp_idx != CWP_EQUAL) return;
+#endif // CONFIG_CWP
const int offset = x->comp_rd_stats_idx;
if (offset < MAX_COMP_RD_STATS) {
COMP_RD_STATS *const rd_stats = x->comp_rd_stats + offset;
@@ -1045,6 +1062,9 @@
rd_stats->mode = mbmi->mode;
rd_stats->interp_fltr = mbmi->interp_fltr;
rd_stats->ref_mv_idx = mbmi->ref_mv_idx;
+#if CONFIG_CWP
+ rd_stats->cwp_idx = mbmi->cwp_idx;
+#endif // CONFIG_CWP
const MACROBLOCKD *const xd = &x->e_mbd;
for (int i = 0; i < 2; ++i) {
const WarpedMotionParams *const wm =
@@ -1322,6 +1342,10 @@
best_type_stats.best_compmode_interinter_cost = 0;
best_type_stats.comp_best_model_rd = INT64_MAX;
+#if CONFIG_CWP
+ best_type_stats.cwp_idx = CWP_EQUAL;
+#endif // CONFIG_CWP
+
int tmp_rate_mv;
const int num_pix = 1 << num_pels_log2_lookup[bsize];
const int mask_len = 2 * num_pix * sizeof(uint8_t);
@@ -1394,6 +1418,7 @@
// Loop over valid compound types
for (int i = 0; i < valid_type_count; i++) {
cur_type = valid_comp_types[i];
+
comp_model_rd_cur = INT64_MAX;
tmp_rate_mv = *rate_mv;
best_rd_cur = INT64_MAX;
@@ -1402,6 +1427,13 @@
if (cur_type < COMPOUND_WEDGE) {
update_mbmi_for_compound_type(mbmi, cur_type);
rs2 = masked_type_cost[cur_type];
+
+#if CONFIG_CWP
+ if (cm->features.enable_cwp && is_cwp_allowed(mbmi) && !mbmi->skip_mode) {
+ rs2 += av1_get_cwp_idx_cost(mbmi->cwp_idx, cm, x);
+ }
+#endif // CONFIG_CWP
+
const int64_t mode_rd = RDCOST(x->rdmult, rs2 + rd_stats->rate, 0);
if (mode_rd < ref_best_rd) {
// Reuse data if matching record is found
@@ -1504,6 +1536,11 @@
mbmi->interinter_comp = best_type_stats.best_compound_data;
memcpy(xd->seg_mask, buffers->tmp_best_mask_buf, mask_len);
}
+#if CONFIG_CWP
+ // update best cwp_idx
+ mbmi->cwp_idx = best_type_stats.cwp_idx;
+#endif // CONFIG_CWP
+
if (have_newmv_in_inter_mode(this_mode)) {
mbmi->mv[0].as_int = best_mv[0].as_int;
mbmi->mv[1].as_int = best_mv[1].as_int;
diff --git a/av1/encoder/compound_type.h b/av1/encoder/compound_type.h
index 7543aa0..eb66e4f 100644
--- a/av1/encoder/compound_type.h
+++ b/av1/encoder/compound_type.h
@@ -25,6 +25,10 @@
INTERINTER_COMPOUND_DATA best_compound_data;
int64_t comp_best_model_rd;
int best_compmode_interinter_cost;
+#if CONFIG_CWP
+ // Index for the weighting factor of compound weighted prediction
+ int8_t cwp_idx;
+#endif // CONFIG_CWP
} BEST_COMP_TYPE_STATS;
#define IGNORE_MODE -1
diff --git a/av1/encoder/encodeframe_utils.c b/av1/encoder/encodeframe_utils.c
index e0cd21d..b8b3bf9 100644
--- a/av1/encoder/encodeframe_utils.c
+++ b/av1/encoder/encodeframe_utils.c
@@ -1250,6 +1250,9 @@
AVERAGE_CDF(ctx_left->inter_compound_mode_cdf,
ctx_tr->inter_compound_mode_cdf, INTER_COMPOUND_MODES);
#endif // CONFIG_OPTFLOW_REFINEMENT
+#if CONFIG_CWP
+ AVERAGE_CDF(ctx_left->cwp_idx_cdf, ctx_tr->cwp_idx_cdf, 2);
+#endif // CONFIG_CWP
#if CONFIG_IMPROVED_JMVD
AVERAGE_CDF(ctx_left->jmvd_scale_mode_cdf, ctx_tr->jmvd_scale_mode_cdf,
JOINT_NEWMV_SCALE_FACTOR_CNT);
diff --git a/av1/encoder/encoder.c b/av1/encoder/encoder.c
index a0f81a5..ce237f4 100644
--- a/av1/encoder/encoder.c
+++ b/av1/encoder/encoder.c
@@ -203,6 +203,9 @@
av1_init_me_luts();
av1_rc_init_minq_luts();
av1_init_wedge_masks();
+#if CONFIG_CWP
+ init_cwp_masks();
+#endif // CONFIG_CWP
}
static void update_reference_segmentation_map(AV1_COMP *cpi) {
@@ -461,6 +464,9 @@
#if CONFIG_BAWP
seq->enable_bawp = tool_cfg->enable_bawp;
#endif // CONFIG_BAWP
+#if CONFIG_CWP
+ seq->enable_cwp = tool_cfg->enable_cwp;
+#endif // CONFIG_CWP
#if CONFIG_EXTENDED_WARP_PREDICTION
seq->seq_enabled_motion_modes =
oxcf->motion_mode_cfg.seq_enabled_motion_modes;
@@ -3310,6 +3316,9 @@
#if CONFIG_BAWP
features->enable_bawp = seq_params->enable_bawp;
#endif
+#if CONFIG_CWP
+ features->enable_cwp = seq_params->enable_cwp;
+#endif // CONFIG_CWP
cpi->last_frame_type = current_frame->frame_type;
diff --git a/av1/encoder/encoder.h b/av1/encoder/encoder.h
index 025fc88..1376974 100644
--- a/av1/encoder/encoder.h
+++ b/av1/encoder/encoder.h
@@ -872,6 +872,10 @@
// enable block adaptive weighted prediction
int enable_bawp;
#endif // CONFIG_BAWP
+#if CONFIG_CWP
+ // enable compound weighted prediction
+ int enable_cwp;
+#endif // CONFIG_CWP
// When enabled, video mode should be used even for single frame input.
bool force_video_mode;
// Indicates if the error resiliency features should be enabled.
diff --git a/av1/encoder/firstpass.c b/av1/encoder/firstpass.c
index b2735be..0339541 100644
--- a/av1/encoder/firstpass.c
+++ b/av1/encoder/firstpass.c
@@ -688,6 +688,9 @@
xd->mi[0]->tx_size = TX_4X4;
xd->mi[0]->ref_frame[0] = get_closest_pastcur_ref_index(cm);
xd->mi[0]->ref_frame[1] = NONE_FRAME;
+#if CONFIG_CWP
+ xd->mi[0]->cwp_idx = CWP_EQUAL;
+#endif // CONFIG_CWP
av1_enc_build_inter_predictor(cm, xd, mb_row * mb_scale, mb_col * mb_scale,
NULL, bsize, AOM_PLANE_Y, AOM_PLANE_Y);
av1_encode_sby_pass1(cpi, x, bsize);
diff --git a/av1/encoder/mcomp.c b/av1/encoder/mcomp.c
index c1e2379..1a6e44e 100644
--- a/av1/encoder/mcomp.c
+++ b/av1/encoder/mcomp.c
@@ -136,6 +136,11 @@
enable_adaptive_mvd_resolution(&cpi->common, mbmi);
#endif // CONFIG_ADAPTIVE_MVD
+#if CONFIG_CWP
+ ms_params->xd = xd;
+ ms_params->cm = &cpi->common;
+#endif // CONFIG_CWP
+
// High level params
ms_params->bsize = bsize;
ms_params->vfp = &cpi->fn_ptr[bsize];
@@ -1170,6 +1175,17 @@
return bestsme;
}
+#if CONFIG_CWP
+// Set weighting factor for two reference frames
+static INLINE void set_cmp_weight(const MB_MODE_INFO *mi, int invert_mask,
+ DIST_WTD_COMP_PARAMS *jcp_param) {
+ int weight = get_cwp_idx(mi);
+ weight = invert_mask ? (1 << CWP_WEIGHT_BITS) - weight : weight;
+ jcp_param->fwd_offset = weight;
+ jcp_param->bck_offset = (1 << CWP_WEIGHT_BITS) - weight;
+}
+#endif // CONFIG_CWP
+
static INLINE int get_mvpred_compound_sad(
const FULLPEL_MOTION_SEARCH_PARAMS *ms_params,
const struct buf_2d *const src, const uint16_t *const ref_address,
@@ -1187,6 +1203,16 @@
return vfp->msdf(src_buf, src_stride, ref_address, ref_stride, second_pred,
mask, mask_stride, invert_mask);
} else if (second_pred) {
+#if CONFIG_CWP
+ const MB_MODE_INFO *mi = ms_params->xd->mi[0];
+ if (get_cwp_idx(mi) != CWP_EQUAL) {
+ DIST_WTD_COMP_PARAMS jcp_param;
+ set_cmp_weight(mi, invert_mask, &jcp_param);
+
+ return vfp->jsdaf(src_buf, src_stride, ref_address, ref_stride,
+ second_pred, &jcp_param);
+ }
+#endif // CONFIG_CWP
return vfp->sdaf(src_buf, src_stride, ref_address, ref_stride, second_pred);
} else {
return ms_params->sdf(src_buf, src_stride, ref_address, ref_stride);
@@ -2799,6 +2825,27 @@
return var;
}
+#if CONFIG_CWP
+// Compute the rate cost of signaling the weight index for compound weighted prediction
+int av1_get_cwp_idx_cost(int8_t cwp_idx, const AV1_COMMON *const cm,
+ const MACROBLOCK *x) {
+ assert(cwp_idx >= CWP_MIN && cwp_idx <= CWP_MAX);
+ const MACROBLOCKD *xd = &x->e_mbd;
+ MB_MODE_INFO *mi = xd->mi[0];
+ int cost = 0;
+ int bit_cnt = 0;
+ const int ctx = 0;
+
+ const int8_t final_idx = get_cwp_coding_idx(cwp_idx, 1, cm, mi);
+ for (int idx = 0; idx < MAX_CWP_NUM - 1; ++idx) {
+ cost += x->mode_costs.cwp_idx_cost[ctx][bit_cnt][final_idx != idx];
+ if (final_idx == idx) return cost;
+ ++bit_cnt;
+ }
+ return cost;
+}
+#endif // CONFIG_CWP
+
#if CONFIG_BVP_IMPROVEMENT
int av1_get_ref_mvpred_var_cost(const AV1_COMP *cpi, const MACROBLOCKD *xd,
const FULLPEL_MOTION_SEARCH_PARAMS *ms_params) {
@@ -3424,9 +3471,22 @@
subpel_y_q3, ref, ref_stride, mask, mask_stride, invert_mask, xd->bd,
subpel_search_type);
} else {
- aom_highbd_comp_avg_upsampled_pred(
- xd, cm, mi_row, mi_col, this_mv, pred, second_pred, w, h, subpel_x_q3,
- subpel_y_q3, ref, ref_stride, xd->bd, subpel_search_type);
+#if CONFIG_CWP
+ if (get_cwp_idx(xd->mi[0]) != CWP_EQUAL) {
+ DIST_WTD_COMP_PARAMS jcp_param;
+ set_cmp_weight(xd->mi[0], invert_mask, &jcp_param);
+
+ aom_highbd_dist_wtd_comp_avg_upsampled_pred(
+ xd, cm, mi_row, mi_col, this_mv, pred, second_pred, w, h,
+ subpel_x_q3, subpel_y_q3, ref, ref_stride, xd->bd, &jcp_param,
+ subpel_search_type);
+ } else
+#endif // CONFIG_CWP
+
+ aom_highbd_comp_avg_upsampled_pred(xd, cm, mi_row, mi_col, this_mv,
+ pred, second_pred, w, h, subpel_x_q3,
+ subpel_y_q3, ref, ref_stride, xd->bd,
+ subpel_search_type);
}
} else {
aom_highbd_upsampled_pred(xd, cm, mi_row, mi_col, this_mv, pred, w, h,
diff --git a/av1/encoder/mcomp.h b/av1/encoder/mcomp.h
index 4f209d9..4268c35 100644
--- a/av1/encoder/mcomp.h
+++ b/av1/encoder/mcomp.h
@@ -625,6 +625,12 @@
(mv.row >= mv_limits->row_min) && (mv.row <= mv_limits->row_max);
}
+#if CONFIG_CWP
+// Returns the cost for signaling the index of compound weighted prediction
+int av1_get_cwp_idx_cost(int8_t cwp_idx, const AV1_COMMON *const cm,
+ const MACROBLOCK *x);
+#endif // CONFIG_CWP
+
#if CONFIG_BVP_IMPROVEMENT
// Returns the cost of using the current mv during the motion search
int av1_get_mv_err_cost(const MV *mv, const MV_COST_PARAMS *mv_cost_params);
diff --git a/av1/encoder/motion_search_facade.c b/av1/encoder/motion_search_facade.c
index 5d9d4fd..baf9b14 100644
--- a/av1/encoder/motion_search_facade.c
+++ b/av1/encoder/motion_search_facade.c
@@ -1485,6 +1485,9 @@
mbmi->use_intrabc[1] = 0;
#endif // CONFIG_IBC_SR_EXT
+#if CONFIG_CWP
+ mbmi->cwp_idx = CWP_EQUAL;
+#endif // CONFIG_CWP
#if CONFIG_FLEX_MVRES
set_default_max_mv_precision(mbmi, xd->sbi->sb_mv_precision);
set_mv_precision(mbmi, mbmi->max_mv_precision);
diff --git a/av1/encoder/partition_search.c b/av1/encoder/partition_search.c
index 3a3c97e..9787a8e 100644
--- a/av1/encoder/partition_search.c
+++ b/av1/encoder/partition_search.c
@@ -938,6 +938,31 @@
}
#endif // CONFIG_BVP_IMPROVEMENT
+#if CONFIG_CWP
+// Update the stats for compound weighted prediction
+static void update_cwp_idx_stats(FRAME_CONTEXT *fc, FRAME_COUNTS *counts,
+ const AV1_COMMON *const cm, MACROBLOCKD *xd) {
+#if !CONFIG_ENTROPY_STATS
+ (void)counts;
+#endif // !CONFIG_ENTROPY_STATS
+ const MB_MODE_INFO *mbmi = xd->mi[0];
+
+ assert(mbmi->cwp_idx >= CWP_MIN && mbmi->cwp_idx <= CWP_MAX);
+ int bit_cnt = 0;
+ const int ctx = 0;
+
+ int8_t final_idx = get_cwp_coding_idx(mbmi->cwp_idx, 1, cm, mbmi);
+ for (int idx = 0; idx < MAX_CWP_NUM - 1; ++idx) {
+#if CONFIG_ENTROPY_STATS
+ counts->cwp_idx[bit_cnt][final_idx != idx]++;
+#endif // CONFIG_ENTROPY_STATS
+ update_cdf(fc->cwp_idx_cdf[ctx][bit_cnt], final_idx != idx, 2);
+ if (final_idx == idx) break;
+ ++bit_cnt;
+ }
+}
+#endif // CONFIG_CWP
+
#if CONFIG_EXTENDED_WARP_PREDICTION
static void update_warp_delta_param_stats(int index, int value,
#if CONFIG_ENTROPY_STATS
@@ -1598,6 +1623,12 @@
#endif // CONFIG_WEDGE_MOD_EXT
}
}
+
+#if CONFIG_CWP
+ if (cm->features.enable_cwp && is_cwp_allowed(mbmi) && !mbmi->skip_mode) {
+ update_cwp_idx_stats(fc, td->counts, cm, xd);
+ }
+#endif // CONFIG_CWP
}
}
diff --git a/av1/encoder/rd.c b/av1/encoder/rd.c
index 9d7f360..45a170f 100644
--- a/av1/encoder/rd.c
+++ b/av1/encoder/rd.c
@@ -625,6 +625,14 @@
av1_cost_tokens_from_cdf(mode_costs->comp_group_idx_cost[i],
fc->comp_group_idx_cdf[i], NULL);
}
+#if CONFIG_CWP
+ for (j = 0; j < MAX_CWP_CONTEXTS; j++) {
+ for (i = 0; i < MAX_CWP_NUM - 1; ++i) {
+ av1_cost_tokens_from_cdf(mode_costs->cwp_idx_cost[j][i],
+ fc->cwp_idx_cdf[j][i], NULL);
+ }
+ }
+#endif // CONFIG_CWP
}
}
diff --git a/av1/encoder/rdopt.c b/av1/encoder/rdopt.c
index 0d9e809..2122e3b 100644
--- a/av1/encoder/rdopt.c
+++ b/av1/encoder/rdopt.c
@@ -3018,6 +3018,9 @@
}
mbmi->motion_mode = SIMPLE_TRANSLATION;
+#if CONFIG_CWP
+ mbmi->cwp_idx = CWP_EQUAL;
+#endif // CONFIG_CWP
mbmi->num_proj_ref = 0;
if (is_comp_pred) {
// Only compound_average
@@ -3032,6 +3035,7 @@
const int mi_row = xd->mi_row;
const int mi_col = xd->mi_col;
+
av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, &orig_dst, bsize,
AOM_PLANE_Y, AOM_PLANE_Y);
int est_rate;
@@ -3648,6 +3652,12 @@
int mode_search_mask =
(1 << COMPOUND_AVERAGE) | (1 << COMPOUND_WEDGE) | (1 << COMPOUND_DIFFWTD);
+#if CONFIG_CWP
+ if (get_cwp_idx(mbmi) != CWP_EQUAL) {
+ mode_search_mask = (1 << COMPOUND_AVERAGE);
+ }
+#endif // CONFIG_CWP
+
const int num_planes = av1_num_planes(cm);
const int mi_row = xd->mi_row;
const int mi_col = xd->mi_col;
@@ -3730,6 +3740,110 @@
return 0;
}
+#if CONFIG_CWP
+// Calculate SSE when using compound weighted prediction
+uint64_t av1_cwp_sse_from_residuals_c(const int16_t *r1, const int16_t *d,
+ const int8_t *m, int N) {
+ uint64_t csse = 0;
+ int i;
+
+ for (i = 0; i < N; i++) {
+ int32_t t = (1 << WEDGE_WEIGHT_BITS) * r1[i] + m[i] * d[i];
+ t = clamp(t, INT16_MIN, INT16_MAX);
+ csse += t * t;
+ }
+ return ROUND_POWER_OF_TWO(csse, 2 * WEDGE_WEIGHT_BITS);
+}
+
+// Select a subset of CWP weighting factors to search, ranked by estimated RD cost
+static void set_cwp_search_mask(const AV1_COMP *const cpi, MACROBLOCK *const x,
+ const BLOCK_SIZE bsize, uint16_t *const p0,
+ uint16_t *const p1, int16_t *residual1,
+ int16_t *diff10, int stride, int *mask) {
+ MACROBLOCKD *xd = &x->e_mbd;
+ const int bw = block_size_wide[bsize];
+ const int bh = block_size_high[bsize];
+ // get the single-reference inter predictors used to evaluate CWP weights
+ av1_build_inter_predictor_single_buf_y(xd, bsize, 0, p0, stride);
+ av1_build_inter_predictor_single_buf_y(xd, bsize, 1, p1, stride);
+ const struct buf_2d *const src = &x->plane[0].src;
+
+ aom_highbd_subtract_block(bh, bw, residual1, bw, src->buf, src->stride, p1,
+ bw, xd->bd);
+ aom_highbd_subtract_block(bh, bw, diff10, bw, p1, bw, p0, bw, xd->bd);
+
+ MB_MODE_INFO *const mbmi = xd->mi[0];
+
+ const AV1_COMMON *const cm = &cpi->common;
+ const int same_side = is_ref_frame_same_side(cm, mbmi);
+
+ const int N = 1 << num_pels_log2_lookup[bsize];
+ int rate;
+ int64_t dist;
+ int cwp_index;
+ int64_t best_rd = INT64_MAX;
+ const int bd_round = (xd->bd - 8) * 2;
+
+ const int8_t *tmp_mask;
+ int rate_cwp_idx;
+
+ int idx_list[MAX_CWP_NUM];
+ int64_t cost_list[MAX_CWP_NUM];
+
+ for (int i = 0; i < MAX_CWP_NUM; i++) {
+ idx_list[i] = i;
+ cost_list[i] = INT64_MAX;
+ }
+
+ for (cwp_index = 0; cwp_index < MAX_CWP_NUM; cwp_index++) {
+ if (cwp_index == 0) continue;
+
+ tmp_mask = av1_get_cwp_mask(same_side, cwp_index);
+
+ // compute rd for mask
+ uint64_t sse = av1_cwp_sse_from_residuals_c(residual1, diff10, tmp_mask, N);
+ sse = ROUND_POWER_OF_TWO(sse, bd_round);
+
+ model_rd_sse_fn[MODELRD_TYPE_MASKED_COMPOUND](cpi, x, bsize, 0, sse, N,
+ &rate, &dist);
+ int8_t cur_cwp = cwp_weighting_factor[same_side][cwp_index];
+ rate_cwp_idx = av1_get_cwp_idx_cost(cur_cwp, cm, x);
+ const int64_t rd0 = RDCOST(x->rdmult, rate + rate_cwp_idx, dist);
+ if (rd0 < best_rd) {
+ best_rd = rd0;
+ }
+
+ cost_list[cwp_index] = rd0;
+ }
+
+ // sort the CWP candidates by RD cost in ascending order
+ for (int i = 0; i < MAX_CWP_NUM - 1; i++) {
+ for (int j = 0; j < (MAX_CWP_NUM - 1) - i; j++) {
+ if (cost_list[j] > cost_list[j + 1]) {
+ int64_t tmp_cost = cost_list[j];
+ cost_list[j] = cost_list[j + 1];
+ cost_list[j + 1] = tmp_cost;
+
+ int tmp_idx = idx_list[j];
+ idx_list[j] = idx_list[j + 1];
+ idx_list[j + 1] = tmp_idx;
+ }
+ }
+ }
+
+ int th = 2;
+ for (int i = 0; i < MAX_CWP_NUM; i++) {
+ if (i < th) {
+ mask[idx_list[i]] = 1;
+ } else {
+ mask[idx_list[i]] = 0;
+ }
+ }
+
+ return;
+}
+#endif // CONFIG_CWP
+
/*!\brief AV1 inter mode RD computation
*
* \ingroup inter_mode_search
@@ -4096,6 +4210,10 @@
continue;
}
#endif // CONFIG_IMPROVED_JMVD
+#if CONFIG_CWP
+ int best_cwp_idx = CWP_EQUAL;
+ int64_t best_cwp_cost = INT64_MAX;
+#endif // CONFIG_CWP
for (int ref_mv_idx = 0; ref_mv_idx < ref_set; ++ref_mv_idx) {
#if CONFIG_IMPROVED_JMVD
// apply early termination method to jmvd scaling factors
@@ -4105,103 +4223,36 @@
continue;
}
#endif // CONFIG_IMPROVED_JMVD
-#if CONFIG_FLEX_MVRES
+#if CONFIG_CWP
+ mbmi->cwp_idx = CWP_EQUAL;
+ const int same_side = is_ref_frame_same_side(cm, mbmi);
+ int cwp_loop_num = cm->features.enable_cwp ? MAX_CWP_NUM : 1;
+ if (best_cwp_idx == CWP_EQUAL && ref_mv_idx > 0) cwp_loop_num = 1;
- // Initialize compound mode data
- mbmi->interinter_comp.type = COMPOUND_AVERAGE;
- mbmi->comp_group_idx = 0;
- if (mbmi->ref_frame[1] == INTRA_FRAME) mbmi->ref_frame[1] = NONE_FRAME;
+ int cwp_search_mask[MAX_CWP_NUM] = { 0 };
+ av1_zero(cwp_search_mask);
+ // Loop over all supported weighting factors for CWP
+ for (int cwp_search_idx = 0; cwp_search_idx < cwp_loop_num;
+ cwp_search_idx++) {
+ mbmi->ref_mv_idx = ref_mv_idx;
+ mbmi->interinter_comp.type = COMPOUND_AVERAGE;
+ mbmi->comp_group_idx = 0;
+ mbmi->motion_mode = SIMPLE_TRANSLATION;
- mbmi->num_proj_ref = 0;
- mbmi->motion_mode = SIMPLE_TRANSLATION;
- mbmi->ref_mv_idx = ref_mv_idx;
- set_mv_precision(mbmi, mbmi->max_mv_precision);
- if (
-#if CONFIG_WARPMV
- mbmi->mode != WARPMV &&
-#endif // CONFIG_WARPMV
- prune_modes_based_on_tpl && !ref_match_found_in_above_nb &&
- !ref_match_found_in_left_nb && (ref_best_rd != INT64_MAX)) {
- // Skip mode if TPL model indicates it will not be beneficial.
- if (prune_modes_based_on_tpl_stats(
- &cm->features, inter_cost_info_from_tpl, refs, ref_mv_idx,
- this_mode, cpi->sf.inter_sf.prune_inter_modes_based_on_tpl))
- continue;
- }
- const int drl_cost =
- get_drl_cost(cm->features.max_drl_bits, mbmi, mbmi_ext, x);
+ mbmi->cwp_idx = cwp_weighting_factor[same_side][cwp_search_idx];
-#if CONFIG_FLEX_MVRES
- MvSubpelPrecision best_precision_so_far = mbmi->max_mv_precision;
- int64_t best_precision_rd_so_far = INT64_MAX;
- set_precision_set(cm, xd, mbmi, bsize, ref_mv_idx);
- set_most_probable_mv_precision(cm, mbmi, bsize);
- const PRECISION_SET *precision_def =
- &av1_mv_precision_sets[mbmi->mb_precision_set];
- for (int precision_dx = precision_def->num_precisions - 1;
- precision_dx >= 0; precision_dx--) {
- MvSubpelPrecision pb_mv_precision =
- precision_def->precision[precision_dx];
- mbmi->pb_mv_precision = pb_mv_precision;
- if (!is_pb_mv_precision_active(cm, mbmi, bsize) &&
- (pb_mv_precision != mbmi->max_mv_precision)) {
- continue;
- }
- assert(pb_mv_precision <= mbmi->max_mv_precision);
-#if CONFIG_IMPROVED_JMVD
- // apply early termination method to jmvd scaling factors
- if (cpi->sf.inter_sf.early_terminate_jmvd_scale_factor) {
- if (scale_index > 0 && (!is_inter_compound_mode(best_ref_mode)) &&
- mbmi->pb_mv_precision <= MV_PRECISION_HALF_PEL &&
- best_mbmi.jmvd_scale_mode == 0 &&
- best_mbmi.pb_mv_precision > MV_PRECISION_HALF_PEL)
+ if (mbmi->cwp_idx != CWP_EQUAL) {
+ if (!is_cwp_allowed(mbmi)) break;
+ if (cwp_search_mask[cwp_search_idx] == 0) {
continue;
- }
-#endif // CONFIG_IMPROVED_JMVD
-
- if (is_pb_mv_precision_active(cm, mbmi, bsize)) {
- if (cpi->sf.flexmv_sf.terminate_early_4_pel_precision &&
- pb_mv_precision < MV_PRECISION_FOUR_PEL &&
- best_precision_so_far >= MV_PRECISION_QTR_PEL)
- continue;
- if (mbmi->ref_mv_idx) {
- if (cpi->sf.flexmv_sf.do_not_search_8_pel_precision &&
- mbmi->pb_mv_precision == MV_PRECISION_8_PEL)
- continue;
-
- if (cpi->sf.flexmv_sf.do_not_search_4_pel_precision &&
- mbmi->pb_mv_precision == MV_PRECISION_FOUR_PEL)
- continue;
}
}
-
-#endif
-#endif
-
-#if !CONFIG_FLEX_MVRES && !CONFIG_BAWP
- mode_info[ref_mv_idx].full_search_mv.as_int = INVALID_MV;
- mode_info[ref_mv_idx].mv.as_int = INVALID_MV;
- mode_info[ref_mv_idx].rd = INT64_MAX;
- if (
-#if CONFIG_WARPMV
- mbmi->mode != WARPMV &&
-#endif // CONFIG_WARPMV
-
- !mask_check_bit(idx_mask, ref_mv_idx)) {
- // MV did not perform well in simple translation search. Skip it.
- continue;
+ if (mbmi->cwp_idx == -1) {
+ break;
}
-#endif // !CONFIG_FLEX_MVRES && !CONFIG_BAWP
-#if !CONFIG_FLEX_MVRES
- if (prune_modes_based_on_tpl && !ref_match_found_in_above_nb &&
- !ref_match_found_in_left_nb && (ref_best_rd != INT64_MAX)) {
- // Skip mode if TPL model indicates it will not be beneficial.
- if (prune_modes_based_on_tpl_stats(
- &cm->features, inter_cost_info_from_tpl, refs, ref_mv_idx,
- this_mode, cpi->sf.inter_sf.prune_inter_modes_based_on_tpl))
- continue;
- }
- av1_init_rd_stats(rd_stats);
+#endif // CONFIG_CWP
+#if CONFIG_FLEX_MVRES
+
// Initialize compound mode data
mbmi->interinter_comp.type = COMPOUND_AVERAGE;
mbmi->comp_group_idx = 0;
@@ -4210,156 +4261,252 @@
mbmi->num_proj_ref = 0;
mbmi->motion_mode = SIMPLE_TRANSLATION;
mbmi->ref_mv_idx = ref_mv_idx;
- // Compute cost for signalling this DRL index
- rd_stats->rate = base_rate;
- const int drl_cost =
- get_drl_cost(cm->features.max_drl_bits, mbmi, mbmi_ext, x);
-
- rd_stats->rate += drl_cost;
-#if CONFIG_BAWP
- mode_info[0][ref_mv_idx].drl_cost = drl_cost;
- mode_info[1][ref_mv_idx].drl_cost = drl_cost;
-#else
- mode_info[ref_mv_idx].drl_cost = drl_cost;
-#endif
-#endif //! CONFIG_FLEX_MVRES
-
- int rs = 0;
- int compmode_interinter_cost = 0;
- int_mv cur_mv[2];
- // TODO(Cherma): Extend this speed feature to support compound mode
- int skip_repeated_ref_mv =
- is_comp_pred ? 0 : cpi->sf.inter_sf.skip_repeated_ref_mv;
- // Generate the current mv according to the prediction mode
+ set_mv_precision(mbmi, mbmi->max_mv_precision);
if (
#if CONFIG_WARPMV
mbmi->mode != WARPMV &&
#endif // CONFIG_WARPMV
- !build_cur_mv(cur_mv, this_mode, cm, x, skip_repeated_ref_mv)) {
- continue;
+ prune_modes_based_on_tpl && !ref_match_found_in_above_nb &&
+ !ref_match_found_in_left_nb && (ref_best_rd != INT64_MAX)) {
+ // Skip mode if TPL model indicates it will not be beneficial.
+ if (prune_modes_based_on_tpl_stats(
+ &cm->features, inter_cost_info_from_tpl, refs, ref_mv_idx,
+ this_mode, cpi->sf.inter_sf.prune_inter_modes_based_on_tpl))
+ continue;
}
+ const int drl_cost =
+ get_drl_cost(cm->features.max_drl_bits, mbmi, mbmi_ext, x);
+
+#if CONFIG_FLEX_MVRES
+ MvSubpelPrecision best_precision_so_far = mbmi->max_mv_precision;
+ int64_t best_precision_rd_so_far = INT64_MAX;
+ set_precision_set(cm, xd, mbmi, bsize, ref_mv_idx);
+ set_most_probable_mv_precision(cm, mbmi, bsize);
+ const PRECISION_SET *precision_def =
+ &av1_mv_precision_sets[mbmi->mb_precision_set];
+ for (int precision_dx = precision_def->num_precisions - 1;
+ precision_dx >= 0; precision_dx--) {
+ MvSubpelPrecision pb_mv_precision =
+ precision_def->precision[precision_dx];
+ mbmi->pb_mv_precision = pb_mv_precision;
+ if (!is_pb_mv_precision_active(cm, mbmi, bsize) &&
+ (pb_mv_precision != mbmi->max_mv_precision)) {
+ continue;
+ }
+ assert(pb_mv_precision <= mbmi->max_mv_precision);
+#if CONFIG_IMPROVED_JMVD
+ // apply early termination method to jmvd scaling factors
+ if (cpi->sf.inter_sf.early_terminate_jmvd_scale_factor) {
+ if (scale_index > 0 && (!is_inter_compound_mode(best_ref_mode)) &&
+ mbmi->pb_mv_precision <= MV_PRECISION_HALF_PEL &&
+ best_mbmi.jmvd_scale_mode == 0 &&
+ best_mbmi.pb_mv_precision > MV_PRECISION_HALF_PEL)
+ continue;
+ }
+#endif // CONFIG_IMPROVED_JMVD
+
+ if (is_pb_mv_precision_active(cm, mbmi, bsize)) {
+ if (cpi->sf.flexmv_sf.terminate_early_4_pel_precision &&
+ pb_mv_precision < MV_PRECISION_FOUR_PEL &&
+ best_precision_so_far >= MV_PRECISION_QTR_PEL)
+ continue;
+ if (mbmi->ref_mv_idx) {
+ if (cpi->sf.flexmv_sf.do_not_search_8_pel_precision &&
+ mbmi->pb_mv_precision == MV_PRECISION_8_PEL)
+ continue;
+
+ if (cpi->sf.flexmv_sf.do_not_search_4_pel_precision &&
+ mbmi->pb_mv_precision == MV_PRECISION_FOUR_PEL)
+ continue;
+ }
+ }
+
+#endif
+#endif
+
+#if !CONFIG_FLEX_MVRES && !CONFIG_BAWP
+ mode_info[ref_mv_idx].full_search_mv.as_int = INVALID_MV;
+ mode_info[ref_mv_idx].mv.as_int = INVALID_MV;
+ mode_info[ref_mv_idx].rd = INT64_MAX;
+ if (
#if CONFIG_WARPMV
- // For WARPMV mode we will build MV in the later stage
- // Currently initialize to 0
- if (mbmi->mode == WARPMV) {
- cur_mv[0].as_int = 0;
- cur_mv[1].as_int = 0;
- assert(ref_mv_idx == 0);
- }
+ mbmi->mode != WARPMV &&
+#endif // CONFIG_WARPMV
+
+ !mask_check_bit(idx_mask, ref_mv_idx)) {
+ // MV did not perform well in simple translation search. Skip it.
+ continue;
+ }
+#endif // !CONFIG_FLEX_MVRES && !CONFIG_BAWP
+#if !CONFIG_FLEX_MVRES
+ if (prune_modes_based_on_tpl && !ref_match_found_in_above_nb &&
+ !ref_match_found_in_left_nb && (ref_best_rd != INT64_MAX)) {
+ // Skip mode if TPL model indicates it will not be beneficial.
+ if (prune_modes_based_on_tpl_stats(
+ &cm->features, inter_cost_info_from_tpl, refs, ref_mv_idx,
+ this_mode, cpi->sf.inter_sf.prune_inter_modes_based_on_tpl))
+ continue;
+ }
+ av1_init_rd_stats(rd_stats);
+ // Initialize compound mode data
+ mbmi->interinter_comp.type = COMPOUND_AVERAGE;
+ mbmi->comp_group_idx = 0;
+ if (mbmi->ref_frame[1] == INTRA_FRAME)
+ mbmi->ref_frame[1] = NONE_FRAME;
+
+ mbmi->num_proj_ref = 0;
+ mbmi->motion_mode = SIMPLE_TRANSLATION;
+ mbmi->ref_mv_idx = ref_mv_idx;
+ // Compute cost for signalling this DRL index
+ rd_stats->rate = base_rate;
+ const int drl_cost =
+ get_drl_cost(cm->features.max_drl_bits, mbmi, mbmi_ext, x);
+
+ rd_stats->rate += drl_cost;
+#if CONFIG_BAWP
+ mode_info[0][ref_mv_idx].drl_cost = drl_cost;
+ mode_info[1][ref_mv_idx].drl_cost = drl_cost;
+#else
+ mode_info[ref_mv_idx].drl_cost = drl_cost;
+#endif
+#endif //! CONFIG_FLEX_MVRES
+
+ int rs = 0;
+ int compmode_interinter_cost = 0;
+ int_mv cur_mv[2];
+ // TODO(Cherma): Extend this speed feature to support compound mode
+ int skip_repeated_ref_mv =
+ is_comp_pred ? 0 : cpi->sf.inter_sf.skip_repeated_ref_mv;
+ // Generate the current mv according to the prediction mode
+ if (
+#if CONFIG_WARPMV
+ mbmi->mode != WARPMV &&
+#endif // CONFIG_WARPMV
+ !build_cur_mv(cur_mv, this_mode, cm, x, skip_repeated_ref_mv)) {
+ continue;
+ }
+#if CONFIG_WARPMV
+ // For WARPMV mode we will build MV in the later stage
+ // Currently initialize to 0
+ if (mbmi->mode == WARPMV) {
+ cur_mv[0].as_int = 0;
+ cur_mv[1].as_int = 0;
+ assert(ref_mv_idx == 0);
+ }
#endif // CONFIG_WARPMV
#if CONFIG_FLEX_MVRES
#if !CONFIG_BAWP
- mode_info[mbmi->pb_mv_precision][ref_mv_idx].full_search_mv.as_int =
- INVALID_MV;
- mode_info[mbmi->pb_mv_precision][ref_mv_idx].mv.as_int = INVALID_MV;
- mode_info[mbmi->pb_mv_precision][ref_mv_idx].rd = INT64_MAX;
- mode_info[mbmi->pb_mv_precision][ref_mv_idx].drl_cost = drl_cost;
- if (
-#if CONFIG_WARPMV
- mbmi->mode != WARPMV &&
-#endif // CONFIG_WARPMV
- !mask_check_bit(idx_mask[mbmi->pb_mv_precision], ref_mv_idx)) {
- // MV did not perform well in simple translation search. Skip it.
- continue;
- }
-#endif
-
- if (
-#if CONFIG_WARPMV
- mbmi->mode != WARPMV &&
-#endif // CONFIG_WARPMV
- cpi->sf.flexmv_sf.skip_similar_ref_mv &&
- skip_similar_ref_mv(cpi, x, bsize)) {
- continue;
- }
-
-#if CONFIG_WARPMV
- assert(IMPLIES(mbmi->mode == WARPMV,
- mbmi->pb_mv_precision == mbmi->max_mv_precision));
-#endif // CONFIG_WARPMV
-#endif
-
-#if CONFIG_BAWP
- int_mv bawp_off_mv[2];
- int64_t bawp_off_newmv_ret_val = 0;
- for (i = 0; i < is_comp_pred + 1; ++i) {
- bawp_off_mv[i].as_int = cur_mv[i].as_int;
- }
- int bawp_eanbled = cm->features.enable_bawp &&
- av1_allow_bawp(mbmi, xd->mi_row, xd->mi_col);
- for (int bawp_flag = 0; bawp_flag <= bawp_eanbled; bawp_flag++) {
- mbmi->bawp_flag = bawp_flag;
-
-#if CONFIG_FLEX_MVRES
- mode_info[bawp_flag][mbmi->pb_mv_precision][ref_mv_idx]
- .full_search_mv.as_int = INVALID_MV;
- mode_info[bawp_flag][mbmi->pb_mv_precision][ref_mv_idx].mv.as_int =
+ mode_info[mbmi->pb_mv_precision][ref_mv_idx].full_search_mv.as_int =
INVALID_MV;
- mode_info[bawp_flag][mbmi->pb_mv_precision][ref_mv_idx].rd =
- INT64_MAX;
- mode_info[bawp_flag][mbmi->pb_mv_precision][ref_mv_idx].drl_cost =
- drl_cost;
+ mode_info[mbmi->pb_mv_precision][ref_mv_idx].mv.as_int = INVALID_MV;
+ mode_info[mbmi->pb_mv_precision][ref_mv_idx].rd = INT64_MAX;
+ mode_info[mbmi->pb_mv_precision][ref_mv_idx].drl_cost = drl_cost;
+ if (
+#if CONFIG_WARPMV
+ mbmi->mode != WARPMV &&
+#endif // CONFIG_WARPMV
+ !mask_check_bit(idx_mask[mbmi->pb_mv_precision], ref_mv_idx)) {
+ // MV did not perform well in simple translation search. Skip it.
+ continue;
+ }
+#endif
if (
#if CONFIG_WARPMV
mbmi->mode != WARPMV &&
#endif // CONFIG_WARPMV
- !mask_check_bit(idx_mask[bawp_flag][mbmi->pb_mv_precision],
- ref_mv_idx)) {
+ cpi->sf.flexmv_sf.skip_similar_ref_mv &&
+ skip_similar_ref_mv(cpi, x, bsize)) {
+ continue;
+ }
+
+#if CONFIG_WARPMV
+ assert(IMPLIES(mbmi->mode == WARPMV,
+ mbmi->pb_mv_precision == mbmi->max_mv_precision));
+#endif // CONFIG_WARPMV
+#endif
+
+#if CONFIG_BAWP
+ int_mv bawp_off_mv[2];
+ int64_t bawp_off_newmv_ret_val = 0;
+ for (i = 0; i < is_comp_pred + 1; ++i) {
+ bawp_off_mv[i].as_int = cur_mv[i].as_int;
+ }
+ int bawp_eanbled = cm->features.enable_bawp &&
+ av1_allow_bawp(mbmi, xd->mi_row, xd->mi_col);
+ for (int bawp_flag = 0; bawp_flag <= bawp_eanbled; bawp_flag++) {
+ mbmi->bawp_flag = bawp_flag;
+
+#if CONFIG_FLEX_MVRES
+ mode_info[bawp_flag][mbmi->pb_mv_precision][ref_mv_idx]
+ .full_search_mv.as_int = INVALID_MV;
+ mode_info[bawp_flag][mbmi->pb_mv_precision][ref_mv_idx].mv.as_int =
+ INVALID_MV;
+ mode_info[bawp_flag][mbmi->pb_mv_precision][ref_mv_idx].rd =
+ INT64_MAX;
+ mode_info[bawp_flag][mbmi->pb_mv_precision][ref_mv_idx].drl_cost =
+ drl_cost;
+
+ if (
+#if CONFIG_WARPMV
+ mbmi->mode != WARPMV &&
+#endif // CONFIG_WARPMV
+ !mask_check_bit(idx_mask[bawp_flag][mbmi->pb_mv_precision],
+ ref_mv_idx)) {
+ // MV did not perform well in simple translation search. Skip it.
+ continue;
+ }
+#else
+ mode_info[bawp_flag][ref_mv_idx].full_search_mv.as_int = INVALID_MV;
+ mode_info[bawp_flag][ref_mv_idx].mv.as_int = INVALID_MV;
+ mode_info[bawp_flag][ref_mv_idx].rd = INT64_MAX;
+ mode_info[bawp_flag][ref_mv_idx].drl_cost = drl_cost;
+
+ if (
+#if CONFIG_WARPMV
+ mbmi->mode != WARPMV &&
+#endif // CONFIG_WARPMV
+ !mask_check_bit(idx_mask[bawp_flag], ref_mv_idx)) {
// MV did not perform well in simple translation search. Skip it.
continue;
}
-#else
- mode_info[bawp_flag][ref_mv_idx].full_search_mv.as_int = INVALID_MV;
- mode_info[bawp_flag][ref_mv_idx].mv.as_int = INVALID_MV;
- mode_info[bawp_flag][ref_mv_idx].rd = INT64_MAX;
- mode_info[bawp_flag][ref_mv_idx].drl_cost = drl_cost;
-
- if (
-#if CONFIG_WARPMV
- mbmi->mode != WARPMV &&
-#endif // CONFIG_WARPMV
- !mask_check_bit(idx_mask[bawp_flag], ref_mv_idx)) {
- // MV did not perform well in simple translation search. Skip it.
- continue;
- }
#endif // CONFIG_FLEX_MVRES
- if (mbmi->bawp_flag == 1) {
- for (i = 0; i < is_comp_pred + 1; ++i) {
- mbmi->mv[i].as_int = bawp_off_mv[i].as_int;
- cur_mv[i].as_int = bawp_off_mv[i].as_int;
- }
+ if (mbmi->bawp_flag == 1) {
+ for (i = 0; i < is_comp_pred + 1; ++i) {
+ mbmi->mv[i].as_int = bawp_off_mv[i].as_int;
+ cur_mv[i].as_int = bawp_off_mv[i].as_int;
+ }
#if CONFIG_FLEX_MVRES
- mode_info[1][mbmi->pb_mv_precision][ref_mv_idx]
- .full_search_mv.as_int =
- mode_info[0][mbmi->pb_mv_precision][ref_mv_idx]
- .full_search_mv.as_int;
- mode_info[1][mbmi->pb_mv_precision][ref_mv_idx].full_mv_rate =
- mode_info[0][mbmi->pb_mv_precision][ref_mv_idx].full_mv_rate;
+ mode_info[1][mbmi->pb_mv_precision][ref_mv_idx]
+ .full_search_mv.as_int =
+ mode_info[0][mbmi->pb_mv_precision][ref_mv_idx]
+ .full_search_mv.as_int;
+ mode_info[1][mbmi->pb_mv_precision][ref_mv_idx].full_mv_rate =
+ mode_info[0][mbmi->pb_mv_precision][ref_mv_idx].full_mv_rate;
#else
- mode_info[1][ref_mv_idx].full_search_mv.as_int =
- mode_info[0][ref_mv_idx].full_search_mv.as_int;
- mode_info[1][ref_mv_idx].full_mv_rate =
- mode_info[0][ref_mv_idx].full_mv_rate;
+ mode_info[1][ref_mv_idx].full_search_mv.as_int =
+ mode_info[0][ref_mv_idx].full_search_mv.as_int;
+ mode_info[1][ref_mv_idx].full_mv_rate =
+ mode_info[0][ref_mv_idx].full_mv_rate;
#endif // CONFIG_FLEX_MVRES
- if (bawp_off_newmv_ret_val != 0) continue;
- } else {
+ if (bawp_off_newmv_ret_val != 0) continue;
+ } else {
#endif
- // The above call to build_cur_mv does not handle NEWMV modes. Build
- // the mv here if we have NEWMV for any predictors.
- if (have_newmv_in_inter_mode(this_mode)) {
+ // The above call to build_cur_mv does not handle NEWMV modes.
+ // Build the mv here if we have NEWMV for any predictors.
+ if (have_newmv_in_inter_mode(this_mode)) {
#if CONFIG_COLLECT_COMPONENT_TIMING
- start_timing(cpi, handle_newmv_time);
+ start_timing(cpi, handle_newmv_time);
#endif
- newmv_ret_val =
- handle_newmv(cpi, x, bsize, cur_mv, &rate_mv, args,
+ newmv_ret_val =
+ handle_newmv(cpi, x, bsize, cur_mv, &rate_mv, args,
#if CONFIG_FLEX_MVRES
#if CONFIG_BAWP
- mode_info[bawp_flag][mbmi->pb_mv_precision]);
+ mode_info[bawp_flag][mbmi->pb_mv_precision]);
#else
- mode_info[mbmi->pb_mv_precision]);
+ mode_info[mbmi->pb_mv_precision]);
#endif
#else
#if CONFIG_BAWP
@@ -4369,67 +4516,68 @@
#endif
#endif
#if CONFIG_COLLECT_COMPONENT_TIMING
- end_timing(cpi, handle_newmv_time);
+ end_timing(cpi, handle_newmv_time);
#endif
#if CONFIG_BAWP
- for (i = 0; i < is_comp_pred + 1; ++i) {
- bawp_off_mv[i].as_int = cur_mv[i].as_int;
+ for (i = 0; i < is_comp_pred + 1; ++i) {
+ bawp_off_mv[i].as_int = cur_mv[i].as_int;
+ }
+ bawp_off_newmv_ret_val = newmv_ret_val;
+ if (newmv_ret_val != 0) continue;
}
- bawp_off_newmv_ret_val = newmv_ret_val;
- if (newmv_ret_val != 0) continue;
}
- }
- if (have_newmv_in_inter_mode(this_mode)) {
+ if (have_newmv_in_inter_mode(this_mode)) {
#else
if (newmv_ret_val != 0) continue;
#endif
#if CONFIG_C071_SUBBLK_WARPMV && CONFIG_FLEX_MVRES
- int mv_outlim = 0;
- for (int ref = 0; ref < is_comp_pred + 1; ref++) {
- const PREDICTION_MODE single_mode =
- get_single_mode(this_mode, ref);
- if (single_mode == NEWMV) {
- SUBPEL_MOTION_SEARCH_PARAMS ms_params;
- MV ref_mv = av1_get_ref_mv(x, ref).as_mv;
- if (mbmi->pb_mv_precision < MV_PRECISION_HALF_PEL)
- lower_mv_precision(&ref_mv, mbmi->pb_mv_precision);
- av1_make_default_subpel_ms_params(
- &ms_params, cpi, x, bsize, &ref_mv, pb_mv_precision, NULL);
- if (!av1_is_subpelmv_in_range(&ms_params.mv_limits,
- cur_mv[ref].as_mv)) {
- mv_outlim = 1;
- break;
+ int mv_outlim = 0;
+ for (int ref = 0; ref < is_comp_pred + 1; ref++) {
+ const PREDICTION_MODE single_mode =
+ get_single_mode(this_mode, ref);
+ if (single_mode == NEWMV) {
+ SUBPEL_MOTION_SEARCH_PARAMS ms_params;
+ MV ref_mv = av1_get_ref_mv(x, ref).as_mv;
+ if (mbmi->pb_mv_precision < MV_PRECISION_HALF_PEL)
+ lower_mv_precision(&ref_mv, mbmi->pb_mv_precision);
+ av1_make_default_subpel_ms_params(&ms_params, cpi, x, bsize,
+ &ref_mv, pb_mv_precision,
+ NULL);
+ if (!av1_is_subpelmv_in_range(&ms_params.mv_limits,
+ cur_mv[ref].as_mv)) {
+ mv_outlim = 1;
+ break;
+ }
}
}
- }
- if (mv_outlim) continue;
+ if (mv_outlim) continue;
#endif // CONFIG_C071_SUBBLK_WARPMV && CONFIG_FLEX_MVRES
- // skip NEWMV mode in drl if the motion search result is the same
- // as a previous result
+ // skip NEWMV mode in drl if the motion search result is the
+ // same as a previous result
#if CONFIG_FLEX_MVRES
- int skip_new_mv =
- cpi->sf.inter_sf.skip_repeated_newmv ||
- (mbmi->pb_mv_precision != mbmi->max_mv_precision &&
- cpi->sf.flexmv_sf.skip_repeated_newmv_low_prec);
- if (skip_new_mv &&
- skip_repeated_newmv(
- cpi, x, bsize, do_tx_search, this_mode,
- mbmi->pb_mv_precision,
+ int skip_new_mv =
+ cpi->sf.inter_sf.skip_repeated_newmv ||
+ (mbmi->pb_mv_precision != mbmi->max_mv_precision &&
+ cpi->sf.flexmv_sf.skip_repeated_newmv_low_prec);
+ if (skip_new_mv &&
+ skip_repeated_newmv(
+ cpi, x, bsize, do_tx_search, this_mode,
+ mbmi->pb_mv_precision,
#if CONFIG_BAWP
- mbmi->bawp_flag,
+ mbmi->bawp_flag,
#endif
- &best_mbmi, motion_mode_cand, &ref_best_rd, &best_rd_stats,
- &best_rd_stats_y,
+ &best_mbmi, motion_mode_cand, &ref_best_rd,
+ &best_rd_stats, &best_rd_stats_y,
#if CONFIG_BAWP
- &best_rd_stats_uv,
- mode_info[bawp_flag][mbmi->pb_mv_precision], args,
+ &best_rd_stats_uv,
+ mode_info[bawp_flag][mbmi->pb_mv_precision], args,
#else
- &best_rd_stats_uv, mode_info[mbmi->pb_mv_precision], args,
+ &best_rd_stats_uv, mode_info[mbmi->pb_mv_precision], args,
#endif
- drl_cost, refs, cur_mv, &best_rd, orig_dst, ref_mv_idx))
+ drl_cost, refs, cur_mv, &best_rd, orig_dst, ref_mv_idx))
#else
if (cpi->sf.inter_sf.skip_repeated_newmv &&
skip_repeated_newmv(
@@ -4446,52 +4594,52 @@
#endif
args, drl_cost, refs, cur_mv, &best_rd, orig_dst, ref_mv_idx))
#endif
- continue;
- }
+ continue;
+ }
#if CONFIG_FLEX_MVRES || CONFIG_BAWP
- av1_init_rd_stats(rd_stats);
- // Initialize compound mode data
- mbmi->interinter_comp.type = COMPOUND_AVERAGE;
- mbmi->comp_group_idx = 0;
- if (mbmi->ref_frame[1] == INTRA_FRAME)
- mbmi->ref_frame[1] = NONE_FRAME;
+ av1_init_rd_stats(rd_stats);
+ // Initialize compound mode data
+ mbmi->interinter_comp.type = COMPOUND_AVERAGE;
+ mbmi->comp_group_idx = 0;
+ if (mbmi->ref_frame[1] == INTRA_FRAME)
+ mbmi->ref_frame[1] = NONE_FRAME;
- mbmi->num_proj_ref = 0;
- mbmi->motion_mode = SIMPLE_TRANSLATION;
- mbmi->ref_mv_idx = ref_mv_idx;
+ mbmi->num_proj_ref = 0;
+ mbmi->motion_mode = SIMPLE_TRANSLATION;
+ mbmi->ref_mv_idx = ref_mv_idx;
- // Compute cost for signalling this DRL index
- rd_stats->rate = base_rate;
+ // Compute cost for signalling this DRL index
+ rd_stats->rate = base_rate;
#if CONFIG_FLEX_MVRES
- rd_stats->rate += flex_mv_cost[mbmi->pb_mv_precision];
+ rd_stats->rate += flex_mv_cost[mbmi->pb_mv_precision];
#endif
- rd_stats->rate += drl_cost;
+ rd_stats->rate += drl_cost;
#endif
#if CONFIG_IMPROVED_JMVD && CONFIG_JOINT_MVD
- if (is_joint_mvd_coding_mode(mbmi->mode)) {
- int jmvd_scale_mode_cost =
+ if (is_joint_mvd_coding_mode(mbmi->mode)) {
+ int jmvd_scale_mode_cost =
#if CONFIG_ADAPTIVE_MVD
- is_joint_amvd_coding_mode(mbmi->mode)
- ? mode_costs
- ->jmvd_amvd_scale_mode_cost[mbmi->jmvd_scale_mode]
- :
+ is_joint_amvd_coding_mode(mbmi->mode)
+ ? mode_costs
+ ->jmvd_amvd_scale_mode_cost[mbmi->jmvd_scale_mode]
+ :
#endif // CONFIG_ADAPTIVE_MVD
- mode_costs->jmvd_scale_mode_cost[mbmi->jmvd_scale_mode];
- rd_stats->rate += jmvd_scale_mode_cost;
- }
+ mode_costs->jmvd_scale_mode_cost[mbmi->jmvd_scale_mode];
+ rd_stats->rate += jmvd_scale_mode_cost;
+ }
#endif // CONFIG_IMPROVED_JMVD && CONFIG_JOINT_MVD
- rd_stats->rate += rate_mv;
+ rd_stats->rate += rate_mv;
- // Copy the motion vector for this mode into mbmi struct
- for (i = 0; i < is_comp_pred + 1; ++i) {
- mbmi->mv[i].as_int = cur_mv[i].as_int;
- }
+ // Copy the motion vector for this mode into mbmi struct
+ for (i = 0; i < is_comp_pred + 1; ++i) {
+ mbmi->mv[i].as_int = cur_mv[i].as_int;
+ }
#if CONFIG_C071_SUBBLK_WARPMV
#if CONFIG_FLEX_MVRES
- assert(check_mv_precision(cm, mbmi, x));
+ assert(check_mv_precision(cm, mbmi, x));
#endif
#else
#if CONFIG_FLEX_MVRES
@@ -4499,169 +4647,188 @@
#endif
#endif // CONFIG_C071_SUBBLK_WARPMV
- const int like_nearest = (mbmi->mode == NEARMV ||
+ const int like_nearest = (mbmi->mode == NEARMV ||
#if CONFIG_WARPMV
- mbmi->mode == WARPMV ||
+ mbmi->mode == WARPMV ||
#endif // CONFIG_WARPMV
#if CONFIG_OPTFLOW_REFINEMENT
- mbmi->mode == NEAR_NEARMV_OPTFLOW ||
+ mbmi->mode == NEAR_NEARMV_OPTFLOW ||
#endif // CONFIG_OPTFLOW_REFINEMENT
- mbmi->mode == NEAR_NEARMV) &&
- mbmi->ref_mv_idx == 0;
- if (RDCOST(x->rdmult, rd_stats->rate, 0) > ref_best_rd &&
- !like_nearest) {
- continue;
- }
-
- // Skip the rest of the search if prune_ref_mv_idx_search speed
- // feature is enabled, and the current MV is similar to a previous
- // one.
- if (cpi->sf.inter_sf.prune_ref_mv_idx_search && is_comp_pred &&
- prune_ref_mv_idx_search(&cm->features, ref_mv_idx,
- best_ref_mv_idx,
-#if CONFIG_FLEX_MVRES
- save_mv[mbmi->pb_mv_precision], mbmi,
-#else
-
- save_mv, mbmi,
-#endif
- cpi->sf.inter_sf.prune_ref_mv_idx_search))
- continue;
-
-#if CONFIG_COLLECT_COMPONENT_TIMING
- start_timing(cpi, compound_type_rd_time);
-#endif
- int skip_build_pred = 0;
- const int mi_row = xd->mi_row;
- const int mi_col = xd->mi_col;
-
- // Handle a compound predictor, continue if it is determined this
- // cannot be the best compound mode
- if (is_comp_pred
-#if IMPROVED_AMVD && CONFIG_JOINT_MVD
- && !is_joint_amvd_coding_mode(mbmi->mode)
-#endif // IMPROVED_AMVD && CONFIG_JOINT_MVD
- ) {
- const int not_best_mode = process_compound_inter_mode(
- cpi, x, args, ref_best_rd, cur_mv, bsize,
- &compmode_interinter_cost, rd_buffers, &orig_dst, &tmp_dst,
- &rate_mv, rd_stats, skip_rd, &skip_build_pred);
- if (not_best_mode) continue;
- }
-
-#if CONFIG_C071_SUBBLK_WARPMV
-#if CONFIG_FLEX_MVRES
- assert(check_mv_precision(cm, mbmi, x));
-#endif
-#else
-#if CONFIG_FLEX_MVRES
- assert(check_mv_precision(cm, mbmi));
-#endif
-#endif // CONFIG_C071_SUBBLK_WARPMV
-
-#if CONFIG_COLLECT_COMPONENT_TIMING
- end_timing(cpi, compound_type_rd_time);
-#endif
-
-#if CONFIG_COLLECT_COMPONENT_TIMING
- start_timing(cpi, interpolation_filter_search_time);
-#endif
- // Determine the interpolation filter for this mode
- ret_val = av1_interpolation_filter_search(
- x, cpi, tile_data, bsize, &tmp_dst, &orig_dst, &rd, &rs,
- &skip_build_pred, args, ref_best_rd);
-
-#if CONFIG_C071_SUBBLK_WARPMV
-#if CONFIG_FLEX_MVRES
- assert(check_mv_precision(cm, mbmi, x));
-#endif
-#else
-#if CONFIG_FLEX_MVRES
- assert(check_mv_precision(cm, mbmi));
-#endif
-#endif // CONFIG_C071_SUBBLK_WARPMV
-#if CONFIG_COLLECT_COMPONENT_TIMING
- end_timing(cpi, interpolation_filter_search_time);
-#endif
- if (args->modelled_rd != NULL && !is_comp_pred) {
- args->modelled_rd[this_mode][ref_mv_idx][refs[0]] = rd;
- }
-
-#if CONFIG_WARPMV
- if (mbmi->mode != WARPMV) {
-#endif // CONFIG_WARPMV
- if (ret_val != 0) {
- restore_dst_buf(xd, orig_dst, num_planes);
- continue;
- } else if (cpi->sf.inter_sf
- .model_based_post_interp_filter_breakout &&
- ref_best_rd != INT64_MAX &&
- (rd >> 3) * 3 > ref_best_rd) {
- restore_dst_buf(xd, orig_dst, num_planes);
+ mbmi->mode == NEAR_NEARMV) &&
+ mbmi->ref_mv_idx == 0;
+ if (RDCOST(x->rdmult, rd_stats->rate, 0) > ref_best_rd &&
+ !like_nearest) {
continue;
}
-#if CONFIG_WARPMV
- }
-#endif // CONFIG_WARPMV
- // Compute modelled RD if enabled
- if (args->modelled_rd != NULL) {
-#if CONFIG_OPTFLOW_REFINEMENT
- if (is_comp_pred && this_mode < NEAR_NEARMV_OPTFLOW) {
-#else
- if (is_comp_pred) {
-#endif // CONFIG_OPTFLOW_REFINEMENT
- const int mode0 = compound_ref0_mode(this_mode);
- const int mode1 = compound_ref1_mode(this_mode);
- const int64_t mrd =
- AOMMIN(args->modelled_rd[mode0][ref_mv_idx][refs[0]],
- args->modelled_rd[mode1][ref_mv_idx][refs[1]]);
- if ((rd >> 3) * 6 > mrd && ref_best_rd < INT64_MAX) {
+ // Skip the rest of the search if prune_ref_mv_idx_search speed
+ // feature is enabled, and the current MV is similar to a previous
+ // one.
+ if (cpi->sf.inter_sf.prune_ref_mv_idx_search && is_comp_pred &&
+ prune_ref_mv_idx_search(
+ &cm->features, ref_mv_idx, best_ref_mv_idx,
+#if CONFIG_FLEX_MVRES
+ save_mv[mbmi->pb_mv_precision], mbmi,
+#else
+
+ save_mv, mbmi,
+#endif
+ cpi->sf.inter_sf.prune_ref_mv_idx_search))
+ continue;
+
+#if CONFIG_COLLECT_COMPONENT_TIMING
+ start_timing(cpi, compound_type_rd_time);
+#endif
+ int skip_build_pred = 0;
+ const int mi_row = xd->mi_row;
+ const int mi_col = xd->mi_col;
+
+#if CONFIG_CWP
+ // set cwp_search_mask
+ if (is_cwp_allowed(mbmi) && mbmi->cwp_idx == CWP_EQUAL) {
+ set_cwp_search_mask(cpi, x, bsize, rd_buffers->pred0,
+ rd_buffers->pred1, rd_buffers->residual1,
+ rd_buffers->diff10, block_size_wide[bsize],
+ cwp_search_mask);
+ }
+#endif // CONFIG_CWP
+
+ // Handle a compound predictor, continue if it is determined this
+ // cannot be the best compound mode
+ if (is_comp_pred
+#if IMPROVED_AMVD && CONFIG_JOINT_MVD
+ && !is_joint_amvd_coding_mode(mbmi->mode)
+#endif // IMPROVED_AMVD && CONFIG_JOINT_MVD
+ ) {
+ const int not_best_mode = process_compound_inter_mode(
+ cpi, x, args, ref_best_rd, cur_mv, bsize,
+ &compmode_interinter_cost, rd_buffers, &orig_dst, &tmp_dst,
+ &rate_mv, rd_stats, skip_rd, &skip_build_pred);
+ if (not_best_mode) continue;
+ }
+
+#if CONFIG_CWP
+ if (cm->features.enable_cwp && is_comp_pred &&
+ is_joint_amvd_coding_mode(mbmi->mode)) {
+ if (is_cwp_allowed(mbmi)) {
+ compmode_interinter_cost =
+ av1_get_cwp_idx_cost(mbmi->cwp_idx, cm, x);
+ }
+ }
+#endif // CONFIG_CWP
+#if CONFIG_C071_SUBBLK_WARPMV
+#if CONFIG_FLEX_MVRES
+ assert(check_mv_precision(cm, mbmi, x));
+#endif
+#else
+#if CONFIG_FLEX_MVRES
+ assert(check_mv_precision(cm, mbmi));
+#endif
+#endif // CONFIG_C071_SUBBLK_WARPMV
+
+#if CONFIG_COLLECT_COMPONENT_TIMING
+ end_timing(cpi, compound_type_rd_time);
+#endif
+
+#if CONFIG_COLLECT_COMPONENT_TIMING
+ start_timing(cpi, interpolation_filter_search_time);
+#endif
+ // Determine the interpolation filter for this mode
+ ret_val = av1_interpolation_filter_search(
+ x, cpi, tile_data, bsize, &tmp_dst, &orig_dst, &rd, &rs,
+ &skip_build_pred, args, ref_best_rd);
+
+#if CONFIG_C071_SUBBLK_WARPMV
+#if CONFIG_FLEX_MVRES
+ assert(check_mv_precision(cm, mbmi, x));
+#endif
+#else
+#if CONFIG_FLEX_MVRES
+ assert(check_mv_precision(cm, mbmi));
+#endif
+#endif // CONFIG_C071_SUBBLK_WARPMV
+#if CONFIG_COLLECT_COMPONENT_TIMING
+ end_timing(cpi, interpolation_filter_search_time);
+#endif
+ if (args->modelled_rd != NULL && !is_comp_pred) {
+ args->modelled_rd[this_mode][ref_mv_idx][refs[0]] = rd;
+ }
+
+#if CONFIG_WARPMV
+ if (mbmi->mode != WARPMV) {
+#endif // CONFIG_WARPMV
+ if (ret_val != 0) {
+ restore_dst_buf(xd, orig_dst, num_planes);
+ continue;
+ } else if (cpi->sf.inter_sf
+ .model_based_post_interp_filter_breakout &&
+ ref_best_rd != INT64_MAX &&
+ (rd >> 3) * 3 > ref_best_rd) {
restore_dst_buf(xd, orig_dst, num_planes);
continue;
}
- }
- }
- rd_stats->rate += compmode_interinter_cost;
- if (skip_build_pred != 1
#if CONFIG_WARPMV
- && (mbmi->mode != WARPMV)
+ }
+#endif // CONFIG_WARPMV
+ // Compute modelled RD if enabled
+ if (args->modelled_rd != NULL) {
+#if CONFIG_OPTFLOW_REFINEMENT
+ if (is_comp_pred && this_mode < NEAR_NEARMV_OPTFLOW) {
+#else
+ if (is_comp_pred) {
+#endif // CONFIG_OPTFLOW_REFINEMENT
+ const int mode0 = compound_ref0_mode(this_mode);
+ const int mode1 = compound_ref1_mode(this_mode);
+ const int64_t mrd =
+ AOMMIN(args->modelled_rd[mode0][ref_mv_idx][refs[0]],
+ args->modelled_rd[mode1][ref_mv_idx][refs[1]]);
+
+ if ((rd >> 3) * 6 > mrd && ref_best_rd < INT64_MAX) {
+ restore_dst_buf(xd, orig_dst, num_planes);
+ continue;
+ }
+ }
+ }
+ rd_stats->rate += compmode_interinter_cost;
+ if (skip_build_pred != 1
+#if CONFIG_WARPMV
+ && (mbmi->mode != WARPMV)
#endif // CONFIG_WARPMV
- ) {
- // Build this inter predictor if it has not been previously built
- av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, &orig_dst,
- bsize, 0, av1_num_planes(cm) - 1);
- }
+ ) {
+ // Build this inter predictor if it has not been previously built
+ av1_enc_build_inter_predictor(cm, xd, mi_row, mi_col, &orig_dst,
+ bsize, 0, av1_num_planes(cm) - 1);
+ }
#if CONFIG_WARPMV
- // So far we did not make prediction for WARPMV mode
- assert(IMPLIES(mbmi->mode == WARPMV, skip_build_pred != 1));
+ // So far we did not make prediction for WARPMV mode
+ assert(IMPLIES(mbmi->mode == WARPMV, skip_build_pred != 1));
#endif // CONFIG_WARPMV
#if CONFIG_COLLECT_COMPONENT_TIMING
- start_timing(cpi, motion_mode_rd_time);
+ start_timing(cpi, motion_mode_rd_time);
#endif
- int rate2_nocoeff = rd_stats->rate;
+ int rate2_nocoeff = rd_stats->rate;
#if CONFIG_WARPMV
- assert(IMPLIES(mbmi->mode == WARPMV,
- (rd_stats->rate == base_rate && rate_mv == 0)));
+ assert(IMPLIES(mbmi->mode == WARPMV,
+ (rd_stats->rate == base_rate && rate_mv == 0)));
#endif // CONFIG_WARPMV
// Determine the motion mode. This will be one of SIMPLE_TRANSLATION,
// OBMC_CAUSAL or WARPED_CAUSAL or WARP_EXTEND or WARP_DELTA
- ret_val = motion_mode_rd(cpi, tile_data, x, bsize, rd_stats,
- rd_stats_y, rd_stats_uv, args, ref_best_rd,
- skip_rd, &rate_mv, &orig_dst, best_est_rd,
- do_tx_search, inter_modes_info, 0);
+ ret_val = motion_mode_rd(cpi, tile_data, x, bsize, rd_stats,
+ rd_stats_y, rd_stats_uv, args, ref_best_rd,
+ skip_rd, &rate_mv, &orig_dst, best_est_rd,
+ do_tx_search, inter_modes_info, 0);
#if CONFIG_COLLECT_COMPONENT_TIMING
- end_timing(cpi, motion_mode_rd_time);
+ end_timing(cpi, motion_mode_rd_time);
#endif
- assert(IMPLIES(!av1_check_newmv_joint_nonzero(cm, x),
- ret_val == INT64_MAX));
+ assert(IMPLIES(!av1_check_newmv_joint_nonzero(cm, x),
+ ret_val == INT64_MAX));
#if CONFIG_C071_SUBBLK_WARPMV
#if CONFIG_FLEX_MVRES
- assert(check_mv_precision(cm, mbmi, x));
+ assert(check_mv_precision(cm, mbmi, x));
#endif
#else
#if CONFIG_FLEX_MVRES
@@ -4669,34 +4836,35 @@
#endif
#endif // CONFIG_C071_SUBBLK_WARPMV
- if (ret_val != INT64_MAX) {
- int64_t tmp_rd = RDCOST(x->rdmult, rd_stats->rate, rd_stats->dist);
+ if (ret_val != INT64_MAX) {
+ int64_t tmp_rd =
+ RDCOST(x->rdmult, rd_stats->rate, rd_stats->dist);
#if CONFIG_FLEX_MVRES
- if (is_pb_mv_precision_active(cm, mbmi, bsize) &&
- tmp_rd < best_precision_rd_so_far) {
- best_precision_so_far = mbmi->pb_mv_precision;
- best_precision_rd_so_far = tmp_rd;
- }
+ if (is_pb_mv_precision_active(cm, mbmi, bsize) &&
+ tmp_rd < best_precision_rd_so_far) {
+ best_precision_so_far = mbmi->pb_mv_precision;
+ best_precision_rd_so_far = tmp_rd;
+ }
#if CONFIG_BAWP
- if (tmp_rd <
- mode_info[bawp_flag][mbmi->pb_mv_precision][ref_mv_idx].rd) {
- // Only update mode_info if the new result is actually better.
- mode_info[bawp_flag][mbmi->pb_mv_precision][ref_mv_idx]
- .mv.as_int = mbmi->mv[0].as_int;
- mode_info[bawp_flag][mbmi->pb_mv_precision][ref_mv_idx].rate_mv =
- rate_mv;
- mode_info[bawp_flag][mbmi->pb_mv_precision][ref_mv_idx].rd =
- tmp_rd;
- }
+ if (tmp_rd <
+ mode_info[bawp_flag][mbmi->pb_mv_precision][ref_mv_idx].rd) {
+ // Only update mode_info if the new result is actually better.
+ mode_info[bawp_flag][mbmi->pb_mv_precision][ref_mv_idx]
+ .mv.as_int = mbmi->mv[0].as_int;
+ mode_info[bawp_flag][mbmi->pb_mv_precision][ref_mv_idx]
+ .rate_mv = rate_mv;
+ mode_info[bawp_flag][mbmi->pb_mv_precision][ref_mv_idx].rd =
+ tmp_rd;
+ }
#else
- if (tmp_rd < mode_info[mbmi->pb_mv_precision][ref_mv_idx].rd) {
- // Only update mode_info if the new result is actually better.
- mode_info[mbmi->pb_mv_precision][ref_mv_idx].mv.as_int =
- mbmi->mv[0].as_int;
- mode_info[mbmi->pb_mv_precision][ref_mv_idx].rate_mv = rate_mv;
- mode_info[mbmi->pb_mv_precision][ref_mv_idx].rd = tmp_rd;
- }
+ if (tmp_rd < mode_info[mbmi->pb_mv_precision][ref_mv_idx].rd) {
+ // Only update mode_info if the new result is actually better.
+ mode_info[mbmi->pb_mv_precision][ref_mv_idx].mv.as_int =
+ mbmi->mv[0].as_int;
+ mode_info[mbmi->pb_mv_precision][ref_mv_idx].rate_mv = rate_mv;
+ mode_info[mbmi->pb_mv_precision][ref_mv_idx].rd = tmp_rd;
+ }
#endif
#else
#if CONFIG_BAWP
@@ -4716,38 +4884,38 @@
#endif // CONFIG_BAWP
#endif // CONFIG_FLEX_MVRES
- // Collect mode stats for multiwinner mode processing
- store_winner_mode_stats(
- &cpi->common, x, mbmi, rd_stats, rd_stats_y, rd_stats_uv, refs,
- mbmi->mode, NULL, bsize, tmp_rd,
- cpi->sf.winner_mode_sf.multi_winner_mode_type, do_tx_search);
- if (tmp_rd < best_rd) {
- // Update the best rd stats if we found the best mode so far
- best_rd_stats = *rd_stats;
- best_rd_stats_y = *rd_stats_y;
- best_rd_stats_uv = *rd_stats_uv;
- best_rd = tmp_rd;
- best_mbmi = *mbmi;
+ // Collect mode stats for multiwinner mode processing
+ store_winner_mode_stats(
+ &cpi->common, x, mbmi, rd_stats, rd_stats_y, rd_stats_uv,
+ refs, mbmi->mode, NULL, bsize, tmp_rd,
+ cpi->sf.winner_mode_sf.multi_winner_mode_type, do_tx_search);
+ if (tmp_rd < best_rd) {
+ // Update the best rd stats if we found the best mode so far
+ best_rd_stats = *rd_stats;
+ best_rd_stats_y = *rd_stats_y;
+ best_rd_stats_uv = *rd_stats_uv;
+ best_rd = tmp_rd;
+ best_mbmi = *mbmi;
#if CONFIG_C071_SUBBLK_WARPMV
- if (is_warp_mode(mbmi->motion_mode)) {
- store_submi(xd, cm, best_submi, bsize);
- }
+ if (is_warp_mode(mbmi->motion_mode)) {
+ store_submi(xd, cm, best_submi, bsize);
+ }
#endif // CONFIG_C071_SUBBLK_WARPMV
- best_xskip_txfm = txfm_info->skip_txfm;
- memcpy(best_blk_skip, txfm_info->blk_skip,
- sizeof(best_blk_skip[0]) * xd->height * xd->width);
- av1_copy_array(best_tx_type_map, xd->tx_type_map,
- xd->height * xd->width);
+ best_xskip_txfm = txfm_info->skip_txfm;
+ memcpy(best_blk_skip, txfm_info->blk_skip,
+ sizeof(best_blk_skip[0]) * xd->height * xd->width);
+ av1_copy_array(best_tx_type_map, xd->tx_type_map,
+ xd->height * xd->width);
#if CONFIG_CROSS_CHROMA_TX
- av1_copy_array(best_cctx_type_map, xd->cctx_type_map,
- xd->height * xd->width);
+ av1_copy_array(best_cctx_type_map, xd->cctx_type_map,
+ xd->height * xd->width);
#endif // CONFIG_CROSS_CHROMA_TX
- motion_mode_cand->rate_mv = rate_mv;
- motion_mode_cand->rate2_nocoeff = rate2_nocoeff;
- }
+ motion_mode_cand->rate_mv = rate_mv;
+ motion_mode_cand->rate2_nocoeff = rate2_nocoeff;
+ }
#if CONFIG_C071_SUBBLK_WARPMV
#if CONFIG_FLEX_MVRES
- assert(check_mv_precision(cm, mbmi, x));
+ assert(check_mv_precision(cm, mbmi, x));
#endif
#else
#if CONFIG_FLEX_MVRES
@@ -4755,18 +4923,29 @@
#endif
#endif // CONFIG_C071_SUBBLK_WARPMV
- if (tmp_rd < ref_best_rd) {
- ref_best_rd = tmp_rd;
- best_ref_mv_idx = ref_mv_idx;
+#if CONFIG_CWP
+ if (is_cwp_allowed(mbmi)) {
+ if (tmp_rd < best_cwp_cost) {
+ best_cwp_cost = tmp_rd;
+ best_cwp_idx = mbmi->cwp_idx;
+ }
+ }
+#endif // CONFIG_CWP
+ if (tmp_rd < ref_best_rd) {
+ ref_best_rd = tmp_rd;
+ best_ref_mv_idx = ref_mv_idx;
+ }
}
- }
- restore_dst_buf(xd, orig_dst, num_planes);
+ restore_dst_buf(xd, orig_dst, num_planes);
#if CONFIG_BAWP
- }
+ }
#endif
#if CONFIG_FLEX_MVRES
- }
+ }
#endif
+#if CONFIG_CWP
+ }
+#endif // CONFIG_CWP
}
#if CONFIG_IMPROVED_JMVD
}
@@ -5325,6 +5504,9 @@
mbmi->mv[0].as_mv = dv;
mbmi->interp_fltr = BILINEAR;
mbmi->skip_txfm[xd->tree_type == CHROMA_PART] = 0;
+#if CONFIG_CWP
+ mbmi->cwp_idx = CWP_EQUAL;
+#endif // CONFIG_CWP
#if CONFIG_WARP_REF_LIST
mbmi->warp_ref_idx = 0;
@@ -5558,8 +5740,12 @@
const MV_REFERENCE_FRAME second_ref_frame = skip_mode_info->ref_frame_idx_1;
#if CONFIG_OPTFLOW_REFINEMENT
- const PREDICTION_MODE this_mode =
- cm->features.opfl_refine_type ? NEAR_NEARMV_OPTFLOW : NEAR_NEARMV;
+ const PREDICTION_MODE this_mode = cm->features.opfl_refine_type
+#if CONFIG_CWP
+ && !cm->features.enable_cwp
+#endif // CONFIG_CWP
+ ? NEAR_NEARMV_OPTFLOW
+ : NEAR_NEARMV;
#else
const PREDICTION_MODE this_mode = NEAR_NEARMV;
#endif // CONFIG_OPTFLOW_REFINEMENT
@@ -5575,6 +5761,9 @@
mbmi->uv_mode = UV_DC_PRED;
mbmi->ref_frame[0] = ref_frame;
mbmi->ref_frame[1] = second_ref_frame;
+#if CONFIG_CWP
+ mbmi->cwp_idx = CWP_EQUAL;
+#endif // CONFIG_CWP
#if CONFIG_IBC_SR_EXT
mbmi->use_intrabc[xd->tree_type == CHROMA_PART] = 0;
#endif // CONFIG_IBC_SR_EXT
@@ -5610,22 +5799,26 @@
#endif // CONFIG_SKIP_MODE_DRL_WITH_REF_IDX
#if CONFIG_OPTFLOW_REFINEMENT
+#if CONFIG_CWP
+ assert(this_mode == (cm->features.opfl_refine_type && !cm->features.enable_cwp
+ ? NEAR_NEARMV_OPTFLOW
+ : NEAR_NEARMV));
+ assert(mbmi->mode ==
+ (cm->features.opfl_refine_type && !cm->features.enable_cwp
+ ? NEAR_NEARMV_OPTFLOW
+ : NEAR_NEARMV));
+#else // CONFIG_CWP
assert(this_mode ==
(cm->features.opfl_refine_type ? NEAR_NEARMV_OPTFLOW : NEAR_NEARMV));
assert(mbmi->mode ==
(cm->features.opfl_refine_type ? NEAR_NEARMV_OPTFLOW : NEAR_NEARMV));
+#endif // CONFIG_CWP
#else
assert(this_mode == NEAR_NEARMV);
assert(mbmi->mode == NEAR_NEARMV);
#endif
assert(mbmi->ref_mv_idx == 0);
-#if !CONFIG_SKIP_MODE_DRL_WITH_REF_IDX
- if (!build_cur_mv(mbmi->mv, this_mode, cm, x, 0)) {
- assert(av1_check_newmv_joint_nonzero(cm, x));
- return;
- }
-#endif // CONFIG_SKIP_MODE_DRL_WITH_REF_IDX
mbmi->fsc_mode[xd->tree_type == CHROMA_PART] = 0;
#if CONFIG_BAWP
mbmi->bawp_flag = 0;
@@ -5709,6 +5902,12 @@
mbmi->ref_frame[1] =
xd->skip_mvp_candidate_list.ref_frame1[mbmi->ref_mv_idx];
+#if CONFIG_CWP
+ // Infer the index of compound weighted prediction from DRL list
+ mbmi->cwp_idx =
+ xd->skip_mvp_candidate_list.ref_mv_stack[mbmi->ref_mv_idx].cwp_idx;
+#endif // CONFIG_CWP
+
if (!build_cur_mv(mbmi->mv, this_mode, cm, x, 0)) {
assert(av1_check_newmv_joint_nonzero(cm, x));
continue;
@@ -5755,6 +5954,6 @@
       av1_rd_cost_update(x->rdmult, best_rd_cost);
       search_state->best_rd = best_rd_cost->rdcost;
     }
// loop of ref_mv_idx
const int ref_set = get_drl_refmv_count(cm->features.max_drl_bits, x,
mbmi->ref_frame, this_mode);
@@ -5783,6 +5983,6 @@
     skip_mode_rd_stats.rate = mode_costs->skip_mode_cost[skip_mode_ctx][1];
     // add ref_mv_idx rate
const int drl_cost =
#if CONFIG_SKIP_MODE_DRL_WITH_REF_IDX
get_skip_drl_cost(cpi->common.features.max_drl_bits, mbmi, x);
@@ -5814,12 +6016,12 @@
search_state->best_mbmode.fsc_mode[xd->tree_type == CHROMA_PART] = 0;
-#if CONFIG_OPTFLOW_REFINEMENT
- search_state->best_mbmode.mode =
- (cm->features.opfl_refine_type ? NEAR_NEARMV_OPTFLOW : NEAR_NEARMV);
-#else
- search_state->best_mbmode.mode = NEAR_NEARMV;
-#endif // CONFIG_OPTFLOW_REFINEMENT
+ search_state->best_mbmode.mode = (cm->features.opfl_refine_type
+#if CONFIG_CWP
+ && !cm->features.enable_cwp
+#endif // CONFIG_CWP
+ ? NEAR_NEARMV_OPTFLOW
+ : NEAR_NEARMV);
search_state->best_mbmode.ref_frame[0] = mbmi->ref_frame[0];
search_state->best_mbmode.ref_frame[1] = mbmi->ref_frame[1];
search_state->best_mbmode.mv[0].as_int = mbmi->mv[0].as_int;
@@ -6698,6 +6900,6 @@
   av1_zero(search_state->single_newmv);
   av1_zero(search_state->single_newmv_rate);
   av1_zero(search_state->single_newmv_valid);
for (int i = 0; i < MB_MODE_COUNT; ++i) {
for (int j = 0; j < MAX_REF_MV_SEARCH; ++j) {
for (int ref_frame = 0; ref_frame < SINGLE_REF_FRAMES; ++ref_frame) {
@@ -7022,6 +7225,9 @@
pmi->palette_size[1] = 0;
mbmi->filter_intra_mode_info.use_filter_intra = 0;
mbmi->mv[0].as_int = mbmi->mv[1].as_int = 0;
+#if CONFIG_CWP
+ mbmi->cwp_idx = CWP_EQUAL;
+#endif // CONFIG_CWP
mbmi->motion_mode = SIMPLE_TRANSLATION;
mbmi->interintra_mode = (INTERINTRA_MODE)(II_DC_PRED - 1);
set_default_interp_filters(mbmi,
@@ -7046,6 +7252,9 @@
#if CONFIG_BAWP
mbmi->bawp_flag = 0;
#endif
+#if CONFIG_IMPROVED_JMVD && CONFIG_JOINT_MVD
+ mbmi->jmvd_scale_mode = 0;
+#endif // CONFIG_IMPROVED_JMVD && CONFIG_JOINT_MVD
}
#if CONFIG_C071_SUBBLK_WARPMV
@@ -8915,6 +9124,9 @@
x->txfm_search_info.skip_txfm = 1;
mbmi->ref_mv_idx = 0;
+#if CONFIG_CWP
+ mbmi->cwp_idx = CWP_EQUAL;
+#endif // CONFIG_CWP
mbmi->motion_mode = SIMPLE_TRANSLATION;
#if CONFIG_FLEX_MVRES
diff --git a/build/cmake/aom_config_defaults.cmake b/build/cmake/aom_config_defaults.cmake
index d79083d..667da53 100644
--- a/build/cmake/aom_config_defaults.cmake
+++ b/build/cmake/aom_config_defaults.cmake
@@ -243,6 +243,7 @@
set_aom_config_var(CONFIG_SKIP_TXFM_OPT 1
"Enable to optimize the signaling of skip_txfm")
+set_aom_config_var(CONFIG_CWP 1 "Enables compound weighted prediction.")
# This is an encode-only change.
set_aom_config_var(CONFIG_MV_SEARCH_RANGE 1
diff --git a/common/args.c b/common/args.c
index ffec73b..099c775 100644
--- a/common/args.c
+++ b/common/args.c
@@ -96,6 +96,9 @@
#if CONFIG_BAWP
GET_PARAMS(enable_bawp);
#endif // CONFIG_BAWP
+#if CONFIG_CWP
+ GET_PARAMS(enable_cwp);
+#endif // CONFIG_CWP
GET_PARAMS(enable_fsc);
#if CONFIG_ORIP
GET_PARAMS(enable_orip);
diff --git a/common/av1_config.c b/common/av1_config.c
index e8a9215..e03427a 100644
--- a/common/av1_config.c
+++ b/common/av1_config.c
@@ -259,6 +259,9 @@
#if CONFIG_BAWP
AV1C_READ_BIT_OR_RETURN_ERROR(enable_bawp);
#endif // CONFIG_BAWP
+#if CONFIG_CWP
+ AV1C_READ_BIT_OR_RETURN_ERROR(enable_cwp);
+#endif // CONFIG_CWP
AV1C_READ_BIT_OR_RETURN_ERROR(enable_fsc);
#if CONFIG_CCSO
AV1C_READ_BIT_OR_RETURN_ERROR(enable_ccso);