Remove convolve_opt and clean up

The CONVOLVE_OPT_ROUND mode is no longer required: every remaining caller
takes the no-round path, which keeps the convolve output at 32 bits in
ConvolveParams::dst. Remove the CONVOLVE_OPT enum, the ConvolveParams
round field, and the dead round-mode branches in reconinter.c and
reconinter.h.
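
For reference, the simplified ConvolveParams set-up now looks roughly like
the sketch below. Only the fields this patch touches are shown; the helper
name and the ROUND0_BITS fallback are illustrative, and the real
definitions live in av1/common/convolve.h:

    #include <stdint.h>

    #ifndef ROUND0_BITS
    #define ROUND0_BITS 3 /* placeholder; the real value comes from convolve.h */
    #endif

    typedef int32_t CONV_BUF_TYPE;

    typedef struct ConvolveParams {
      int ref;
      int do_average;
      CONV_BUF_TYPE *dst; /* 32-bit intermediate buffer, always used now */
      int dst_stride;
      int round_0;        /* first-stage rounding shift */
      int is_compound;
    } ConvolveParams;

    /* Hypothetical helper mirroring the initialization in convolve.h. */
    ConvolveParams make_conv_params_sketch(int ref, int do_average,
                                           CONV_BUF_TYPE *dst, int dst_stride,
                                           int is_compound) {
      ConvolveParams conv_params;
      conv_params.ref = ref;
      conv_params.do_average = do_average;
      conv_params.dst = dst;
      conv_params.dst_stride = dst_stride;
      conv_params.round_0 = ROUND0_BITS;
      conv_params.is_compound = is_compound;
      /* No conv_params.round any more: no-round behavior is implicit. */
      return conv_params;
    }

Callers that previously checked conv_params.round == CONVOLVE_OPT_NO_ROUND
can simply drop the check, as reconinter.c does below.
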
Change-Id: Ib798c97bb32749adea610ff53113dab79f67fb37
diff --git a/av1/common/convolve.h b/av1/common/convolve.h
index bb32fab..554d144 100644
--- a/av1/common/convolve.h
+++ b/av1/common/convolve.h
@@ -17,18 +17,11 @@
extern "C" {
#endif
-typedef enum CONVOLVE_OPT {
- // indicate the results in dst buf is rounded by FILTER_BITS or not
- CONVOLVE_OPT_ROUND,
- CONVOLVE_OPT_NO_ROUND,
-} CONVOLVE_OPT;
-
typedef int32_t CONV_BUF_TYPE;
typedef struct ConvolveParams {
int ref;
int do_average;
- CONVOLVE_OPT round;
CONV_BUF_TYPE *dst;
int dst_stride;
int round_0;
@@ -99,7 +92,6 @@
conv_params.ref = ref;
conv_params.do_average = do_average;
assert(IMPLIES(do_average, is_compound));
- conv_params.round = CONVOLVE_OPT_NO_ROUND;
conv_params.is_compound = is_compound;
conv_params.round_0 = ROUND0_BITS;
#if CONFIG_LOWPRECISION_BLEND
diff --git a/av1/common/reconinter.c b/av1/common/reconinter.c
index c6b1566..44370a2 100644
--- a/av1/common/reconinter.c
+++ b/av1/common/reconinter.c
@@ -726,6 +726,8 @@
int xs, int ys, int plane, const WarpTypesAllowed *warp_types, int p_col,
int p_row, int ref, MACROBLOCKD *xd) {
const MODE_INFO *mi = xd->mi[0];
+ (void)dst;
+ (void)dst_stride;
const INTERINTER_COMPOUND_DATA comp_data = {
mi->mbmi.wedge_index, mi->mbmi.wedge_sign, mi->mbmi.mask_type, xd->seg_mask,
@@ -736,8 +738,7 @@
// a temporary buffer, then will blend that temporary buffer with that from
// the other reference.
//
-// If the rounding mode is CONVOLVE_OPT_NO_ROUND
-// then the predictions are at 32-bits, so we'll need 32 bits per
-// pixel. Otherwise, we'll need up to 16 bits per pixel if
-// CONFIG_HIGHBITDEPTH or just 8 otherwise.
+// The predictions are always kept at 32 bits (rounding down to 16- or 8-bit
+// pixels no longer happens at this stage), so we need 32 bits per pixel
+// regardless of CONFIG_HIGHBITDEPTH.
#define INTER_PRED_BYTES_PER_PIXEL 4
@@ -751,15 +752,12 @@
: tmp_buf;
const int tmp_buf_stride = MAX_SB_SIZE;
- const int is_conv_no_round = conv_params->round == CONVOLVE_OPT_NO_ROUND;
CONV_BUF_TYPE *org_dst = conv_params->dst;
int org_dst_stride = conv_params->dst_stride;
CONV_BUF_TYPE *tmp_buf32 = (CONV_BUF_TYPE *)tmp_buf;
- if (is_conv_no_round) {
- conv_params->dst = tmp_buf32;
- conv_params->dst_stride = tmp_buf_stride;
- assert(conv_params->do_average == 0);
- }
+ conv_params->dst = tmp_buf32;
+ conv_params->dst_stride = tmp_buf_stride;
+ assert(conv_params->do_average == 0);
// This will generate a prediction in tmp_buf for the second reference
av1_make_inter_predictor(pre, pre_stride, tmp_dst, MAX_SB_SIZE, subpel_x,
@@ -768,38 +766,14 @@
xd);
if (!plane && comp_data.interinter_compound_type == COMPOUND_SEG) {
- if (is_conv_no_round) {
- build_compound_seg_mask_d32(comp_data.seg_mask, comp_data.mask_type,
- org_dst, org_dst_stride, tmp_buf32,
- tmp_buf_stride, mi->mbmi.sb_type, h, w,
- conv_params, xd->bd);
- } else {
- if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- build_compound_seg_mask_highbd(comp_data.seg_mask, comp_data.mask_type,
- dst, dst_stride, tmp_dst, MAX_SB_SIZE,
- mi->mbmi.sb_type, h, w, xd->bd);
- } else {
- build_compound_seg_mask(comp_data.seg_mask, comp_data.mask_type, dst,
- dst_stride, tmp_dst, MAX_SB_SIZE,
- mi->mbmi.sb_type, h, w);
- }
- }
+ build_compound_seg_mask_d32(
+ comp_data.seg_mask, comp_data.mask_type, org_dst, org_dst_stride,
+ tmp_buf32, tmp_buf_stride, mi->mbmi.sb_type, h, w, conv_params, xd->bd);
}
- if (is_conv_no_round) {
- build_masked_compound_no_round(org_dst, org_dst_stride, org_dst,
- org_dst_stride, tmp_buf32, tmp_buf_stride,
- &comp_data, mi->mbmi.sb_type, h, w);
-
- } else {
- if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
- build_masked_compound_highbd(dst, dst_stride, dst, dst_stride, tmp_dst,
- MAX_SB_SIZE, &comp_data, mi->mbmi.sb_type, h,
- w, xd->bd);
- else
- build_masked_compound(dst, dst_stride, dst, dst_stride, tmp_dst,
- MAX_SB_SIZE, &comp_data, mi->mbmi.sb_type, h, w);
- }
+ build_masked_compound_no_round(org_dst, org_dst_stride, org_dst,
+ org_dst_stride, tmp_buf32, tmp_buf_stride,
+ &comp_data, mi->mbmi.sb_type, h, w);
}
// TODO(sarahparker) av1_highbd_build_inter_predictor and
diff --git a/av1/common/reconinter.h b/av1/common/reconinter.h
index fdb2af9..dc07250 100644
--- a/av1/common/reconinter.h
+++ b/av1/common/reconinter.h
@@ -52,17 +52,9 @@
assert(conv_params->do_average == 0 || conv_params->do_average == 1);
assert(sf);
if (has_scale(xs, ys)) {
- // TODO(afergs, debargha): Use a different scale convolve function
- // that uses higher precision for subpel_x, subpel_y, xs, ys
- if (conv_params->round == CONVOLVE_OPT_NO_ROUND) {
- av1_convolve_2d_facade(src, src_stride, dst, dst_stride, w, h,
- interp_filters, subpel_x, xs, subpel_y, ys, 1,
- conv_params, sf);
- } else {
- assert(conv_params->round == CONVOLVE_OPT_ROUND);
- av1_convolve_scale(src, src_stride, dst, dst_stride, w, h, interp_filters,
- subpel_x, xs, subpel_y, ys, conv_params);
- }
+ av1_convolve_2d_facade(src, src_stride, dst, dst_stride, w, h,
+ interp_filters, subpel_x, xs, subpel_y, ys, 1,
+ conv_params, sf);
} else {
subpel_x >>= SCALE_EXTRA_BITS;
subpel_y >>= SCALE_EXTRA_BITS;
@@ -72,40 +64,9 @@
assert(subpel_y < SUBPEL_SHIFTS);
assert(xs <= SUBPEL_SHIFTS);
assert(ys <= SUBPEL_SHIFTS);
- if (conv_params->round == CONVOLVE_OPT_NO_ROUND) {
- av1_convolve_2d_facade(src, src_stride, dst, dst_stride, w, h,
- interp_filters, subpel_x, xs, subpel_y, ys, 0,
- conv_params, sf);
-
- } else {
- assert(conv_params->round == CONVOLVE_OPT_ROUND);
-
- InterpFilterParams filter_params_x, filter_params_y;
-#if CONFIG_SHORT_FILTER
- av1_get_convolve_filter_params(interp_filters, &filter_params_x,
- &filter_params_y, w, h);
-#else
- av1_get_convolve_filter_params(interp_filters, &filter_params_x,
- &filter_params_y);
-
-#endif
-
- if (w <= 2 || h <= 2) {
- av1_convolve_c(src, src_stride, dst, dst_stride, w, h, interp_filters,
- subpel_x, xs, subpel_y, ys, conv_params);
- } else if (filter_params_x.taps == SUBPEL_TAPS &&
- filter_params_y.taps == SUBPEL_TAPS) {
- const int16_t *kernel_x =
- av1_get_interp_filter_subpel_kernel(filter_params_x, subpel_x);
- const int16_t *kernel_y =
- av1_get_interp_filter_subpel_kernel(filter_params_y, subpel_y);
- sf->predict[subpel_x != 0][subpel_y != 0][conv_params->do_average](
- src, src_stride, dst, dst_stride, kernel_x, xs, kernel_y, ys, w, h);
- } else {
- av1_convolve(src, src_stride, dst, dst_stride, w, h, interp_filters,
- subpel_x, xs, subpel_y, ys, conv_params);
- }
- }
+ av1_convolve_2d_facade(src, src_stride, dst, dst_stride, w, h,
+ interp_filters, subpel_x, xs, subpel_y, ys, 0,
+ conv_params, sf);
}
}
@@ -116,19 +77,12 @@
int h, ConvolveParams *conv_params,
InterpFilters interp_filters, int xs,
int ys, int bd) {
- const int avg = conv_params->do_average;
- assert(avg == 0 || avg == 1);
-
+ assert(conv_params->do_average == 0 || conv_params->do_average == 1);
+ assert(sf);
if (has_scale(xs, ys)) {
- if (conv_params->round == CONVOLVE_OPT_NO_ROUND) {
- av1_highbd_convolve_2d_facade(src, src_stride, dst, dst_stride, w, h,
- interp_filters, subpel_x, xs, subpel_y, ys,
- 1, conv_params, sf, bd);
- } else {
- av1_highbd_convolve_scale(src, src_stride, dst, dst_stride, w, h,
- interp_filters, subpel_x, xs, subpel_y, ys, avg,
- bd);
- }
+ av1_highbd_convolve_2d_facade(src, src_stride, dst, dst_stride, w, h,
+ interp_filters, subpel_x, xs, subpel_y, ys, 1,
+ conv_params, sf, bd);
} else {
subpel_x >>= SCALE_EXTRA_BITS;
subpel_y >>= SCALE_EXTRA_BITS;
@@ -138,35 +92,9 @@
assert(subpel_y < SUBPEL_SHIFTS);
assert(xs <= SUBPEL_SHIFTS);
assert(ys <= SUBPEL_SHIFTS);
- if (conv_params->round == CONVOLVE_OPT_NO_ROUND) {
- av1_highbd_convolve_2d_facade(src, src_stride, dst, dst_stride, w, h,
- interp_filters, subpel_x, xs, subpel_y, ys,
- 0, conv_params, sf, bd);
- } else {
- InterpFilterParams filter_params_x, filter_params_y;
-#if CONFIG_SHORT_FILTER
- av1_get_convolve_filter_params(interp_filters, &filter_params_x,
- &filter_params_y, w, h);
-#else
- av1_get_convolve_filter_params(interp_filters, &filter_params_x,
- &filter_params_y);
-#endif
-
- if (filter_params_x.taps == SUBPEL_TAPS &&
- filter_params_y.taps == SUBPEL_TAPS && w > 2 && h > 2) {
- const int16_t *kernel_x =
- av1_get_interp_filter_subpel_kernel(filter_params_x, subpel_x);
- const int16_t *kernel_y =
- av1_get_interp_filter_subpel_kernel(filter_params_y, subpel_y);
- sf->highbd_predict[subpel_x != 0][subpel_y != 0][avg](
- src, src_stride, dst, dst_stride, kernel_x, xs, kernel_y, ys, w, h,
- bd);
- } else {
- av1_highbd_convolve(src, src_stride, dst, dst_stride, w, h,
- interp_filters, subpel_x, xs, subpel_y, ys, avg,
- bd);
- }
- }
+ av1_highbd_convolve_2d_facade(src, src_stride, dst, dst_stride, w, h,
+ interp_filters, subpel_x, xs, subpel_y, ys, 0,
+ conv_params, sf, bd);
}
}