Add temporary dummy mask for compound segmentation
This blends the two predictors in compound prediction using a
segmentation mask that is, for now, filled with arbitrary placeholder
weights. The real mask will be computed from a color segmentation in a
follow-up patch.
Change-Id: I2d24cf27a8589211f8a70779a5be2d61746406b9
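
The seg_mask weights are consumed by the standard A64 blend, where each
per-pixel weight is out of AOM_BLEND_A64_MAX_ALPHA (64). Below is a minimal
sketch of that blend, assuming the usual round-and-shift semantics of
aom_blend_a64_mask; the constant and kernel are illustrative stand-ins, not
the libaom implementation:

#include <stdint.h>

#define MAX_ALPHA 64 /* stand-in for AOM_BLEND_A64_MAX_ALPHA */

/* Blend two predictors with per-pixel weights in [0, MAX_ALPHA]. */
static void blend_a64_sketch(uint8_t *dst, int dst_stride, const uint8_t *src0,
                             int src0_stride, const uint8_t *src1,
                             int src1_stride, const uint8_t *mask,
                             int mask_stride, int h, int w) {
  for (int i = 0; i < h; ++i) {
    for (int j = 0; j < w; ++j) {
      const int m = mask[i * mask_stride + j]; /* weight on src0 */
      /* Rounded weighted average: (m*src0 + (64-m)*src1 + 32) >> 6 */
      dst[i * dst_stride + j] =
          (uint8_t)((m * src0[i * src0_stride + j] +
                     (MAX_ALPHA - m) * src1[i * src1_stride + j] +
                     MAX_ALPHA / 2) >>
                    6);
    }
  }
}

With the placeholder mask in this patch, m is 45 everywhere, so every output
pixel is (45 * src0 + 19 * src1 + 32) >> 6.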
diff --git a/av1/common/reconinter.c b/av1/common/reconinter.c
index b9df96b..a8b21e3 100644
--- a/av1/common/reconinter.c
+++ b/av1/common/reconinter.c
@@ -255,18 +255,44 @@
// TODO(sarahparker) this needs to be extended for other experiments and
// is currently only intended for ext_inter alone
#if CONFIG_EXT_INTER
-const uint8_t *av1_get_compound_type_mask(
- const INTERINTER_COMPOUND_DATA *const comp_data, BLOCK_SIZE sb_type,
- int invert) {
+const uint8_t *av1_get_compound_type_mask(INTERINTER_COMPOUND_DATA *comp_data,
+ BLOCK_SIZE sb_type, int invert) {
assert(is_masked_compound_type(comp_data->type));
switch (comp_data->type) {
case COMPOUND_WEDGE:
return av1_get_contiguous_soft_mask(
comp_data->wedge_index,
invert ? !comp_data->wedge_sign : comp_data->wedge_sign, sb_type);
+#if CONFIG_COMPOUND_SEGMENT
+ case COMPOUND_SEG:
+ if (invert) return comp_data->seg_mask[!comp_data->which];
+ return comp_data->seg_mask[comp_data->which];
+#endif // CONFIG_COMPOUND_SEGMENT
default: assert(0); return NULL;
}
}
+
+#if CONFIG_COMPOUND_SEGMENT
+// Temporary placeholder mask; the real one will come from a color segmentation
+void build_compound_seg_mask(INTERINTER_COMPOUND_DATA *comp_data,
+ const uint8_t *src0, int src0_stride,
+ const uint8_t *src1, int src1_stride,
+ BLOCK_SIZE sb_type, int h, int w) {
+ int block_stride = block_size_wide[sb_type];
+ int i, j;
+ (void)src0;
+ (void)src1;
+ (void)src0_stride;
+ (void)src1_stride;
+ for (i = 0; i < h; ++i)
+ for (j = 0; j < w; ++j) {
+      // seg_mask[0] (which == 0) favors the first predictor: 45/64 vs 19/64
+ comp_data->seg_mask[0][i * block_stride + j] = 45;
+ comp_data->seg_mask[1][i * block_stride + j] =
+ AOM_BLEND_A64_MAX_ALPHA - 45;
+ }
+}
+#endif // CONFIG_COMPOUND_SEGMENT
#endif // CONFIG_EXT_INTER
static void init_wedge_master_masks() {
@@ -396,11 +422,11 @@
#endif // CONFIG_AOM_HIGHBITDEPTH
#endif // CONFIG_SUPERTX
-static void build_masked_compound(
- uint8_t *dst, int dst_stride, const uint8_t *src0, int src0_stride,
- const uint8_t *src1, int src1_stride,
- const INTERINTER_COMPOUND_DATA *const comp_data, BLOCK_SIZE sb_type, int h,
- int w) {
+static void build_masked_compound(uint8_t *dst, int dst_stride,
+ const uint8_t *src0, int src0_stride,
+ const uint8_t *src1, int src1_stride,
+ INTERINTER_COMPOUND_DATA *comp_data,
+ BLOCK_SIZE sb_type, int h, int w) {
// Derive subsampling from h and w passed in. May be refactored to
// pass in subsampling factors directly.
const int subh = (2 << b_height_log2_lookup[sb_type]) == h;
@@ -441,10 +467,12 @@
#if CONFIG_SUPERTX
int wedge_offset_x, int wedge_offset_y,
#endif // CONFIG_SUPERTX
- const MACROBLOCKD *xd) {
- const MODE_INFO *mi = xd->mi[0];
- const INTERINTER_COMPOUND_DATA *const comp_data =
- &mi->mbmi.interinter_compound_data;
+#if CONFIG_COMPOUND_SEGMENT
+ int plane,
+#endif // CONFIG_COMPOUND_SEGMENT
+ MACROBLOCKD *xd) {
+ MODE_INFO *mi = xd->mi[0];
+ INTERINTER_COMPOUND_DATA *comp_data = &mi->mbmi.interinter_compound_data;
// The prediction filter types used here should be those for
// the second reference block.
#if CONFIG_DUAL_FILTER
@@ -492,6 +520,11 @@
comp_data->wedge_sign, mi->mbmi.sb_type,
wedge_offset_x, wedge_offset_y, h, w);
#else
+#if CONFIG_COMPOUND_SEGMENT
+ if (!plane && comp_data->type == COMPOUND_SEG)
+ build_compound_seg_mask(comp_data, dst, dst_stride, tmp_dst, MAX_SB_SIZE,
+ mi->mbmi.sb_type, h, w);
+#endif // CONFIG_COMPOUND_SEGMENT
build_masked_compound(dst, dst_stride, dst, dst_stride, tmp_dst, MAX_SB_SIZE,
comp_data, mi->mbmi.sb_type, h, w);
#endif // CONFIG_SUPERTX
@@ -657,6 +690,9 @@
#if CONFIG_SUPERTX
wedge_offset_x, wedge_offset_y,
#endif // CONFIG_SUPERTX
+#if CONFIG_COMPOUND_SEGMENT
+ plane,
+#endif // CONFIG_COMPOUND_SEGMENT
xd);
else
#endif // CONFIG_EXT_INTER
@@ -726,6 +762,9 @@
#if CONFIG_SUPERTX
wedge_offset_x, wedge_offset_y,
#endif // CONFIG_SUPERTX
+#if CONFIG_COMPOUND_SEGMENT
+ plane,
+#endif // CONFIG_COMPOUND_SEGMENT
xd);
else
#else // CONFIG_EXT_INTER
@@ -2179,16 +2218,20 @@
static void build_wedge_inter_predictor_from_buf(
MACROBLOCKD *xd, int plane, int x, int y, int w, int h, uint8_t *ext_dst0,
int ext_dst_stride0, uint8_t *ext_dst1, int ext_dst_stride1) {
- const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const int is_compound = has_second_ref(mbmi);
MACROBLOCKD_PLANE *const pd = &xd->plane[plane];
struct buf_2d *const dst_buf = &pd->dst;
uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
- const INTERINTER_COMPOUND_DATA *const comp_data =
- &mbmi->interinter_compound_data;
+ INTERINTER_COMPOUND_DATA *comp_data = &mbmi->interinter_compound_data;
if (is_compound &&
is_masked_compound_type(mbmi->interinter_compound_data.type)) {
+#if CONFIG_COMPOUND_SEGMENT
+ if (!plane && comp_data->type == COMPOUND_SEG)
+ build_compound_seg_mask(comp_data, ext_dst0, ext_dst_stride0, ext_dst1,
+ ext_dst_stride1, mbmi->sb_type, h, w);
+#endif // CONFIG_COMPOUND_SEGMENT
#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
build_masked_compound_wedge_highbd(
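
For context, the COMPOUND_SEG case added to av1_get_compound_type_mask reduces
to picking one of two complementary weight planes. A standalone sketch under
simplified stand-in types (this is not libaom code):

#include <stdint.h>

#define SEG_MASK_PIXELS (64 * 64) /* assumed maximum block area */

typedef struct {
  uint8_t seg_mask[2][SEG_MASK_PIXELS]; /* plane 1 complements plane 0 */
  int which;                            /* plane applied by default */
} SegMaskSketch;

static const uint8_t *get_seg_mask(const SegMaskSketch *d, int invert) {
  /* XOR with invert flips to the complementary plane on request,
   * matching `invert ? seg_mask[!which] : seg_mask[which]` above. */
  return d->seg_mask[d->which ^ invert];
}

Because seg_mask[1] holds AOM_BLEND_A64_MAX_ALPHA minus seg_mask[0], blending
(src0, src1) with plane 0 gives the same result as blending (src1, src0) with
plane 1, which is the property the invert path relies on.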