Revert "Move compound segment mask buffer from mbmi to xd"

This reverts commit 2cf73eb84e1d324c69e9b4a118621a274e761123.
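
This restores the compound segment mask buffer to the per-block mode
info: seg_mask becomes an inline member of INTERINTER_COMPOUND_DATA
again (embedded in MB_MODE_INFO as interinter_compound_data) rather
than a separate xd->seg_mask array on MACROBLOCKD, and call sites go
back to mbmi->interinter_compound_data.<field> instead of the
flattened mbmi fields. A sketch of the restored layout, per the
blockd.h hunk below:

    typedef struct {
      COMPOUND_TYPE type;
      int wedge_index;
      int wedge_sign;
    #if CONFIG_COMPOUND_SEGMENT
      SEG_MASK_TYPE mask_type;
      DECLARE_ALIGNED(16, uint8_t, seg_mask[2 * MAX_SB_SQUARE]);
    #endif  // CONFIG_COMPOUND_SEGMENT
    } INTERINTER_COMPOUND_DATA;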

BUG=https://bugs.chromium.org/p/aomedia/issues/detail?id=503

Change-Id: I2b3a4a77854044cb7951c8e64961e72255dfea85
diff --git a/av1/common/blockd.h b/av1/common/blockd.h
index 1d88fc6..89cf4b2 100644
--- a/av1/common/blockd.h
+++ b/av1/common/blockd.h
@@ -322,18 +322,14 @@
 } RD_STATS;
 
 #if CONFIG_EXT_INTER
-// This struct is used to group function args that are commonly
-// sent together in functions related to interinter compound modes
 typedef struct {
-#if CONFIG_WEDGE
+  COMPOUND_TYPE type;
   int wedge_index;
   int wedge_sign;
-#endif  // CONFIG_WEDGE
 #if CONFIG_COMPOUND_SEGMENT
   SEG_MASK_TYPE mask_type;
-  uint8_t *seg_mask;
+  DECLARE_ALIGNED(16, uint8_t, seg_mask[2 * MAX_SB_SQUARE]);
 #endif  // CONFIG_COMPOUND_SEGMENT
-  COMPOUND_TYPE interinter_compound_type;
 } INTERINTER_COMPOUND_DATA;
 #endif  // CONFIG_EXT_INTER
 
@@ -391,21 +387,12 @@
 #endif  // CONFIG_EXT_INTRA
 
 #if CONFIG_EXT_INTER
-  // interintra members
   INTERINTRA_MODE interintra_mode;
   // TODO(debargha): Consolidate these flags
   int use_wedge_interintra;
   int interintra_wedge_index;
   int interintra_wedge_sign;
-  // interinter members
-  COMPOUND_TYPE interinter_compound_type;
-#if CONFIG_WEDGE
-  int wedge_index;
-  int wedge_sign;
-#endif  // CONFIG_WEDGE
-#if CONFIG_COMPOUND_SEGMENT
-  SEG_MASK_TYPE mask_type;
-#endif  // CONFIG_COMPOUND_SEGMENT
+  INTERINTER_COMPOUND_DATA interinter_compound_data;
 #endif  // CONFIG_EXT_INTER
   MOTION_MODE motion_mode;
 #if CONFIG_MOTION_VAR
@@ -675,10 +662,6 @@
   const EobThresholdMD *eob_threshold_md;
 #endif
 
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SEGMENT
-  DECLARE_ALIGNED(16, uint8_t, seg_mask[2 * MAX_SB_SQUARE]);
-#endif  // CONFIG_EXT_INTER && CONFIG_COMPOUND_SEGMENT
-
 #if CONFIG_CFL
   CFL_CTX *cfl;
 #endif
diff --git a/av1/common/reconinter.c b/av1/common/reconinter.c
index 112b1c4..3394475 100644
--- a/av1/common/reconinter.c
+++ b/av1/common/reconinter.c
@@ -268,9 +268,9 @@
     uint8_t *mask_buffer, int h, int w, int stride,
 #endif
     BLOCK_SIZE sb_type) {
-  assert(is_masked_compound_type(comp_data->interinter_compound_type));
+  assert(is_masked_compound_type(comp_data->type));
   (void)sb_type;
-  switch (comp_data->interinter_compound_type) {
+  switch (comp_data->type) {
 #if CONFIG_WEDGE
     case COMPOUND_WEDGE:
       return av1_get_contiguous_soft_mask(comp_data->wedge_index,
@@ -286,9 +286,9 @@
 
 const uint8_t *av1_get_compound_type_mask(
     const INTERINTER_COMPOUND_DATA *const comp_data, BLOCK_SIZE sb_type) {
-  assert(is_masked_compound_type(comp_data->interinter_compound_type));
+  assert(is_masked_compound_type(comp_data->type));
   (void)sb_type;
-  switch (comp_data->interinter_compound_type) {
+  switch (comp_data->type) {
 #if CONFIG_WEDGE
     case COMPOUND_WEDGE:
       return av1_get_contiguous_soft_mask(comp_data->wedge_index,
@@ -596,7 +596,7 @@
   const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
   const uint8_t *mask;
   size_t mask_stride;
-  switch (comp_data->interinter_compound_type) {
+  switch (comp_data->type) {
     case COMPOUND_WEDGE:
       mask = av1_get_soft_mask(comp_data->wedge_index, comp_data->wedge_sign,
                                sb_type, wedge_offset_x, wedge_offset_y);
@@ -624,7 +624,7 @@
   const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
   const uint8_t *mask;
   size_t mask_stride;
-  switch (comp_data->interinter_compound_type) {
+  switch (comp_data->type) {
     case COMPOUND_WEDGE:
       mask = av1_get_soft_mask(comp_data->wedge_index, comp_data->wedge_sign,
                                sb_type, wedge_offset_x, wedge_offset_y);
@@ -699,17 +699,7 @@
 #endif  // CONFIG_GLOBAL_MOTION || CONFIG_WARPED_MOTION
                                      MACROBLOCKD *xd) {
   MODE_INFO *mi = xd->mi[0];
-  const INTERINTER_COMPOUND_DATA comp_data = {
-#if CONFIG_WEDGE
-    mi->mbmi.wedge_index,
-    mi->mbmi.wedge_sign,
-#endif  // CONFIG_WEDGE
-#if CONFIG_COMPOUND_SEGMENT
-    mi->mbmi.mask_type,
-    xd->seg_mask,
-#endif  // CONFIG_COMPOUND_SEGMENT
-    mi->mbmi.interinter_compound_type
-  };
+  INTERINTER_COMPOUND_DATA *comp_data = &mi->mbmi.interinter_compound_data;
 // The prediction filter types used here should be those for
 // the second reference block.
 #if CONFIG_DUAL_FILTER
@@ -736,13 +726,13 @@
 #endif
                            xs, ys, xd);
 #if CONFIG_COMPOUND_SEGMENT
-  if (!plane && comp_data.interinter_compound_type == COMPOUND_SEG) {
+  if (!plane && comp_data->type == COMPOUND_SEG) {
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
-      build_compound_seg_mask_highbd(comp_data.seg_mask, comp_data.mask_type,
+      build_compound_seg_mask_highbd(comp_data->seg_mask, comp_data->mask_type,
                                      dst, dst_stride, tmp_dst, MAX_SB_SIZE,
                                      mi->mbmi.sb_type, h, w, xd->bd);
     else
-      build_compound_seg_mask(comp_data.seg_mask, comp_data.mask_type, dst,
+      build_compound_seg_mask(comp_data->seg_mask, comp_data->mask_type, dst,
                               dst_stride, tmp_dst, MAX_SB_SIZE,
                               mi->mbmi.sb_type, h, w);
   }
@@ -751,20 +741,20 @@
 #if CONFIG_SUPERTX
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
     build_masked_compound_wedge_extend_highbd(
-        dst, dst_stride, dst, dst_stride, tmp_dst, MAX_SB_SIZE, &comp_data,
+        dst, dst_stride, dst, dst_stride, tmp_dst, MAX_SB_SIZE, comp_data,
         mi->mbmi.sb_type, wedge_offset_x, wedge_offset_y, h, w, xd->bd);
   else
     build_masked_compound_wedge_extend(
-        dst, dst_stride, dst, dst_stride, tmp_dst, MAX_SB_SIZE, &comp_data,
+        dst, dst_stride, dst, dst_stride, tmp_dst, MAX_SB_SIZE, comp_data,
         mi->mbmi.sb_type, wedge_offset_x, wedge_offset_y, h, w);
 #else
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
     build_masked_compound_highbd(dst, dst_stride, dst, dst_stride, tmp_dst,
-                                 MAX_SB_SIZE, &comp_data, mi->mbmi.sb_type, h,
-                                 w, xd->bd);
+                                 MAX_SB_SIZE, comp_data, mi->mbmi.sb_type, h, w,
+                                 xd->bd);
   else
     build_masked_compound(dst, dst_stride, dst, dst_stride, tmp_dst,
-                          MAX_SB_SIZE, &comp_data, mi->mbmi.sb_type, h, w);
+                          MAX_SB_SIZE, comp_data, mi->mbmi.sb_type, h, w);
 #endif  // CONFIG_SUPERTX
 
 #else  // CONFIG_HIGHBITDEPTH
@@ -779,18 +769,18 @@
 #endif
                            xs, ys, xd);
 #if CONFIG_COMPOUND_SEGMENT
-  if (!plane && comp_data.interinter_compound_type == COMPOUND_SEG)
-    build_compound_seg_mask(comp_data.seg_mask, comp_data.mask_type, dst,
+  if (!plane && comp_data->type == COMPOUND_SEG)
+    build_compound_seg_mask(comp_data->seg_mask, comp_data->mask_type, dst,
                             dst_stride, tmp_dst, MAX_SB_SIZE, mi->mbmi.sb_type,
                             h, w);
 #endif  // CONFIG_COMPOUND_SEGMENT
 #if CONFIG_SUPERTX
   build_masked_compound_wedge_extend(dst, dst_stride, dst, dst_stride, tmp_dst,
-                                     MAX_SB_SIZE, &comp_data, mi->mbmi.sb_type,
+                                     MAX_SB_SIZE, comp_data, mi->mbmi.sb_type,
                                      wedge_offset_x, wedge_offset_y, h, w);
 #else
   build_masked_compound(dst, dst_stride, dst, dst_stride, tmp_dst, MAX_SB_SIZE,
-                        &comp_data, mi->mbmi.sb_type, h, w);
+                        comp_data, mi->mbmi.sb_type, h, w);
 #endif  // CONFIG_SUPERTX
 #endif  // CONFIG_HIGHBITDEPTH
 #if CONFIG_COMPOUND_SEGMENT
@@ -997,7 +987,8 @@
                  (scaled_mv.col >> SUBPEL_BITS);
 
 #if CONFIG_EXT_INTER
-          if (ref && is_masked_compound_type(mi->mbmi.interinter_compound_type))
+          if (ref &&
+              is_masked_compound_type(mi->mbmi.interinter_compound_data.type))
             av1_make_masked_inter_predictor(
                 pre, pre_buf->stride, dst, dst_buf->stride, subpel_x, subpel_y,
                 sf, w, h, mi->mbmi.interp_filter, xs, ys,
@@ -1119,7 +1110,8 @@
 #endif  // CONFIG_GLOBAL_MOTION || CONFIG_WARPED_MOTION
       conv_params.ref = ref;
 #if CONFIG_EXT_INTER
-      if (ref && is_masked_compound_type(mi->mbmi.interinter_compound_type))
+      if (ref &&
+          is_masked_compound_type(mi->mbmi.interinter_compound_data.type))
         av1_make_masked_inter_predictor(
             pre[ref], pre_buf->stride, dst, dst_buf->stride,
             subpel_params[ref].subpel_x, subpel_params[ref].subpel_y, sf, w, h,
@@ -1876,8 +1868,8 @@
   if (is_interintra_pred(mbmi)) {
     mbmi->ref_frame[1] = NONE_FRAME;
   } else if (has_second_ref(mbmi) &&
-             is_masked_compound_type(mbmi->interinter_compound_type)) {
-    mbmi->interinter_compound_type = COMPOUND_AVERAGE;
+             is_masked_compound_type(mbmi->interinter_compound_data.type)) {
+    mbmi->interinter_compound_data.type = COMPOUND_AVERAGE;
     mbmi->ref_frame[1] = NONE_FRAME;
   }
 #endif  // CONFIG_EXT_INTER
@@ -2964,25 +2956,16 @@
   MACROBLOCKD_PLANE *const pd = &xd->plane[plane];
   struct buf_2d *const dst_buf = &pd->dst;
   uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
-  const INTERINTER_COMPOUND_DATA comp_data = {
-#if CONFIG_WEDGE
-    mbmi->wedge_index,
-    mbmi->wedge_sign,
-#endif  // CONFIG_WEDGE
-#if CONFIG_COMPOUND_SEGMENT
-    mbmi->mask_type,
-    xd->seg_mask,
-#endif  // CONFIG_COMPOUND_SEGMENT
-    mbmi->interinter_compound_type
-  };
+  INTERINTER_COMPOUND_DATA *comp_data = &mbmi->interinter_compound_data;
 
-  if (is_compound && is_masked_compound_type(mbmi->interinter_compound_type)) {
+  if (is_compound &&
+      is_masked_compound_type(mbmi->interinter_compound_data.type)) {
 #if CONFIG_COMPOUND_SEGMENT
-    if (!plane && comp_data.interinter_compound_type == COMPOUND_SEG) {
+    if (!plane && comp_data->type == COMPOUND_SEG) {
 #if CONFIG_HIGHBITDEPTH
       if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
         build_compound_seg_mask_highbd(
-            comp_data.seg_mask, comp_data.mask_type,
+            comp_data->seg_mask, comp_data->mask_type,
             CONVERT_TO_BYTEPTR(ext_dst0), ext_dst_stride0,
             CONVERT_TO_BYTEPTR(ext_dst1), ext_dst_stride1, mbmi->sb_type, h, w,
             xd->bd);
@@ -2999,26 +2982,26 @@
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
       build_masked_compound_wedge_extend_highbd(
           dst, dst_buf->stride, CONVERT_TO_BYTEPTR(ext_dst0), ext_dst_stride0,
-          CONVERT_TO_BYTEPTR(ext_dst1), ext_dst_stride1, &comp_data,
+          CONVERT_TO_BYTEPTR(ext_dst1), ext_dst_stride1, comp_data,
           mbmi->sb_type, wedge_offset_x, wedge_offset_y, h, w, xd->bd);
     else
 #endif  // CONFIG_HIGHBITDEPTH
       build_masked_compound_wedge_extend(
           dst, dst_buf->stride, ext_dst0, ext_dst_stride0, ext_dst1,
-          ext_dst_stride1, &comp_data, mbmi->sb_type, wedge_offset_x,
+          ext_dst_stride1, comp_data, mbmi->sb_type, wedge_offset_x,
           wedge_offset_y, h, w);
 #else
 #if CONFIG_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
       build_masked_compound_highbd(
           dst, dst_buf->stride, CONVERT_TO_BYTEPTR(ext_dst0), ext_dst_stride0,
-          CONVERT_TO_BYTEPTR(ext_dst1), ext_dst_stride1, &comp_data,
+          CONVERT_TO_BYTEPTR(ext_dst1), ext_dst_stride1, comp_data,
           mbmi->sb_type, h, w, xd->bd);
     else
 #endif  // CONFIG_HIGHBITDEPTH
       build_masked_compound(dst, dst_buf->stride, ext_dst0, ext_dst_stride0,
-                            ext_dst1, ext_dst_stride1, &comp_data,
-                            mbmi->sb_type, h, w);
+                            ext_dst1, ext_dst_stride1, comp_data, mbmi->sb_type,
+                            h, w);
 #endif  // CONFIG_SUPERTX
   } else {
 #if CONFIG_HIGHBITDEPTH
diff --git a/av1/decoder/decodemv.c b/av1/decoder/decodemv.c
index ec0f877..808078e 100644
--- a/av1/decoder/decodemv.c
+++ b/av1/decoder/decodemv.c
@@ -2192,7 +2192,7 @@
 #endif  // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
 
 #if CONFIG_EXT_INTER
-  mbmi->interinter_compound_type = COMPOUND_AVERAGE;
+  mbmi->interinter_compound_data.type = COMPOUND_AVERAGE;
   if (cm->reference_mode != SINGLE_REFERENCE &&
       is_inter_compound_mode(mbmi->mode)
 #if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
@@ -2201,27 +2201,29 @@
       ) {
     if (is_any_masked_compound_used(bsize)) {
 #if CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
-      mbmi->interinter_compound_type =
+      mbmi->interinter_compound_data.type =
           aom_read_tree(r, av1_compound_type_tree,
                         cm->fc->compound_type_prob[bsize], ACCT_STR);
 #endif  // CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
 #if CONFIG_WEDGE
-      if (mbmi->interinter_compound_type == COMPOUND_WEDGE) {
-        mbmi->wedge_index =
+      if (mbmi->interinter_compound_data.type == COMPOUND_WEDGE) {
+        mbmi->interinter_compound_data.wedge_index =
             aom_read_literal(r, get_wedge_bits_lookup(bsize), ACCT_STR);
-        mbmi->wedge_sign = aom_read_bit(r, ACCT_STR);
+        mbmi->interinter_compound_data.wedge_sign = aom_read_bit(r, ACCT_STR);
       }
 #endif  // CONFIG_WEDGE
 #if CONFIG_COMPOUND_SEGMENT
-      if (mbmi->interinter_compound_type == COMPOUND_SEG) {
-        mbmi->mask_type = aom_read_literal(r, MAX_SEG_MASK_BITS, ACCT_STR);
+      if (mbmi->interinter_compound_data.type == COMPOUND_SEG) {
+        mbmi->interinter_compound_data.mask_type =
+            aom_read_literal(r, MAX_SEG_MASK_BITS, ACCT_STR);
       }
 #endif  // CONFIG_COMPOUND_SEGMENT
     } else {
-      mbmi->interinter_compound_type = COMPOUND_AVERAGE;
+      mbmi->interinter_compound_data.type = COMPOUND_AVERAGE;
     }
     if (xd->counts)
-      xd->counts->compound_interinter[bsize][mbmi->interinter_compound_type]++;
+      xd->counts
+          ->compound_interinter[bsize][mbmi->interinter_compound_data.type]++;
   }
 #endif  // CONFIG_EXT_INTER
 
diff --git a/av1/encoder/bitstream.c b/av1/encoder/bitstream.c
index 7cc6179..f7fbee2 100644
--- a/av1/encoder/bitstream.c
+++ b/av1/encoder/bitstream.c
@@ -2082,19 +2082,21 @@
 #endif  // CONFIG_MOTION_VAR
         && is_any_masked_compound_used(bsize)) {
 #if CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
-      av1_write_token(w, av1_compound_type_tree,
-                      cm->fc->compound_type_prob[bsize],
-                      &compound_type_encodings[mbmi->interinter_compound_type]);
+      av1_write_token(
+          w, av1_compound_type_tree, cm->fc->compound_type_prob[bsize],
+          &compound_type_encodings[mbmi->interinter_compound_data.type]);
 #endif  // CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
 #if CONFIG_WEDGE
-      if (mbmi->interinter_compound_type == COMPOUND_WEDGE) {
-        aom_write_literal(w, mbmi->wedge_index, get_wedge_bits_lookup(bsize));
-        aom_write_bit(w, mbmi->wedge_sign);
+      if (mbmi->interinter_compound_data.type == COMPOUND_WEDGE) {
+        aom_write_literal(w, mbmi->interinter_compound_data.wedge_index,
+                          get_wedge_bits_lookup(bsize));
+        aom_write_bit(w, mbmi->interinter_compound_data.wedge_sign);
       }
 #endif  // CONFIG_WEDGE
 #if CONFIG_COMPOUND_SEGMENT
-      if (mbmi->interinter_compound_type == COMPOUND_SEG) {
-        aom_write_literal(w, mbmi->mask_type, MAX_SEG_MASK_BITS);
+      if (mbmi->interinter_compound_data.type == COMPOUND_SEG) {
+        aom_write_literal(w, mbmi->interinter_compound_data.mask_type,
+                          MAX_SEG_MASK_BITS);
       }
 #endif  // CONFIG_COMPOUND_SEGMENT
     }
diff --git a/av1/encoder/encodeframe.c b/av1/encoder/encodeframe.c
index d254157..26459c9 100644
--- a/av1/encoder/encodeframe.c
+++ b/av1/encoder/encodeframe.c
@@ -2233,7 +2233,8 @@
             && mbmi->motion_mode == SIMPLE_TRANSLATION
 #endif  // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
             ) {
-          counts->compound_interinter[bsize][mbmi->interinter_compound_type]++;
+          counts->compound_interinter[bsize]
+                                     [mbmi->interinter_compound_data.type]++;
         }
 #endif  // CONFIG_EXT_INTER
       }
diff --git a/av1/encoder/rdopt.c b/av1/encoder/rdopt.c
index a1096f7..250d3ee 100644
--- a/av1/encoder/rdopt.c
+++ b/av1/encoder/rdopt.c
@@ -7562,11 +7562,11 @@
 }
 
 static int64_t pick_interinter_wedge(const AV1_COMP *const cpi,
-                                     MACROBLOCK *const x,
+                                     const MACROBLOCK *const x,
                                      const BLOCK_SIZE bsize,
                                      const uint8_t *const p0,
                                      const uint8_t *const p1) {
-  MACROBLOCKD *const xd = &x->e_mbd;
+  const MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
   const int bw = block_size_wide[bsize];
 
@@ -7583,18 +7583,19 @@
     rd = pick_wedge(cpi, x, bsize, p0, p1, &wedge_sign, &wedge_index);
   }
 
-  mbmi->wedge_sign = wedge_sign;
-  mbmi->wedge_index = wedge_index;
+  mbmi->interinter_compound_data.wedge_sign = wedge_sign;
+  mbmi->interinter_compound_data.wedge_index = wedge_index;
   return rd;
 }
 #endif  // CONFIG_WEDGE
 
 #if CONFIG_COMPOUND_SEGMENT
 static int64_t pick_interinter_seg(const AV1_COMP *const cpi,
-                                   MACROBLOCK *const x, const BLOCK_SIZE bsize,
+                                   const MACROBLOCK *const x,
+                                   const BLOCK_SIZE bsize,
                                    const uint8_t *const p0,
                                    const uint8_t *const p1) {
-  MACROBLOCKD *const xd = &x->e_mbd;
+  const MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
   const struct buf_2d *const src = &x->plane[0].src;
   const int bw = block_size_wide[bsize];
@@ -7613,6 +7614,7 @@
 #else
   const int bd_round = 0;
 #endif  // CONFIG_HIGHBITDEPTH
+  INTERINTER_COMPOUND_DATA *comp_data = &mbmi->interinter_compound_data;
   DECLARE_ALIGNED(32, int16_t, r0[MAX_SB_SQUARE]);
   DECLARE_ALIGNED(32, int16_t, r1[MAX_SB_SQUARE]);
   DECLARE_ALIGNED(32, int16_t, d10[MAX_SB_SQUARE]);
@@ -7639,15 +7641,15 @@
 #if CONFIG_HIGHBITDEPTH
     if (hbd)
       build_compound_seg_mask_highbd(
-          xd->seg_mask, cur_mask_type, CONVERT_TO_BYTEPTR(p0), bw,
+          comp_data->seg_mask, cur_mask_type, CONVERT_TO_BYTEPTR(p0), bw,
           CONVERT_TO_BYTEPTR(p1), bw, bsize, bh, bw, xd->bd);
     else
 #endif  // CONFIG_HIGHBITDEPTH
-      build_compound_seg_mask(xd->seg_mask, cur_mask_type, p0, bw, p1, bw,
-                              bsize, bh, bw);
+      build_compound_seg_mask(comp_data->seg_mask, cur_mask_type, p0, bw, p1,
+                              bw, bsize, bh, bw);
 
     // compute rd for mask
-    sse = av1_wedge_sse_from_residuals(r1, d10, xd->seg_mask, N);
+    sse = av1_wedge_sse_from_residuals(r1, d10, comp_data->seg_mask, N);
     sse = ROUND_POWER_OF_TWO(sse, bd_round);
 
     model_rd_from_sse(cpi, xd, bsize, 0, sse, &rate, &dist);
@@ -7660,16 +7662,16 @@
   }
 
   // make final mask
-  mbmi->mask_type = best_mask_type;
+  comp_data->mask_type = best_mask_type;
 #if CONFIG_HIGHBITDEPTH
   if (hbd)
     build_compound_seg_mask_highbd(
-        xd->seg_mask, mbmi->mask_type, CONVERT_TO_BYTEPTR(p0), bw,
+        comp_data->seg_mask, comp_data->mask_type, CONVERT_TO_BYTEPTR(p0), bw,
         CONVERT_TO_BYTEPTR(p1), bw, bsize, bh, bw, xd->bd);
   else
 #endif  // CONFIG_HIGHBITDEPTH
-    build_compound_seg_mask(xd->seg_mask, mbmi->mask_type, p0, bw, p1, bw,
-                            bsize, bh, bw);
+    build_compound_seg_mask(comp_data->seg_mask, comp_data->mask_type, p0, bw,
+                            p1, bw, bsize, bh, bw);
 
   return best_rd;
 }
@@ -7698,12 +7700,13 @@
 #endif  // CONFIG_WEDGE && CONFIG_INTERINTRA
 
 #if CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
-static int64_t pick_interinter_mask(const AV1_COMP *const cpi, MACROBLOCK *x,
+static int64_t pick_interinter_mask(const AV1_COMP *const cpi,
+                                    const MACROBLOCK *const x,
                                     const BLOCK_SIZE bsize,
                                     const uint8_t *const p0,
                                     const uint8_t *const p1) {
   const COMPOUND_TYPE compound_type =
-      x->e_mbd.mi[0]->mbmi.interinter_compound_type;
+      x->e_mbd.mi[0]->mbmi.interinter_compound_data.type;
   switch (compound_type) {
 #if CONFIG_WEDGE
     case COMPOUND_WEDGE: return pick_interinter_wedge(cpi, x, bsize, p0, p1);
@@ -7720,35 +7723,24 @@
                                              const BLOCK_SIZE bsize,
                                              const int this_mode, int mi_row,
                                              int mi_col) {
-  MACROBLOCKD *const xd = &x->e_mbd;
+  const MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
   int_mv tmp_mv[2];
   int rate_mvs[2], tmp_rate_mv = 0;
-  const INTERINTER_COMPOUND_DATA compound_data = {
-#if CONFIG_WEDGE
-    mbmi->wedge_index,
-    mbmi->wedge_sign,
-#endif  // CONFIG_WEDGE
-#if CONFIG_COMPOUND_SEGMENT
-    mbmi->mask_type,
-    xd->seg_mask,
-#endif  // CONFIG_COMPOUND_SEGMENT
-    mbmi->interinter_compound_type
-  };
   if (this_mode == NEW_NEWMV) {
-    do_masked_motion_search_indexed(cpi, x, &compound_data, bsize, mi_row,
-                                    mi_col, tmp_mv, rate_mvs, 2);
+    do_masked_motion_search_indexed(cpi, x, &mbmi->interinter_compound_data,
+                                    bsize, mi_row, mi_col, tmp_mv, rate_mvs, 2);
     tmp_rate_mv = rate_mvs[0] + rate_mvs[1];
     mbmi->mv[0].as_int = tmp_mv[0].as_int;
     mbmi->mv[1].as_int = tmp_mv[1].as_int;
   } else if (this_mode == NEW_NEARESTMV || this_mode == NEW_NEARMV) {
-    do_masked_motion_search_indexed(cpi, x, &compound_data, bsize, mi_row,
-                                    mi_col, tmp_mv, rate_mvs, 0);
+    do_masked_motion_search_indexed(cpi, x, &mbmi->interinter_compound_data,
+                                    bsize, mi_row, mi_col, tmp_mv, rate_mvs, 0);
     tmp_rate_mv = rate_mvs[0];
     mbmi->mv[0].as_int = tmp_mv[0].as_int;
   } else if (this_mode == NEAREST_NEWMV || this_mode == NEAR_NEWMV) {
-    do_masked_motion_search_indexed(cpi, x, &compound_data, bsize, mi_row,
-                                    mi_col, tmp_mv, rate_mvs, 1);
+    do_masked_motion_search_indexed(cpi, x, &mbmi->interinter_compound_data,
+                                    bsize, mi_row, mi_col, tmp_mv, rate_mvs, 1);
     tmp_rate_mv = rate_mvs[1];
     mbmi->mv[1].as_int = tmp_mv[1].as_int;
   }
@@ -7768,7 +7760,7 @@
   int64_t rd = INT64_MAX;
   int tmp_skip_txfm_sb;
   int64_t tmp_skip_sse_sb;
-  const COMPOUND_TYPE compound_type = mbmi->interinter_compound_type;
+  const COMPOUND_TYPE compound_type = mbmi->interinter_compound_data.type;
 
   best_rd_cur = pick_interinter_mask(cpi, x, bsize, *preds0, *preds1);
   best_rd_cur += RDCOST(x->rdmult, x->rddiv, rs2 + rate_mv, 0);
@@ -8487,7 +8479,7 @@
   *args->compmode_interintra_cost = 0;
   mbmi->use_wedge_interintra = 0;
   *args->compmode_interinter_cost = 0;
-  mbmi->interinter_compound_type = COMPOUND_AVERAGE;
+  mbmi->interinter_compound_data.type = COMPOUND_AVERAGE;
 
   // is_comp_interintra_pred implies !is_comp_pred
   assert(!is_comp_interintra_pred || (!is_comp_pred));
@@ -8709,13 +8701,7 @@
 
     best_mv[0].as_int = cur_mv[0].as_int;
     best_mv[1].as_int = cur_mv[1].as_int;
-    memset(&best_compound_data, 0, sizeof(best_compound_data));
-#if CONFIG_COMPOUND_SEGMENT
-    uint8_t tmp_mask_buf[2 * MAX_SB_SQUARE];
-    best_compound_data.seg_mask = tmp_mask_buf;
-#endif  // CONFIG_COMPOUND_SEGMENT
-    av1_cost_tokens(compound_type_cost, cm->fc->compound_type_prob[bsize],
-                    av1_compound_type_tree);
+    memset(&best_compound_data, 0, sizeof(INTERINTER_COMPOUND_DATA));
 
     if (masked_compound_used) {
       av1_cost_tokens(compound_type_cost, cm->fc->compound_type_prob[bsize],
@@ -8731,11 +8717,11 @@
       if (!is_interinter_compound_used(cur_type, bsize)) break;
       tmp_rate_mv = rate_mv;
       best_rd_cur = INT64_MAX;
-      mbmi->interinter_compound_type = cur_type;
+      mbmi->interinter_compound_data.type = cur_type;
       rs2 = av1_cost_literal(get_interinter_compound_type_bits(
-                bsize, mbmi->interinter_compound_type)) +
+                bsize, mbmi->interinter_compound_data.type)) +
             (masked_compound_used
-                 ? compound_type_cost[mbmi->interinter_compound_type]
+                 ? compound_type_cost[mbmi->interinter_compound_data.type]
                  : 0);
 
       switch (cur_type) {
@@ -8775,17 +8761,8 @@
 
       if (best_rd_cur < best_rd_compound) {
         best_rd_compound = best_rd_cur;
-#if CONFIG_WEDGE
-        best_compound_data.wedge_index = mbmi->wedge_index;
-        best_compound_data.wedge_sign = mbmi->wedge_sign;
-#endif  // CONFIG_WEDGE
-#if CONFIG_COMPOUND_SEGMENT
-        best_compound_data.mask_type = mbmi->mask_type;
-        memcpy(best_compound_data.seg_mask, xd->seg_mask,
-               2 * MAX_SB_SQUARE * sizeof(uint8_t));
-#endif  // CONFIG_COMPOUND_SEGMENT
-        best_compound_data.interinter_compound_type =
-            mbmi->interinter_compound_type;
+        memcpy(&best_compound_data, &mbmi->interinter_compound_data,
+               sizeof(best_compound_data));
         if (have_newmv_in_inter_mode(this_mode)) {
           if (use_masked_motion_search(cur_type)) {
             best_tmp_rate_mv = tmp_rate_mv;
@@ -8801,23 +8778,14 @@
       mbmi->mv[0].as_int = cur_mv[0].as_int;
       mbmi->mv[1].as_int = cur_mv[1].as_int;
     }
-#if CONFIG_WEDGE
-    mbmi->wedge_index = best_compound_data.wedge_index;
-    mbmi->wedge_sign = best_compound_data.wedge_sign;
-#endif  // CONFIG_WEDGE
-#if CONFIG_COMPOUND_SEGMENT
-    mbmi->mask_type = best_compound_data.mask_type;
-    memcpy(xd->seg_mask, best_compound_data.seg_mask,
-           2 * MAX_SB_SQUARE * sizeof(uint8_t));
-#endif  // CONFIG_COMPOUND_SEGMENT
-    mbmi->interinter_compound_type =
-        best_compound_data.interinter_compound_type;
+    memcpy(&mbmi->interinter_compound_data, &best_compound_data,
+           sizeof(INTERINTER_COMPOUND_DATA));
     if (have_newmv_in_inter_mode(this_mode)) {
       mbmi->mv[0].as_int = best_mv[0].as_int;
       mbmi->mv[1].as_int = best_mv[1].as_int;
       xd->mi[0]->bmi[0].as_mv[0].as_int = mbmi->mv[0].as_int;
       xd->mi[0]->bmi[0].as_mv[1].as_int = mbmi->mv[1].as_int;
-      if (use_masked_motion_search(mbmi->interinter_compound_type)) {
+      if (use_masked_motion_search(mbmi->interinter_compound_data.type)) {
         rd_stats->rate += best_tmp_rate_mv - rate_mv;
         rate_mv = best_tmp_rate_mv;
       }
@@ -8832,9 +8800,9 @@
 
     *args->compmode_interinter_cost =
         av1_cost_literal(get_interinter_compound_type_bits(
-            bsize, mbmi->interinter_compound_type)) +
+            bsize, mbmi->interinter_compound_data.type)) +
         (masked_compound_used
-             ? compound_type_cost[mbmi->interinter_compound_type]
+             ? compound_type_cost[mbmi->interinter_compound_data.type]
              : 0);
   }
 
@@ -11667,7 +11635,7 @@
 #endif  // CONFIG_FILTER_INTRA
   mbmi->motion_mode = SIMPLE_TRANSLATION;
 #if CONFIG_EXT_INTER
-  mbmi->interinter_compound_type = COMPOUND_AVERAGE;
+  mbmi->interinter_compound_data.type = COMPOUND_AVERAGE;
   mbmi->use_wedge_interintra = 0;
 #endif  // CONFIG_EXT_INTER
 #if CONFIG_WARPED_MOTION