Avoid sending compound type bits for sub-8x8 blocks

The only compound type available for sub-8x8 blocks is COMPOUND_AVERAGE,
so there is no need to signal the compound type in that case.
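
The gating amounts to: COMPOUND_AVERAGE is always available, while the
masked modes (COMPOUND_WEDGE, and COMPOUND_SEG under
CONFIG_COMPOUND_SEGMENT) only exist from 8x8 upward, so the
compound-type symbol is coded only when at least one masked mode is
usable for the block size. Below is a minimal illustrative sketch of
that logic, not the library code: BLOCK_SIZE and COMPOUND_TYPE are cut
down to a few values, a hypothetical has_wedge[] table stands in for
wedge_params_lookup[].bits, and the CONFIG_COMPOUND_SEGMENT case is
omitted for brevity.

    #include <assert.h>

    typedef enum { BLOCK_4X4, BLOCK_4X8, BLOCK_8X4, BLOCK_8X8,
                   BLOCK_SIZES } BLOCK_SIZE;
    typedef enum { COMPOUND_AVERAGE, COMPOUND_WEDGE,
                   COMPOUND_TYPES } COMPOUND_TYPE;

    /* Hypothetical stand-in for wedge_params_lookup[sb_type].bits > 0:
     * wedge masks only exist from 8x8 upward. */
    static const int has_wedge[BLOCK_SIZES] = { 0, 0, 0, 1 };

    static int compound_used(COMPOUND_TYPE type, BLOCK_SIZE bsize) {
      switch (type) {
        case COMPOUND_AVERAGE: return 1;               /* always available */
        case COMPOUND_WEDGE: return has_wedge[bsize];  /* 8x8 and up only */
        default: assert(0); return 0;
      }
    }

    static int any_masked_compound_used(BLOCK_SIZE bsize) {
      COMPOUND_TYPE t;
      for (t = COMPOUND_AVERAGE; t < COMPOUND_TYPES; t++)
        if (t != COMPOUND_AVERAGE && compound_used(t, bsize)) return 1;
      return 0;
    }

    /* Decoder-side decision: read the compound-type symbol only when a
     * masked mode is possible for this block size; otherwise infer
     * COMPOUND_AVERAGE without consuming any bits. */
    static COMPOUND_TYPE decode_compound_type(BLOCK_SIZE bsize,
                                              COMPOUND_TYPE (*read_symbol)(void)) {
      return any_masked_compound_used(bsize) ? read_symbol() : COMPOUND_AVERAGE;
    }

    static COMPOUND_TYPE fake_read(void) { return COMPOUND_WEDGE; }

    int main(void) {
      /* Sub-8x8 block: no bits are read, COMPOUND_AVERAGE is inferred. */
      assert(decode_compound_type(BLOCK_4X4, fake_read) == COMPOUND_AVERAGE);
      /* 8x8 block: the type comes from the (fake) bitstream reader. */
      assert(decode_compound_type(BLOCK_8X8, fake_read) == COMPOUND_WEDGE);
      return 0;
    }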

Change-Id: I90d0162e5f7f1ad205e65094293cde2a48eb77b1
diff --git a/av1/common/reconinter.h b/av1/common/reconinter.h
index cc4c858..1349f9c 100644
--- a/av1/common/reconinter.h
+++ b/av1/common/reconinter.h
@@ -177,13 +177,30 @@
 
 extern const wedge_params_type wedge_params_lookup[BLOCK_SIZES];
 
-static INLINE int get_wedge_bits_lookup(BLOCK_SIZE sb_type) {
-  return wedge_params_lookup[sb_type].bits;
+static INLINE int is_interinter_compound_used(COMPOUND_TYPE type,
+                                              BLOCK_SIZE sb_type) {
+  switch (type) {
+    case COMPOUND_AVERAGE: (void)sb_type; return 1;
+    case COMPOUND_WEDGE: return wedge_params_lookup[sb_type].bits > 0;
+#if CONFIG_COMPOUND_SEGMENT
+    case COMPOUND_SEG: return sb_type >= BLOCK_8X8;
+#endif  // CONFIG_COMPOUND_SEGMENT
+    default: assert(0); return 0;
+  }
 }
 
-static INLINE int is_interinter_wedge_used(BLOCK_SIZE sb_type) {
-  (void)sb_type;
-  return wedge_params_lookup[sb_type].bits > 0;
+static INLINE int is_any_masked_compound_used(BLOCK_SIZE sb_type) {
+  COMPOUND_TYPE comp_type;
+  for (comp_type = 0; comp_type < COMPOUND_TYPES; comp_type++) {
+    if (is_masked_compound_type(comp_type) &&
+        is_interinter_compound_used(comp_type, sb_type))
+      return 1;
+  }
+  return 0;
+}
+
+static INLINE int get_wedge_bits_lookup(BLOCK_SIZE sb_type) {
+  return wedge_params_lookup[sb_type].bits;
 }
 
 static INLINE int get_interinter_wedge_bits(BLOCK_SIZE sb_type) {
diff --git a/av1/decoder/decodemv.c b/av1/decoder/decodemv.c
index d59bd9e..bec0423 100644
--- a/av1/decoder/decodemv.c
+++ b/av1/decoder/decodemv.c
@@ -1940,29 +1940,33 @@
 #endif  // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
 
 #if CONFIG_EXT_INTER
-  mbmi->interinter_compound_data.type = COMPOUND_AVERAGE;
   if (cm->reference_mode != SINGLE_REFERENCE &&
       is_inter_compound_mode(mbmi->mode)
 #if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
       && mbmi->motion_mode == SIMPLE_TRANSLATION
 #endif  // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
       ) {
-    mbmi->interinter_compound_data.type = aom_read_tree(
-        r, av1_compound_type_tree, cm->fc->compound_type_prob[bsize], ACCT_STR);
+    if (is_any_masked_compound_used(bsize)) {
+      mbmi->interinter_compound_data.type =
+          aom_read_tree(r, av1_compound_type_tree,
+                        cm->fc->compound_type_prob[bsize], ACCT_STR);
+      if (mbmi->interinter_compound_data.type == COMPOUND_WEDGE) {
+        mbmi->interinter_compound_data.wedge_index =
+            aom_read_literal(r, get_wedge_bits_lookup(bsize), ACCT_STR);
+        mbmi->interinter_compound_data.wedge_sign = aom_read_bit(r, ACCT_STR);
+      }
+#if CONFIG_COMPOUND_SEGMENT
+      else if (mbmi->interinter_compound_data.type == COMPOUND_SEG) {
+        mbmi->interinter_compound_data.mask_type =
+            aom_read_literal(r, MAX_SEG_MASK_BITS, ACCT_STR);
+      }
+#endif  // CONFIG_COMPOUND_SEGMENT
+    } else {
+      mbmi->interinter_compound_data.type = COMPOUND_AVERAGE;
+    }
     if (xd->counts)
       xd->counts->compound_interinter[bsize]
                                      [mbmi->interinter_compound_data.type]++;
-    if (mbmi->interinter_compound_data.type == COMPOUND_WEDGE) {
-      mbmi->interinter_compound_data.wedge_index =
-          aom_read_literal(r, get_wedge_bits_lookup(bsize), ACCT_STR);
-      mbmi->interinter_compound_data.wedge_sign = aom_read_bit(r, ACCT_STR);
-    }
-#if CONFIG_COMPOUND_SEGMENT
-    else if (mbmi->interinter_compound_data.type == COMPOUND_SEG) {
-      mbmi->interinter_compound_data.mask_type =
-          aom_read_literal(r, MAX_SEG_MASK_BITS, ACCT_STR);
-    }
-#endif  // CONFIG_COMPOUND_SEGMENT
   }
 #endif  // CONFIG_EXT_INTER
 
diff --git a/av1/encoder/bitstream.c b/av1/encoder/bitstream.c
index 83a2278..4b9f4dc 100644
--- a/av1/encoder/bitstream.c
+++ b/av1/encoder/bitstream.c
@@ -1714,7 +1714,7 @@
 #if CONFIG_MOTION_VAR
         && mbmi->motion_mode == SIMPLE_TRANSLATION
 #endif  // CONFIG_MOTION_VAR
-        ) {
+        && is_any_masked_compound_used(bsize)) {
       av1_write_token(
           w, av1_compound_type_tree, cm->fc->compound_type_prob[bsize],
           &compound_type_encodings[mbmi->interinter_compound_data.type]);
diff --git a/av1/encoder/rdopt.c b/av1/encoder/rdopt.c
index 25d0af1..7a548d8 100644
--- a/av1/encoder/rdopt.c
+++ b/av1/encoder/rdopt.c
@@ -7534,7 +7534,7 @@
   int wedge_index = -1;
   int wedge_sign = 0;
 
-  assert(is_interinter_wedge_used(bsize));
+  assert(is_interinter_compound_used(COMPOUND_WEDGE, bsize));
 
   if (cpi->sf.fast_wedge_sign_estimate) {
     wedge_sign = estimate_wedge_sign(cpi, x, bsize, p0, bw, p1, bw);
@@ -8326,6 +8326,7 @@
     uint8_t *preds1[1] = { pred1 };
     int strides[1] = { bw };
     int tmp_rate_mv;
+    int masked_compound_used = is_any_masked_compound_used(bsize);
     COMPOUND_TYPE cur_type;
 
     best_mv[0].as_int = cur_mv[0].as_int;
@@ -8334,7 +8335,7 @@
     av1_cost_tokens(compound_type_cost, cm->fc->compound_type_prob[bsize],
                     av1_compound_type_tree);
 
-    if (is_interinter_wedge_used(bsize)) {
+    if (masked_compound_used) {
       // get inter predictors to use for masked compound modes
       av1_build_inter_predictors_for_planes_single_buf(
           xd, bsize, 0, 0, mi_row, mi_col, 0, preds0, strides);
@@ -8343,12 +8344,15 @@
     }
 
     for (cur_type = COMPOUND_AVERAGE; cur_type < COMPOUND_TYPES; cur_type++) {
+      if (!is_interinter_compound_used(cur_type, bsize)) break;
       tmp_rate_mv = rate_mv;
       best_rd_cur = INT64_MAX;
       mbmi->interinter_compound_data.type = cur_type;
       rs2 = av1_cost_literal(get_interinter_compound_type_bits(
                 bsize, mbmi->interinter_compound_data.type)) +
-            compound_type_cost[mbmi->interinter_compound_data.type];
+            (masked_compound_used
+                 ? compound_type_cost[mbmi->interinter_compound_data.type]
+                 : 0);
 
       switch (cur_type) {
         case COMPOUND_AVERAGE:
@@ -8363,7 +8367,6 @@
           best_rd_compound = best_rd_cur;
           break;
         case COMPOUND_WEDGE:
-          if (!is_interinter_wedge_used(bsize)) break;
           if (x->source_variance > cpi->sf.disable_wedge_search_var_thresh &&
               best_rd_compound / 3 < ref_best_rd) {
             best_rd_cur = build_and_cost_compound_wedge(
@@ -8373,7 +8376,6 @@
           break;
 #if CONFIG_COMPOUND_SEGMENT
         case COMPOUND_SEG:
-          if (!is_interinter_wedge_used(bsize)) break;
           if (x->source_variance > cpi->sf.disable_wedge_search_var_thresh &&
               best_rd_compound / 3 < ref_best_rd) {
             best_rd_cur = build_and_cost_compound_seg(