ext-inter: Further cleanup

* Rename the 'masked_compound_*' functions to just 'masked_*'.
  The previous names were intended to be temporary, to distinguish
  the old and new masked motion search pipelines. But now that the
  old pipeline has been removed, we can reuse the old names.
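  For example, as seen in this patch:
    aom_masked_compound_sad16x16        -> aom_masked_sad16x16
    aom_highbd_masked_compound_sad16x16 -> aom_highbd_masked_sad16x16
  and the matching fn_ptr fields:
    mcsdf -> msdf,  mcsvf -> msvf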

* Simplify the new ext-inter compound motion search pipeline:
  compound_single_motion_search() now takes just the single MV it
  refines, rather than the whole frame_mv array, and drops the
  ref_mv_sub8x8 parameter, which was only used by the
  !CONFIG_CB4X4 paths (see the new signature below). The callers'
  which == 0 and which == 1 cases are also merged into one.
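  As it appears in this patch, the simplified signature is:

    static void compound_single_motion_search(
        const AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, MV *this_mv,
        int mi_row, int mi_col, const uint8_t *second_pred,
        const uint8_t *mask, int mask_stride, int *rate_mv,
        const int block, int ref_idx);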

* Harmonize names: Rename
  aom_highbd_masked_compound_sub_pixel_variance* to
  aom_highbd_8_masked_sub_pixel_variance*, to match the naming of
  the corresponding non-masked functions.
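  For example (the 8-bit case):
    aom_highbd_masked_compound_sub_pixel_variance16x16
        -> aom_highbd_8_masked_sub_pixel_variance16x16
  matching the non-masked aom_highbd_8_sub_pixel_variance* naming.
  The 10- and 12-bit variants already carried a bit-depth prefix,
  so they simply drop the '_compound' infix.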

Change-Id: I988768ffe2f42a942405b7d8e93a2757a012dca3
diff --git a/av1/encoder/encoder.c b/av1/encoder/encoder.c
index 5b2dcef..8901f0a 100644
--- a/av1/encoder/encoder.c
+++ b/av1/encoder/encoder.c
@@ -1172,8 +1172,8 @@
 
 #if CONFIG_EXT_INTER
 #define HIGHBD_MBFP(BT, MCSDF, MCSVF) \
-  cpi->fn_ptr[BT].mcsdf = MCSDF;      \
-  cpi->fn_ptr[BT].mcsvf = MCSVF;
+  cpi->fn_ptr[BT].msdf = MCSDF;       \
+  cpi->fn_ptr[BT].msvf = MCSVF;
 
 #define MAKE_MBFP_COMPOUND_SAD_WRAPPER(fnname)                           \
   static unsigned int fnname##_bits8(                                    \
@@ -1201,23 +1201,23 @@
   }
 
 #if CONFIG_EXT_PARTITION
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad128x128)
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad128x64)
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad64x128)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad128x128)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad128x64)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad64x128)
 #endif  // CONFIG_EXT_PARTITION
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad64x64)
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad64x32)
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad32x64)
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad32x32)
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad32x16)
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad16x32)
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad16x16)
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad16x8)
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad8x16)
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad8x8)
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad8x4)
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad4x8)
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad4x4)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad64x64)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad64x32)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad32x64)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad32x32)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad32x16)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad16x32)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad16x16)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad16x8)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad8x16)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad8x8)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad8x4)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad4x8)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad4x4)
 #endif  // CONFIG_EXT_INTER
 
 #if CONFIG_MOTION_VAR
@@ -1382,39 +1382,39 @@
 
 #if CONFIG_EXT_INTER
 #if CONFIG_EXT_PARTITION
-        HIGHBD_MBFP(BLOCK_128X128, aom_highbd_masked_compound_sad128x128_bits8,
-                    aom_highbd_masked_compound_sub_pixel_variance128x128)
-        HIGHBD_MBFP(BLOCK_128X64, aom_highbd_masked_compound_sad128x64_bits8,
-                    aom_highbd_masked_compound_sub_pixel_variance128x64)
-        HIGHBD_MBFP(BLOCK_64X128, aom_highbd_masked_compound_sad64x128_bits8,
-                    aom_highbd_masked_compound_sub_pixel_variance64x128)
+        HIGHBD_MBFP(BLOCK_128X128, aom_highbd_masked_sad128x128_bits8,
+                    aom_highbd_8_masked_sub_pixel_variance128x128)
+        HIGHBD_MBFP(BLOCK_128X64, aom_highbd_masked_sad128x64_bits8,
+                    aom_highbd_8_masked_sub_pixel_variance128x64)
+        HIGHBD_MBFP(BLOCK_64X128, aom_highbd_masked_sad64x128_bits8,
+                    aom_highbd_8_masked_sub_pixel_variance64x128)
 #endif  // CONFIG_EXT_PARTITION
-        HIGHBD_MBFP(BLOCK_64X64, aom_highbd_masked_compound_sad64x64_bits8,
-                    aom_highbd_masked_compound_sub_pixel_variance64x64)
-        HIGHBD_MBFP(BLOCK_64X32, aom_highbd_masked_compound_sad64x32_bits8,
-                    aom_highbd_masked_compound_sub_pixel_variance64x32)
-        HIGHBD_MBFP(BLOCK_32X64, aom_highbd_masked_compound_sad32x64_bits8,
-                    aom_highbd_masked_compound_sub_pixel_variance32x64)
-        HIGHBD_MBFP(BLOCK_32X32, aom_highbd_masked_compound_sad32x32_bits8,
-                    aom_highbd_masked_compound_sub_pixel_variance32x32)
-        HIGHBD_MBFP(BLOCK_32X16, aom_highbd_masked_compound_sad32x16_bits8,
-                    aom_highbd_masked_compound_sub_pixel_variance32x16)
-        HIGHBD_MBFP(BLOCK_16X32, aom_highbd_masked_compound_sad16x32_bits8,
-                    aom_highbd_masked_compound_sub_pixel_variance16x32)
-        HIGHBD_MBFP(BLOCK_16X16, aom_highbd_masked_compound_sad16x16_bits8,
-                    aom_highbd_masked_compound_sub_pixel_variance16x16)
-        HIGHBD_MBFP(BLOCK_8X16, aom_highbd_masked_compound_sad8x16_bits8,
-                    aom_highbd_masked_compound_sub_pixel_variance8x16)
-        HIGHBD_MBFP(BLOCK_16X8, aom_highbd_masked_compound_sad16x8_bits8,
-                    aom_highbd_masked_compound_sub_pixel_variance16x8)
-        HIGHBD_MBFP(BLOCK_8X8, aom_highbd_masked_compound_sad8x8_bits8,
-                    aom_highbd_masked_compound_sub_pixel_variance8x8)
-        HIGHBD_MBFP(BLOCK_4X8, aom_highbd_masked_compound_sad4x8_bits8,
-                    aom_highbd_masked_compound_sub_pixel_variance4x8)
-        HIGHBD_MBFP(BLOCK_8X4, aom_highbd_masked_compound_sad8x4_bits8,
-                    aom_highbd_masked_compound_sub_pixel_variance8x4)
-        HIGHBD_MBFP(BLOCK_4X4, aom_highbd_masked_compound_sad4x4_bits8,
-                    aom_highbd_masked_compound_sub_pixel_variance4x4)
+        HIGHBD_MBFP(BLOCK_64X64, aom_highbd_masked_sad64x64_bits8,
+                    aom_highbd_8_masked_sub_pixel_variance64x64)
+        HIGHBD_MBFP(BLOCK_64X32, aom_highbd_masked_sad64x32_bits8,
+                    aom_highbd_8_masked_sub_pixel_variance64x32)
+        HIGHBD_MBFP(BLOCK_32X64, aom_highbd_masked_sad32x64_bits8,
+                    aom_highbd_8_masked_sub_pixel_variance32x64)
+        HIGHBD_MBFP(BLOCK_32X32, aom_highbd_masked_sad32x32_bits8,
+                    aom_highbd_8_masked_sub_pixel_variance32x32)
+        HIGHBD_MBFP(BLOCK_32X16, aom_highbd_masked_sad32x16_bits8,
+                    aom_highbd_8_masked_sub_pixel_variance32x16)
+        HIGHBD_MBFP(BLOCK_16X32, aom_highbd_masked_sad16x32_bits8,
+                    aom_highbd_8_masked_sub_pixel_variance16x32)
+        HIGHBD_MBFP(BLOCK_16X16, aom_highbd_masked_sad16x16_bits8,
+                    aom_highbd_8_masked_sub_pixel_variance16x16)
+        HIGHBD_MBFP(BLOCK_8X16, aom_highbd_masked_sad8x16_bits8,
+                    aom_highbd_8_masked_sub_pixel_variance8x16)
+        HIGHBD_MBFP(BLOCK_16X8, aom_highbd_masked_sad16x8_bits8,
+                    aom_highbd_8_masked_sub_pixel_variance16x8)
+        HIGHBD_MBFP(BLOCK_8X8, aom_highbd_masked_sad8x8_bits8,
+                    aom_highbd_8_masked_sub_pixel_variance8x8)
+        HIGHBD_MBFP(BLOCK_4X8, aom_highbd_masked_sad4x8_bits8,
+                    aom_highbd_8_masked_sub_pixel_variance4x8)
+        HIGHBD_MBFP(BLOCK_8X4, aom_highbd_masked_sad8x4_bits8,
+                    aom_highbd_8_masked_sub_pixel_variance8x4)
+        HIGHBD_MBFP(BLOCK_4X4, aom_highbd_masked_sad4x4_bits8,
+                    aom_highbd_8_masked_sub_pixel_variance4x4)
 #endif  // CONFIG_EXT_INTER
 #if CONFIG_MOTION_VAR
 #if CONFIG_EXT_PARTITION
@@ -1589,39 +1589,39 @@
 
 #if CONFIG_EXT_INTER
 #if CONFIG_EXT_PARTITION
-        HIGHBD_MBFP(BLOCK_128X128, aom_highbd_masked_compound_sad128x128_bits10,
-                    aom_highbd_10_masked_compound_sub_pixel_variance128x128)
-        HIGHBD_MBFP(BLOCK_128X64, aom_highbd_masked_compound_sad128x64_bits10,
-                    aom_highbd_10_masked_compound_sub_pixel_variance128x64)
-        HIGHBD_MBFP(BLOCK_64X128, aom_highbd_masked_compound_sad64x128_bits10,
-                    aom_highbd_10_masked_compound_sub_pixel_variance64x128)
+        HIGHBD_MBFP(BLOCK_128X128, aom_highbd_masked_sad128x128_bits10,
+                    aom_highbd_10_masked_sub_pixel_variance128x128)
+        HIGHBD_MBFP(BLOCK_128X64, aom_highbd_masked_sad128x64_bits10,
+                    aom_highbd_10_masked_sub_pixel_variance128x64)
+        HIGHBD_MBFP(BLOCK_64X128, aom_highbd_masked_sad64x128_bits10,
+                    aom_highbd_10_masked_sub_pixel_variance64x128)
 #endif  // CONFIG_EXT_PARTITION
-        HIGHBD_MBFP(BLOCK_64X64, aom_highbd_masked_compound_sad64x64_bits10,
-                    aom_highbd_10_masked_compound_sub_pixel_variance64x64)
-        HIGHBD_MBFP(BLOCK_64X32, aom_highbd_masked_compound_sad64x32_bits10,
-                    aom_highbd_10_masked_compound_sub_pixel_variance64x32)
-        HIGHBD_MBFP(BLOCK_32X64, aom_highbd_masked_compound_sad32x64_bits10,
-                    aom_highbd_10_masked_compound_sub_pixel_variance32x64)
-        HIGHBD_MBFP(BLOCK_32X32, aom_highbd_masked_compound_sad32x32_bits10,
-                    aom_highbd_10_masked_compound_sub_pixel_variance32x32)
-        HIGHBD_MBFP(BLOCK_32X16, aom_highbd_masked_compound_sad32x16_bits10,
-                    aom_highbd_10_masked_compound_sub_pixel_variance32x16)
-        HIGHBD_MBFP(BLOCK_16X32, aom_highbd_masked_compound_sad16x32_bits10,
-                    aom_highbd_10_masked_compound_sub_pixel_variance16x32)
-        HIGHBD_MBFP(BLOCK_16X16, aom_highbd_masked_compound_sad16x16_bits10,
-                    aom_highbd_10_masked_compound_sub_pixel_variance16x16)
-        HIGHBD_MBFP(BLOCK_8X16, aom_highbd_masked_compound_sad8x16_bits10,
-                    aom_highbd_10_masked_compound_sub_pixel_variance8x16)
-        HIGHBD_MBFP(BLOCK_16X8, aom_highbd_masked_compound_sad16x8_bits10,
-                    aom_highbd_10_masked_compound_sub_pixel_variance16x8)
-        HIGHBD_MBFP(BLOCK_8X8, aom_highbd_masked_compound_sad8x8_bits10,
-                    aom_highbd_10_masked_compound_sub_pixel_variance8x8)
-        HIGHBD_MBFP(BLOCK_4X8, aom_highbd_masked_compound_sad4x8_bits10,
-                    aom_highbd_10_masked_compound_sub_pixel_variance4x8)
-        HIGHBD_MBFP(BLOCK_8X4, aom_highbd_masked_compound_sad8x4_bits10,
-                    aom_highbd_10_masked_compound_sub_pixel_variance8x4)
-        HIGHBD_MBFP(BLOCK_4X4, aom_highbd_masked_compound_sad4x4_bits10,
-                    aom_highbd_10_masked_compound_sub_pixel_variance4x4)
+        HIGHBD_MBFP(BLOCK_64X64, aom_highbd_masked_sad64x64_bits10,
+                    aom_highbd_10_masked_sub_pixel_variance64x64)
+        HIGHBD_MBFP(BLOCK_64X32, aom_highbd_masked_sad64x32_bits10,
+                    aom_highbd_10_masked_sub_pixel_variance64x32)
+        HIGHBD_MBFP(BLOCK_32X64, aom_highbd_masked_sad32x64_bits10,
+                    aom_highbd_10_masked_sub_pixel_variance32x64)
+        HIGHBD_MBFP(BLOCK_32X32, aom_highbd_masked_sad32x32_bits10,
+                    aom_highbd_10_masked_sub_pixel_variance32x32)
+        HIGHBD_MBFP(BLOCK_32X16, aom_highbd_masked_sad32x16_bits10,
+                    aom_highbd_10_masked_sub_pixel_variance32x16)
+        HIGHBD_MBFP(BLOCK_16X32, aom_highbd_masked_sad16x32_bits10,
+                    aom_highbd_10_masked_sub_pixel_variance16x32)
+        HIGHBD_MBFP(BLOCK_16X16, aom_highbd_masked_sad16x16_bits10,
+                    aom_highbd_10_masked_sub_pixel_variance16x16)
+        HIGHBD_MBFP(BLOCK_8X16, aom_highbd_masked_sad8x16_bits10,
+                    aom_highbd_10_masked_sub_pixel_variance8x16)
+        HIGHBD_MBFP(BLOCK_16X8, aom_highbd_masked_sad16x8_bits10,
+                    aom_highbd_10_masked_sub_pixel_variance16x8)
+        HIGHBD_MBFP(BLOCK_8X8, aom_highbd_masked_sad8x8_bits10,
+                    aom_highbd_10_masked_sub_pixel_variance8x8)
+        HIGHBD_MBFP(BLOCK_4X8, aom_highbd_masked_sad4x8_bits10,
+                    aom_highbd_10_masked_sub_pixel_variance4x8)
+        HIGHBD_MBFP(BLOCK_8X4, aom_highbd_masked_sad8x4_bits10,
+                    aom_highbd_10_masked_sub_pixel_variance8x4)
+        HIGHBD_MBFP(BLOCK_4X4, aom_highbd_masked_sad4x4_bits10,
+                    aom_highbd_10_masked_sub_pixel_variance4x4)
 #endif  // CONFIG_EXT_INTER
 #if CONFIG_MOTION_VAR
 #if CONFIG_EXT_PARTITION
@@ -1796,39 +1796,39 @@
 
 #if CONFIG_EXT_INTER
 #if CONFIG_EXT_PARTITION
-        HIGHBD_MBFP(BLOCK_128X128, aom_highbd_masked_compound_sad128x128_bits12,
-                    aom_highbd_12_masked_compound_sub_pixel_variance128x128)
-        HIGHBD_MBFP(BLOCK_128X64, aom_highbd_masked_compound_sad128x64_bits12,
-                    aom_highbd_12_masked_compound_sub_pixel_variance128x64)
-        HIGHBD_MBFP(BLOCK_64X128, aom_highbd_masked_compound_sad64x128_bits12,
-                    aom_highbd_12_masked_compound_sub_pixel_variance64x128)
+        HIGHBD_MBFP(BLOCK_128X128, aom_highbd_masked_sad128x128_bits12,
+                    aom_highbd_12_masked_sub_pixel_variance128x128)
+        HIGHBD_MBFP(BLOCK_128X64, aom_highbd_masked_sad128x64_bits12,
+                    aom_highbd_12_masked_sub_pixel_variance128x64)
+        HIGHBD_MBFP(BLOCK_64X128, aom_highbd_masked_sad64x128_bits12,
+                    aom_highbd_12_masked_sub_pixel_variance64x128)
 #endif  // CONFIG_EXT_PARTITION
-        HIGHBD_MBFP(BLOCK_64X64, aom_highbd_masked_compound_sad64x64_bits12,
-                    aom_highbd_12_masked_compound_sub_pixel_variance64x64)
-        HIGHBD_MBFP(BLOCK_64X32, aom_highbd_masked_compound_sad64x32_bits12,
-                    aom_highbd_12_masked_compound_sub_pixel_variance64x32)
-        HIGHBD_MBFP(BLOCK_32X64, aom_highbd_masked_compound_sad32x64_bits12,
-                    aom_highbd_12_masked_compound_sub_pixel_variance32x64)
-        HIGHBD_MBFP(BLOCK_32X32, aom_highbd_masked_compound_sad32x32_bits12,
-                    aom_highbd_12_masked_compound_sub_pixel_variance32x32)
-        HIGHBD_MBFP(BLOCK_32X16, aom_highbd_masked_compound_sad32x16_bits12,
-                    aom_highbd_12_masked_compound_sub_pixel_variance32x16)
-        HIGHBD_MBFP(BLOCK_16X32, aom_highbd_masked_compound_sad16x32_bits12,
-                    aom_highbd_12_masked_compound_sub_pixel_variance16x32)
-        HIGHBD_MBFP(BLOCK_16X16, aom_highbd_masked_compound_sad16x16_bits12,
-                    aom_highbd_12_masked_compound_sub_pixel_variance16x16)
-        HIGHBD_MBFP(BLOCK_8X16, aom_highbd_masked_compound_sad8x16_bits12,
-                    aom_highbd_12_masked_compound_sub_pixel_variance8x16)
-        HIGHBD_MBFP(BLOCK_16X8, aom_highbd_masked_compound_sad16x8_bits12,
-                    aom_highbd_12_masked_compound_sub_pixel_variance16x8)
-        HIGHBD_MBFP(BLOCK_8X8, aom_highbd_masked_compound_sad8x8_bits12,
-                    aom_highbd_12_masked_compound_sub_pixel_variance8x8)
-        HIGHBD_MBFP(BLOCK_4X8, aom_highbd_masked_compound_sad4x8_bits12,
-                    aom_highbd_12_masked_compound_sub_pixel_variance4x8)
-        HIGHBD_MBFP(BLOCK_8X4, aom_highbd_masked_compound_sad8x4_bits12,
-                    aom_highbd_12_masked_compound_sub_pixel_variance8x4)
-        HIGHBD_MBFP(BLOCK_4X4, aom_highbd_masked_compound_sad4x4_bits12,
-                    aom_highbd_12_masked_compound_sub_pixel_variance4x4)
+        HIGHBD_MBFP(BLOCK_64X64, aom_highbd_masked_sad64x64_bits12,
+                    aom_highbd_12_masked_sub_pixel_variance64x64)
+        HIGHBD_MBFP(BLOCK_64X32, aom_highbd_masked_sad64x32_bits12,
+                    aom_highbd_12_masked_sub_pixel_variance64x32)
+        HIGHBD_MBFP(BLOCK_32X64, aom_highbd_masked_sad32x64_bits12,
+                    aom_highbd_12_masked_sub_pixel_variance32x64)
+        HIGHBD_MBFP(BLOCK_32X32, aom_highbd_masked_sad32x32_bits12,
+                    aom_highbd_12_masked_sub_pixel_variance32x32)
+        HIGHBD_MBFP(BLOCK_32X16, aom_highbd_masked_sad32x16_bits12,
+                    aom_highbd_12_masked_sub_pixel_variance32x16)
+        HIGHBD_MBFP(BLOCK_16X32, aom_highbd_masked_sad16x32_bits12,
+                    aom_highbd_12_masked_sub_pixel_variance16x32)
+        HIGHBD_MBFP(BLOCK_16X16, aom_highbd_masked_sad16x16_bits12,
+                    aom_highbd_12_masked_sub_pixel_variance16x16)
+        HIGHBD_MBFP(BLOCK_8X16, aom_highbd_masked_sad8x16_bits12,
+                    aom_highbd_12_masked_sub_pixel_variance8x16)
+        HIGHBD_MBFP(BLOCK_16X8, aom_highbd_masked_sad16x8_bits12,
+                    aom_highbd_12_masked_sub_pixel_variance16x8)
+        HIGHBD_MBFP(BLOCK_8X8, aom_highbd_masked_sad8x8_bits12,
+                    aom_highbd_12_masked_sub_pixel_variance8x8)
+        HIGHBD_MBFP(BLOCK_4X8, aom_highbd_masked_sad4x8_bits12,
+                    aom_highbd_12_masked_sub_pixel_variance4x8)
+        HIGHBD_MBFP(BLOCK_8X4, aom_highbd_masked_sad8x4_bits12,
+                    aom_highbd_12_masked_sub_pixel_variance8x4)
+        HIGHBD_MBFP(BLOCK_4X4, aom_highbd_masked_sad4x4_bits12,
+                    aom_highbd_12_masked_sub_pixel_variance4x4)
 #endif  // CONFIG_EXT_INTER
 
 #if CONFIG_MOTION_VAR
@@ -2411,44 +2411,29 @@
 #endif  // CONFIG_MOTION_VAR
 
 #if CONFIG_EXT_INTER
-#define MBFP(BT, MCSDF, MCSVF)   \
-  cpi->fn_ptr[BT].mcsdf = MCSDF; \
-  cpi->fn_ptr[BT].mcsvf = MCSVF;
+#define MBFP(BT, MCSDF, MCSVF)  \
+  cpi->fn_ptr[BT].msdf = MCSDF; \
+  cpi->fn_ptr[BT].msvf = MCSVF;
 
 #if CONFIG_EXT_PARTITION
-  MBFP(BLOCK_128X128, aom_masked_compound_sad128x128,
-       aom_masked_compound_sub_pixel_variance128x128)
-  MBFP(BLOCK_128X64, aom_masked_compound_sad128x64,
-       aom_masked_compound_sub_pixel_variance128x64)
-  MBFP(BLOCK_64X128, aom_masked_compound_sad64x128,
-       aom_masked_compound_sub_pixel_variance64x128)
+  MBFP(BLOCK_128X128, aom_masked_sad128x128,
+       aom_masked_sub_pixel_variance128x128)
+  MBFP(BLOCK_128X64, aom_masked_sad128x64, aom_masked_sub_pixel_variance128x64)
+  MBFP(BLOCK_64X128, aom_masked_sad64x128, aom_masked_sub_pixel_variance64x128)
 #endif  // CONFIG_EXT_PARTITION
-  MBFP(BLOCK_64X64, aom_masked_compound_sad64x64,
-       aom_masked_compound_sub_pixel_variance64x64)
-  MBFP(BLOCK_64X32, aom_masked_compound_sad64x32,
-       aom_masked_compound_sub_pixel_variance64x32)
-  MBFP(BLOCK_32X64, aom_masked_compound_sad32x64,
-       aom_masked_compound_sub_pixel_variance32x64)
-  MBFP(BLOCK_32X32, aom_masked_compound_sad32x32,
-       aom_masked_compound_sub_pixel_variance32x32)
-  MBFP(BLOCK_32X16, aom_masked_compound_sad32x16,
-       aom_masked_compound_sub_pixel_variance32x16)
-  MBFP(BLOCK_16X32, aom_masked_compound_sad16x32,
-       aom_masked_compound_sub_pixel_variance16x32)
-  MBFP(BLOCK_16X16, aom_masked_compound_sad16x16,
-       aom_masked_compound_sub_pixel_variance16x16)
-  MBFP(BLOCK_16X8, aom_masked_compound_sad16x8,
-       aom_masked_compound_sub_pixel_variance16x8)
-  MBFP(BLOCK_8X16, aom_masked_compound_sad8x16,
-       aom_masked_compound_sub_pixel_variance8x16)
-  MBFP(BLOCK_8X8, aom_masked_compound_sad8x8,
-       aom_masked_compound_sub_pixel_variance8x8)
-  MBFP(BLOCK_4X8, aom_masked_compound_sad4x8,
-       aom_masked_compound_sub_pixel_variance4x8)
-  MBFP(BLOCK_8X4, aom_masked_compound_sad8x4,
-       aom_masked_compound_sub_pixel_variance8x4)
-  MBFP(BLOCK_4X4, aom_masked_compound_sad4x4,
-       aom_masked_compound_sub_pixel_variance4x4)
+  MBFP(BLOCK_64X64, aom_masked_sad64x64, aom_masked_sub_pixel_variance64x64)
+  MBFP(BLOCK_64X32, aom_masked_sad64x32, aom_masked_sub_pixel_variance64x32)
+  MBFP(BLOCK_32X64, aom_masked_sad32x64, aom_masked_sub_pixel_variance32x64)
+  MBFP(BLOCK_32X32, aom_masked_sad32x32, aom_masked_sub_pixel_variance32x32)
+  MBFP(BLOCK_32X16, aom_masked_sad32x16, aom_masked_sub_pixel_variance32x16)
+  MBFP(BLOCK_16X32, aom_masked_sad16x32, aom_masked_sub_pixel_variance16x32)
+  MBFP(BLOCK_16X16, aom_masked_sad16x16, aom_masked_sub_pixel_variance16x16)
+  MBFP(BLOCK_16X8, aom_masked_sad16x8, aom_masked_sub_pixel_variance16x8)
+  MBFP(BLOCK_8X16, aom_masked_sad8x16, aom_masked_sub_pixel_variance8x16)
+  MBFP(BLOCK_8X8, aom_masked_sad8x8, aom_masked_sub_pixel_variance8x8)
+  MBFP(BLOCK_4X8, aom_masked_sad4x8, aom_masked_sub_pixel_variance4x8)
+  MBFP(BLOCK_8X4, aom_masked_sad8x4, aom_masked_sub_pixel_variance8x4)
+  MBFP(BLOCK_4X4, aom_masked_sad4x4, aom_masked_sub_pixel_variance4x4)
 #endif  // CONFIG_EXT_INTER
 
 #if CONFIG_HIGHBITDEPTH
diff --git a/av1/encoder/mcomp.c b/av1/encoder/mcomp.c
index b727739..c4f475f 100644
--- a/av1/encoder/mcomp.c
+++ b/av1/encoder/mcomp.c
@@ -177,30 +177,30 @@
 
 /* checks if (r, c) has better score than previous best */
 #if CONFIG_EXT_INTER
-#define CHECK_BETTER(v, r, c)                                              \
-  if (c >= minc && c <= maxc && r >= minr && r <= maxr) {                  \
-    MV this_mv = { r, c };                                                 \
-    v = mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit);     \
-    if (second_pred == NULL)                                               \
-      thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r),   \
-                         src_address, src_stride, &sse);                   \
-    else if (mask)                                                         \
-      thismse = vfp->mcsvf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), \
-                           src_address, src_stride, second_pred, mask,     \
-                           mask_stride, invert_mask, &sse);                \
-    else                                                                   \
-      thismse = vfp->svaf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r),  \
-                          src_address, src_stride, &sse, second_pred);     \
-    v += thismse;                                                          \
-    if (v < besterr) {                                                     \
-      besterr = v;                                                         \
-      br = r;                                                              \
-      bc = c;                                                              \
-      *distortion = thismse;                                               \
-      *sse1 = sse;                                                         \
-    }                                                                      \
-  } else {                                                                 \
-    v = INT_MAX;                                                           \
+#define CHECK_BETTER(v, r, c)                                             \
+  if (c >= minc && c <= maxc && r >= minr && r <= maxr) {                 \
+    MV this_mv = { r, c };                                                \
+    v = mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit);    \
+    if (second_pred == NULL)                                              \
+      thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r),  \
+                         src_address, src_stride, &sse);                  \
+    else if (mask)                                                        \
+      thismse = vfp->msvf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), \
+                          src_address, src_stride, second_pred, mask,     \
+                          mask_stride, invert_mask, &sse);                \
+    else                                                                  \
+      thismse = vfp->svaf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), \
+                          src_address, src_stride, &sse, second_pred);    \
+    v += thismse;                                                         \
+    if (v < besterr) {                                                    \
+      besterr = v;                                                        \
+      br = r;                                                             \
+      bc = c;                                                             \
+      *distortion = thismse;                                              \
+      *sse1 = sse;                                                        \
+    }                                                                     \
+  } else {                                                                \
+    v = INT_MAX;                                                          \
   }
 #else
 #define CHECK_BETTER(v, r, c)                                             \
@@ -224,7 +224,7 @@
   } else {                                                                \
     v = INT_MAX;                                                          \
   }
-#endif
+#endif  // CONFIG_EXT_INTER
 
 #define CHECK_BETTER0(v, r, c) CHECK_BETTER(v, r, c)
 
@@ -272,7 +272,7 @@
   } else {                                                             \
     v = INT_MAX;                                                       \
   }
-#endif
+#endif  // CONFIG_EXT_INTER
 
 #define FIRST_LEVEL_CHECKS                                       \
   {                                                              \
@@ -861,9 +861,9 @@
                                src_address, src_stride, &sse);
 #if CONFIG_EXT_INTER
           else if (mask)
-            thismse = vfp->mcsvf(pre_address, y_stride, sp(tc), sp(tr),
-                                 src_address, src_stride, second_pred, mask,
-                                 mask_stride, invert_mask, &sse);
+            thismse = vfp->msvf(pre_address, y_stride, sp(tc), sp(tr),
+                                src_address, src_stride, second_pred, mask,
+                                mask_stride, invert_mask, &sse);
 #endif
           else
             thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr),
@@ -910,9 +910,9 @@
                              src_stride, &sse);
 #if CONFIG_EXT_INTER
         else if (mask)
-          thismse = vfp->mcsvf(pre_address, y_stride, sp(tc), sp(tr),
-                               src_address, src_stride, second_pred, mask,
-                               mask_stride, invert_mask, &sse);
+          thismse = vfp->msvf(pre_address, y_stride, sp(tc), sp(tr),
+                              src_address, src_stride, second_pred, mask,
+                              mask_stride, invert_mask, &sse);
 #endif
         else
           thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr),
@@ -1389,9 +1389,9 @@
   const MV mv = { best_mv->row * 8, best_mv->col * 8 };
   unsigned int unused;
 
-  return vfp->mcsvf(what->buf, what->stride, 0, 0,
-                    get_buf_from_mv(in_what, best_mv), in_what->stride,
-                    second_pred, mask, mask_stride, invert_mask, &unused) +
+  return vfp->msvf(what->buf, what->stride, 0, 0,
+                   get_buf_from_mv(in_what, best_mv), in_what->stride,
+                   second_pred, mask, mask_stride, invert_mask, &unused) +
          (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost, x->mvcost,
                                    x->errorperbit)
                      : 0);
@@ -2362,7 +2362,7 @@
 }
 
 // This function is called when we do joint motion search in comp_inter_inter
-// mode.
+// mode, or when searching for one component of an ext-inter compound mode.
 int av1_refining_search_8p_c(MACROBLOCK *x, int error_per_bit, int search_range,
                              const aom_variance_fn_ptr_t *fn_ptr,
 #if CONFIG_EXT_INTER
@@ -2384,9 +2384,9 @@
            x->mv_limits.row_min, x->mv_limits.row_max);
 #if CONFIG_EXT_INTER
   if (mask)
-    best_sad = fn_ptr->mcsdf(what->buf, what->stride,
-                             get_buf_from_mv(in_what, best_mv), in_what->stride,
-                             second_pred, mask, mask_stride, invert_mask) +
+    best_sad = fn_ptr->msdf(what->buf, what->stride,
+                            get_buf_from_mv(in_what, best_mv), in_what->stride,
+                            second_pred, mask, mask_stride, invert_mask) +
                mvsad_err_cost(x, best_mv, &fcenter_mv, error_per_bit);
   else
 #endif
@@ -2406,9 +2406,9 @@
         unsigned int sad;
 #if CONFIG_EXT_INTER
         if (mask)
-          sad = fn_ptr->mcsdf(what->buf, what->stride,
-                              get_buf_from_mv(in_what, &mv), in_what->stride,
-                              second_pred, mask, mask_stride, invert_mask);
+          sad = fn_ptr->msdf(what->buf, what->stride,
+                             get_buf_from_mv(in_what, &mv), in_what->stride,
+                             second_pred, mask, mask_stride, invert_mask);
         else
 #endif
           sad = fn_ptr->sdaf(what->buf, what->stride,
diff --git a/av1/encoder/rdopt.c b/av1/encoder/rdopt.c
index ca69e7c..97100b9 100644
--- a/av1/encoder/rdopt.c
+++ b/av1/encoder/rdopt.c
@@ -7166,7 +7166,7 @@
 
 #if CONFIG_EXT_INTER
 static void build_second_inter_pred(const AV1_COMP *cpi, MACROBLOCK *x,
-                                    BLOCK_SIZE bsize, int_mv *frame_mv,
+                                    BLOCK_SIZE bsize, const MV *other_mv,
                                     int mi_row, int mi_col, const int block,
                                     int ref_idx, uint8_t *second_pred) {
   const AV1_COMMON *const cm = &cpi->common;
@@ -7247,8 +7247,8 @@
 #if CONFIG_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     av1_highbd_build_inter_predictor(
-        ref_yv12.buf, ref_yv12.stride, second_pred, pw,
-        &frame_mv[other_ref].as_mv, &sf, pw, ph, 0, interp_filter,
+        ref_yv12.buf, ref_yv12.stride, second_pred, pw, other_mv, &sf, pw, ph,
+        0, interp_filter,
 #if CONFIG_GLOBAL_MOTION || CONFIG_WARPED_MOTION
         &warp_types, p_col, p_row,
 #endif  // CONFIG_GLOBAL_MOTION || CONFIG_WARPED_MOTION
@@ -7256,8 +7256,8 @@
   } else {
 #endif  // CONFIG_HIGHBITDEPTH
     av1_build_inter_predictor(
-        ref_yv12.buf, ref_yv12.stride, second_pred, pw,
-        &frame_mv[other_ref].as_mv, &sf, pw, ph, &conv_params, interp_filter,
+        ref_yv12.buf, ref_yv12.stride, second_pred, pw, other_mv, &sf, pw, ph,
+        &conv_params, interp_filter,
 #if CONFIG_GLOBAL_MOTION || CONFIG_WARPED_MOTION
         &warp_types, p_col, p_row, plane, !ref_idx,
 #endif  // CONFIG_GLOBAL_MOTION || CONFIG_WARPED_MOTION
@@ -7277,16 +7277,15 @@
 // Search for the best mv for one component of a compound,
 // given that the other component is fixed.
 static void compound_single_motion_search(
-    const AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int_mv *frame_mv,
-    int mi_row, int mi_col, int_mv *ref_mv_sub8x8[2],
-    const uint8_t *second_pred, const uint8_t *mask, int mask_stride,
-    int *rate_mv, const int block, int ref_idx) {
+    const AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, MV *this_mv,
+    int mi_row, int mi_col, const uint8_t *second_pred, const uint8_t *mask,
+    int mask_stride, int *rate_mv, const int block, int ref_idx) {
   const int pw = block_size_wide[bsize];
   const int ph = block_size_high[bsize];
   MACROBLOCKD *xd = &x->e_mbd;
   MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
   const int ref = mbmi->ref_frame[ref_idx];
-  int_mv ref_mv;
+  int_mv ref_mv = x->mbmi_ext->ref_mvs[ref][0];
   struct macroblockd_plane *const pd = &xd->plane[0];
 
   struct buf_2d backup_yv12[MAX_MB_PLANE];
@@ -7294,21 +7293,10 @@
   const YV12_BUFFER_CONFIG *const scaled_ref_frame =
       av1_get_scaled_ref_frame(cpi, ref);
 
-#if CONFIG_CB4X4
-  (void)ref_mv_sub8x8;
-#endif  // CONFIG_CB4X4
-
   // Check that this is either an interinter or an interintra block
   assert(has_second_ref(mbmi) ||
          (ref_idx == 0 && mbmi->ref_frame[1] == INTRA_FRAME));
 
-#if !CONFIG_CB4X4
-  if (bsize < BLOCK_8X8 && ref_mv_sub8x8 != NULL)
-    ref_mv.as_int = ref_mv_sub8x8[ref_idx]->as_int;
-  else
-#endif  // !CONFIG_CB4X4
-    ref_mv = x->mbmi_ext->ref_mvs[ref][0];
-
   if (scaled_ref_frame) {
     int i;
     // Swap out the reference frame for a version that's been scaled to
@@ -7326,19 +7314,18 @@
   int search_range = 3;
 
   MvLimits tmp_mv_limits = x->mv_limits;
-  const int plane = 0;
 
   // Initialized here because of compiler problem in Visual Studio.
   if (ref_idx) {
-    orig_yv12 = xd->plane[plane].pre[0];
-    xd->plane[plane].pre[0] = xd->plane[plane].pre[ref_idx];
+    orig_yv12 = pd->pre[0];
+    pd->pre[0] = pd->pre[ref_idx];
   }
 
   // Do compound motion search on the current reference frame.
   av1_set_mv_search_range(&x->mv_limits, &ref_mv.as_mv);
 
   // Use the mv result from the single mode as mv predictor.
-  *best_mv = frame_mv[ref].as_mv;
+  *best_mv = *this_mv;
 
   best_mv->col >>= 3;
   best_mv->row >>= 3;
@@ -7404,10 +7391,10 @@
   }
 
   // Restore the pointer to the first (possibly scaled) prediction buffer.
-  if (ref_idx) xd->plane[plane].pre[0] = orig_yv12;
+  if (ref_idx) pd->pre[0] = orig_yv12;
 
   if (bestsme < last_besterr) {
-    frame_mv[ref].as_mv = *best_mv;
+    *this_mv = *best_mv;
     last_besterr = bestsme;
   }
 
@@ -7421,32 +7408,24 @@
   }
 
   av1_set_mvcost(x, ref, ref_idx, mbmi->ref_mv_idx);
-#if !CONFIG_CB4X4
-  if (bsize >= BLOCK_8X8)
-#endif  // !CONFIG_CB4X4
-    *rate_mv += av1_mv_bit_cost(&frame_mv[ref].as_mv,
-                                &x->mbmi_ext->ref_mvs[ref][0].as_mv,
-                                x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
-#if !CONFIG_CB4X4
-  else
-    *rate_mv +=
-        av1_mv_bit_cost(&frame_mv[ref].as_mv, &ref_mv_sub8x8[ref_idx]->as_mv,
-                        x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
-#endif  // !CONFIG_CB4X4
+  *rate_mv += av1_mv_bit_cost(this_mv, &ref_mv.as_mv, x->nmvjointcost,
+                              x->mvcost, MV_COST_WEIGHT);
 }
 
 // Wrapper for compound_single_motion_search, for the common case
 // where the second prediction is also an inter mode.
 static void compound_single_motion_search_interinter(
     const AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int_mv *frame_mv,
-    int mi_row, int mi_col, int_mv *ref_mv_sub8x8[2], const uint8_t *mask,
-    int mask_stride, int *rate_mv, const int block, int ref_idx) {
+    int mi_row, int mi_col, const uint8_t *mask, int mask_stride, int *rate_mv,
+    const int block, int ref_idx) {
+  MACROBLOCKD *xd = &x->e_mbd;
+  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+
   // This function should only ever be called for compound modes
-  assert(has_second_ref(&x->e_mbd.mi[0]->mbmi));
+  assert(has_second_ref(mbmi));
 
 // Prediction buffer from second frame.
 #if CONFIG_HIGHBITDEPTH
-  MACROBLOCKD *xd = &x->e_mbd;
   DECLARE_ALIGNED(16, uint16_t, second_pred_alloc_16[MAX_SB_SQUARE]);
   uint8_t *second_pred;
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
@@ -7457,12 +7436,15 @@
   DECLARE_ALIGNED(16, uint8_t, second_pred[MAX_SB_SQUARE]);
 #endif  // CONFIG_HIGHBITDEPTH
 
-  build_second_inter_pred(cpi, x, bsize, frame_mv, mi_row, mi_col, block,
+  MV *this_mv = &frame_mv[mbmi->ref_frame[ref_idx]].as_mv;
+  const MV *other_mv = &frame_mv[mbmi->ref_frame[!ref_idx]].as_mv;
+
+  build_second_inter_pred(cpi, x, bsize, other_mv, mi_row, mi_col, block,
                           ref_idx, second_pred);
 
-  compound_single_motion_search(cpi, x, bsize, frame_mv, mi_row, mi_col,
-                                ref_mv_sub8x8, second_pred, mask, mask_stride,
-                                rate_mv, block, ref_idx);
+  compound_single_motion_search(cpi, x, bsize, this_mv, mi_row, mi_col,
+                                second_pred, mask, mask_stride, rate_mv, block,
+                                ref_idx);
 }
 
 #if CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
@@ -7485,17 +7467,13 @@
 
   frame_mv[rf[0]].as_int = cur_mv[0].as_int;
   frame_mv[rf[1]].as_int = cur_mv[1].as_int;
-  if (which == 2) {
+  if (which == 0 || which == 1) {
+    compound_single_motion_search_interinter(cpi, x, bsize, frame_mv, mi_row,
+                                             mi_col, mask, mask_stride, rate_mv,
+                                             0, which);
+  } else if (which == 2) {
     joint_motion_search(cpi, x, bsize, frame_mv, mi_row, mi_col, NULL, mask,
                         mask_stride, rate_mv, 0);
-  } else if (which == 0) {
-    compound_single_motion_search_interinter(cpi, x, bsize, frame_mv, mi_row,
-                                             mi_col, NULL, mask, mask_stride,
-                                             rate_mv, 0, 0);
-  } else if (which == 1) {
-    compound_single_motion_search_interinter(cpi, x, bsize, frame_mv, mi_row,
-                                             mi_col, NULL, mask, mask_stride,
-                                             rate_mv, 0, 1);
   }
   tmp_mv[0].as_int = frame_mv[rf[0]].as_int;
   tmp_mv[1].as_int = frame_mv[rf[1]].as_int;
@@ -8143,9 +8121,8 @@
       if (cpi->sf.comp_inter_joint_search_thresh <= bsize) {
         frame_mv[refs[0]].as_int =
             mode_mv[compound_ref0_mode(this_mode)][refs[0]].as_int;
-        compound_single_motion_search_interinter(cpi, x, bsize, frame_mv,
-                                                 mi_row, mi_col, NULL, NULL, 0,
-                                                 rate_mv, 0, 1);
+        compound_single_motion_search_interinter(
+            cpi, x, bsize, frame_mv, mi_row, mi_col, NULL, 0, rate_mv, 0, 1);
       } else {
         av1_set_mvcost(x, refs[1], 1, mbmi->ref_mv_idx);
         *rate_mv = av1_mv_bit_cost(&frame_mv[refs[1]].as_mv,
@@ -8158,9 +8135,8 @@
       if (cpi->sf.comp_inter_joint_search_thresh <= bsize) {
         frame_mv[refs[1]].as_int =
             mode_mv[compound_ref1_mode(this_mode)][refs[1]].as_int;
-        compound_single_motion_search_interinter(cpi, x, bsize, frame_mv,
-                                                 mi_row, mi_col, NULL, NULL, 0,
-                                                 rate_mv, 0, 0);
+        compound_single_motion_search_interinter(
+            cpi, x, bsize, frame_mv, mi_row, mi_col, NULL, 0, rate_mv, 0, 0);
       } else {
         av1_set_mvcost(x, refs[0], 0, mbmi->ref_mv_idx);
         *rate_mv = av1_mv_bit_cost(&frame_mv[refs[0]].as_mv,
@@ -9178,12 +9154,10 @@
           // get negative of mask
           const uint8_t *mask = av1_get_contiguous_soft_mask(
               mbmi->interintra_wedge_index, 1, bsize);
-          int_mv frame_mv2[TOTAL_REFS_PER_FRAME];
-          frame_mv2[refs[0]].as_int = x->mbmi_ext->ref_mvs[refs[0]][0].as_int;
-          compound_single_motion_search(cpi, x, bsize, frame_mv2, mi_row,
-                                        mi_col, NULL, intrapred, mask, bw,
+          tmp_mv.as_int = x->mbmi_ext->ref_mvs[refs[0]][0].as_int;
+          compound_single_motion_search(cpi, x, bsize, &tmp_mv.as_mv, mi_row,
+                                        mi_col, intrapred, mask, bw,
                                         &tmp_rate_mv, 0, 0);
-          tmp_mv.as_int = frame_mv2[refs[0]].as_int;
           mbmi->mv[0].as_int = tmp_mv.as_int;
           av1_build_inter_predictors_sby(cm, xd, mi_row, mi_col, &orig_dst,
                                          bsize);