ext-inter: Further cleanup
* Rename the 'masked_compound_*' functions to just 'masked_*'.
The previous names were intended to be temporary, to distinguish
the old and new masked motion search pipelines. But now that the
old pipeline has been removed, we can reuse the old names.
* Simplify the new ext-inter compound motion search pipeline
a bit.
* Harmonize names: Rename
  aom_highbd_masked_compound_sub_pixel_variance* to
  aom_highbd_8_masked_sub_pixel_variance*, to match the naming of
  the corresponding non-masked functions.
Change-Id: I988768ffe2f42a942405b7d8e93a2757a012dca3
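
Illustration (not part of this patch): after the rename, the masked-compound
hooks in aom_variance_fn_ptr_t are msdf/msvf rather than mcsdf/mcsvf. The
sketch below shows how an encoder search routine might dispatch through the
renamed SAD hook, assuming the field names and signatures visible in the diff;
the helper name and its buffer arguments are hypothetical.

    /* Sketch only: calls the renamed masked-compound SAD hook
     * (fn_ptr->msdf, formerly mcsdf) when a mask is supplied, and the
     * ordinary compound-average SAD hook (fn_ptr->sdaf) otherwise. */
    static unsigned int compound_sad_example(
        const aom_variance_fn_ptr_t *fn_ptr, const uint8_t *src,
        int src_stride, const uint8_t *ref, int ref_stride,
        const uint8_t *second_pred, const uint8_t *mask, int mask_stride,
        int invert_mask) {
      if (mask)
        return fn_ptr->msdf(src, src_stride, ref, ref_stride, second_pred,
                            mask, mask_stride, invert_mask);
      return fn_ptr->sdaf(src, src_stride, ref, ref_stride, second_pred);
    }
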
diff --git a/aom_dsp/aom_dsp_rtcd_defs.pl b/aom_dsp/aom_dsp_rtcd_defs.pl
index f528331..393d0d9 100755
--- a/aom_dsp/aom_dsp_rtcd_defs.pl
+++ b/aom_dsp/aom_dsp_rtcd_defs.pl
@@ -740,13 +740,13 @@
if (aom_config("CONFIG_EXT_INTER") eq "yes") {
foreach (@block_sizes) {
($w, $h) = @$_;
- add_proto qw/unsigned int/, "aom_masked_compound_sad${w}x${h}", "const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask";
+ add_proto qw/unsigned int/, "aom_masked_sad${w}x${h}", "const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask";
}
if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
foreach (@block_sizes) {
($w, $h) = @$_;
- add_proto qw/unsigned int/, "aom_highbd_masked_compound_sad${w}x${h}", "const uint8_t *src8, int src_stride, const uint8_t *ref8, int ref_stride, const uint8_t *second_pred8, const uint8_t *msk, int msk_stride, int invert_mask";
+ add_proto qw/unsigned int/, "aom_highbd_masked_sad${w}x${h}", "const uint8_t *src8, int src_stride, const uint8_t *ref8, int ref_stride, const uint8_t *second_pred8, const uint8_t *msk, int msk_stride, int invert_mask";
}
}
}
@@ -1045,14 +1045,14 @@
#
foreach (@block_sizes) {
($w, $h) = @$_;
- add_proto qw/unsigned int/, "aom_masked_compound_sub_pixel_variance${w}x${h}", "const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse";
+ add_proto qw/unsigned int/, "aom_masked_sub_pixel_variance${w}x${h}", "const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse";
}
if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
- foreach $bd ("_", "_10_", "_12_") {
+ foreach $bd ("_8_", "_10_", "_12_") {
foreach (@block_sizes) {
($w, $h) = @$_;
- add_proto qw/unsigned int/, "aom_highbd${bd}masked_compound_sub_pixel_variance${w}x${h}", "const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse";
+ add_proto qw/unsigned int/, "aom_highbd${bd}masked_sub_pixel_variance${w}x${h}", "const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse";
}
}
}
diff --git a/aom_dsp/sad.c b/aom_dsp/sad.c
index e4be68c..2cc172b 100644
--- a/aom_dsp/sad.c
+++ b/aom_dsp/sad.c
@@ -312,11 +312,10 @@
#if CONFIG_AV1 && CONFIG_EXT_INTER
static INLINE
- unsigned int masked_compound_sad(const uint8_t *src, int src_stride,
- const uint8_t *a, int a_stride,
- const uint8_t *b, int b_stride,
- const uint8_t *m, int m_stride, int width,
- int height) {
+ unsigned int masked_sad(const uint8_t *src, int src_stride,
+ const uint8_t *a, int a_stride, const uint8_t *b,
+ int b_stride, const uint8_t *m, int m_stride,
+ int width, int height) {
int y, x;
unsigned int sad = 0;
@@ -336,17 +335,17 @@
return sad;
}
-#define MASKSADMxN(m, n) \
- unsigned int aom_masked_compound_sad##m##x##n##_c( \
- const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
- const uint8_t *second_pred, const uint8_t *msk, int msk_stride, \
- int invert_mask) { \
- if (!invert_mask) \
- return masked_compound_sad(src, src_stride, ref, ref_stride, \
- second_pred, m, msk, msk_stride, m, n); \
- else \
- return masked_compound_sad(src, src_stride, second_pred, m, ref, \
- ref_stride, msk, msk_stride, m, n); \
+#define MASKSADMxN(m, n) \
+ unsigned int aom_masked_sad##m##x##n##_c( \
+ const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
+ const uint8_t *second_pred, const uint8_t *msk, int msk_stride, \
+ int invert_mask) { \
+ if (!invert_mask) \
+ return masked_sad(src, src_stride, ref, ref_stride, second_pred, m, msk, \
+ msk_stride, m, n); \
+ else \
+ return masked_sad(src, src_stride, second_pred, m, ref, ref_stride, msk, \
+ msk_stride, m, n); \
}
/* clang-format off */
@@ -372,11 +371,11 @@
#if CONFIG_HIGHBITDEPTH
static INLINE
- unsigned int highbd_masked_compound_sad(const uint8_t *src8, int src_stride,
- const uint8_t *a8, int a_stride,
- const uint8_t *b8, int b_stride,
- const uint8_t *m, int m_stride,
- int width, int height) {
+ unsigned int highbd_masked_sad(const uint8_t *src8, int src_stride,
+ const uint8_t *a8, int a_stride,
+ const uint8_t *b8, int b_stride,
+ const uint8_t *m, int m_stride, int width,
+ int height) {
int y, x;
unsigned int sad = 0;
const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
@@ -399,19 +398,17 @@
return sad;
}
-#define HIGHBD_MASKSADMXN(m, n) \
- unsigned int aom_highbd_masked_compound_sad##m##x##n##_c( \
- const uint8_t *src8, int src_stride, const uint8_t *ref8, \
- int ref_stride, const uint8_t *second_pred8, const uint8_t *msk, \
- int msk_stride, int invert_mask) { \
- if (!invert_mask) \
- return highbd_masked_compound_sad(src8, src_stride, ref8, ref_stride, \
- second_pred8, m, msk, msk_stride, m, \
- n); \
- else \
- return highbd_masked_compound_sad(src8, src_stride, second_pred8, m, \
- ref8, ref_stride, msk, msk_stride, m, \
- n); \
+#define HIGHBD_MASKSADMXN(m, n) \
+ unsigned int aom_highbd_masked_sad##m##x##n##_c( \
+ const uint8_t *src8, int src_stride, const uint8_t *ref8, \
+ int ref_stride, const uint8_t *second_pred8, const uint8_t *msk, \
+ int msk_stride, int invert_mask) { \
+ if (!invert_mask) \
+ return highbd_masked_sad(src8, src_stride, ref8, ref_stride, \
+ second_pred8, m, msk, msk_stride, m, n); \
+ else \
+ return highbd_masked_sad(src8, src_stride, second_pred8, m, ref8, \
+ ref_stride, msk, msk_stride, m, n); \
}
#if CONFIG_EXT_PARTITION
diff --git a/aom_dsp/variance.c b/aom_dsp/variance.c
index 85adcd1..79677c9 100644
--- a/aom_dsp/variance.c
+++ b/aom_dsp/variance.c
@@ -714,24 +714,24 @@
}
}
-#define MASK_SUBPIX_VAR(W, H) \
- unsigned int aom_masked_compound_sub_pixel_variance##W##x##H##_c( \
- const uint8_t *src, int src_stride, int xoffset, int yoffset, \
- const uint8_t *ref, int ref_stride, const uint8_t *second_pred, \
- const uint8_t *msk, int msk_stride, int invert_mask, \
- unsigned int *sse) { \
- uint16_t fdata3[(H + 1) * W]; \
- uint8_t temp2[H * W]; \
- DECLARE_ALIGNED(16, uint8_t, temp3[H * W]); \
- \
- var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, W, \
- bilinear_filters_2t[xoffset]); \
- var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
- bilinear_filters_2t[yoffset]); \
- \
- aom_comp_mask_pred(temp3, second_pred, W, H, temp2, W, msk, msk_stride, \
- invert_mask); \
- return aom_variance##W##x##H##_c(temp3, W, ref, ref_stride, sse); \
+#define MASK_SUBPIX_VAR(W, H) \
+ unsigned int aom_masked_sub_pixel_variance##W##x##H##_c( \
+ const uint8_t *src, int src_stride, int xoffset, int yoffset, \
+ const uint8_t *ref, int ref_stride, const uint8_t *second_pred, \
+ const uint8_t *msk, int msk_stride, int invert_mask, \
+ unsigned int *sse) { \
+ uint16_t fdata3[(H + 1) * W]; \
+ uint8_t temp2[H * W]; \
+ DECLARE_ALIGNED(16, uint8_t, temp3[H * W]); \
+ \
+ var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, W, \
+ bilinear_filters_2t[xoffset]); \
+ var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
+ bilinear_filters_2t[yoffset]); \
+ \
+ aom_comp_mask_pred_c(temp3, second_pred, W, H, temp2, W, msk, msk_stride, \
+ invert_mask); \
+ return aom_variance##W##x##H##_c(temp3, W, ref, ref_stride, sse); \
}
MASK_SUBPIX_VAR(4, 4)
@@ -800,7 +800,7 @@
}
#define HIGHBD_MASK_SUBPIX_VAR(W, H) \
- unsigned int aom_highbd_masked_compound_sub_pixel_variance##W##x##H##_c( \
+ unsigned int aom_highbd_8_masked_sub_pixel_variance##W##x##H##_c( \
const uint8_t *src, int src_stride, int xoffset, int yoffset, \
const uint8_t *ref, int ref_stride, const uint8_t *second_pred, \
const uint8_t *msk, int msk_stride, int invert_mask, \
@@ -822,7 +822,7 @@
ref, ref_stride, sse); \
} \
\
- unsigned int aom_highbd_10_masked_compound_sub_pixel_variance##W##x##H##_c( \
+ unsigned int aom_highbd_10_masked_sub_pixel_variance##W##x##H##_c( \
const uint8_t *src, int src_stride, int xoffset, int yoffset, \
const uint8_t *ref, int ref_stride, const uint8_t *second_pred, \
const uint8_t *msk, int msk_stride, int invert_mask, \
@@ -844,7 +844,7 @@
ref, ref_stride, sse); \
} \
\
- unsigned int aom_highbd_12_masked_compound_sub_pixel_variance##W##x##H##_c( \
+ unsigned int aom_highbd_12_masked_sub_pixel_variance##W##x##H##_c( \
const uint8_t *src, int src_stride, int xoffset, int yoffset, \
const uint8_t *ref, int ref_stride, const uint8_t *second_pred, \
const uint8_t *msk, int msk_stride, int invert_mask, \
diff --git a/aom_dsp/variance.h b/aom_dsp/variance.h
index 1b546ab..20f0895 100644
--- a/aom_dsp/variance.h
+++ b/aom_dsp/variance.h
@@ -55,11 +55,12 @@
int b_stride, unsigned int *sse, const uint8_t *second_pred);
#if CONFIG_AV1 && CONFIG_EXT_INTER
-typedef unsigned int (*aom_masked_compound_sad_fn_t)(
- const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride,
- const uint8_t *second_pred, const uint8_t *msk, int msk_stride,
- int invert_mask);
-typedef unsigned int (*aom_masked_compound_subpixvariance_fn_t)(
+typedef unsigned int (*aom_masked_sad_fn_t)(const uint8_t *src, int src_stride,
+ const uint8_t *ref, int ref_stride,
+ const uint8_t *second_pred,
+ const uint8_t *msk, int msk_stride,
+ int invert_mask);
+typedef unsigned int (*aom_masked_subpixvariance_fn_t)(
const uint8_t *src, int src_stride, int xoffset, int yoffset,
const uint8_t *ref, int ref_stride, const uint8_t *second_pred,
const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse);
@@ -90,8 +91,8 @@
aom_sad_multi_fn_t sdx8f;
aom_sad_multi_d_fn_t sdx4df;
#if CONFIG_EXT_INTER
- aom_masked_compound_sad_fn_t mcsdf;
- aom_masked_compound_subpixvariance_fn_t mcsvf;
+ aom_masked_sad_fn_t msdf;
+ aom_masked_subpixvariance_fn_t msvf;
#endif // CONFIG_EXT_INTER
#if CONFIG_MOTION_VAR
aom_obmc_sad_fn_t osdf;
diff --git a/av1/encoder/encoder.c b/av1/encoder/encoder.c
index 5b2dcef..8901f0a 100644
--- a/av1/encoder/encoder.c
+++ b/av1/encoder/encoder.c
@@ -1172,8 +1172,8 @@
#if CONFIG_EXT_INTER
#define HIGHBD_MBFP(BT, MCSDF, MCSVF) \
- cpi->fn_ptr[BT].mcsdf = MCSDF; \
- cpi->fn_ptr[BT].mcsvf = MCSVF;
+ cpi->fn_ptr[BT].msdf = MCSDF; \
+ cpi->fn_ptr[BT].msvf = MCSVF;
#define MAKE_MBFP_COMPOUND_SAD_WRAPPER(fnname) \
static unsigned int fnname##_bits8( \
@@ -1201,23 +1201,23 @@
}
#if CONFIG_EXT_PARTITION
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad128x128)
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad128x64)
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad64x128)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad128x128)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad128x64)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad64x128)
#endif // CONFIG_EXT_PARTITION
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad64x64)
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad64x32)
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad32x64)
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad32x32)
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad32x16)
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad16x32)
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad16x16)
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad16x8)
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad8x16)
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad8x8)
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad8x4)
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad4x8)
-MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_compound_sad4x4)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad64x64)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad64x32)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad32x64)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad32x32)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad32x16)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad16x32)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad16x16)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad16x8)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad8x16)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad8x8)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad8x4)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad4x8)
+MAKE_MBFP_COMPOUND_SAD_WRAPPER(aom_highbd_masked_sad4x4)
#endif // CONFIG_EXT_INTER
#if CONFIG_MOTION_VAR
@@ -1382,39 +1382,39 @@
#if CONFIG_EXT_INTER
#if CONFIG_EXT_PARTITION
- HIGHBD_MBFP(BLOCK_128X128, aom_highbd_masked_compound_sad128x128_bits8,
- aom_highbd_masked_compound_sub_pixel_variance128x128)
- HIGHBD_MBFP(BLOCK_128X64, aom_highbd_masked_compound_sad128x64_bits8,
- aom_highbd_masked_compound_sub_pixel_variance128x64)
- HIGHBD_MBFP(BLOCK_64X128, aom_highbd_masked_compound_sad64x128_bits8,
- aom_highbd_masked_compound_sub_pixel_variance64x128)
+ HIGHBD_MBFP(BLOCK_128X128, aom_highbd_masked_sad128x128_bits8,
+ aom_highbd_8_masked_sub_pixel_variance128x128)
+ HIGHBD_MBFP(BLOCK_128X64, aom_highbd_masked_sad128x64_bits8,
+ aom_highbd_8_masked_sub_pixel_variance128x64)
+ HIGHBD_MBFP(BLOCK_64X128, aom_highbd_masked_sad64x128_bits8,
+ aom_highbd_8_masked_sub_pixel_variance64x128)
#endif // CONFIG_EXT_PARTITION
- HIGHBD_MBFP(BLOCK_64X64, aom_highbd_masked_compound_sad64x64_bits8,
- aom_highbd_masked_compound_sub_pixel_variance64x64)
- HIGHBD_MBFP(BLOCK_64X32, aom_highbd_masked_compound_sad64x32_bits8,
- aom_highbd_masked_compound_sub_pixel_variance64x32)
- HIGHBD_MBFP(BLOCK_32X64, aom_highbd_masked_compound_sad32x64_bits8,
- aom_highbd_masked_compound_sub_pixel_variance32x64)
- HIGHBD_MBFP(BLOCK_32X32, aom_highbd_masked_compound_sad32x32_bits8,
- aom_highbd_masked_compound_sub_pixel_variance32x32)
- HIGHBD_MBFP(BLOCK_32X16, aom_highbd_masked_compound_sad32x16_bits8,
- aom_highbd_masked_compound_sub_pixel_variance32x16)
- HIGHBD_MBFP(BLOCK_16X32, aom_highbd_masked_compound_sad16x32_bits8,
- aom_highbd_masked_compound_sub_pixel_variance16x32)
- HIGHBD_MBFP(BLOCK_16X16, aom_highbd_masked_compound_sad16x16_bits8,
- aom_highbd_masked_compound_sub_pixel_variance16x16)
- HIGHBD_MBFP(BLOCK_8X16, aom_highbd_masked_compound_sad8x16_bits8,
- aom_highbd_masked_compound_sub_pixel_variance8x16)
- HIGHBD_MBFP(BLOCK_16X8, aom_highbd_masked_compound_sad16x8_bits8,
- aom_highbd_masked_compound_sub_pixel_variance16x8)
- HIGHBD_MBFP(BLOCK_8X8, aom_highbd_masked_compound_sad8x8_bits8,
- aom_highbd_masked_compound_sub_pixel_variance8x8)
- HIGHBD_MBFP(BLOCK_4X8, aom_highbd_masked_compound_sad4x8_bits8,
- aom_highbd_masked_compound_sub_pixel_variance4x8)
- HIGHBD_MBFP(BLOCK_8X4, aom_highbd_masked_compound_sad8x4_bits8,
- aom_highbd_masked_compound_sub_pixel_variance8x4)
- HIGHBD_MBFP(BLOCK_4X4, aom_highbd_masked_compound_sad4x4_bits8,
- aom_highbd_masked_compound_sub_pixel_variance4x4)
+ HIGHBD_MBFP(BLOCK_64X64, aom_highbd_masked_sad64x64_bits8,
+ aom_highbd_8_masked_sub_pixel_variance64x64)
+ HIGHBD_MBFP(BLOCK_64X32, aom_highbd_masked_sad64x32_bits8,
+ aom_highbd_8_masked_sub_pixel_variance64x32)
+ HIGHBD_MBFP(BLOCK_32X64, aom_highbd_masked_sad32x64_bits8,
+ aom_highbd_8_masked_sub_pixel_variance32x64)
+ HIGHBD_MBFP(BLOCK_32X32, aom_highbd_masked_sad32x32_bits8,
+ aom_highbd_8_masked_sub_pixel_variance32x32)
+ HIGHBD_MBFP(BLOCK_32X16, aom_highbd_masked_sad32x16_bits8,
+ aom_highbd_8_masked_sub_pixel_variance32x16)
+ HIGHBD_MBFP(BLOCK_16X32, aom_highbd_masked_sad16x32_bits8,
+ aom_highbd_8_masked_sub_pixel_variance16x32)
+ HIGHBD_MBFP(BLOCK_16X16, aom_highbd_masked_sad16x16_bits8,
+ aom_highbd_8_masked_sub_pixel_variance16x16)
+ HIGHBD_MBFP(BLOCK_8X16, aom_highbd_masked_sad8x16_bits8,
+ aom_highbd_8_masked_sub_pixel_variance8x16)
+ HIGHBD_MBFP(BLOCK_16X8, aom_highbd_masked_sad16x8_bits8,
+ aom_highbd_8_masked_sub_pixel_variance16x8)
+ HIGHBD_MBFP(BLOCK_8X8, aom_highbd_masked_sad8x8_bits8,
+ aom_highbd_8_masked_sub_pixel_variance8x8)
+ HIGHBD_MBFP(BLOCK_4X8, aom_highbd_masked_sad4x8_bits8,
+ aom_highbd_8_masked_sub_pixel_variance4x8)
+ HIGHBD_MBFP(BLOCK_8X4, aom_highbd_masked_sad8x4_bits8,
+ aom_highbd_8_masked_sub_pixel_variance8x4)
+ HIGHBD_MBFP(BLOCK_4X4, aom_highbd_masked_sad4x4_bits8,
+ aom_highbd_8_masked_sub_pixel_variance4x4)
#endif // CONFIG_EXT_INTER
#if CONFIG_MOTION_VAR
#if CONFIG_EXT_PARTITION
@@ -1589,39 +1589,39 @@
#if CONFIG_EXT_INTER
#if CONFIG_EXT_PARTITION
- HIGHBD_MBFP(BLOCK_128X128, aom_highbd_masked_compound_sad128x128_bits10,
- aom_highbd_10_masked_compound_sub_pixel_variance128x128)
- HIGHBD_MBFP(BLOCK_128X64, aom_highbd_masked_compound_sad128x64_bits10,
- aom_highbd_10_masked_compound_sub_pixel_variance128x64)
- HIGHBD_MBFP(BLOCK_64X128, aom_highbd_masked_compound_sad64x128_bits10,
- aom_highbd_10_masked_compound_sub_pixel_variance64x128)
+ HIGHBD_MBFP(BLOCK_128X128, aom_highbd_masked_sad128x128_bits10,
+ aom_highbd_10_masked_sub_pixel_variance128x128)
+ HIGHBD_MBFP(BLOCK_128X64, aom_highbd_masked_sad128x64_bits10,
+ aom_highbd_10_masked_sub_pixel_variance128x64)
+ HIGHBD_MBFP(BLOCK_64X128, aom_highbd_masked_sad64x128_bits10,
+ aom_highbd_10_masked_sub_pixel_variance64x128)
#endif // CONFIG_EXT_PARTITION
- HIGHBD_MBFP(BLOCK_64X64, aom_highbd_masked_compound_sad64x64_bits10,
- aom_highbd_10_masked_compound_sub_pixel_variance64x64)
- HIGHBD_MBFP(BLOCK_64X32, aom_highbd_masked_compound_sad64x32_bits10,
- aom_highbd_10_masked_compound_sub_pixel_variance64x32)
- HIGHBD_MBFP(BLOCK_32X64, aom_highbd_masked_compound_sad32x64_bits10,
- aom_highbd_10_masked_compound_sub_pixel_variance32x64)
- HIGHBD_MBFP(BLOCK_32X32, aom_highbd_masked_compound_sad32x32_bits10,
- aom_highbd_10_masked_compound_sub_pixel_variance32x32)
- HIGHBD_MBFP(BLOCK_32X16, aom_highbd_masked_compound_sad32x16_bits10,
- aom_highbd_10_masked_compound_sub_pixel_variance32x16)
- HIGHBD_MBFP(BLOCK_16X32, aom_highbd_masked_compound_sad16x32_bits10,
- aom_highbd_10_masked_compound_sub_pixel_variance16x32)
- HIGHBD_MBFP(BLOCK_16X16, aom_highbd_masked_compound_sad16x16_bits10,
- aom_highbd_10_masked_compound_sub_pixel_variance16x16)
- HIGHBD_MBFP(BLOCK_8X16, aom_highbd_masked_compound_sad8x16_bits10,
- aom_highbd_10_masked_compound_sub_pixel_variance8x16)
- HIGHBD_MBFP(BLOCK_16X8, aom_highbd_masked_compound_sad16x8_bits10,
- aom_highbd_10_masked_compound_sub_pixel_variance16x8)
- HIGHBD_MBFP(BLOCK_8X8, aom_highbd_masked_compound_sad8x8_bits10,
- aom_highbd_10_masked_compound_sub_pixel_variance8x8)
- HIGHBD_MBFP(BLOCK_4X8, aom_highbd_masked_compound_sad4x8_bits10,
- aom_highbd_10_masked_compound_sub_pixel_variance4x8)
- HIGHBD_MBFP(BLOCK_8X4, aom_highbd_masked_compound_sad8x4_bits10,
- aom_highbd_10_masked_compound_sub_pixel_variance8x4)
- HIGHBD_MBFP(BLOCK_4X4, aom_highbd_masked_compound_sad4x4_bits10,
- aom_highbd_10_masked_compound_sub_pixel_variance4x4)
+ HIGHBD_MBFP(BLOCK_64X64, aom_highbd_masked_sad64x64_bits10,
+ aom_highbd_10_masked_sub_pixel_variance64x64)
+ HIGHBD_MBFP(BLOCK_64X32, aom_highbd_masked_sad64x32_bits10,
+ aom_highbd_10_masked_sub_pixel_variance64x32)
+ HIGHBD_MBFP(BLOCK_32X64, aom_highbd_masked_sad32x64_bits10,
+ aom_highbd_10_masked_sub_pixel_variance32x64)
+ HIGHBD_MBFP(BLOCK_32X32, aom_highbd_masked_sad32x32_bits10,
+ aom_highbd_10_masked_sub_pixel_variance32x32)
+ HIGHBD_MBFP(BLOCK_32X16, aom_highbd_masked_sad32x16_bits10,
+ aom_highbd_10_masked_sub_pixel_variance32x16)
+ HIGHBD_MBFP(BLOCK_16X32, aom_highbd_masked_sad16x32_bits10,
+ aom_highbd_10_masked_sub_pixel_variance16x32)
+ HIGHBD_MBFP(BLOCK_16X16, aom_highbd_masked_sad16x16_bits10,
+ aom_highbd_10_masked_sub_pixel_variance16x16)
+ HIGHBD_MBFP(BLOCK_8X16, aom_highbd_masked_sad8x16_bits10,
+ aom_highbd_10_masked_sub_pixel_variance8x16)
+ HIGHBD_MBFP(BLOCK_16X8, aom_highbd_masked_sad16x8_bits10,
+ aom_highbd_10_masked_sub_pixel_variance16x8)
+ HIGHBD_MBFP(BLOCK_8X8, aom_highbd_masked_sad8x8_bits10,
+ aom_highbd_10_masked_sub_pixel_variance8x8)
+ HIGHBD_MBFP(BLOCK_4X8, aom_highbd_masked_sad4x8_bits10,
+ aom_highbd_10_masked_sub_pixel_variance4x8)
+ HIGHBD_MBFP(BLOCK_8X4, aom_highbd_masked_sad8x4_bits10,
+ aom_highbd_10_masked_sub_pixel_variance8x4)
+ HIGHBD_MBFP(BLOCK_4X4, aom_highbd_masked_sad4x4_bits10,
+ aom_highbd_10_masked_sub_pixel_variance4x4)
#endif // CONFIG_EXT_INTER
#if CONFIG_MOTION_VAR
#if CONFIG_EXT_PARTITION
@@ -1796,39 +1796,39 @@
#if CONFIG_EXT_INTER
#if CONFIG_EXT_PARTITION
- HIGHBD_MBFP(BLOCK_128X128, aom_highbd_masked_compound_sad128x128_bits12,
- aom_highbd_12_masked_compound_sub_pixel_variance128x128)
- HIGHBD_MBFP(BLOCK_128X64, aom_highbd_masked_compound_sad128x64_bits12,
- aom_highbd_12_masked_compound_sub_pixel_variance128x64)
- HIGHBD_MBFP(BLOCK_64X128, aom_highbd_masked_compound_sad64x128_bits12,
- aom_highbd_12_masked_compound_sub_pixel_variance64x128)
+ HIGHBD_MBFP(BLOCK_128X128, aom_highbd_masked_sad128x128_bits12,
+ aom_highbd_12_masked_sub_pixel_variance128x128)
+ HIGHBD_MBFP(BLOCK_128X64, aom_highbd_masked_sad128x64_bits12,
+ aom_highbd_12_masked_sub_pixel_variance128x64)
+ HIGHBD_MBFP(BLOCK_64X128, aom_highbd_masked_sad64x128_bits12,
+ aom_highbd_12_masked_sub_pixel_variance64x128)
#endif // CONFIG_EXT_PARTITION
- HIGHBD_MBFP(BLOCK_64X64, aom_highbd_masked_compound_sad64x64_bits12,
- aom_highbd_12_masked_compound_sub_pixel_variance64x64)
- HIGHBD_MBFP(BLOCK_64X32, aom_highbd_masked_compound_sad64x32_bits12,
- aom_highbd_12_masked_compound_sub_pixel_variance64x32)
- HIGHBD_MBFP(BLOCK_32X64, aom_highbd_masked_compound_sad32x64_bits12,
- aom_highbd_12_masked_compound_sub_pixel_variance32x64)
- HIGHBD_MBFP(BLOCK_32X32, aom_highbd_masked_compound_sad32x32_bits12,
- aom_highbd_12_masked_compound_sub_pixel_variance32x32)
- HIGHBD_MBFP(BLOCK_32X16, aom_highbd_masked_compound_sad32x16_bits12,
- aom_highbd_12_masked_compound_sub_pixel_variance32x16)
- HIGHBD_MBFP(BLOCK_16X32, aom_highbd_masked_compound_sad16x32_bits12,
- aom_highbd_12_masked_compound_sub_pixel_variance16x32)
- HIGHBD_MBFP(BLOCK_16X16, aom_highbd_masked_compound_sad16x16_bits12,
- aom_highbd_12_masked_compound_sub_pixel_variance16x16)
- HIGHBD_MBFP(BLOCK_8X16, aom_highbd_masked_compound_sad8x16_bits12,
- aom_highbd_12_masked_compound_sub_pixel_variance8x16)
- HIGHBD_MBFP(BLOCK_16X8, aom_highbd_masked_compound_sad16x8_bits12,
- aom_highbd_12_masked_compound_sub_pixel_variance16x8)
- HIGHBD_MBFP(BLOCK_8X8, aom_highbd_masked_compound_sad8x8_bits12,
- aom_highbd_12_masked_compound_sub_pixel_variance8x8)
- HIGHBD_MBFP(BLOCK_4X8, aom_highbd_masked_compound_sad4x8_bits12,
- aom_highbd_12_masked_compound_sub_pixel_variance4x8)
- HIGHBD_MBFP(BLOCK_8X4, aom_highbd_masked_compound_sad8x4_bits12,
- aom_highbd_12_masked_compound_sub_pixel_variance8x4)
- HIGHBD_MBFP(BLOCK_4X4, aom_highbd_masked_compound_sad4x4_bits12,
- aom_highbd_12_masked_compound_sub_pixel_variance4x4)
+ HIGHBD_MBFP(BLOCK_64X64, aom_highbd_masked_sad64x64_bits12,
+ aom_highbd_12_masked_sub_pixel_variance64x64)
+ HIGHBD_MBFP(BLOCK_64X32, aom_highbd_masked_sad64x32_bits12,
+ aom_highbd_12_masked_sub_pixel_variance64x32)
+ HIGHBD_MBFP(BLOCK_32X64, aom_highbd_masked_sad32x64_bits12,
+ aom_highbd_12_masked_sub_pixel_variance32x64)
+ HIGHBD_MBFP(BLOCK_32X32, aom_highbd_masked_sad32x32_bits12,
+ aom_highbd_12_masked_sub_pixel_variance32x32)
+ HIGHBD_MBFP(BLOCK_32X16, aom_highbd_masked_sad32x16_bits12,
+ aom_highbd_12_masked_sub_pixel_variance32x16)
+ HIGHBD_MBFP(BLOCK_16X32, aom_highbd_masked_sad16x32_bits12,
+ aom_highbd_12_masked_sub_pixel_variance16x32)
+ HIGHBD_MBFP(BLOCK_16X16, aom_highbd_masked_sad16x16_bits12,
+ aom_highbd_12_masked_sub_pixel_variance16x16)
+ HIGHBD_MBFP(BLOCK_8X16, aom_highbd_masked_sad8x16_bits12,
+ aom_highbd_12_masked_sub_pixel_variance8x16)
+ HIGHBD_MBFP(BLOCK_16X8, aom_highbd_masked_sad16x8_bits12,
+ aom_highbd_12_masked_sub_pixel_variance16x8)
+ HIGHBD_MBFP(BLOCK_8X8, aom_highbd_masked_sad8x8_bits12,
+ aom_highbd_12_masked_sub_pixel_variance8x8)
+ HIGHBD_MBFP(BLOCK_4X8, aom_highbd_masked_sad4x8_bits12,
+ aom_highbd_12_masked_sub_pixel_variance4x8)
+ HIGHBD_MBFP(BLOCK_8X4, aom_highbd_masked_sad8x4_bits12,
+ aom_highbd_12_masked_sub_pixel_variance8x4)
+ HIGHBD_MBFP(BLOCK_4X4, aom_highbd_masked_sad4x4_bits12,
+ aom_highbd_12_masked_sub_pixel_variance4x4)
#endif // CONFIG_EXT_INTER
#if CONFIG_MOTION_VAR
@@ -2411,44 +2411,29 @@
#endif // CONFIG_MOTION_VAR
#if CONFIG_EXT_INTER
-#define MBFP(BT, MCSDF, MCSVF) \
- cpi->fn_ptr[BT].mcsdf = MCSDF; \
- cpi->fn_ptr[BT].mcsvf = MCSVF;
+#define MBFP(BT, MCSDF, MCSVF) \
+ cpi->fn_ptr[BT].msdf = MCSDF; \
+ cpi->fn_ptr[BT].msvf = MCSVF;
#if CONFIG_EXT_PARTITION
- MBFP(BLOCK_128X128, aom_masked_compound_sad128x128,
- aom_masked_compound_sub_pixel_variance128x128)
- MBFP(BLOCK_128X64, aom_masked_compound_sad128x64,
- aom_masked_compound_sub_pixel_variance128x64)
- MBFP(BLOCK_64X128, aom_masked_compound_sad64x128,
- aom_masked_compound_sub_pixel_variance64x128)
+ MBFP(BLOCK_128X128, aom_masked_sad128x128,
+ aom_masked_sub_pixel_variance128x128)
+ MBFP(BLOCK_128X64, aom_masked_sad128x64, aom_masked_sub_pixel_variance128x64)
+ MBFP(BLOCK_64X128, aom_masked_sad64x128, aom_masked_sub_pixel_variance64x128)
#endif // CONFIG_EXT_PARTITION
- MBFP(BLOCK_64X64, aom_masked_compound_sad64x64,
- aom_masked_compound_sub_pixel_variance64x64)
- MBFP(BLOCK_64X32, aom_masked_compound_sad64x32,
- aom_masked_compound_sub_pixel_variance64x32)
- MBFP(BLOCK_32X64, aom_masked_compound_sad32x64,
- aom_masked_compound_sub_pixel_variance32x64)
- MBFP(BLOCK_32X32, aom_masked_compound_sad32x32,
- aom_masked_compound_sub_pixel_variance32x32)
- MBFP(BLOCK_32X16, aom_masked_compound_sad32x16,
- aom_masked_compound_sub_pixel_variance32x16)
- MBFP(BLOCK_16X32, aom_masked_compound_sad16x32,
- aom_masked_compound_sub_pixel_variance16x32)
- MBFP(BLOCK_16X16, aom_masked_compound_sad16x16,
- aom_masked_compound_sub_pixel_variance16x16)
- MBFP(BLOCK_16X8, aom_masked_compound_sad16x8,
- aom_masked_compound_sub_pixel_variance16x8)
- MBFP(BLOCK_8X16, aom_masked_compound_sad8x16,
- aom_masked_compound_sub_pixel_variance8x16)
- MBFP(BLOCK_8X8, aom_masked_compound_sad8x8,
- aom_masked_compound_sub_pixel_variance8x8)
- MBFP(BLOCK_4X8, aom_masked_compound_sad4x8,
- aom_masked_compound_sub_pixel_variance4x8)
- MBFP(BLOCK_8X4, aom_masked_compound_sad8x4,
- aom_masked_compound_sub_pixel_variance8x4)
- MBFP(BLOCK_4X4, aom_masked_compound_sad4x4,
- aom_masked_compound_sub_pixel_variance4x4)
+ MBFP(BLOCK_64X64, aom_masked_sad64x64, aom_masked_sub_pixel_variance64x64)
+ MBFP(BLOCK_64X32, aom_masked_sad64x32, aom_masked_sub_pixel_variance64x32)
+ MBFP(BLOCK_32X64, aom_masked_sad32x64, aom_masked_sub_pixel_variance32x64)
+ MBFP(BLOCK_32X32, aom_masked_sad32x32, aom_masked_sub_pixel_variance32x32)
+ MBFP(BLOCK_32X16, aom_masked_sad32x16, aom_masked_sub_pixel_variance32x16)
+ MBFP(BLOCK_16X32, aom_masked_sad16x32, aom_masked_sub_pixel_variance16x32)
+ MBFP(BLOCK_16X16, aom_masked_sad16x16, aom_masked_sub_pixel_variance16x16)
+ MBFP(BLOCK_16X8, aom_masked_sad16x8, aom_masked_sub_pixel_variance16x8)
+ MBFP(BLOCK_8X16, aom_masked_sad8x16, aom_masked_sub_pixel_variance8x16)
+ MBFP(BLOCK_8X8, aom_masked_sad8x8, aom_masked_sub_pixel_variance8x8)
+ MBFP(BLOCK_4X8, aom_masked_sad4x8, aom_masked_sub_pixel_variance4x8)
+ MBFP(BLOCK_8X4, aom_masked_sad8x4, aom_masked_sub_pixel_variance8x4)
+ MBFP(BLOCK_4X4, aom_masked_sad4x4, aom_masked_sub_pixel_variance4x4)
#endif // CONFIG_EXT_INTER
#if CONFIG_HIGHBITDEPTH
diff --git a/av1/encoder/mcomp.c b/av1/encoder/mcomp.c
index b727739..c4f475f 100644
--- a/av1/encoder/mcomp.c
+++ b/av1/encoder/mcomp.c
@@ -177,30 +177,30 @@
/* checks if (r, c) has better score than previous best */
#if CONFIG_EXT_INTER
-#define CHECK_BETTER(v, r, c) \
- if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
- MV this_mv = { r, c }; \
- v = mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit); \
- if (second_pred == NULL) \
- thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), \
- src_address, src_stride, &sse); \
- else if (mask) \
- thismse = vfp->mcsvf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), \
- src_address, src_stride, second_pred, mask, \
- mask_stride, invert_mask, &sse); \
- else \
- thismse = vfp->svaf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), \
- src_address, src_stride, &sse, second_pred); \
- v += thismse; \
- if (v < besterr) { \
- besterr = v; \
- br = r; \
- bc = c; \
- *distortion = thismse; \
- *sse1 = sse; \
- } \
- } else { \
- v = INT_MAX; \
+#define CHECK_BETTER(v, r, c) \
+ if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
+ MV this_mv = { r, c }; \
+ v = mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit); \
+ if (second_pred == NULL) \
+ thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), \
+ src_address, src_stride, &sse); \
+ else if (mask) \
+ thismse = vfp->msvf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), \
+ src_address, src_stride, second_pred, mask, \
+ mask_stride, invert_mask, &sse); \
+ else \
+ thismse = vfp->svaf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), \
+ src_address, src_stride, &sse, second_pred); \
+ v += thismse; \
+ if (v < besterr) { \
+ besterr = v; \
+ br = r; \
+ bc = c; \
+ *distortion = thismse; \
+ *sse1 = sse; \
+ } \
+ } else { \
+ v = INT_MAX; \
}
#else
#define CHECK_BETTER(v, r, c) \
@@ -224,7 +224,7 @@
} else { \
v = INT_MAX; \
}
-#endif
+#endif // CONFIG_EXT_INTER
#define CHECK_BETTER0(v, r, c) CHECK_BETTER(v, r, c)
@@ -272,7 +272,7 @@
} else { \
v = INT_MAX; \
}
-#endif
+#endif // CONFIG_EXT_INTER
#define FIRST_LEVEL_CHECKS \
{ \
@@ -861,9 +861,9 @@
src_address, src_stride, &sse);
#if CONFIG_EXT_INTER
else if (mask)
- thismse = vfp->mcsvf(pre_address, y_stride, sp(tc), sp(tr),
- src_address, src_stride, second_pred, mask,
- mask_stride, invert_mask, &sse);
+ thismse = vfp->msvf(pre_address, y_stride, sp(tc), sp(tr),
+ src_address, src_stride, second_pred, mask,
+ mask_stride, invert_mask, &sse);
#endif
else
thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr),
@@ -910,9 +910,9 @@
src_stride, &sse);
#if CONFIG_EXT_INTER
else if (mask)
- thismse = vfp->mcsvf(pre_address, y_stride, sp(tc), sp(tr),
- src_address, src_stride, second_pred, mask,
- mask_stride, invert_mask, &sse);
+ thismse = vfp->msvf(pre_address, y_stride, sp(tc), sp(tr),
+ src_address, src_stride, second_pred, mask,
+ mask_stride, invert_mask, &sse);
#endif
else
thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr),
@@ -1389,9 +1389,9 @@
const MV mv = { best_mv->row * 8, best_mv->col * 8 };
unsigned int unused;
- return vfp->mcsvf(what->buf, what->stride, 0, 0,
- get_buf_from_mv(in_what, best_mv), in_what->stride,
- second_pred, mask, mask_stride, invert_mask, &unused) +
+ return vfp->msvf(what->buf, what->stride, 0, 0,
+ get_buf_from_mv(in_what, best_mv), in_what->stride,
+ second_pred, mask, mask_stride, invert_mask, &unused) +
(use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost, x->mvcost,
x->errorperbit)
: 0);
@@ -2362,7 +2362,7 @@
}
// This function is called when we do joint motion search in comp_inter_inter
-// mode.
+// mode, or when searching for one component of an ext-inter compound mode.
int av1_refining_search_8p_c(MACROBLOCK *x, int error_per_bit, int search_range,
const aom_variance_fn_ptr_t *fn_ptr,
#if CONFIG_EXT_INTER
@@ -2384,9 +2384,9 @@
x->mv_limits.row_min, x->mv_limits.row_max);
#if CONFIG_EXT_INTER
if (mask)
- best_sad = fn_ptr->mcsdf(what->buf, what->stride,
- get_buf_from_mv(in_what, best_mv), in_what->stride,
- second_pred, mask, mask_stride, invert_mask) +
+ best_sad = fn_ptr->msdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, best_mv), in_what->stride,
+ second_pred, mask, mask_stride, invert_mask) +
mvsad_err_cost(x, best_mv, &fcenter_mv, error_per_bit);
else
#endif
@@ -2406,9 +2406,9 @@
unsigned int sad;
#if CONFIG_EXT_INTER
if (mask)
- sad = fn_ptr->mcsdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &mv), in_what->stride,
- second_pred, mask, mask_stride, invert_mask);
+ sad = fn_ptr->msdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &mv), in_what->stride,
+ second_pred, mask, mask_stride, invert_mask);
else
#endif
sad = fn_ptr->sdaf(what->buf, what->stride,
diff --git a/av1/encoder/rdopt.c b/av1/encoder/rdopt.c
index ca69e7c..97100b9 100644
--- a/av1/encoder/rdopt.c
+++ b/av1/encoder/rdopt.c
@@ -7166,7 +7166,7 @@
#if CONFIG_EXT_INTER
static void build_second_inter_pred(const AV1_COMP *cpi, MACROBLOCK *x,
- BLOCK_SIZE bsize, int_mv *frame_mv,
+ BLOCK_SIZE bsize, const MV *other_mv,
int mi_row, int mi_col, const int block,
int ref_idx, uint8_t *second_pred) {
const AV1_COMMON *const cm = &cpi->common;
@@ -7247,8 +7247,8 @@
#if CONFIG_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
av1_highbd_build_inter_predictor(
- ref_yv12.buf, ref_yv12.stride, second_pred, pw,
- &frame_mv[other_ref].as_mv, &sf, pw, ph, 0, interp_filter,
+ ref_yv12.buf, ref_yv12.stride, second_pred, pw, other_mv, &sf, pw, ph,
+ 0, interp_filter,
#if CONFIG_GLOBAL_MOTION || CONFIG_WARPED_MOTION
&warp_types, p_col, p_row,
#endif // CONFIG_GLOBAL_MOTION || CONFIG_WARPED_MOTION
@@ -7256,8 +7256,8 @@
} else {
#endif // CONFIG_HIGHBITDEPTH
av1_build_inter_predictor(
- ref_yv12.buf, ref_yv12.stride, second_pred, pw,
- &frame_mv[other_ref].as_mv, &sf, pw, ph, &conv_params, interp_filter,
+ ref_yv12.buf, ref_yv12.stride, second_pred, pw, other_mv, &sf, pw, ph,
+ &conv_params, interp_filter,
#if CONFIG_GLOBAL_MOTION || CONFIG_WARPED_MOTION
&warp_types, p_col, p_row, plane, !ref_idx,
#endif // CONFIG_GLOBAL_MOTION || CONFIG_WARPED_MOTION
@@ -7277,16 +7277,15 @@
// Search for the best mv for one component of a compound,
// given that the other component is fixed.
static void compound_single_motion_search(
- const AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int_mv *frame_mv,
- int mi_row, int mi_col, int_mv *ref_mv_sub8x8[2],
- const uint8_t *second_pred, const uint8_t *mask, int mask_stride,
- int *rate_mv, const int block, int ref_idx) {
+ const AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, MV *this_mv,
+ int mi_row, int mi_col, const uint8_t *second_pred, const uint8_t *mask,
+ int mask_stride, int *rate_mv, const int block, int ref_idx) {
const int pw = block_size_wide[bsize];
const int ph = block_size_high[bsize];
MACROBLOCKD *xd = &x->e_mbd;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
const int ref = mbmi->ref_frame[ref_idx];
- int_mv ref_mv;
+ int_mv ref_mv = x->mbmi_ext->ref_mvs[ref][0];
struct macroblockd_plane *const pd = &xd->plane[0];
struct buf_2d backup_yv12[MAX_MB_PLANE];
@@ -7294,21 +7293,10 @@
const YV12_BUFFER_CONFIG *const scaled_ref_frame =
av1_get_scaled_ref_frame(cpi, ref);
-#if CONFIG_CB4X4
- (void)ref_mv_sub8x8;
-#endif // CONFIG_CB4X4
-
// Check that this is either an interinter or an interintra block
assert(has_second_ref(mbmi) ||
(ref_idx == 0 && mbmi->ref_frame[1] == INTRA_FRAME));
-#if !CONFIG_CB4X4
- if (bsize < BLOCK_8X8 && ref_mv_sub8x8 != NULL)
- ref_mv.as_int = ref_mv_sub8x8[ref_idx]->as_int;
- else
-#endif // !CONFIG_CB4X4
- ref_mv = x->mbmi_ext->ref_mvs[ref][0];
-
if (scaled_ref_frame) {
int i;
// Swap out the reference frame for a version that's been scaled to
@@ -7326,19 +7314,18 @@
int search_range = 3;
MvLimits tmp_mv_limits = x->mv_limits;
- const int plane = 0;
// Initialized here because of compiler problem in Visual Studio.
if (ref_idx) {
- orig_yv12 = xd->plane[plane].pre[0];
- xd->plane[plane].pre[0] = xd->plane[plane].pre[ref_idx];
+ orig_yv12 = pd->pre[0];
+ pd->pre[0] = pd->pre[ref_idx];
}
// Do compound motion search on the current reference frame.
av1_set_mv_search_range(&x->mv_limits, &ref_mv.as_mv);
// Use the mv result from the single mode as mv predictor.
- *best_mv = frame_mv[ref].as_mv;
+ *best_mv = *this_mv;
best_mv->col >>= 3;
best_mv->row >>= 3;
@@ -7404,10 +7391,10 @@
}
// Restore the pointer to the first (possibly scaled) prediction buffer.
- if (ref_idx) xd->plane[plane].pre[0] = orig_yv12;
+ if (ref_idx) pd->pre[0] = orig_yv12;
if (bestsme < last_besterr) {
- frame_mv[ref].as_mv = *best_mv;
+ *this_mv = *best_mv;
last_besterr = bestsme;
}
@@ -7421,32 +7408,24 @@
}
av1_set_mvcost(x, ref, ref_idx, mbmi->ref_mv_idx);
-#if !CONFIG_CB4X4
- if (bsize >= BLOCK_8X8)
-#endif // !CONFIG_CB4X4
- *rate_mv += av1_mv_bit_cost(&frame_mv[ref].as_mv,
- &x->mbmi_ext->ref_mvs[ref][0].as_mv,
- x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
-#if !CONFIG_CB4X4
- else
- *rate_mv +=
- av1_mv_bit_cost(&frame_mv[ref].as_mv, &ref_mv_sub8x8[ref_idx]->as_mv,
- x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
-#endif // !CONFIG_CB4X4
+ *rate_mv += av1_mv_bit_cost(this_mv, &ref_mv.as_mv, x->nmvjointcost,
+ x->mvcost, MV_COST_WEIGHT);
}
// Wrapper for compound_single_motion_search, for the common case
// where the second prediction is also an inter mode.
static void compound_single_motion_search_interinter(
const AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int_mv *frame_mv,
- int mi_row, int mi_col, int_mv *ref_mv_sub8x8[2], const uint8_t *mask,
- int mask_stride, int *rate_mv, const int block, int ref_idx) {
+ int mi_row, int mi_col, const uint8_t *mask, int mask_stride, int *rate_mv,
+ const int block, int ref_idx) {
+ MACROBLOCKD *xd = &x->e_mbd;
+ MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+
// This function should only ever be called for compound modes
- assert(has_second_ref(&x->e_mbd.mi[0]->mbmi));
+ assert(has_second_ref(mbmi));
// Prediction buffer from second frame.
#if CONFIG_HIGHBITDEPTH
- MACROBLOCKD *xd = &x->e_mbd;
DECLARE_ALIGNED(16, uint16_t, second_pred_alloc_16[MAX_SB_SQUARE]);
uint8_t *second_pred;
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
@@ -7457,12 +7436,15 @@
DECLARE_ALIGNED(16, uint8_t, second_pred[MAX_SB_SQUARE]);
#endif // CONFIG_HIGHBITDEPTH
- build_second_inter_pred(cpi, x, bsize, frame_mv, mi_row, mi_col, block,
+ MV *this_mv = &frame_mv[mbmi->ref_frame[ref_idx]].as_mv;
+ const MV *other_mv = &frame_mv[mbmi->ref_frame[!ref_idx]].as_mv;
+
+ build_second_inter_pred(cpi, x, bsize, other_mv, mi_row, mi_col, block,
ref_idx, second_pred);
- compound_single_motion_search(cpi, x, bsize, frame_mv, mi_row, mi_col,
- ref_mv_sub8x8, second_pred, mask, mask_stride,
- rate_mv, block, ref_idx);
+ compound_single_motion_search(cpi, x, bsize, this_mv, mi_row, mi_col,
+ second_pred, mask, mask_stride, rate_mv, block,
+ ref_idx);
}
#if CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
@@ -7485,17 +7467,13 @@
frame_mv[rf[0]].as_int = cur_mv[0].as_int;
frame_mv[rf[1]].as_int = cur_mv[1].as_int;
- if (which == 2) {
+ if (which == 0 || which == 1) {
+ compound_single_motion_search_interinter(cpi, x, bsize, frame_mv, mi_row,
+ mi_col, mask, mask_stride, rate_mv,
+ 0, which);
+ } else if (which == 2) {
joint_motion_search(cpi, x, bsize, frame_mv, mi_row, mi_col, NULL, mask,
mask_stride, rate_mv, 0);
- } else if (which == 0) {
- compound_single_motion_search_interinter(cpi, x, bsize, frame_mv, mi_row,
- mi_col, NULL, mask, mask_stride,
- rate_mv, 0, 0);
- } else if (which == 1) {
- compound_single_motion_search_interinter(cpi, x, bsize, frame_mv, mi_row,
- mi_col, NULL, mask, mask_stride,
- rate_mv, 0, 1);
}
tmp_mv[0].as_int = frame_mv[rf[0]].as_int;
tmp_mv[1].as_int = frame_mv[rf[1]].as_int;
@@ -8143,9 +8121,8 @@
if (cpi->sf.comp_inter_joint_search_thresh <= bsize) {
frame_mv[refs[0]].as_int =
mode_mv[compound_ref0_mode(this_mode)][refs[0]].as_int;
- compound_single_motion_search_interinter(cpi, x, bsize, frame_mv,
- mi_row, mi_col, NULL, NULL, 0,
- rate_mv, 0, 1);
+ compound_single_motion_search_interinter(
+ cpi, x, bsize, frame_mv, mi_row, mi_col, NULL, 0, rate_mv, 0, 1);
} else {
av1_set_mvcost(x, refs[1], 1, mbmi->ref_mv_idx);
*rate_mv = av1_mv_bit_cost(&frame_mv[refs[1]].as_mv,
@@ -8158,9 +8135,8 @@
if (cpi->sf.comp_inter_joint_search_thresh <= bsize) {
frame_mv[refs[1]].as_int =
mode_mv[compound_ref1_mode(this_mode)][refs[1]].as_int;
- compound_single_motion_search_interinter(cpi, x, bsize, frame_mv,
- mi_row, mi_col, NULL, NULL, 0,
- rate_mv, 0, 0);
+ compound_single_motion_search_interinter(
+ cpi, x, bsize, frame_mv, mi_row, mi_col, NULL, 0, rate_mv, 0, 0);
} else {
av1_set_mvcost(x, refs[0], 0, mbmi->ref_mv_idx);
*rate_mv = av1_mv_bit_cost(&frame_mv[refs[0]].as_mv,
@@ -9178,12 +9154,10 @@
// get negative of mask
const uint8_t *mask = av1_get_contiguous_soft_mask(
mbmi->interintra_wedge_index, 1, bsize);
- int_mv frame_mv2[TOTAL_REFS_PER_FRAME];
- frame_mv2[refs[0]].as_int = x->mbmi_ext->ref_mvs[refs[0]][0].as_int;
- compound_single_motion_search(cpi, x, bsize, frame_mv2, mi_row,
- mi_col, NULL, intrapred, mask, bw,
+ tmp_mv.as_int = x->mbmi_ext->ref_mvs[refs[0]][0].as_int;
+ compound_single_motion_search(cpi, x, bsize, &tmp_mv.as_mv, mi_row,
+ mi_col, intrapred, mask, bw,
&tmp_rate_mv, 0, 0);
- tmp_mv.as_int = frame_mv2[refs[0]].as_int;
mbmi->mv[0].as_int = tmp_mv.as_int;
av1_build_inter_predictors_sby(cm, xd, mi_row, mi_col, &orig_dst,
bsize);
diff --git a/test/masked_sad_test.cc b/test/masked_sad_test.cc
index c0b6eb2..3cc6aae 100644
--- a/test/masked_sad_test.cc
+++ b/test/masked_sad_test.cc
@@ -162,80 +162,64 @@
// TODO(david.barker): Re-enable this once we have vectorized
// versions of the masked_compound_* functions
#if 0 && HAVE_SSSE3
-INSTANTIATE_TEST_CASE_P(SSSE3_C_COMPARE, MaskedSADTest,
- ::testing::Values(
-#if CONFIG_EXT_PARTITION
- make_tuple(&aom_masked_compound_sad128x128_ssse3,
- &aom_masked_compound_sad128x128_c),
- make_tuple(&aom_masked_compound_sad128x64_ssse3,
- &aom_masked_compound_sad128x64_c),
- make_tuple(&aom_masked_compound_sad64x128_ssse3,
- &aom_masked_compound_sad64x128_c),
-#endif // CONFIG_EXT_PARTITION
- make_tuple(&aom_masked_compound_sad64x64_ssse3,
- &aom_masked_compound_sad64x64_c),
- make_tuple(&aom_masked_compound_sad64x32_ssse3,
- &aom_masked_compound_sad64x32_c),
- make_tuple(&aom_masked_compound_sad32x64_ssse3,
- &aom_masked_compound_sad32x64_c),
- make_tuple(&aom_masked_compound_sad32x32_ssse3,
- &aom_masked_compound_sad32x32_c),
- make_tuple(&aom_masked_compound_sad32x16_ssse3,
- &aom_masked_compound_sad32x16_c),
- make_tuple(&aom_masked_compound_sad16x32_ssse3,
- &aom_masked_compound_sad16x32_c),
- make_tuple(&aom_masked_compound_sad16x16_ssse3,
- &aom_masked_compound_sad16x16_c),
- make_tuple(&aom_masked_compound_sad16x8_ssse3,
- &aom_masked_compound_sad16x8_c),
- make_tuple(&aom_masked_compound_sad8x16_ssse3,
- &aom_masked_compound_sad8x16_c),
- make_tuple(&aom_masked_compound_sad8x8_ssse3,
- &aom_masked_compound_sad8x8_c),
- make_tuple(&aom_masked_compound_sad8x4_ssse3,
- &aom_masked_compound_sad8x4_c),
- make_tuple(&aom_masked_compound_sad4x8_ssse3,
- &aom_masked_compound_sad4x8_c),
- make_tuple(&aom_masked_compound_sad4x4_ssse3,
- &aom_masked_compound_sad4x4_c)));
-#if CONFIG_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
- SSSE3_C_COMPARE, HighbdMaskedSADTest,
+ SSSE3_C_COMPARE, MaskedSADTest,
::testing::Values(
#if CONFIG_EXT_PARTITION
- make_tuple(&aom_highbd_masked_compound_sad128x128_ssse3,
- &aom_highbd_masked_compound_sad128x128_c),
- make_tuple(&aom_highbd_masked_compound_sad128x64_ssse3,
- &aom_highbd_masked_compound_sad128x64_c),
- make_tuple(&aom_highbd_masked_compound_sad64x128_ssse3,
- &aom_highbd_masked_compound_sad64x128_c),
+ make_tuple(&aom_masked_sad128x128_ssse3, &aom_masked_sad128x128_c),
+ make_tuple(&aom_masked_sad128x64_ssse3, &aom_masked_sad128x64_c),
+ make_tuple(&aom_masked_sad64x128_ssse3, &aom_masked_sad64x128_c),
#endif // CONFIG_EXT_PARTITION
- make_tuple(&aom_highbd_masked_compound_sad64x64_ssse3,
- &aom_highbd_masked_compound_sad64x64_c),
- make_tuple(&aom_highbd_masked_compound_sad64x32_ssse3,
- &aom_highbd_masked_compound_sad64x32_c),
- make_tuple(&aom_highbd_masked_compound_sad32x64_ssse3,
- &aom_highbd_masked_compound_sad32x64_c),
- make_tuple(&aom_highbd_masked_compound_sad32x32_ssse3,
- &aom_highbd_masked_compound_sad32x32_c),
- make_tuple(&aom_highbd_masked_compound_sad32x16_ssse3,
- &aom_highbd_masked_compound_sad32x16_c),
- make_tuple(&aom_highbd_masked_compound_sad16x32_ssse3,
- &aom_highbd_masked_compound_sad16x32_c),
- make_tuple(&aom_highbd_masked_compound_sad16x16_ssse3,
- &aom_highbd_masked_compound_sad16x16_c),
- make_tuple(&aom_highbd_masked_compound_sad16x8_ssse3,
- &aom_highbd_masked_compound_sad16x8_c),
- make_tuple(&aom_highbd_masked_compound_sad8x16_ssse3,
- &aom_highbd_masked_compound_sad8x16_c),
- make_tuple(&aom_highbd_masked_compound_sad8x8_ssse3,
- &aom_highbd_masked_compound_sad8x8_c),
- make_tuple(&aom_highbd_masked_compound_sad8x4_ssse3,
- &aom_highbd_masked_compound_sad8x4_c),
- make_tuple(&aom_highbd_masked_compound_sad4x8_ssse3,
- &aom_highbd_masked_compound_sad4x8_c),
- make_tuple(&aom_highbd_masked_compound_sad4x4_ssse3,
- &aom_highbd_masked_compound_sad4x4_c)));
+ make_tuple(&aom_masked_sad64x64_ssse3, &aom_masked_sad64x64_c),
+ make_tuple(&aom_masked_sad64x32_ssse3, &aom_masked_sad64x32_c),
+ make_tuple(&aom_masked_sad32x64_ssse3, &aom_masked_sad32x64_c),
+ make_tuple(&aom_masked_sad32x32_ssse3, &aom_masked_sad32x32_c),
+ make_tuple(&aom_masked_sad32x16_ssse3, &aom_masked_sad32x16_c),
+ make_tuple(&aom_masked_sad16x32_ssse3, &aom_masked_sad16x32_c),
+ make_tuple(&aom_masked_sad16x16_ssse3, &aom_masked_sad16x16_c),
+ make_tuple(&aom_masked_sad16x8_ssse3, &aom_masked_sad16x8_c),
+ make_tuple(&aom_masked_sad8x16_ssse3, &aom_masked_sad8x16_c),
+ make_tuple(&aom_masked_sad8x8_ssse3, &aom_masked_sad8x8_c),
+ make_tuple(&aom_masked_sad8x4_ssse3, &aom_masked_sad8x4_c),
+ make_tuple(&aom_masked_sad4x8_ssse3, &aom_masked_sad4x8_c),
+ make_tuple(&aom_masked_sad4x4_ssse3, &aom_masked_sad4x4_c)));
+#if CONFIG_HIGHBITDEPTH
+INSTANTIATE_TEST_CASE_P(SSSE3_C_COMPARE, HighbdMaskedSADTest,
+ ::testing::Values(
+#if CONFIG_EXT_PARTITION
+ make_tuple(&aom_highbd_masked_sad128x128_ssse3,
+ &aom_highbd_masked_sad128x128_c),
+ make_tuple(&aom_highbd_masked_sad128x64_ssse3,
+ &aom_highbd_masked_sad128x64_c),
+ make_tuple(&aom_highbd_masked_sad64x128_ssse3,
+ &aom_highbd_masked_sad64x128_c),
+#endif // CONFIG_EXT_PARTITION
+ make_tuple(&aom_highbd_masked_sad64x64_ssse3,
+ &aom_highbd_masked_sad64x64_c),
+ make_tuple(&aom_highbd_masked_sad64x32_ssse3,
+ &aom_highbd_masked_sad64x32_c),
+ make_tuple(&aom_highbd_masked_sad32x64_ssse3,
+ &aom_highbd_masked_sad32x64_c),
+ make_tuple(&aom_highbd_masked_sad32x32_ssse3,
+ &aom_highbd_masked_sad32x32_c),
+ make_tuple(&aom_highbd_masked_sad32x16_ssse3,
+ &aom_highbd_masked_sad32x16_c),
+ make_tuple(&aom_highbd_masked_sad16x32_ssse3,
+ &aom_highbd_masked_sad16x32_c),
+ make_tuple(&aom_highbd_masked_sad16x16_ssse3,
+ &aom_highbd_masked_sad16x16_c),
+ make_tuple(&aom_highbd_masked_sad16x8_ssse3,
+ &aom_highbd_masked_sad16x8_c),
+ make_tuple(&aom_highbd_masked_sad8x16_ssse3,
+ &aom_highbd_masked_sad8x16_c),
+ make_tuple(&aom_highbd_masked_sad8x8_ssse3,
+ &aom_highbd_masked_sad8x8_c),
+ make_tuple(&aom_highbd_masked_sad8x4_ssse3,
+ &aom_highbd_masked_sad8x4_c),
+ make_tuple(&aom_highbd_masked_sad4x8_ssse3,
+ &aom_highbd_masked_sad4x8_c),
+ make_tuple(&aom_highbd_masked_sad4x4_ssse3,
+ &aom_highbd_masked_sad4x4_c)));
#endif // CONFIG_HIGHBITDEPTH
#endif // 0 && HAVE_SSSE3
} // namespace
diff --git a/test/masked_variance_test.cc b/test/masked_variance_test.cc
index e0fc010..24c67dd 100644
--- a/test/masked_variance_test.cc
+++ b/test/masked_variance_test.cc
@@ -326,199 +326,167 @@
SSSE3_C_COMPARE, MaskedSubPixelVarianceTest,
::testing::Values(
#if CONFIG_EXT_PARTITION
- make_tuple(&aom_masked_compound_sub_pixel_variance128x128_ssse3,
- &aom_masked_compound_sub_pixel_variance128x128_c),
- make_tuple(&aom_masked_compound_sub_pixel_variance128x64_ssse3,
- &aom_masked_compound_sub_pixel_variance128x64_c),
- make_tuple(&aom_masked_compound_sub_pixel_variance64x128_ssse3,
- &aom_masked_compound_sub_pixel_variance64x128_c),
+ make_tuple(&aom_masked_sub_pixel_variance128x128_ssse3,
+ &aom_masked_sub_pixel_variance128x128_c),
+ make_tuple(&aom_masked_sub_pixel_variance128x64_ssse3,
+ &aom_masked_sub_pixel_variance128x64_c),
+ make_tuple(&aom_masked_sub_pixel_variance64x128_ssse3,
+ &aom_masked_sub_pixel_variance64x128_c),
#endif // CONFIG_EXT_PARTITION
- make_tuple(&aom_masked_compound_sub_pixel_variance64x64_ssse3,
- &aom_masked_compound_sub_pixel_variance64x64_c),
- make_tuple(&aom_masked_compound_sub_pixel_variance64x32_ssse3,
- &aom_masked_compound_sub_pixel_variance64x32_c),
- make_tuple(&aom_masked_compound_sub_pixel_variance32x64_ssse3,
- &aom_masked_compound_sub_pixel_variance32x64_c),
- make_tuple(&aom_masked_compound_sub_pixel_variance32x32_ssse3,
- &aom_masked_compound_sub_pixel_variance32x32_c),
- make_tuple(&aom_masked_compound_sub_pixel_variance32x16_ssse3,
- &aom_masked_compound_sub_pixel_variance32x16_c),
- make_tuple(&aom_masked_compound_sub_pixel_variance16x32_ssse3,
- &aom_masked_compound_sub_pixel_variance16x32_c),
- make_tuple(&aom_masked_compound_sub_pixel_variance16x16_ssse3,
- &aom_masked_compound_sub_pixel_variance16x16_c),
- make_tuple(&aom_masked_compound_sub_pixel_variance16x8_ssse3,
- &aom_masked_compound_sub_pixel_variance16x8_c),
- make_tuple(&aom_masked_compound_sub_pixel_variance8x16_ssse3,
- &aom_masked_compound_sub_pixel_variance8x16_c),
- make_tuple(&aom_masked_compound_sub_pixel_variance8x8_ssse3,
- &aom_masked_compound_sub_pixel_variance8x8_c),
- make_tuple(&aom_masked_compound_sub_pixel_variance8x4_ssse3,
- &aom_masked_compound_sub_pixel_variance8x4_c),
- make_tuple(&aom_masked_compound_sub_pixel_variance4x8_ssse3,
- &aom_masked_compound_sub_pixel_variance4x8_c),
- make_tuple(&aom_masked_compound_sub_pixel_variance4x4_ssse3,
- &aom_masked_compound_sub_pixel_variance4x4_c)));
+ make_tuple(&aom_masked_sub_pixel_variance64x64_ssse3,
+ &aom_masked_sub_pixel_variance64x64_c),
+ make_tuple(&aom_masked_sub_pixel_variance64x32_ssse3,
+ &aom_masked_sub_pixel_variance64x32_c),
+ make_tuple(&aom_masked_sub_pixel_variance32x64_ssse3,
+ &aom_masked_sub_pixel_variance32x64_c),
+ make_tuple(&aom_masked_sub_pixel_variance32x32_ssse3,
+ &aom_masked_sub_pixel_variance32x32_c),
+ make_tuple(&aom_masked_sub_pixel_variance32x16_ssse3,
+ &aom_masked_sub_pixel_variance32x16_c),
+ make_tuple(&aom_masked_sub_pixel_variance16x32_ssse3,
+ &aom_masked_sub_pixel_variance16x32_c),
+ make_tuple(&aom_masked_sub_pixel_variance16x16_ssse3,
+ &aom_masked_sub_pixel_variance16x16_c),
+ make_tuple(&aom_masked_sub_pixel_variance16x8_ssse3,
+ &aom_masked_sub_pixel_variance16x8_c),
+ make_tuple(&aom_masked_sub_pixel_variance8x16_ssse3,
+ &aom_masked_sub_pixel_variance8x16_c),
+ make_tuple(&aom_masked_sub_pixel_variance8x8_ssse3,
+ &aom_masked_sub_pixel_variance8x8_c),
+ make_tuple(&aom_masked_sub_pixel_variance8x4_ssse3,
+ &aom_masked_sub_pixel_variance8x4_c),
+ make_tuple(&aom_masked_sub_pixel_variance4x8_ssse3,
+ &aom_masked_sub_pixel_variance4x8_c),
+ make_tuple(&aom_masked_sub_pixel_variance4x4_ssse3,
+ &aom_masked_sub_pixel_variance4x4_c)));
#if CONFIG_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
SSSE3_C_COMPARE, HighbdMaskedSubPixelVarianceTest,
::testing::Values(
#if CONFIG_EXT_PARTITION
- make_tuple(&aom_highbd_masked_compound_sub_pixel_variance128x128_ssse3,
- &aom_highbd_masked_compound_sub_pixel_variance128x128_c,
+ make_tuple(&aom_highbd_8_masked_sub_pixel_variance128x128_ssse3,
+ &aom_highbd_8_masked_sub_pixel_variance128x128_c,
AOM_BITS_8),
- make_tuple(&aom_highbd_masked_compound_sub_pixel_variance128x64_ssse3,
- &aom_highbd_masked_compound_sub_pixel_variance128x64_c,
- AOM_BITS_8),
- make_tuple(&aom_highbd_masked_compound_sub_pixel_variance64x128_ssse3,
- &aom_highbd_masked_compound_sub_pixel_variance64x128_c,
- AOM_BITS_8),
+ make_tuple(&aom_highbd_8_masked_sub_pixel_variance128x64_ssse3,
+ &aom_highbd_8_masked_sub_pixel_variance128x64_c, AOM_BITS_8),
+ make_tuple(&aom_highbd_8_masked_sub_pixel_variance64x128_ssse3,
+ &aom_highbd_8_masked_sub_pixel_variance64x128_c, AOM_BITS_8),
#endif // CONFIG_EXT_PARTITION
- make_tuple(&aom_highbd_masked_compound_sub_pixel_variance64x64_ssse3,
- &aom_highbd_masked_compound_sub_pixel_variance64x64_c,
- AOM_BITS_8),
- make_tuple(&aom_highbd_masked_compound_sub_pixel_variance64x32_ssse3,
- &aom_highbd_masked_compound_sub_pixel_variance64x32_c,
- AOM_BITS_8),
- make_tuple(&aom_highbd_masked_compound_sub_pixel_variance32x64_ssse3,
- &aom_highbd_masked_compound_sub_pixel_variance32x64_c,
- AOM_BITS_8),
- make_tuple(&aom_highbd_masked_compound_sub_pixel_variance32x32_ssse3,
- &aom_highbd_masked_compound_sub_pixel_variance32x32_c,
- AOM_BITS_8),
- make_tuple(&aom_highbd_masked_compound_sub_pixel_variance32x16_ssse3,
- &aom_highbd_masked_compound_sub_pixel_variance32x16_c,
- AOM_BITS_8),
- make_tuple(&aom_highbd_masked_compound_sub_pixel_variance16x32_ssse3,
- &aom_highbd_masked_compound_sub_pixel_variance16x32_c,
- AOM_BITS_8),
- make_tuple(&aom_highbd_masked_compound_sub_pixel_variance16x16_ssse3,
- &aom_highbd_masked_compound_sub_pixel_variance16x16_c,
- AOM_BITS_8),
- make_tuple(&aom_highbd_masked_compound_sub_pixel_variance16x8_ssse3,
- &aom_highbd_masked_compound_sub_pixel_variance16x8_c,
- AOM_BITS_8),
- make_tuple(&aom_highbd_masked_compound_sub_pixel_variance8x16_ssse3,
- &aom_highbd_masked_compound_sub_pixel_variance8x16_c,
- AOM_BITS_8),
- make_tuple(&aom_highbd_masked_compound_sub_pixel_variance8x8_ssse3,
- &aom_highbd_masked_compound_sub_pixel_variance8x8_c,
- AOM_BITS_8),
- make_tuple(&aom_highbd_masked_compound_sub_pixel_variance8x4_ssse3,
- &aom_highbd_masked_compound_sub_pixel_variance8x4_c,
- AOM_BITS_8),
- make_tuple(&aom_highbd_masked_compound_sub_pixel_variance4x8_ssse3,
- &aom_highbd_masked_compound_sub_pixel_variance4x8_c,
- AOM_BITS_8),
- make_tuple(&aom_highbd_masked_compound_sub_pixel_variance4x4_ssse3,
- &aom_highbd_masked_compound_sub_pixel_variance4x4_c,
- AOM_BITS_8),
+ make_tuple(&aom_highbd_8_masked_sub_pixel_variance64x64_ssse3,
+ &aom_highbd_8_masked_sub_pixel_variance64x64_c, AOM_BITS_8),
+ make_tuple(&aom_highbd_8_masked_sub_pixel_variance64x32_ssse3,
+ &aom_highbd_8_masked_sub_pixel_variance64x32_c, AOM_BITS_8),
+ make_tuple(&aom_highbd_8_masked_sub_pixel_variance32x64_ssse3,
+ &aom_highbd_8_masked_sub_pixel_variance32x64_c, AOM_BITS_8),
+ make_tuple(&aom_highbd_8_masked_sub_pixel_variance32x32_ssse3,
+ &aom_highbd_8_masked_sub_pixel_variance32x32_c, AOM_BITS_8),
+ make_tuple(&aom_highbd_8_masked_sub_pixel_variance32x16_ssse3,
+ &aom_highbd_8_masked_sub_pixel_variance32x16_c, AOM_BITS_8),
+ make_tuple(&aom_highbd_8_masked_sub_pixel_variance16x32_ssse3,
+ &aom_highbd_8_masked_sub_pixel_variance16x32_c, AOM_BITS_8),
+ make_tuple(&aom_highbd_8_masked_sub_pixel_variance16x16_ssse3,
+ &aom_highbd_8_masked_sub_pixel_variance16x16_c, AOM_BITS_8),
+ make_tuple(&aom_highbd_8_masked_sub_pixel_variance16x8_ssse3,
+ &aom_highbd_8_masked_sub_pixel_variance16x8_c, AOM_BITS_8),
+ make_tuple(&aom_highbd_8_masked_sub_pixel_variance8x16_ssse3,
+ &aom_highbd_8_masked_sub_pixel_variance8x16_c, AOM_BITS_8),
+ make_tuple(&aom_highbd_8_masked_sub_pixel_variance8x8_ssse3,
+ &aom_highbd_8_masked_sub_pixel_variance8x8_c, AOM_BITS_8),
+ make_tuple(&aom_highbd_8_masked_sub_pixel_variance8x4_ssse3,
+ &aom_highbd_8_masked_sub_pixel_variance8x4_c, AOM_BITS_8),
+ make_tuple(&aom_highbd_8_masked_sub_pixel_variance4x8_ssse3,
+ &aom_highbd_8_masked_sub_pixel_variance4x8_c, AOM_BITS_8),
+ make_tuple(&aom_highbd_8_masked_sub_pixel_variance4x4_ssse3,
+ &aom_highbd_8_masked_sub_pixel_variance4x4_c, AOM_BITS_8),
#if CONFIG_EXT_PARTITION
- make_tuple(
- &aom_highbd_10_masked_compound_sub_pixel_variance128x128_ssse3,
- &aom_highbd_10_masked_compound_sub_pixel_variance128x128_c,
- AOM_BITS_10),
- make_tuple(
- &aom_highbd_10_masked_compound_sub_pixel_variance128x64_ssse3,
- &aom_highbd_10_masked_compound_sub_pixel_variance128x64_c,
- AOM_BITS_10),
- make_tuple(
- &aom_highbd_10_masked_compound_sub_pixel_variance64x128_ssse3,
- &aom_highbd_10_masked_compound_sub_pixel_variance64x128_c,
- AOM_BITS_10),
+ make_tuple(&aom_highbd_10_masked_sub_pixel_variance128x128_ssse3,
+ &aom_highbd_10_masked_sub_pixel_variance128x128_c,
+ AOM_BITS_10),
+ make_tuple(&aom_highbd_10_masked_sub_pixel_variance128x64_ssse3,
+ &aom_highbd_10_masked_sub_pixel_variance128x64_c,
+ AOM_BITS_10),
+ make_tuple(&aom_highbd_10_masked_sub_pixel_variance64x128_ssse3,
+ &aom_highbd_10_masked_sub_pixel_variance64x128_c,
+ AOM_BITS_10),
#endif // CONFIG_EXT_PARTITION
- make_tuple(&aom_highbd_10_masked_compound_sub_pixel_variance64x64_ssse3,
- &aom_highbd_10_masked_compound_sub_pixel_variance64x64_c,
+ make_tuple(&aom_highbd_10_masked_sub_pixel_variance64x64_ssse3,
+ &aom_highbd_10_masked_sub_pixel_variance64x64_c,
AOM_BITS_10),
- make_tuple(&aom_highbd_10_masked_compound_sub_pixel_variance64x32_ssse3,
- &aom_highbd_10_masked_compound_sub_pixel_variance64x32_c,
+ make_tuple(&aom_highbd_10_masked_sub_pixel_variance64x32_ssse3,
+ &aom_highbd_10_masked_sub_pixel_variance64x32_c,
AOM_BITS_10),
- make_tuple(&aom_highbd_10_masked_compound_sub_pixel_variance32x64_ssse3,
- &aom_highbd_10_masked_compound_sub_pixel_variance32x64_c,
+ make_tuple(&aom_highbd_10_masked_sub_pixel_variance32x64_ssse3,
+ &aom_highbd_10_masked_sub_pixel_variance32x64_c,
AOM_BITS_10),
- make_tuple(&aom_highbd_10_masked_compound_sub_pixel_variance32x32_ssse3,
- &aom_highbd_10_masked_compound_sub_pixel_variance32x32_c,
+ make_tuple(&aom_highbd_10_masked_sub_pixel_variance32x32_ssse3,
+ &aom_highbd_10_masked_sub_pixel_variance32x32_c,
AOM_BITS_10),
- make_tuple(&aom_highbd_10_masked_compound_sub_pixel_variance32x16_ssse3,
- &aom_highbd_10_masked_compound_sub_pixel_variance32x16_c,
+ make_tuple(&aom_highbd_10_masked_sub_pixel_variance32x16_ssse3,
+ &aom_highbd_10_masked_sub_pixel_variance32x16_c,
AOM_BITS_10),
- make_tuple(&aom_highbd_10_masked_compound_sub_pixel_variance16x32_ssse3,
- &aom_highbd_10_masked_compound_sub_pixel_variance16x32_c,
+ make_tuple(&aom_highbd_10_masked_sub_pixel_variance16x32_ssse3,
+ &aom_highbd_10_masked_sub_pixel_variance16x32_c,
AOM_BITS_10),
- make_tuple(&aom_highbd_10_masked_compound_sub_pixel_variance16x16_ssse3,
- &aom_highbd_10_masked_compound_sub_pixel_variance16x16_c,
+ make_tuple(&aom_highbd_10_masked_sub_pixel_variance16x16_ssse3,
+ &aom_highbd_10_masked_sub_pixel_variance16x16_c,
AOM_BITS_10),
- make_tuple(&aom_highbd_10_masked_compound_sub_pixel_variance16x8_ssse3,
- &aom_highbd_10_masked_compound_sub_pixel_variance16x8_c,
- AOM_BITS_10),
- make_tuple(&aom_highbd_10_masked_compound_sub_pixel_variance8x16_ssse3,
- &aom_highbd_10_masked_compound_sub_pixel_variance8x16_c,
- AOM_BITS_10),
- make_tuple(&aom_highbd_10_masked_compound_sub_pixel_variance8x8_ssse3,
- &aom_highbd_10_masked_compound_sub_pixel_variance8x8_c,
- AOM_BITS_10),
- make_tuple(&aom_highbd_10_masked_compound_sub_pixel_variance8x4_ssse3,
- &aom_highbd_10_masked_compound_sub_pixel_variance8x4_c,
- AOM_BITS_10),
- make_tuple(&aom_highbd_10_masked_compound_sub_pixel_variance4x8_ssse3,
- &aom_highbd_10_masked_compound_sub_pixel_variance4x8_c,
- AOM_BITS_10),
- make_tuple(&aom_highbd_10_masked_compound_sub_pixel_variance4x4_ssse3,
- &aom_highbd_10_masked_compound_sub_pixel_variance4x4_c,
- AOM_BITS_10),
+ make_tuple(&aom_highbd_10_masked_sub_pixel_variance16x8_ssse3,
+ &aom_highbd_10_masked_sub_pixel_variance16x8_c, AOM_BITS_10),
+ make_tuple(&aom_highbd_10_masked_sub_pixel_variance8x16_ssse3,
+ &aom_highbd_10_masked_sub_pixel_variance8x16_c, AOM_BITS_10),
+ make_tuple(&aom_highbd_10_masked_sub_pixel_variance8x8_ssse3,
+ &aom_highbd_10_masked_sub_pixel_variance8x8_c, AOM_BITS_10),
+ make_tuple(&aom_highbd_10_masked_sub_pixel_variance8x4_ssse3,
+ &aom_highbd_10_masked_sub_pixel_variance8x4_c, AOM_BITS_10),
+ make_tuple(&aom_highbd_10_masked_sub_pixel_variance4x8_ssse3,
+ &aom_highbd_10_masked_sub_pixel_variance4x8_c, AOM_BITS_10),
+ make_tuple(&aom_highbd_10_masked_sub_pixel_variance4x4_ssse3,
+ &aom_highbd_10_masked_sub_pixel_variance4x4_c, AOM_BITS_10),
#if CONFIG_EXT_PARTITION
- make_tuple(
- &aom_highbd_12_masked_compound_sub_pixel_variance128x128_ssse3,
- &aom_highbd_12_masked_compound_sub_pixel_variance128x128_c,
- AOM_BITS_12),
- make_tuple(
- &aom_highbd_12_masked_compound_sub_pixel_variance128x64_ssse3,
- &aom_highbd_12_masked_compound_sub_pixel_variance128x64_c,
- AOM_BITS_12),
- make_tuple(
- &aom_highbd_12_masked_compound_sub_pixel_variance64x128_ssse3,
- &aom_highbd_12_masked_compound_sub_pixel_variance64x128_c,
- AOM_BITS_12),
+ make_tuple(&aom_highbd_12_masked_sub_pixel_variance128x128_ssse3,
+ &aom_highbd_12_masked_sub_pixel_variance128x128_c,
+ AOM_BITS_12),
+ make_tuple(&aom_highbd_12_masked_sub_pixel_variance128x64_ssse3,
+ &aom_highbd_12_masked_sub_pixel_variance128x64_c,
+ AOM_BITS_12),
+ make_tuple(&aom_highbd_12_masked_sub_pixel_variance64x128_ssse3,
+ &aom_highbd_12_masked_sub_pixel_variance64x128_c,
+ AOM_BITS_12),
#endif // CONFIG_EXT_PARTITION
- make_tuple(&aom_highbd_12_masked_compound_sub_pixel_variance64x64_ssse3,
- &aom_highbd_12_masked_compound_sub_pixel_variance64x64_c,
+ make_tuple(&aom_highbd_12_masked_sub_pixel_variance64x64_ssse3,
+ &aom_highbd_12_masked_sub_pixel_variance64x64_c,
AOM_BITS_12),
- make_tuple(&aom_highbd_12_masked_compound_sub_pixel_variance64x32_ssse3,
- &aom_highbd_12_masked_compound_sub_pixel_variance64x32_c,
+ make_tuple(&aom_highbd_12_masked_sub_pixel_variance64x32_ssse3,
+ &aom_highbd_12_masked_sub_pixel_variance64x32_c,
AOM_BITS_12),
- make_tuple(&aom_highbd_12_masked_compound_sub_pixel_variance32x64_ssse3,
- &aom_highbd_12_masked_compound_sub_pixel_variance32x64_c,
+ make_tuple(&aom_highbd_12_masked_sub_pixel_variance32x64_ssse3,
+ &aom_highbd_12_masked_sub_pixel_variance32x64_c,
AOM_BITS_12),
- make_tuple(&aom_highbd_12_masked_compound_sub_pixel_variance32x32_ssse3,
- &aom_highbd_12_masked_compound_sub_pixel_variance32x32_c,
+ make_tuple(&aom_highbd_12_masked_sub_pixel_variance32x32_ssse3,
+ &aom_highbd_12_masked_sub_pixel_variance32x32_c,
AOM_BITS_12),
- make_tuple(&aom_highbd_12_masked_compound_sub_pixel_variance32x16_ssse3,
- &aom_highbd_12_masked_compound_sub_pixel_variance32x16_c,
+ make_tuple(&aom_highbd_12_masked_sub_pixel_variance32x16_ssse3,
+ &aom_highbd_12_masked_sub_pixel_variance32x16_c,
AOM_BITS_12),
- make_tuple(&aom_highbd_12_masked_compound_sub_pixel_variance16x32_ssse3,
- &aom_highbd_12_masked_compound_sub_pixel_variance16x32_c,
+ make_tuple(&aom_highbd_12_masked_sub_pixel_variance16x32_ssse3,
+ &aom_highbd_12_masked_sub_pixel_variance16x32_c,
AOM_BITS_12),
- make_tuple(&aom_highbd_12_masked_compound_sub_pixel_variance16x16_ssse3,
- &aom_highbd_12_masked_compound_sub_pixel_variance16x16_c,
+ make_tuple(&aom_highbd_12_masked_sub_pixel_variance16x16_ssse3,
+ &aom_highbd_12_masked_sub_pixel_variance16x16_c,
AOM_BITS_12),
- make_tuple(&aom_highbd_12_masked_compound_sub_pixel_variance16x8_ssse3,
- &aom_highbd_12_masked_compound_sub_pixel_variance16x8_c,
- AOM_BITS_12),
- make_tuple(&aom_highbd_12_masked_compound_sub_pixel_variance8x16_ssse3,
- &aom_highbd_12_masked_compound_sub_pixel_variance8x16_c,
- AOM_BITS_12),
- make_tuple(&aom_highbd_12_masked_compound_sub_pixel_variance8x8_ssse3,
- &aom_highbd_12_masked_compound_sub_pixel_variance8x8_c,
- AOM_BITS_12),
- make_tuple(&aom_highbd_12_masked_compound_sub_pixel_variance8x4_ssse3,
- &aom_highbd_12_masked_compound_sub_pixel_variance8x4_c,
- AOM_BITS_12),
- make_tuple(&aom_highbd_12_masked_compound_sub_pixel_variance4x8_ssse3,
- &aom_highbd_12_masked_compound_sub_pixel_variance4x8_c,
- AOM_BITS_12),
- make_tuple(&aom_highbd_12_masked_compound_sub_pixel_variance4x4_ssse3,
- &aom_highbd_12_masked_compound_sub_pixel_variance4x4_c,
+ make_tuple(&aom_highbd_12_masked_sub_pixel_variance16x8_ssse3,
+ &aom_highbd_12_masked_sub_pixel_variance16x8_c, AOM_BITS_12),
+ make_tuple(&aom_highbd_12_masked_sub_pixel_variance8x16_ssse3,
+ &aom_highbd_12_masked_sub_pixel_variance8x16_c, AOM_BITS_12),
+ make_tuple(&aom_highbd_12_masked_sub_pixel_variance8x8_ssse3,
+ &aom_highbd_12_masked_sub_pixel_variance8x8_c, AOM_BITS_12),
+ make_tuple(&aom_highbd_12_masked_sub_pixel_variance8x4_ssse3,
+ &aom_highbd_12_masked_sub_pixel_variance8x4_c, AOM_BITS_12),
+ make_tuple(&aom_highbd_12_masked_sub_pixel_variance4x8_ssse3,
+ &aom_highbd_12_masked_sub_pixel_variance4x8_c, AOM_BITS_12),
+ make_tuple(&aom_highbd_12_masked_sub_pixel_variance4x4_ssse3,
+ &aom_highbd_12_masked_sub_pixel_variance4x4_c,
AOM_BITS_12)));
#endif // CONFIG_HIGHBITDEPTH