Merge "Add optimized vpx_blend_mask6" into nextgenv2
diff --git a/test/encode_test_driver.cc b/test/encode_test_driver.cc
index f3c3243..f4c4c4b 100644
--- a/test/encode_test_driver.cc
+++ b/test/encode_test_driver.cc
@@ -56,10 +56,12 @@
#endif // !CONFIG_EXT_TILE
} else
#endif
- if (CodecInterface() == &vpx_codec_vp8_cx_algo) {
+ {
#if CONFIG_VP8_ENCODER
- ASSERT_EQ(&vpx_codec_vp8_cx_algo, CodecInterface())
- << "Unknown Codec Interface";
+ if (CodecInterface() == &vpx_codec_vp8_cx_algo) {
+ ASSERT_EQ(&vpx_codec_vp8_cx_algo, CodecInterface())
+ << "Unknown Codec Interface";
+ }
#endif
}
}
diff --git a/test/vp10_ext_tile_test.cc b/test/vp10_ext_tile_test.cc
index 7c9e960..ad04eeb 100644
--- a/test/vp10_ext_tile_test.cc
+++ b/test/vp10_ext_tile_test.cc
@@ -162,7 +162,7 @@
break;
}
- if (IsLastFrame && &tile_img_) {
+ if (IsLastFrame) {
::libvpx_test::MD5 md5_res;
md5_res.Add(&tile_img_);
tile_md5_.push_back(md5_res.Get());
diff --git a/vp10/common/reconinter.c b/vp10/common/reconinter.c
index 825fff3..6483fc9 100644
--- a/vp10/common/reconinter.c
+++ b/vp10/common/reconinter.c
@@ -24,7 +24,11 @@
#endif // CONFIG_OBMC
#if CONFIG_EXT_INTER
-#define NSMOOTHERS 2
+
+// Set to one to use larger codebooks
+#define USE_LARGE_WEDGE_CODEBOOK 0
+
+#define NSMOOTHERS 1
static int get_masked_weight(int m, int smoothness) {
#define SMOOTHER_LEN 32
static const uint8_t smoothfn[NSMOOTHERS][2 * SMOOTHER_LEN + 1] = {
@@ -38,16 +42,6 @@
64, 64, 64, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 64, 64, 64,
- }, {
- 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 1, 1, 2, 2,
- 3, 3, 4, 4, 5, 6, 8, 9,
- 11, 13, 15, 17, 20, 23, 26, 29,
- 32,
- 35, 38, 41, 44, 47, 49, 51, 53,
- 55, 56, 58, 59, 60, 60, 61, 61,
- 62, 62, 63, 63, 63, 63, 63, 63,
- 64, 64, 64, 64, 64, 64, 64, 64,
}
};
if (m < -SMOOTHER_LEN)
@@ -58,18 +52,6 @@
return smoothfn[smoothness][m + SMOOTHER_LEN];
}
-// Angles are with respect to horizontal anti-clockwise
-typedef enum {
- WEDGE_HORIZONTAL = 0,
- WEDGE_VERTICAL = 1,
- WEDGE_OBLIQUE27 = 2,
- WEDGE_OBLIQUE63 = 3,
- WEDGE_OBLIQUE117 = 4,
- WEDGE_OBLIQUE153 = 5,
- WEDGE_DIRECTIONS
-} WedgeDirectionType;
-
-#define WEDGE_PARMS 4
// [smoother][negative][direction]
DECLARE_ALIGNED(
@@ -77,6 +59,314 @@
wedge_mask_obl[NSMOOTHERS][2][WEDGE_DIRECTIONS]
[MASK_MASTER_SIZE * MASK_MASTER_SIZE]);
+DECLARE_ALIGNED(
+ 16, static uint8_t,
+ wedge_signflip_lookup[BLOCK_SIZES][MAX_WEDGE_TYPES]);
+
+// Some unused wedge codebooks left temporarily to facilitate experiments.
+// To be removed when settled.
+static wedge_code_type wedge_codebook_8_hgtw[8] = {
+ {WEDGE_OBLIQUE27, 4, 4},
+ {WEDGE_OBLIQUE63, 4, 4},
+ {WEDGE_OBLIQUE117, 4, 4},
+ {WEDGE_OBLIQUE153, 4, 4},
+ {WEDGE_OBLIQUE27, 4, 2},
+ {WEDGE_OBLIQUE27, 4, 6},
+ {WEDGE_OBLIQUE153, 4, 2},
+ {WEDGE_OBLIQUE153, 4, 6},
+};
+
+static wedge_code_type wedge_codebook_8_hltw[8] = {
+ {WEDGE_OBLIQUE27, 4, 4},
+ {WEDGE_OBLIQUE63, 4, 4},
+ {WEDGE_OBLIQUE117, 4, 4},
+ {WEDGE_OBLIQUE153, 4, 4},
+ {WEDGE_OBLIQUE63, 2, 4},
+ {WEDGE_OBLIQUE63, 6, 4},
+ {WEDGE_OBLIQUE117, 2, 4},
+ {WEDGE_OBLIQUE117, 6, 4},
+};
+
+static wedge_code_type wedge_codebook_8_heqw[8] = {
+ {WEDGE_OBLIQUE27, 4, 4},
+ {WEDGE_OBLIQUE63, 4, 4},
+ {WEDGE_OBLIQUE117, 4, 4},
+ {WEDGE_OBLIQUE153, 4, 4},
+ {WEDGE_HORIZONTAL, 4, 2},
+ {WEDGE_HORIZONTAL, 4, 6},
+ {WEDGE_VERTICAL, 2, 4},
+ {WEDGE_VERTICAL, 6, 4},
+};
+
+#if !USE_LARGE_WEDGE_CODEBOOK
+static const wedge_code_type wedge_codebook_16_hgtw[16] = {
+ {WEDGE_OBLIQUE27, 4, 4},
+ {WEDGE_OBLIQUE63, 4, 4},
+ {WEDGE_OBLIQUE117, 4, 4},
+ {WEDGE_OBLIQUE153, 4, 4},
+ {WEDGE_HORIZONTAL, 4, 2},
+ {WEDGE_HORIZONTAL, 4, 4},
+ {WEDGE_HORIZONTAL, 4, 6},
+ {WEDGE_VERTICAL, 4, 4},
+ {WEDGE_OBLIQUE27, 4, 2},
+ {WEDGE_OBLIQUE27, 4, 6},
+ {WEDGE_OBLIQUE153, 4, 2},
+ {WEDGE_OBLIQUE153, 4, 6},
+ {WEDGE_OBLIQUE63, 2, 4},
+ {WEDGE_OBLIQUE63, 6, 4},
+ {WEDGE_OBLIQUE117, 2, 4},
+ {WEDGE_OBLIQUE117, 6, 4},
+};
+
+static const wedge_code_type wedge_codebook_16_hltw[16] = {
+ {WEDGE_OBLIQUE27, 4, 4},
+ {WEDGE_OBLIQUE63, 4, 4},
+ {WEDGE_OBLIQUE117, 4, 4},
+ {WEDGE_OBLIQUE153, 4, 4},
+ {WEDGE_VERTICAL, 2, 4},
+ {WEDGE_VERTICAL, 4, 4},
+ {WEDGE_VERTICAL, 6, 4},
+ {WEDGE_HORIZONTAL, 4, 4},
+ {WEDGE_OBLIQUE27, 4, 2},
+ {WEDGE_OBLIQUE27, 4, 6},
+ {WEDGE_OBLIQUE153, 4, 2},
+ {WEDGE_OBLIQUE153, 4, 6},
+ {WEDGE_OBLIQUE63, 2, 4},
+ {WEDGE_OBLIQUE63, 6, 4},
+ {WEDGE_OBLIQUE117, 2, 4},
+ {WEDGE_OBLIQUE117, 6, 4},
+};
+
+static const wedge_code_type wedge_codebook_16_heqw[16] = {
+ {WEDGE_OBLIQUE27, 4, 4},
+ {WEDGE_OBLIQUE63, 4, 4},
+ {WEDGE_OBLIQUE117, 4, 4},
+ {WEDGE_OBLIQUE153, 4, 4},
+ {WEDGE_HORIZONTAL, 4, 2},
+ {WEDGE_HORIZONTAL, 4, 6},
+ {WEDGE_VERTICAL, 2, 4},
+ {WEDGE_VERTICAL, 6, 4},
+ {WEDGE_OBLIQUE27, 4, 2},
+ {WEDGE_OBLIQUE27, 4, 6},
+ {WEDGE_OBLIQUE153, 4, 2},
+ {WEDGE_OBLIQUE153, 4, 6},
+ {WEDGE_OBLIQUE63, 2, 4},
+ {WEDGE_OBLIQUE63, 6, 4},
+ {WEDGE_OBLIQUE117, 2, 4},
+ {WEDGE_OBLIQUE117, 6, 4},
+};
+
+const wedge_params_type wedge_params_lookup[BLOCK_SIZES] = {
+ {0, NULL, NULL, 0},
+ {0, NULL, NULL, 0},
+ {0, NULL, NULL, 0},
+ {4, wedge_codebook_16_heqw, wedge_signflip_lookup[3], 0},
+ {4, wedge_codebook_16_hgtw, wedge_signflip_lookup[4], 0},
+ {4, wedge_codebook_16_hltw, wedge_signflip_lookup[5], 0},
+ {4, wedge_codebook_16_heqw, wedge_signflip_lookup[6], 0},
+ {4, wedge_codebook_16_hgtw, wedge_signflip_lookup[7], 0},
+ {4, wedge_codebook_16_hltw, wedge_signflip_lookup[8], 0},
+ {4, wedge_codebook_16_heqw, wedge_signflip_lookup[9], 0},
+ {0, wedge_codebook_8_hgtw, wedge_signflip_lookup[10], 0},
+ {0, wedge_codebook_8_hltw, wedge_signflip_lookup[11], 0},
+ {0, wedge_codebook_8_heqw, wedge_signflip_lookup[12], 0},
+#if CONFIG_EXT_PARTITION
+ {0, NULL, NULL, 0},
+ {0, NULL, NULL, 0},
+ {0, NULL, NULL, 0},
+#endif // CONFIG_EXT_PARTITION
+};
+
+#else
+
+static const wedge_code_type wedge_codebook_32_hgtw[32] = {
+ {WEDGE_OBLIQUE27, 4, 4},
+ {WEDGE_OBLIQUE63, 4, 4},
+ {WEDGE_OBLIQUE117, 4, 4},
+ {WEDGE_OBLIQUE153, 4, 4},
+ {WEDGE_HORIZONTAL, 4, 2},
+ {WEDGE_HORIZONTAL, 4, 4},
+ {WEDGE_HORIZONTAL, 4, 6},
+ {WEDGE_VERTICAL, 4, 4},
+ {WEDGE_OBLIQUE27, 4, 1},
+ {WEDGE_OBLIQUE27, 4, 2},
+ {WEDGE_OBLIQUE27, 4, 3},
+ {WEDGE_OBLIQUE27, 4, 5},
+ {WEDGE_OBLIQUE27, 4, 6},
+ {WEDGE_OBLIQUE27, 4, 7},
+ {WEDGE_OBLIQUE153, 4, 1},
+ {WEDGE_OBLIQUE153, 4, 2},
+ {WEDGE_OBLIQUE153, 4, 3},
+ {WEDGE_OBLIQUE153, 4, 5},
+ {WEDGE_OBLIQUE153, 4, 6},
+ {WEDGE_OBLIQUE153, 4, 7},
+ {WEDGE_OBLIQUE63, 1, 4},
+ {WEDGE_OBLIQUE63, 2, 4},
+ {WEDGE_OBLIQUE63, 3, 4},
+ {WEDGE_OBLIQUE63, 5, 4},
+ {WEDGE_OBLIQUE63, 6, 4},
+ {WEDGE_OBLIQUE63, 7, 4},
+ {WEDGE_OBLIQUE117, 1, 4},
+ {WEDGE_OBLIQUE117, 2, 4},
+ {WEDGE_OBLIQUE117, 3, 4},
+ {WEDGE_OBLIQUE117, 5, 4},
+ {WEDGE_OBLIQUE117, 6, 4},
+ {WEDGE_OBLIQUE117, 7, 4},
+};
+
+static const wedge_code_type wedge_codebook_32_hltw[32] = {
+ {WEDGE_OBLIQUE27, 4, 4},
+ {WEDGE_OBLIQUE63, 4, 4},
+ {WEDGE_OBLIQUE117, 4, 4},
+ {WEDGE_OBLIQUE153, 4, 4},
+ {WEDGE_VERTICAL, 2, 4},
+ {WEDGE_VERTICAL, 4, 4},
+ {WEDGE_VERTICAL, 6, 4},
+ {WEDGE_HORIZONTAL, 4, 4},
+ {WEDGE_OBLIQUE27, 4, 1},
+ {WEDGE_OBLIQUE27, 4, 2},
+ {WEDGE_OBLIQUE27, 4, 3},
+ {WEDGE_OBLIQUE27, 4, 5},
+ {WEDGE_OBLIQUE27, 4, 6},
+ {WEDGE_OBLIQUE27, 4, 7},
+ {WEDGE_OBLIQUE153, 4, 1},
+ {WEDGE_OBLIQUE153, 4, 2},
+ {WEDGE_OBLIQUE153, 4, 3},
+ {WEDGE_OBLIQUE153, 4, 5},
+ {WEDGE_OBLIQUE153, 4, 6},
+ {WEDGE_OBLIQUE153, 4, 7},
+ {WEDGE_OBLIQUE63, 1, 4},
+ {WEDGE_OBLIQUE63, 2, 4},
+ {WEDGE_OBLIQUE63, 3, 4},
+ {WEDGE_OBLIQUE63, 5, 4},
+ {WEDGE_OBLIQUE63, 6, 4},
+ {WEDGE_OBLIQUE63, 7, 4},
+ {WEDGE_OBLIQUE117, 1, 4},
+ {WEDGE_OBLIQUE117, 2, 4},
+ {WEDGE_OBLIQUE117, 3, 4},
+ {WEDGE_OBLIQUE117, 5, 4},
+ {WEDGE_OBLIQUE117, 6, 4},
+ {WEDGE_OBLIQUE117, 7, 4},
+};
+
+static const wedge_code_type wedge_codebook_32_heqw[32] = {
+ {WEDGE_OBLIQUE27, 4, 4},
+ {WEDGE_OBLIQUE63, 4, 4},
+ {WEDGE_OBLIQUE117, 4, 4},
+ {WEDGE_OBLIQUE153, 4, 4},
+ {WEDGE_HORIZONTAL, 4, 2},
+ {WEDGE_HORIZONTAL, 4, 6},
+ {WEDGE_VERTICAL, 2, 4},
+ {WEDGE_VERTICAL, 6, 4},
+ {WEDGE_OBLIQUE27, 4, 1},
+ {WEDGE_OBLIQUE27, 4, 2},
+ {WEDGE_OBLIQUE27, 4, 3},
+ {WEDGE_OBLIQUE27, 4, 5},
+ {WEDGE_OBLIQUE27, 4, 6},
+ {WEDGE_OBLIQUE27, 4, 7},
+ {WEDGE_OBLIQUE153, 4, 1},
+ {WEDGE_OBLIQUE153, 4, 2},
+ {WEDGE_OBLIQUE153, 4, 3},
+ {WEDGE_OBLIQUE153, 4, 5},
+ {WEDGE_OBLIQUE153, 4, 6},
+ {WEDGE_OBLIQUE153, 4, 7},
+ {WEDGE_OBLIQUE63, 1, 4},
+ {WEDGE_OBLIQUE63, 2, 4},
+ {WEDGE_OBLIQUE63, 3, 4},
+ {WEDGE_OBLIQUE63, 5, 4},
+ {WEDGE_OBLIQUE63, 6, 4},
+ {WEDGE_OBLIQUE63, 7, 4},
+ {WEDGE_OBLIQUE117, 1, 4},
+ {WEDGE_OBLIQUE117, 2, 4},
+ {WEDGE_OBLIQUE117, 3, 4},
+ {WEDGE_OBLIQUE117, 5, 4},
+ {WEDGE_OBLIQUE117, 6, 4},
+ {WEDGE_OBLIQUE117, 7, 4},
+};
+
+const wedge_params_type wedge_params_lookup[BLOCK_SIZES] = {
+ {0, NULL, NULL, 0},
+ {0, NULL, NULL, 0},
+ {0, NULL, NULL, 0},
+ {5, wedge_codebook_32_heqw, wedge_signflip_lookup[3], 0},
+ {5, wedge_codebook_32_hgtw, wedge_signflip_lookup[4], 0},
+ {5, wedge_codebook_32_hltw, wedge_signflip_lookup[5], 0},
+ {5, wedge_codebook_32_heqw, wedge_signflip_lookup[6], 0},
+ {5, wedge_codebook_32_hgtw, wedge_signflip_lookup[7], 0},
+ {5, wedge_codebook_32_hltw, wedge_signflip_lookup[8], 0},
+ {5, wedge_codebook_32_heqw, wedge_signflip_lookup[9], 0},
+ {0, wedge_codebook_8_hgtw, wedge_signflip_lookup[10], 0},
+ {0, wedge_codebook_8_hltw, wedge_signflip_lookup[11], 0},
+ {0, wedge_codebook_8_heqw, wedge_signflip_lookup[12], 0},
+#if CONFIG_EXT_PARTITION
+ {0, NULL, NULL, 0},
+ {0, NULL, NULL, 0},
+ {0, NULL, NULL, 0},
+#endif // CONFIG_EXT_PARTITION
+};
+#endif // USE_LARGE_WEDGE_CODEBOOK
+
+static const uint8_t *get_wedge_mask_inplace(int wedge_index,
+ int neg,
+ BLOCK_SIZE sb_type) {
+ const uint8_t *master;
+ const int bh = 4 << b_height_log2_lookup[sb_type];
+ const int bw = 4 << b_width_log2_lookup[sb_type];
+ const wedge_code_type *a =
+ wedge_params_lookup[sb_type].codebook + wedge_index;
+ const int smoother = wedge_params_lookup[sb_type].smoother;
+ int woff, hoff;
+ const uint8_t wsignflip = wedge_params_lookup[sb_type].signflip[wedge_index];
+
+ assert(wedge_index >= 0 &&
+ wedge_index < (1 << get_wedge_bits_lookup(sb_type)));
+ woff = (a->x_offset * bw) >> 3;
+ hoff = (a->y_offset * bh) >> 3;
+ master = wedge_mask_obl[smoother][neg ^ wsignflip][a->direction] +
+ MASK_MASTER_STRIDE * (MASK_MASTER_SIZE / 2 - hoff) +
+ MASK_MASTER_SIZE / 2 - woff;
+ return master;
+}
+
+const uint8_t *vp10_get_soft_mask(int wedge_index,
+ int wedge_sign,
+ BLOCK_SIZE sb_type,
+ int offset_x,
+ int offset_y) {
+ const uint8_t *mask =
+ get_wedge_mask_inplace(wedge_index, wedge_sign, sb_type);
+ if (mask)
+ mask -= (offset_x + offset_y * MASK_MASTER_STRIDE);
+ return mask;
+}
+
+// If the signs for the wedges for various blocksizes are
+// inconsistent flip the sign flag. Do it only once for every
+// wedge codebook.
+static void init_wedge_signs() {
+ BLOCK_SIZE sb_type;
+ memset(wedge_signflip_lookup, 0, sizeof(wedge_signflip_lookup));
+ for (sb_type = BLOCK_4X4; sb_type < BLOCK_SIZES; ++sb_type) {
+ const int bw = 4 * num_4x4_blocks_wide_lookup[sb_type];
+ const int bh = 4 * num_4x4_blocks_high_lookup[sb_type];
+ const wedge_params_type wedge_params = wedge_params_lookup[sb_type];
+ const int wbits = wedge_params.bits;
+ const int wtypes = 1 << wbits;
+ int i, w;
+ if (wbits == 0) continue;
+ for (w = 0; w < wtypes; ++w) {
+ const uint8_t *mask = get_wedge_mask_inplace(w, 0, sb_type);
+ int sum = 0;
+ for (i = 0; i < bw; ++i)
+ sum += mask[i];
+ for (i = 0; i < bh; ++i)
+ sum += mask[i * MASK_MASTER_STRIDE];
+ sum = (sum + (bw + bh) / 2) / (bw + bh);
+ wedge_params.signflip[w] = (sum < 32);
+ }
+ }
+}
+
// Equation of line: f(x, y) = a[0]*(x - a[2]*w/8) + a[1]*(y - a[3]*h/8) = 0
void vp10_init_wedge_masks() {
int i, j, s;
@@ -111,304 +401,7 @@
(1 << WEDGE_WEIGHT_BITS) - get_masked_weight(x, s);
}
}
-}
-
-static const int wedge_params_4[1 << WEDGE_BITS_2]
- [WEDGE_PARMS] = {
- {WEDGE_OBLIQUE27, 4, 4, 1},
- {WEDGE_OBLIQUE63, 4, 4, 1},
- {WEDGE_OBLIQUE117, 4, 4, 1},
- {WEDGE_OBLIQUE153, 4, 4, 1},
-};
-
-static const int wedge_params_8_hgtw[1 << WEDGE_BITS_3]
- [WEDGE_PARMS] = {
- {WEDGE_OBLIQUE27, 4, 4, 1},
- {WEDGE_OBLIQUE63, 4, 4, 1},
- {WEDGE_OBLIQUE117, 4, 4, 1},
- {WEDGE_OBLIQUE153, 4, 4, 1},
-
- {WEDGE_OBLIQUE27, 4, 2, 1},
- {WEDGE_OBLIQUE27, 4, 6, 1},
- {WEDGE_OBLIQUE153, 4, 2, 1},
- {WEDGE_OBLIQUE153, 4, 6, 1},
-};
-
-static const int wedge_params_8_hltw[1 << WEDGE_BITS_3]
- [WEDGE_PARMS] = {
- {WEDGE_OBLIQUE27, 4, 4, 1},
- {WEDGE_OBLIQUE63, 4, 4, 1},
- {WEDGE_OBLIQUE117, 4, 4, 1},
- {WEDGE_OBLIQUE153, 4, 4, 1},
-
- {WEDGE_OBLIQUE63, 2, 4, 1},
- {WEDGE_OBLIQUE63, 6, 4, 1},
- {WEDGE_OBLIQUE117, 2, 4, 1},
- {WEDGE_OBLIQUE117, 6, 4, 1},
-};
-
-static const int wedge_params_8_heqw[1 << WEDGE_BITS_3]
- [WEDGE_PARMS] = {
- {WEDGE_OBLIQUE27, 4, 4, 1},
- {WEDGE_OBLIQUE63, 4, 4, 1},
- {WEDGE_OBLIQUE117, 4, 4, 1},
- {WEDGE_OBLIQUE153, 4, 4, 1},
-
- {WEDGE_HORIZONTAL, 4, 2, 1},
- {WEDGE_HORIZONTAL, 4, 6, 1},
- {WEDGE_VERTICAL, 2, 4, 1},
- {WEDGE_VERTICAL, 6, 4, 1},
-};
-
-static const int wedge_params_16_hgtw[1 << WEDGE_BITS_4]
- [WEDGE_PARMS] = {
- {WEDGE_OBLIQUE27, 4, 4, 0},
- {WEDGE_OBLIQUE63, 4, 4, 0},
- {WEDGE_OBLIQUE117, 4, 4, 0},
- {WEDGE_OBLIQUE153, 4, 4, 0},
-
- {WEDGE_HORIZONTAL, 4, 2, 0},
- {WEDGE_HORIZONTAL, 4, 4, 0},
- {WEDGE_HORIZONTAL, 4, 6, 0},
- {WEDGE_VERTICAL, 4, 4, 0},
-
- {WEDGE_OBLIQUE27, 4, 2, 0},
- {WEDGE_OBLIQUE27, 4, 6, 0},
- {WEDGE_OBLIQUE153, 4, 2, 0},
- {WEDGE_OBLIQUE153, 4, 6, 0},
-
- {WEDGE_OBLIQUE63, 2, 4, 0},
- {WEDGE_OBLIQUE63, 6, 4, 0},
- {WEDGE_OBLIQUE117, 2, 4, 0},
- {WEDGE_OBLIQUE117, 6, 4, 0},
-};
-
-static const int wedge_params_16_hltw[1 << WEDGE_BITS_4]
- [WEDGE_PARMS] = {
- {WEDGE_OBLIQUE27, 4, 4, 0},
- {WEDGE_OBLIQUE63, 4, 4, 0},
- {WEDGE_OBLIQUE117, 4, 4, 0},
- {WEDGE_OBLIQUE153, 4, 4, 0},
-
- {WEDGE_VERTICAL, 2, 4, 0},
- {WEDGE_VERTICAL, 4, 4, 0},
- {WEDGE_VERTICAL, 6, 4, 0},
- {WEDGE_HORIZONTAL, 4, 4, 0},
-
- {WEDGE_OBLIQUE27, 4, 2, 0},
- {WEDGE_OBLIQUE27, 4, 6, 0},
- {WEDGE_OBLIQUE153, 4, 2, 0},
- {WEDGE_OBLIQUE153, 4, 6, 0},
-
- {WEDGE_OBLIQUE63, 2, 4, 0},
- {WEDGE_OBLIQUE63, 6, 4, 0},
- {WEDGE_OBLIQUE117, 2, 4, 0},
- {WEDGE_OBLIQUE117, 6, 4, 0},
-};
-
-static const int wedge_params_16_heqw[1 << WEDGE_BITS_4]
- [WEDGE_PARMS] = {
- {WEDGE_OBLIQUE27, 4, 4, 0},
- {WEDGE_OBLIQUE63, 4, 4, 0},
- {WEDGE_OBLIQUE117, 4, 4, 0},
- {WEDGE_OBLIQUE153, 4, 4, 0},
-
- {WEDGE_HORIZONTAL, 4, 2, 0},
- {WEDGE_HORIZONTAL, 4, 6, 0},
- {WEDGE_VERTICAL, 2, 4, 0},
- {WEDGE_VERTICAL, 6, 4, 0},
-
- {WEDGE_OBLIQUE27, 4, 2, 0},
- {WEDGE_OBLIQUE27, 4, 6, 0},
- {WEDGE_OBLIQUE153, 4, 2, 0},
- {WEDGE_OBLIQUE153, 4, 6, 0},
-
- {WEDGE_OBLIQUE63, 2, 4, 0},
- {WEDGE_OBLIQUE63, 6, 4, 0},
- {WEDGE_OBLIQUE117, 2, 4, 0},
- {WEDGE_OBLIQUE117, 6, 4, 0},
-};
-
-static const int wedge_params_32_hgtw[1 << WEDGE_BITS_5]
- [WEDGE_PARMS] = {
- {WEDGE_OBLIQUE27, 4, 4, 0},
- {WEDGE_OBLIQUE63, 4, 4, 0},
- {WEDGE_OBLIQUE117, 4, 4, 0},
- {WEDGE_OBLIQUE153, 4, 4, 0},
-
- {WEDGE_HORIZONTAL, 4, 2, 0},
- {WEDGE_HORIZONTAL, 4, 4, 0},
- {WEDGE_HORIZONTAL, 4, 6, 0},
- {WEDGE_VERTICAL, 4, 4, 0},
-
- {WEDGE_OBLIQUE27, 4, 1, 0},
- {WEDGE_OBLIQUE27, 4, 2, 0},
- {WEDGE_OBLIQUE27, 4, 3, 0},
- {WEDGE_OBLIQUE27, 4, 5, 0},
- {WEDGE_OBLIQUE27, 4, 6, 0},
- {WEDGE_OBLIQUE27, 4, 7, 0},
-
- {WEDGE_OBLIQUE153, 4, 1, 0},
- {WEDGE_OBLIQUE153, 4, 2, 0},
- {WEDGE_OBLIQUE153, 4, 3, 0},
- {WEDGE_OBLIQUE153, 4, 5, 0},
- {WEDGE_OBLIQUE153, 4, 6, 0},
- {WEDGE_OBLIQUE153, 4, 7, 0},
-
- {WEDGE_OBLIQUE63, 1, 4, 0},
- {WEDGE_OBLIQUE63, 2, 4, 0},
- {WEDGE_OBLIQUE63, 3, 4, 0},
- {WEDGE_OBLIQUE63, 5, 4, 0},
- {WEDGE_OBLIQUE63, 6, 4, 0},
- {WEDGE_OBLIQUE63, 7, 4, 0},
-
- {WEDGE_OBLIQUE117, 1, 4, 0},
- {WEDGE_OBLIQUE117, 2, 4, 0},
- {WEDGE_OBLIQUE117, 3, 4, 0},
- {WEDGE_OBLIQUE117, 5, 4, 0},
- {WEDGE_OBLIQUE117, 6, 4, 0},
- {WEDGE_OBLIQUE117, 7, 4, 0},
-};
-
-static const int wedge_params_32_hltw[1 << WEDGE_BITS_5]
- [WEDGE_PARMS] = {
- {WEDGE_OBLIQUE27, 4, 4, 0},
- {WEDGE_OBLIQUE63, 4, 4, 0},
- {WEDGE_OBLIQUE117, 4, 4, 0},
- {WEDGE_OBLIQUE153, 4, 4, 0},
-
- {WEDGE_VERTICAL, 2, 4, 0},
- {WEDGE_VERTICAL, 4, 4, 0},
- {WEDGE_VERTICAL, 6, 4, 0},
- {WEDGE_HORIZONTAL, 4, 4, 0},
-
- {WEDGE_OBLIQUE27, 4, 1, 0},
- {WEDGE_OBLIQUE27, 4, 2, 0},
- {WEDGE_OBLIQUE27, 4, 3, 0},
- {WEDGE_OBLIQUE27, 4, 5, 0},
- {WEDGE_OBLIQUE27, 4, 6, 0},
- {WEDGE_OBLIQUE27, 4, 7, 0},
-
- {WEDGE_OBLIQUE153, 4, 1, 0},
- {WEDGE_OBLIQUE153, 4, 2, 0},
- {WEDGE_OBLIQUE153, 4, 3, 0},
- {WEDGE_OBLIQUE153, 4, 5, 0},
- {WEDGE_OBLIQUE153, 4, 6, 0},
- {WEDGE_OBLIQUE153, 4, 7, 0},
-
- {WEDGE_OBLIQUE63, 1, 4, 0},
- {WEDGE_OBLIQUE63, 2, 4, 0},
- {WEDGE_OBLIQUE63, 3, 4, 0},
- {WEDGE_OBLIQUE63, 5, 4, 0},
- {WEDGE_OBLIQUE63, 6, 4, 0},
- {WEDGE_OBLIQUE63, 7, 4, 0},
-
- {WEDGE_OBLIQUE117, 1, 4, 0},
- {WEDGE_OBLIQUE117, 2, 4, 0},
- {WEDGE_OBLIQUE117, 3, 4, 0},
- {WEDGE_OBLIQUE117, 5, 4, 0},
- {WEDGE_OBLIQUE117, 6, 4, 0},
- {WEDGE_OBLIQUE117, 7, 4, 0},
-};
-
-static const int wedge_params_32_heqw[1 << WEDGE_BITS_5]
- [WEDGE_PARMS] = {
- {WEDGE_OBLIQUE27, 4, 4, 0},
- {WEDGE_OBLIQUE63, 4, 4, 0},
- {WEDGE_OBLIQUE117, 4, 4, 0},
- {WEDGE_OBLIQUE153, 4, 4, 0},
-
- {WEDGE_HORIZONTAL, 4, 2, 0},
- {WEDGE_HORIZONTAL, 4, 6, 0},
- {WEDGE_VERTICAL, 2, 4, 0},
- {WEDGE_VERTICAL, 6, 4, 0},
-
- {WEDGE_OBLIQUE27, 4, 1, 0},
- {WEDGE_OBLIQUE27, 4, 2, 0},
- {WEDGE_OBLIQUE27, 4, 3, 0},
- {WEDGE_OBLIQUE27, 4, 5, 0},
- {WEDGE_OBLIQUE27, 4, 6, 0},
- {WEDGE_OBLIQUE27, 4, 7, 0},
-
- {WEDGE_OBLIQUE153, 4, 1, 0},
- {WEDGE_OBLIQUE153, 4, 2, 0},
- {WEDGE_OBLIQUE153, 4, 3, 0},
- {WEDGE_OBLIQUE153, 4, 5, 0},
- {WEDGE_OBLIQUE153, 4, 6, 0},
- {WEDGE_OBLIQUE153, 4, 7, 0},
-
- {WEDGE_OBLIQUE63, 1, 4, 0},
- {WEDGE_OBLIQUE63, 2, 4, 0},
- {WEDGE_OBLIQUE63, 3, 4, 0},
- {WEDGE_OBLIQUE63, 5, 4, 0},
- {WEDGE_OBLIQUE63, 6, 4, 0},
- {WEDGE_OBLIQUE63, 7, 4, 0},
-
- {WEDGE_OBLIQUE117, 1, 4, 0},
- {WEDGE_OBLIQUE117, 2, 4, 0},
- {WEDGE_OBLIQUE117, 3, 4, 0},
- {WEDGE_OBLIQUE117, 5, 4, 0},
- {WEDGE_OBLIQUE117, 6, 4, 0},
- {WEDGE_OBLIQUE117, 7, 4, 0},
-};
-
-static const int *get_wedge_params_lookup[BLOCK_SIZES] = {
- NULL,
- NULL,
- NULL,
- &wedge_params_16_heqw[0][0],
- &wedge_params_16_hgtw[0][0],
- &wedge_params_16_hltw[0][0],
- &wedge_params_16_heqw[0][0],
- &wedge_params_16_hgtw[0][0],
- &wedge_params_16_hltw[0][0],
- &wedge_params_16_heqw[0][0],
- NULL,
- NULL,
- NULL,
-#if CONFIG_EXT_PARTITION
- NULL,
- NULL,
- NULL,
-#endif // CONFIG_EXT_PARTITION
-};
-
-static const int *get_wedge_params(int wedge_index,
- BLOCK_SIZE sb_type) {
- const int *a = NULL;
- if (wedge_index != WEDGE_NONE) {
- return get_wedge_params_lookup[sb_type] + WEDGE_PARMS * wedge_index;
- }
- return a;
-}
-
-static const uint8_t *get_wedge_mask_inplace(int wedge_index,
- int neg,
- BLOCK_SIZE sb_type) {
- const uint8_t *master;
- const int bh = 4 << b_height_log2_lookup[sb_type];
- const int bw = 4 << b_width_log2_lookup[sb_type];
- const int *a = get_wedge_params(wedge_index, sb_type);
- int woff, hoff;
- if (!a) return NULL;
- woff = (a[1] * bw) >> 3;
- hoff = (a[2] * bh) >> 3;
- master = wedge_mask_obl[a[3]][neg][a[0]] +
- MASK_MASTER_STRIDE * (MASK_MASTER_SIZE / 2 - hoff) +
- MASK_MASTER_SIZE / 2 - woff;
- return master;
-}
-
-const uint8_t *vp10_get_soft_mask(int wedge_index,
- int wedge_sign,
- BLOCK_SIZE sb_type,
- int offset_x,
- int offset_y) {
- const uint8_t *mask =
- get_wedge_mask_inplace(wedge_index, wedge_sign, sb_type);
- if (mask)
- mask -= (offset_x + offset_y * MASK_MASTER_STRIDE);
- return mask;
+ init_wedge_signs();
}
@@ -655,6 +648,74 @@
const int is_compound = has_second_ref(&mi->mbmi);
int ref;
+#if CONFIG_DUAL_FILTER
+ if (mi->mbmi.sb_type < BLOCK_8X8 && plane > 0) {
+ int blk_num = 1 << (pd->subsampling_x + pd->subsampling_y);
+ int chr_idx;
+ int x_base = x;
+ int y_base = y;
+ int x_step = w >> pd->subsampling_x;
+ int y_step = h >> pd->subsampling_y;
+
+ for (chr_idx = 0; chr_idx < blk_num; ++chr_idx) {
+ for (ref = 0; ref < 1 + is_compound; ++ref) {
+ const struct scale_factors *const sf = &xd->block_refs[ref]->sf;
+ struct buf_2d *const pre_buf = &pd->pre[ref];
+ struct buf_2d *const dst_buf = &pd->dst;
+ uint8_t *dst = dst_buf->buf;
+ const MV mv = mi->bmi[chr_idx].as_mv[ref].as_mv;
+ const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh,
+ pd->subsampling_x,
+ pd->subsampling_y);
+ uint8_t *pre;
+ MV32 scaled_mv;
+ int xs, ys, subpel_x, subpel_y;
+ const int is_scaled = vp10_is_scaled(sf);
+
+ x = x_base + (chr_idx & 0x01) * x_step;
+ y = y_base + (chr_idx >> 1) * y_step;
+
+ dst += dst_buf->stride * y + x;
+
+ if (is_scaled) {
+ pre = pre_buf->buf + scaled_buffer_offset(x, y, pre_buf->stride, sf);
+ scaled_mv = vp10_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
+ xs = sf->x_step_q4;
+ ys = sf->y_step_q4;
+ } else {
+ pre = pre_buf->buf + y * pre_buf->stride + x;
+ scaled_mv.row = mv_q4.row;
+ scaled_mv.col = mv_q4.col;
+ xs = ys = 16;
+ }
+
+ subpel_x = scaled_mv.col & SUBPEL_MASK;
+ subpel_y = scaled_mv.row & SUBPEL_MASK;
+ pre += (scaled_mv.row >> SUBPEL_BITS) * pre_buf->stride
+ + (scaled_mv.col >> SUBPEL_BITS);
+
+ #if CONFIG_EXT_INTER
+ if (ref && is_interinter_wedge_used(mi->mbmi.sb_type) &&
+ mi->mbmi.use_wedge_interinter)
+ vp10_make_masked_inter_predictor(
+ pre, pre_buf->stride, dst, dst_buf->stride,
+ subpel_x, subpel_y, sf, w, h,
+ mi->mbmi.interp_filter, xs, ys,
+ #if CONFIG_SUPERTX
+ wedge_offset_x, wedge_offset_y,
+ #endif // CONFIG_SUPERTX
+ xd);
+ else
+ #endif // CONFIG_EXT_INTER
+ vp10_make_inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
+ subpel_x, subpel_y, sf, x_step, y_step, ref,
+ mi->mbmi.interp_filter, xs, ys, xd);
+ }
+ }
+ return;
+ }
+#endif
+
for (ref = 0; ref < 1 + is_compound; ++ref) {
const struct scale_factors *const sf = &xd->block_refs[ref]->sf;
struct buf_2d *const pre_buf = &pd->pre[ref];
diff --git a/vp10/common/reconinter.h b/vp10/common/reconinter.h
index c4a0978..5d9a6f9 100644
--- a/vp10/common/reconinter.h
+++ b/vp10/common/reconinter.h
@@ -44,7 +44,8 @@
#if CONFIG_DUAL_FILTER
if (interp_filter_params_x.taps == SUBPEL_TAPS &&
- interp_filter_params_y.taps == SUBPEL_TAPS) {
+ interp_filter_params_y.taps == SUBPEL_TAPS &&
+ w > 2 && h > 2) {
const int16_t *kernel_x =
vp10_get_interp_filter_subpel_kernel(interp_filter_params_x, subpel_x);
const int16_t *kernel_y =
@@ -106,7 +107,8 @@
#if CONFIG_DUAL_FILTER
if (interp_filter_params_x.taps == SUBPEL_TAPS &&
- interp_filter_params_y.taps == SUBPEL_TAPS) {
+ interp_filter_params_y.taps == SUBPEL_TAPS &&
+ w > 2 && h > 2) {
const int16_t *kernel_x =
vp10_get_interp_filter_subpel_kernel(interp_filter_params_x, subpel_x);
const int16_t *kernel_y =
@@ -147,51 +149,60 @@
#endif // CONFIG_VP9_HIGHBITDEPTH
#if CONFIG_EXT_INTER
-#define WEDGE_BITS_2 2
-#define WEDGE_BITS_3 3
-#define WEDGE_BITS_4 4
-#define WEDGE_BITS_5 5
-#define WEDGE_NONE -1
+#define MAX_WEDGE_TYPES (1 << 5)
+
#define WEDGE_WEIGHT_BITS 6
-static const int get_wedge_bits_lookup[BLOCK_SIZES] = {
- 0,
- 0,
- 0,
- WEDGE_BITS_4,
- WEDGE_BITS_4,
- WEDGE_BITS_4,
- WEDGE_BITS_4,
- WEDGE_BITS_4,
- WEDGE_BITS_4,
- WEDGE_BITS_4,
- 0,
- 0,
- 0,
-#if CONFIG_EXT_PARTITION
- 0,
- 0,
- 0,
-#endif // CONFIG_EXT_PARTITION
-};
+#define WEDGE_NONE -1
+
+// Angles are with respect to horizontal anti-clockwise
+typedef enum {
+ WEDGE_HORIZONTAL = 0,
+ WEDGE_VERTICAL = 1,
+ WEDGE_OBLIQUE27 = 2,
+ WEDGE_OBLIQUE63 = 3,
+ WEDGE_OBLIQUE117 = 4,
+ WEDGE_OBLIQUE153 = 5,
+ WEDGE_DIRECTIONS
+} WedgeDirectionType;
+
+// 3-tuple: {direction, x_offset, y_offset}
+typedef struct {
+ WedgeDirectionType direction;
+ int x_offset;
+ int y_offset;
+} wedge_code_type;
+
+typedef struct {
+ int bits;
+ const wedge_code_type *codebook;
+ uint8_t *signflip;
+ int smoother;
+} wedge_params_type;
+
+extern const wedge_params_type wedge_params_lookup[BLOCK_SIZES];
+
+static INLINE int get_wedge_bits_lookup(BLOCK_SIZE sb_type) {
+ return wedge_params_lookup[sb_type].bits;
+}
static INLINE int is_interinter_wedge_used(BLOCK_SIZE sb_type) {
(void) sb_type;
- return get_wedge_bits_lookup[sb_type] > 0;
+ return wedge_params_lookup[sb_type].bits > 0;
}
static INLINE int get_interinter_wedge_bits(BLOCK_SIZE sb_type) {
- const int wbits = get_wedge_bits_lookup[sb_type];
+ const int wbits = wedge_params_lookup[sb_type].bits;
return (wbits > 0) ? wbits + 1 : 0;
}
static INLINE int is_interintra_wedge_used(BLOCK_SIZE sb_type) {
(void) sb_type;
- return get_wedge_bits_lookup[sb_type] > 0;
+ return wedge_params_lookup[sb_type].bits > 0;
}
static INLINE int get_interintra_wedge_bits(BLOCK_SIZE sb_type) {
- return get_wedge_bits_lookup[sb_type];
+ return wedge_params_lookup[sb_type].bits;
}
#endif // CONFIG_EXT_INTER
diff --git a/vp10/common/reconintra.c b/vp10/common/reconintra.c
index 6b4a460..fa20f2c 100644
--- a/vp10/common/reconintra.c
+++ b/vp10/common/reconintra.c
@@ -391,7 +391,6 @@
#if CONFIG_EXT_INTRA
#define FILTER_INTRA_PREC_BITS 10
-#define FILTER_INTRA_ROUND_VAL 511
static const uint8_t ext_intra_extend_modes[FILTER_INTRA_MODES] = {
NEED_LEFT | NEED_ABOVE, // FILTER_DC
@@ -774,9 +773,7 @@
for (c = 1; c < 2 * bs + 1 - r; ++c) {
ipred = c0 * pred[r - 1][c] + c1 * pred[r][c - 1] +
c2 * pred[r - 1][c - 1] + c3 * pred[r - 1][c + 1];
- pred[r][c] = ipred < 0 ?
- -((-ipred + FILTER_INTRA_ROUND_VAL) >> FILTER_INTRA_PREC_BITS) :
- ((ipred + FILTER_INTRA_ROUND_VAL) >> FILTER_INTRA_PREC_BITS);
+ pred[r][c] = ROUND_POWER_OF_TWO_SIGNED(ipred, FILTER_INTRA_PREC_BITS);
}
for (r = 0; r < bs; ++r) {
@@ -1050,9 +1047,7 @@
for (c = 1; c < 2 * bs + 1 - r; ++c) {
ipred = c0 * pred[r - 1][c] + c1 * pred[r][c - 1] +
c2 * pred[r - 1][c - 1] + c3 * pred[r - 1][c + 1];
- pred[r][c] = ipred < 0 ?
- -((-ipred + FILTER_INTRA_ROUND_VAL) >> FILTER_INTRA_PREC_BITS) :
- ((ipred + FILTER_INTRA_ROUND_VAL) >> FILTER_INTRA_PREC_BITS);
+ pred[r][c] = ROUND_POWER_OF_TWO_SIGNED(ipred, FILTER_INTRA_PREC_BITS);
}
for (r = 0; r < bs; ++r) {
diff --git a/vp10/common/vp10_convolve.c b/vp10/common/vp10_convolve.c
index 6ffb425..d7e2eaf 100644
--- a/vp10/common/vp10_convolve.c
+++ b/vp10/common/vp10_convolve.c
@@ -320,13 +320,17 @@
int temp_stride = MAX_BLOCK_WIDTH;
#if CONFIG_DUAL_FILTER
- InterpFilterParams filter_params =
+ InterpFilterParams filter_params_x =
vp10_get_interp_filter_params(interp_filter[1 + 2 * ref_idx]);
+ InterpFilterParams filter_params_y =
+ vp10_get_interp_filter_params(interp_filter[0 + 2 * ref_idx]);
+ InterpFilterParams filter_params = filter_params_x;
+ int filter_size = filter_params_y.taps;
#else
InterpFilterParams filter_params =
vp10_get_interp_filter_params(interp_filter);
-#endif
int filter_size = filter_params.taps;
+#endif
int intermediate_height =
(((h - 1) * y_step_q4 + subpel_y_q4) >> SUBPEL_BITS) + filter_size;
@@ -336,9 +340,7 @@
filter_params, subpel_x_q4, x_step_q4, 0, bd);
#if CONFIG_DUAL_FILTER
- filter_params = vp10_get_interp_filter_params(interp_filter[2 * ref_idx]);
-#else
- filter_params = vp10_get_interp_filter_params(interp_filter);
+ filter_params = filter_params_y;
#endif
filter_size = filter_params.taps;
assert(filter_params.taps <= MAX_FILTER_TAP);
diff --git a/vp10/decoder/decodemv.c b/vp10/decoder/decodemv.c
index 72ab781..0863034 100644
--- a/vp10/decoder/decodemv.c
+++ b/vp10/decoder/decodemv.c
@@ -1577,7 +1577,7 @@
xd->counts->wedge_interintra[bsize][mbmi->use_wedge_interintra]++;
if (mbmi->use_wedge_interintra) {
mbmi->interintra_wedge_index =
- vp10_read_literal(r, get_wedge_bits_lookup[bsize]);
+ vp10_read_literal(r, get_wedge_bits_lookup(bsize));
mbmi->interintra_wedge_sign = 0;
}
}
@@ -1610,7 +1610,7 @@
xd->counts->wedge_interinter[bsize][mbmi->use_wedge_interinter]++;
if (mbmi->use_wedge_interinter) {
mbmi->interinter_wedge_index =
- vp10_read_literal(r, get_wedge_bits_lookup[bsize]);
+ vp10_read_literal(r, get_wedge_bits_lookup(bsize));
mbmi->interinter_wedge_sign = vp10_read_bit(r);
}
}
diff --git a/vp10/encoder/bitstream.c b/vp10/encoder/bitstream.c
index 0cbbc33..e491b06 100644
--- a/vp10/encoder/bitstream.c
+++ b/vp10/encoder/bitstream.c
@@ -1338,7 +1338,7 @@
cm->fc->wedge_interintra_prob[bsize]);
if (mbmi->use_wedge_interintra) {
vp10_write_literal(w, mbmi->interintra_wedge_index,
- get_wedge_bits_lookup[bsize]);
+ get_wedge_bits_lookup(bsize));
assert(mbmi->interintra_wedge_sign == 0);
}
}
@@ -1368,7 +1368,7 @@
cm->fc->wedge_interinter_prob[bsize]);
if (mbmi->use_wedge_interinter) {
vp10_write_literal(w, mbmi->interinter_wedge_index,
- get_wedge_bits_lookup[bsize]);
+ get_wedge_bits_lookup(bsize));
vp10_write_bit(w, mbmi->interinter_wedge_sign);
}
}
diff --git a/vp10/encoder/rdopt.c b/vp10/encoder/rdopt.c
index c27c887..31bf7b0 100644
--- a/vp10/encoder/rdopt.c
+++ b/vp10/encoder/rdopt.c
@@ -7146,7 +7146,7 @@
mbmi->use_wedge_interinter = 1;
rs = vp10_cost_literal(get_interinter_wedge_bits(bsize)) +
vp10_cost_bit(cm->fc->wedge_interinter_prob[bsize], 1);
- wedge_types = (1 << get_wedge_bits_lookup[bsize]);
+ wedge_types = (1 << get_wedge_bits_lookup(bsize));
vp10_build_inter_predictors_for_planes_single_buf(
xd, bsize, 0, 0, mi_row, mi_col, 0, preds0, strides);
@@ -7384,7 +7384,7 @@
// Disbale wedge search if source variance is small
if (x->source_variance > cpi->sf.disable_wedge_search_var_thresh) {
mbmi->use_wedge_interintra = 1;
- wedge_types = (1 << get_wedge_bits_lookup[bsize]);
+ wedge_types = (1 << get_wedge_bits_lookup(bsize));
rwedge = vp10_cost_literal(get_interintra_wedge_bits(bsize)) +
vp10_cost_bit(cm->fc->wedge_interintra_prob[bsize], 1);
for (wedge_index = 0; wedge_index < wedge_types; ++wedge_index) {
diff --git a/vpx_ports/mem.h b/vpx_ports/mem.h
index 4dce9c2..993124a 100644
--- a/vpx_ports/mem.h
+++ b/vpx_ports/mem.h
@@ -46,6 +46,11 @@
#define ROUNDZ_POWER_OF_TWO(value, n) \
((n) ? (((value) + (1 << ((n) - 1))) >> (n)) : (value))
+/* Shift down with rounding for signed integers, for use when n > 0 */
+#define ROUND_POWER_OF_TWO_SIGNED(value, n) \
+ (((value) < 0) ? -ROUND_POWER_OF_TWO(-(value), (n)) \
+ : ROUND_POWER_OF_TWO((value), (n)))
+
#define ALIGN_POWER_OF_TWO(value, n) \
(((value) + ((1 << (n)) - 1)) & ~((1 << (n)) - 1))