Add sse2/ssse3 intra predictors for 4x16 blocks

Covers the dc, dc_left, dc_top, dc_128, v, h, smooth, and paeth modes.
Change-Id: I328a0970d5fa08f6976160e730f3fccfc3bd7947
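
A 4x16 block has 4 above neighbors and 16 left neighbors, so the full DC predictor divides the combined sum by 20; the bias of 10 added in aom_dc_predictor_4x16_sse2 below is half the divisor, giving round-to-nearest. A minimal scalar sketch of the same arithmetic (the helper name dc_4x16_scalar is illustrative, not part of the patch):

#include <stddef.h>
#include <stdint.h>

// Scalar reference for the 4x16 DC predictor: round-to-nearest average
// of the 4 above and 16 left pixels, replicated over the whole block.
static void dc_4x16_scalar(uint8_t *dst, ptrdiff_t stride,
                           const uint8_t *above, const uint8_t *left) {
  uint32_t sum = 0;
  for (int i = 0; i < 4; ++i) sum += above[i];
  for (int i = 0; i < 16; ++i) sum += left[i];
  const uint8_t dc = (uint8_t)((sum + 10) / 20);  // +10 = 20 / 2 rounding bias
  for (int r = 0; r < 16; ++r) {
    for (int c = 0; c < 4; ++c) dst[c] = dc;
    dst += stride;
  }
}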
diff --git a/aom_dsp/aom_dsp_rtcd_defs.pl b/aom_dsp/aom_dsp_rtcd_defs.pl
index 75679f5..9dd162f 100755
--- a/aom_dsp/aom_dsp_rtcd_defs.pl
+++ b/aom_dsp/aom_dsp_rtcd_defs.pl
@@ -82,6 +82,7 @@
specialize qw/aom_dc_top_predictor_4x4 msa neon sse2/;
specialize qw/aom_dc_top_predictor_4x8 sse2/;
+specialize qw/aom_dc_top_predictor_4x16 sse2/;
specialize qw/aom_dc_top_predictor_8x4 sse2/;
specialize qw/aom_dc_top_predictor_8x8 neon msa sse2/;
specialize qw/aom_dc_top_predictor_8x16 sse2/;
@@ -97,6 +98,7 @@
specialize qw/aom_dc_top_predictor_64x16 sse2 avx2/;
specialize qw/aom_dc_left_predictor_4x4 msa neon sse2/;
specialize qw/aom_dc_left_predictor_4x8 sse2/;
+specialize qw/aom_dc_left_predictor_4x16 sse2/;
specialize qw/aom_dc_left_predictor_8x4 sse2/;
specialize qw/aom_dc_left_predictor_8x8 neon msa sse2/;
specialize qw/aom_dc_left_predictor_8x16 sse2/;
@@ -112,6 +114,7 @@
specialize qw/aom_dc_left_predictor_64x16 sse2 avx2/;
specialize qw/aom_dc_128_predictor_4x4 msa neon sse2/;
specialize qw/aom_dc_128_predictor_4x8 sse2/;
+specialize qw/aom_dc_128_predictor_4x16 sse2/;
specialize qw/aom_dc_128_predictor_8x4 sse2/;
specialize qw/aom_dc_128_predictor_8x8 neon msa sse2/;
specialize qw/aom_dc_128_predictor_8x16 sse2/;
@@ -127,6 +130,7 @@
specialize qw/aom_dc_128_predictor_64x16 sse2 avx2/;
specialize qw/aom_v_predictor_4x4 neon msa sse2/;
specialize qw/aom_v_predictor_4x8 sse2/;
+specialize qw/aom_v_predictor_4x16 sse2/;
specialize qw/aom_v_predictor_8x4 sse2/;
specialize qw/aom_v_predictor_8x8 neon msa sse2/;
specialize qw/aom_v_predictor_8x16 sse2/;
@@ -141,6 +145,7 @@
specialize qw/aom_v_predictor_64x32 sse2 avx2/;
specialize qw/aom_v_predictor_64x16 sse2 avx2/;
specialize qw/aom_h_predictor_4x8 sse2/;
+specialize qw/aom_h_predictor_4x16 sse2/;
specialize qw/aom_h_predictor_4x4 neon dspr2 msa sse2/;
specialize qw/aom_h_predictor_8x4 sse2/;
specialize qw/aom_h_predictor_8x8 neon dspr2 msa sse2/;
@@ -157,6 +162,7 @@
specialize qw/aom_h_predictor_64x16 sse2/;
specialize qw/aom_paeth_predictor_4x4 ssse3/;
specialize qw/aom_paeth_predictor_4x8 ssse3/;
+specialize qw/aom_paeth_predictor_4x16 ssse3/;
specialize qw/aom_paeth_predictor_8x4 ssse3/;
specialize qw/aom_paeth_predictor_8x8 ssse3/;
specialize qw/aom_paeth_predictor_8x16 ssse3/;
@@ -177,6 +183,7 @@
specialize qw/aom_paeth_predictor_32x32 ssse3/;
specialize qw/aom_smooth_predictor_4x4 ssse3/;
specialize qw/aom_smooth_predictor_4x8 ssse3/;
+specialize qw/aom_smooth_predictor_4x16 ssse3/;
specialize qw/aom_smooth_predictor_8x4 ssse3/;
specialize qw/aom_smooth_predictor_8x8 ssse3/;
specialize qw/aom_smooth_predictor_8x16 ssse3/;
@@ -235,6 +242,7 @@
# by multiply and shift.
specialize qw/aom_dc_predictor_4x4 dspr2 msa neon sse2/;
specialize qw/aom_dc_predictor_4x8 sse2/;
+specialize qw/aom_dc_predictor_4x16 sse2/;
specialize qw/aom_dc_predictor_8x4 sse2/;
specialize qw/aom_dc_predictor_8x8 dspr2 neon msa sse2/;
specialize qw/aom_dc_predictor_8x16 sse2/;
diff --git a/aom_dsp/x86/intrapred_sse2.c b/aom_dsp/x86/intrapred_sse2.c
index 7c19fab..14b43da 100644
--- a/aom_dsp/x86/intrapred_sse2.c
+++ b/aom_dsp/x86/intrapred_sse2.c
@@ -13,9 +13,9 @@
#include "./aom_dsp_rtcd.h"
-static INLINE void dc_store_4x8(uint32_t dc, uint8_t *dst, ptrdiff_t stride) {
- int i;
- for (i = 0; i < 4; ++i) {
+static INLINE void dc_store_4xh(uint32_t dc, int height, uint8_t *dst,
+ ptrdiff_t stride) {
+ for (int i = 0; i < height; i += 2) {
*(uint32_t *)dst = dc;
dst += stride;
*(uint32_t *)dst = dc;
@@ -126,7 +126,22 @@
const __m128i row = _mm_set1_epi8((uint8_t)sum);
const uint32_t pred = _mm_cvtsi128_si32(row);
- dc_store_4x8(pred, dst, stride);
+ dc_store_4xh(pred, 8, dst, stride);
+}
+
+void aom_dc_predictor_4x16_sse2(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ const __m128i sum_left = dc_sum_16(left);
+ __m128i sum_above = dc_sum_4(above);
+ sum_above = _mm_add_epi16(sum_left, sum_above);
+
+ uint32_t sum = _mm_cvtsi128_si32(sum_above);
+ sum += 10;
+ sum /= 20;
+
+ const __m128i row = _mm_set1_epi8((uint8_t)sum);
+ const uint32_t pred = _mm_cvtsi128_si32(row);
+ dc_store_4xh(pred, 16, dst, stride);
}
void aom_dc_predictor_8x4_sse2(uint8_t *dst, ptrdiff_t stride,
@@ -274,7 +289,21 @@
sum_above = _mm_packus_epi16(sum_above, sum_above);
const uint32_t pred = _mm_cvtsi128_si32(sum_above);
- dc_store_4x8(pred, dst, stride);
+ dc_store_4xh(pred, 8, dst, stride);
+}
+
+void aom_dc_top_predictor_4x16_sse2(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ (void)left;
+ __m128i sum_above = dc_sum_4(above);
+ const __m128i two = _mm_set1_epi16((int16_t)2);
+ sum_above = _mm_add_epi16(sum_above, two);
+ sum_above = _mm_srai_epi16(sum_above, 2);
+ sum_above = _mm_shufflelo_epi16(sum_above, 0);
+ sum_above = _mm_packus_epi16(sum_above, sum_above);
+
+ const uint32_t pred = _mm_cvtsi128_si32(sum_above);
+ dc_store_4xh(pred, 16, dst, stride);
}
void aom_dc_top_predictor_8x4_sse2(uint8_t *dst, ptrdiff_t stride,
@@ -426,7 +455,22 @@
sum_left = _mm_packus_epi16(sum_left, sum_left);
const uint32_t pred = _mm_cvtsi128_si32(sum_left);
- dc_store_4x8(pred, dst, stride);
+ dc_store_4xh(pred, 8, dst, stride);
+}
+
+void aom_dc_left_predictor_4x16_sse2(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above,
+ const uint8_t *left) {
+ (void)above;
+ __m128i sum_left = dc_sum_16(left);
+ const __m128i eight = _mm_set1_epi16((uint16_t)8);
+ sum_left = _mm_add_epi16(sum_left, eight);
+ sum_left = _mm_srai_epi16(sum_left, 4);
+ sum_left = _mm_shufflelo_epi16(sum_left, 0);
+ sum_left = _mm_packus_epi16(sum_left, sum_left);
+
+ const uint32_t pred = _mm_cvtsi128_si32(sum_left);
+ dc_store_4xh(pred, 16, dst, stride);
}
void aom_dc_left_predictor_8x4_sse2(uint8_t *dst, ptrdiff_t stride,
@@ -574,7 +618,15 @@
(void)above;
(void)left;
const uint32_t pred = 0x80808080;
- dc_store_4x8(pred, dst, stride);
+ dc_store_4xh(pred, 8, dst, stride);
+}
+
+void aom_dc_128_predictor_4x16_sse2(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ (void)above;
+ (void)left;
+ const uint32_t pred = 0x80808080;
+ dc_store_4xh(pred, 16, dst, stride);
}
void aom_dc_128_predictor_8x4_sse2(uint8_t *dst, ptrdiff_t stride,
@@ -671,7 +723,14 @@
const uint8_t *above, const uint8_t *left) {
const uint32_t pred = *(uint32_t *)above;
(void)left;
- dc_store_4x8(pred, dst, stride);
+ dc_store_4xh(pred, 8, dst, stride);
+}
+
+void aom_v_predictor_4x16_sse2(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ const uint32_t pred = *(uint32_t *)above;
+ (void)left;
+ dc_store_4xh(pred, 16, dst, stride);
}
void aom_v_predictor_8x4_sse2(uint8_t *dst, ptrdiff_t stride,
@@ -799,6 +858,67 @@
*(uint32_t *)dst = _mm_cvtsi128_si32(row3);
}
+void aom_h_predictor_4x16_sse2(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ (void)above;
+ const __m128i left_col = _mm_load_si128((__m128i const *)left);
+ __m128i left_col_low = _mm_unpacklo_epi8(left_col, left_col);
+ __m128i left_col_high = _mm_unpackhi_epi8(left_col, left_col);
+
+ __m128i row0 = _mm_shufflelo_epi16(left_col_low, 0);
+ __m128i row1 = _mm_shufflelo_epi16(left_col_low, 0x55);
+ __m128i row2 = _mm_shufflelo_epi16(left_col_low, 0xaa);
+ __m128i row3 = _mm_shufflelo_epi16(left_col_low, 0xff);
+ *(uint32_t *)dst = _mm_cvtsi128_si32(row0);
+ dst += stride;
+ *(uint32_t *)dst = _mm_cvtsi128_si32(row1);
+ dst += stride;
+ *(uint32_t *)dst = _mm_cvtsi128_si32(row2);
+ dst += stride;
+ *(uint32_t *)dst = _mm_cvtsi128_si32(row3);
+ dst += stride;
+
+ left_col_low = _mm_unpackhi_epi64(left_col_low, left_col_low);
+ row0 = _mm_shufflelo_epi16(left_col_low, 0);
+ row1 = _mm_shufflelo_epi16(left_col_low, 0x55);
+ row2 = _mm_shufflelo_epi16(left_col_low, 0xaa);
+ row3 = _mm_shufflelo_epi16(left_col_low, 0xff);
+ *(uint32_t *)dst = _mm_cvtsi128_si32(row0);
+ dst += stride;
+ *(uint32_t *)dst = _mm_cvtsi128_si32(row1);
+ dst += stride;
+ *(uint32_t *)dst = _mm_cvtsi128_si32(row2);
+ dst += stride;
+ *(uint32_t *)dst = _mm_cvtsi128_si32(row3);
+ dst += stride;
+
+ row0 = _mm_shufflelo_epi16(left_col_high, 0);
+ row1 = _mm_shufflelo_epi16(left_col_high, 0x55);
+ row2 = _mm_shufflelo_epi16(left_col_high, 0xaa);
+ row3 = _mm_shufflelo_epi16(left_col_high, 0xff);
+ *(uint32_t *)dst = _mm_cvtsi128_si32(row0);
+ dst += stride;
+ *(uint32_t *)dst = _mm_cvtsi128_si32(row1);
+ dst += stride;
+ *(uint32_t *)dst = _mm_cvtsi128_si32(row2);
+ dst += stride;
+ *(uint32_t *)dst = _mm_cvtsi128_si32(row3);
+ dst += stride;
+
+ left_col_high = _mm_unpackhi_epi64(left_col_high, left_col_high);
+ row0 = _mm_shufflelo_epi16(left_col_high, 0);
+ row1 = _mm_shufflelo_epi16(left_col_high, 0x55);
+ row2 = _mm_shufflelo_epi16(left_col_high, 0xaa);
+ row3 = _mm_shufflelo_epi16(left_col_high, 0xff);
+ *(uint32_t *)dst = _mm_cvtsi128_si32(row0);
+ dst += stride;
+ *(uint32_t *)dst = _mm_cvtsi128_si32(row1);
+ dst += stride;
+ *(uint32_t *)dst = _mm_cvtsi128_si32(row2);
+ dst += stride;
+ *(uint32_t *)dst = _mm_cvtsi128_si32(row3);
+}
+
void aom_h_predictor_8x4_sse2(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)above;
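
The 4x16 horizontal predictor added above is long only because each unpacked register covers four rows; the behavior it vectorizes is simply replicating left[r] across row r. A scalar sketch (the helper name h_4x16_scalar is illustrative):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

// Scalar reference for the horizontal predictor: row r of the 4x16
// block is the left-column pixel left[r] copied across all 4 columns.
static void h_4x16_scalar(uint8_t *dst, ptrdiff_t stride,
                          const uint8_t *left) {
  for (int r = 0; r < 16; ++r) {
    memset(dst, left[r], 4);
    dst += stride;
  }
}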
diff --git a/aom_dsp/x86/intrapred_ssse3.c b/aom_dsp/x86/intrapred_ssse3.c
index 069a3bf..6602fcf 100644
--- a/aom_dsp/x86/intrapred_ssse3.c
+++ b/aom_dsp/x86/intrapred_ssse3.c
@@ -82,6 +82,26 @@
}
}
+void aom_paeth_predictor_4x16_ssse3(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ __m128i l = _mm_load_si128((const __m128i *)left);
+ const __m128i t = _mm_cvtsi32_si128(((const uint32_t *)above)[0]);
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i t16 = _mm_unpacklo_epi8(t, zero);
+ const __m128i tl16 = _mm_set1_epi16((uint16_t)above[-1]);
+ __m128i rep = _mm_set1_epi16((short)0x8000);
+ const __m128i one = _mm_set1_epi16(1);
+
+ for (int i = 0; i < 16; ++i) {
+ const __m128i l16 = _mm_shuffle_epi8(l, rep);
+ const __m128i row = paeth_8x1_pred(&l16, &t16, &tl16);
+
+ *(uint32_t *)dst = _mm_cvtsi128_si32(_mm_packus_epi16(row, row));
+ dst += stride;
+ rep = _mm_add_epi16(rep, one);
+ }
+}
+
void aom_paeth_predictor_8x4_ssse3(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
__m128i l = _mm_loadl_epi64((const __m128i *)left);
@@ -494,9 +514,15 @@
// pixels[2]: right_pred vector
static INLINE void load_pixel_w4(const uint8_t *above, const uint8_t *left,
int height, __m128i *pixels) {
- __m128i d = _mm_loadl_epi64((const __m128i *)above);
+ __m128i d = _mm_cvtsi32_si128(((const uint32_t *)above)[0]);
+ if (height == 4)
+ pixels[1] = _mm_cvtsi32_si128(((const uint32_t *)left)[0]);
+ else if (height == 8)
+ pixels[1] = _mm_loadl_epi64(((const __m128i *)left));
+ else
+ pixels[1] = _mm_loadu_si128(((const __m128i *)left));
+
pixels[2] = _mm_set1_epi16((uint16_t)above[3]);
- pixels[1] = _mm_loadl_epi64((const __m128i *)left);
const __m128i bp = _mm_set1_epi16((uint16_t)left[height - 1]);
const __m128i zero = _mm_setzero_si128();
@@ -504,45 +530,52 @@
pixels[0] = _mm_unpacklo_epi16(d, bp);
}
-// weights[0]: weights_h vector
-// weights[1]: scale - weights_h vecotr
-// weights[2]: weights_w and scale - weights_w interleave vector
+// weight_h[0]: weight_h vector
+// weight_h[1]: scale - weight_h vector
+// weight_h[2]: same as [0], second half for height = 16 only
+// weight_h[3]: same as [1], second half for height = 16 only
+// weight_w[0]: weights_w and scale - weights_w interleave vector
static INLINE void load_weight_w4(const uint8_t *weight_array, int height,
- __m128i *weights) {
- __m128i t = _mm_loadu_si128((const __m128i *)&weight_array[4]);
+ __m128i *weight_h, __m128i *weight_w) {
const __m128i zero = _mm_setzero_si128();
-
- weights[0] = _mm_unpacklo_epi8(t, zero);
const __m128i d = _mm_set1_epi16((uint16_t)(1 << sm_weight_log2_scale));
- weights[1] = _mm_sub_epi16(d, weights[0]);
- weights[2] = _mm_unpacklo_epi16(weights[0], weights[1]);
+ const __m128i t = _mm_cvtsi32_si128(((const uint32_t *)weight_array)[1]);
+ weight_h[0] = _mm_unpacklo_epi8(t, zero);
+ weight_h[1] = _mm_sub_epi16(d, weight_h[0]);
+ weight_w[0] = _mm_unpacklo_epi16(weight_h[0], weight_h[1]);
if (height == 8) {
- t = _mm_srli_si128(t, 4);
- weights[0] = _mm_unpacklo_epi8(t, zero);
- weights[1] = _mm_sub_epi16(d, weights[0]);
+ const __m128i weight = _mm_loadl_epi64((const __m128i *)&weight_array[8]);
+ weight_h[0] = _mm_unpacklo_epi8(weight, zero);
+ weight_h[1] = _mm_sub_epi16(d, weight_h[0]);
+ } else if (height == 16) {
+ const __m128i weight = _mm_loadu_si128((const __m128i *)&weight_array[16]);
+ weight_h[0] = _mm_unpacklo_epi8(weight, zero);
+ weight_h[1] = _mm_sub_epi16(d, weight_h[0]);
+ weight_h[2] = _mm_unpackhi_epi8(weight, zero);
+ weight_h[3] = _mm_sub_epi16(d, weight_h[2]);
}
}
-static INLINE void smooth_pred_4xh(const __m128i *pixel, const __m128i *weight,
- int h, uint8_t *dst, ptrdiff_t stride) {
+static INLINE void smooth_pred_4xh(const __m128i *pixel, const __m128i *wh,
+ const __m128i *ww, int h, uint8_t *dst,
+ ptrdiff_t stride, int second_half) {
const __m128i round = _mm_set1_epi32((1 << sm_weight_log2_scale));
const __m128i one = _mm_set1_epi16(1);
const __m128i inc = _mm_set1_epi16(0x202);
const __m128i gat = _mm_set1_epi32(0xc080400);
- __m128i rep = _mm_set1_epi16(0x8000);
+ __m128i rep = second_half ? _mm_set1_epi16((short)0x8008) : _mm_set1_epi16((short)0x8000);
__m128i d = _mm_set1_epi16(0x100);
- int i;
- for (i = 0; i < h; ++i) {
- const __m128i wg_wg = _mm_shuffle_epi8(weight[0], d);
- const __m128i sc_sc = _mm_shuffle_epi8(weight[1], d);
+ for (int i = 0; i < h; ++i) {
+ const __m128i wg_wg = _mm_shuffle_epi8(wh[0], d);
+ const __m128i sc_sc = _mm_shuffle_epi8(wh[1], d);
const __m128i wh_sc = _mm_unpacklo_epi16(wg_wg, sc_sc);
__m128i s = _mm_madd_epi16(pixel[0], wh_sc);
__m128i b = _mm_shuffle_epi8(pixel[1], rep);
b = _mm_unpacklo_epi16(b, pixel[2]);
- __m128i sum = _mm_madd_epi16(b, weight[2]);
+ __m128i sum = _mm_madd_epi16(b, ww[0]);
sum = _mm_add_epi32(s, sum);
sum = _mm_add_epi32(sum, round);
@@ -562,10 +595,10 @@
__m128i pixels[3];
load_pixel_w4(above, left, 4, pixels);
- __m128i weights[3];
- load_weight_w4(sm_weight_arrays, 4, weights);
+ __m128i wh[4], ww[2];
+ load_weight_w4(sm_weight_arrays, 4, wh, ww);
- smooth_pred_4xh(pixels, weights, 4, dst, stride);
+ smooth_pred_4xh(pixels, wh, ww, 4, dst, stride, 0);
}
void aom_smooth_predictor_4x8_ssse3(uint8_t *dst, ptrdiff_t stride,
@@ -573,10 +606,24 @@
__m128i pixels[3];
load_pixel_w4(above, left, 8, pixels);
- __m128i weights[3];
- load_weight_w4(sm_weight_arrays, 8, weights);
+ __m128i wh[4], ww[2];
+ load_weight_w4(sm_weight_arrays, 8, wh, ww);
- smooth_pred_4xh(pixels, weights, 8, dst, stride);
+ smooth_pred_4xh(pixels, wh, ww, 8, dst, stride, 0);
+}
+
+void aom_smooth_predictor_4x16_ssse3(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above,
+ const uint8_t *left) {
+ __m128i pixels[3];
+ load_pixel_w4(above, left, 16, pixels);
+
+ __m128i wh[4], ww[2];
+ load_weight_w4(sm_weight_arrays, 16, wh, ww);
+
+ smooth_pred_4xh(pixels, wh, ww, 8, dst, stride, 0);
+ dst += stride << 3;
+ smooth_pred_4xh(pixels, &wh[2], ww, 8, dst, stride, 1);
}
// pixels[0]: above and below_pred interleave vector, first half
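
For reference, the smooth predictor that load_pixel_w4 / load_weight_w4 / smooth_pred_4xh vectorize is a two-direction blend: above[c] is mixed with a bottom-left estimate (left[h - 1]) by the height weights, left[r] is mixed with a top-right estimate (above[3]) by the width weights, and the two _mm_madd_epi16 sums are rounded by half of the combined 2 * 256 scale. A scalar sketch, assuming sm_weight_log2_scale == 8 and that sm_w4 and sm_wh point at the sm_weight_arrays entries for width 4 and height h (parameter names are illustrative):

#include <stddef.h>
#include <stdint.h>

// Scalar sketch of the 4xH smooth predictor. scale is
// 1 << sm_weight_log2_scale == 256; the four weights per pixel sum
// to 2 * scale, hence the final >> 9 with a +256 rounding bias.
static void smooth_4xh_scalar(uint8_t *dst, ptrdiff_t stride, int h,
                              const uint8_t *above, const uint8_t *left,
                              const uint8_t *sm_w4, const uint8_t *sm_wh) {
  const uint32_t scale = 256;
  const uint8_t below = left[h - 1];  // bottom-left estimate
  const uint8_t right = above[3];     // top-right estimate
  for (int r = 0; r < h; ++r) {
    for (int c = 0; c < 4; ++c) {
      const uint32_t p = sm_wh[r] * above[c] + (scale - sm_wh[r]) * below +
                         sm_w4[c] * left[r] + (scale - sm_w4[c]) * right;
      dst[c] = (uint8_t)((p + scale) >> 9);
    }
    dst += stride;
  }
}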
diff --git a/test/test_intra_pred_speed.cc b/test/test_intra_pred_speed.cc
index 329a7d0..e2b85c2 100644
--- a/test/test_intra_pred_speed.cc
+++ b/test/test_intra_pred_speed.cc
@@ -442,6 +442,10 @@
aom_dc_left_predictor_4x8_sse2, aom_dc_top_predictor_4x8_sse2,
aom_dc_128_predictor_4x8_sse2, aom_v_predictor_4x8_sse2,
aom_h_predictor_4x8_sse2, NULL, NULL, NULL, NULL)
+INTRA_PRED_TEST(SSE2_3, TX_4X16, aom_dc_predictor_4x16_sse2,
+ aom_dc_left_predictor_4x16_sse2, aom_dc_top_predictor_4x16_sse2,
+ aom_dc_128_predictor_4x16_sse2, aom_v_predictor_4x16_sse2,
+ aom_h_predictor_4x16_sse2, NULL, NULL, NULL, NULL)
#endif // HAVE_SSE2
#if HAVE_SSSE3
@@ -453,8 +457,9 @@
aom_paeth_predictor_4x8_ssse3, aom_smooth_predictor_4x8_ssse3,
aom_smooth_v_predictor_4x8_ssse3,
aom_smooth_h_predictor_4x8_ssse3)
-INTRA_PRED_TEST(SSSE3_3, TX_4X16, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- NULL, aom_smooth_v_predictor_4x16_ssse3,
+INTRA_PRED_TEST(SSSE3_3, TX_4X16, NULL, NULL, NULL, NULL, NULL, NULL,
+ aom_paeth_predictor_4x16_ssse3, aom_smooth_predictor_4x16_ssse3,
+ aom_smooth_v_predictor_4x16_ssse3,
aom_smooth_h_predictor_4x16_ssse3)
#endif // HAVE_SSSE3
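
Finally, the Paeth rule that paeth_8x1_pred applies across each row of the new 4x16 path, registered in the SSSE3_3 test above: predict from whichever of left, top, and top-left is closest to the gradient estimate left + top - top_left, breaking ties in that order. A scalar sketch of the per-pixel selection:

#include <stdint.h>
#include <stdlib.h>

// Paeth selection: base = left + top - top_left estimates the pixel
// assuming a locally planar gradient; pick the neighbor nearest to it.
static uint8_t paeth_scalar(uint8_t left, uint8_t top, uint8_t top_left) {
  const int base = (int)left + top - top_left;
  const int d_left = abs(base - left);
  const int d_top = abs(base - top);
  const int d_top_left = abs(base - top_left);
  if (d_left <= d_top && d_left <= d_top_left) return left;
  if (d_top <= d_top_left) return top;
  return top_left;
}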