Lowbd SMOOTH_PRED intrapred ssse3 optimization

Speedup of the SSSE3 predictors over the C reference on an i7-6700:

Predictor   ssse3 vs. C
4x4            ~1.3x
4x8            ~1.9x
8x4            ~2.3x
8x8            ~3.4x
8x16           ~4.1x
16x8           ~4.6x
16x16          ~5.2x
16x32          ~5.6x
32x16          ~4.2x
32x32          ~4.7x
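
The SSSE3 kernels implement the same arithmetic as the scalar
SMOOTH_PRED code: each output pixel blends a vertical interpolation
(above[c] vs. the bottom-left pixel) with a horizontal one (left[r]
vs. the top-right pixel), weighted by the quadratic entries of
sm_weight_arrays[]. A minimal sketch of that scalar algorithm, for
reference only (smooth_pred_c_sketch is an illustrative name, not
the actual C function):

  static void smooth_pred_c_sketch(uint8_t *dst, ptrdiff_t stride,
                                   int bw, int bh, const uint8_t *above,
                                   const uint8_t *left) {
    const uint8_t below_pred = left[bh - 1];   // bottom-left estimate
    const uint8_t right_pred = above[bw - 1];  // top-right estimate
    const uint8_t *const wh = sm_weight_arrays + bh;  // weights start at bs
    const uint8_t *const ww = sm_weight_arrays + bw;
    const int scale = 1 << sm_weight_log2_scale;  // 256
    for (int r = 0; r < bh; ++r) {
      for (int c = 0; c < bw; ++c) {
        const uint32_t sum =
            wh[r] * above[c] + (scale - wh[r]) * below_pred +
            ww[c] * left[r] + (scale - ww[c]) * right_pred;
        // The four weights sum to 2 * scale, hence the extra shift bit.
        dst[c] = (uint8_t)((sum + scale) >> (sm_weight_log2_scale + 1));
      }
      dst += stride;
    }
  }

The SIMD code below evaluates both products of each interpolation with
a single pmaddwd (_mm_madd_epi16) by interleaving the pixel pair, e.g.
(above[c], below_pred), with the weight pair (w, scale - w), then sums
the vertical and horizontal terms, rounds, and shifts.
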
Change-Id: Ic12383cf9d4446361d6355eb8a480a3c7602060e
diff --git a/aom_dsp/aom_dsp.cmake b/aom_dsp/aom_dsp.cmake
index 7a281a4..2a5f544 100644
--- a/aom_dsp/aom_dsp.cmake
+++ b/aom_dsp/aom_dsp.cmake
@@ -23,6 +23,7 @@
"${AOM_ROOT}/aom_dsp/blend_a64_mask.c"
"${AOM_ROOT}/aom_dsp/blend_a64_vmask.c"
"${AOM_ROOT}/aom_dsp/intrapred.c"
+ "${AOM_ROOT}/aom_dsp/intrapred_common.h"
"${AOM_ROOT}/aom_dsp/loopfilter.c"
"${AOM_ROOT}/aom_dsp/prob.c"
"${AOM_ROOT}/aom_dsp/prob.h"
diff --git a/aom_dsp/aom_dsp.mk b/aom_dsp/aom_dsp.mk
index 6e7076b..f1eb57d 100644
--- a/aom_dsp/aom_dsp.mk
+++ b/aom_dsp/aom_dsp.mk
@@ -64,6 +64,7 @@
# intra predictions
DSP_SRCS-yes += intrapred.c
+DSP_SRCS-yes += intrapred_common.h
ifneq ($(CONFIG_ANS),yes)
DSP_SRCS-yes += entcode.c
diff --git a/aom_dsp/aom_dsp_rtcd_defs.pl b/aom_dsp/aom_dsp_rtcd_defs.pl
index 4541b79..5962726 100755
--- a/aom_dsp/aom_dsp_rtcd_defs.pl
+++ b/aom_dsp/aom_dsp_rtcd_defs.pl
@@ -147,6 +147,16 @@
specialize qw/aom_paeth_predictor_16x32 ssse3 avx2/;
specialize qw/aom_paeth_predictor_32x16 ssse3 avx2/;
specialize qw/aom_paeth_predictor_32x32 ssse3 avx2/;
+specialize qw/aom_smooth_predictor_4x4 ssse3/;
+specialize qw/aom_smooth_predictor_4x8 ssse3/;
+specialize qw/aom_smooth_predictor_8x4 ssse3/;
+specialize qw/aom_smooth_predictor_8x8 ssse3/;
+specialize qw/aom_smooth_predictor_8x16 ssse3/;
+specialize qw/aom_smooth_predictor_16x8 ssse3/;
+specialize qw/aom_smooth_predictor_16x16 ssse3/;
+specialize qw/aom_smooth_predictor_16x32 ssse3/;
+specialize qw/aom_smooth_predictor_32x16 ssse3/;
+specialize qw/aom_smooth_predictor_32x32 ssse3/;
specialize qw/aom_d63e_predictor_4x4 ssse3/;
specialize qw/aom_d135_predictor_4x4 neon/;
diff --git a/aom_dsp/intrapred.c b/aom_dsp/intrapred.c
index 6373817..6d2ac37 100644
--- a/aom_dsp/intrapred.c
+++ b/aom_dsp/intrapred.c
@@ -16,6 +16,7 @@
#include "./aom_dsp_rtcd.h"
#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/intrapred_common.h"
#include "aom_mem/aom_mem.h"
#include "aom_ports/bitops.h"
@@ -207,40 +208,6 @@
}
}
-// Weights are quadratic from '1' to '1 / block_size', scaled by
-// 2^sm_weight_log2_scale.
-static const int sm_weight_log2_scale = 8;
-
-#if CONFIG_TX64X64
-// max(block_size_wide[BLOCK_LARGEST], block_size_high[BLOCK_LARGEST])
-#define MAX_BLOCK_DIM 64
-#else
-#define MAX_BLOCK_DIM 32
-#endif // CONFIG_TX64X64
-
-static const uint8_t sm_weight_arrays[2 * MAX_BLOCK_DIM] = {
- // Unused, because we always offset by bs, which is at least 2.
- 0, 0,
- // bs = 2
- 255, 128,
- // bs = 4
- 255, 149, 85, 64,
- // bs = 8
- 255, 197, 146, 105, 73, 50, 37, 32,
- // bs = 16
- 255, 225, 196, 170, 145, 123, 102, 84, 68, 54, 43, 33, 26, 20, 17, 16,
- // bs = 32
- 255, 240, 225, 210, 196, 182, 169, 157, 145, 133, 122, 111, 101, 92, 83, 74,
- 66, 59, 52, 45, 39, 34, 29, 25, 21, 17, 14, 12, 10, 9, 8, 8,
-#if CONFIG_TX64X64
- // bs = 64
- 255, 248, 240, 233, 225, 218, 210, 203, 196, 189, 182, 176, 169, 163, 156,
- 150, 144, 138, 133, 127, 121, 116, 111, 106, 101, 96, 91, 86, 82, 77, 73, 69,
- 65, 61, 57, 54, 50, 47, 44, 41, 38, 35, 32, 29, 27, 25, 22, 20, 18, 16, 15,
- 13, 12, 10, 9, 8, 7, 6, 6, 5, 5, 4, 4, 4,
-#endif // CONFIG_TX64X64
-};
-
// Some basic checks on weights for smooth predictor.
#define sm_weights_sanity_checks(weights_w, weights_h, weights_scale, \
pred_scale) \
diff --git a/aom_dsp/intrapred_common.h b/aom_dsp/intrapred_common.h
new file mode 100644
index 0000000..96da49b
--- /dev/null
+++ b/aom_dsp/intrapred_common.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef _AOM_DSP_INTRAPRED_COMMON_H
+#define _AOM_DSP_INTRAPRED_COMMON_H
+
+#include "./aom_config.h"
+
+// Weights are quadratic from '1' to '1 / block_size', scaled by
+// 2^sm_weight_log2_scale.
+static const int sm_weight_log2_scale = 8;
+
+#if CONFIG_TX64X64
+// max(block_size_wide[BLOCK_LARGEST], block_size_high[BLOCK_LARGEST])
+#define MAX_BLOCK_DIM 64
+#else
+#define MAX_BLOCK_DIM 32
+#endif // CONFIG_TX64X64
+
+static const uint8_t sm_weight_arrays[2 * MAX_BLOCK_DIM] = {
+ // Unused, because we always offset by bs, which is at least 2.
+ 0, 0,
+ // bs = 2
+ 255, 128,
+ // bs = 4
+ 255, 149, 85, 64,
+ // bs = 8
+ 255, 197, 146, 105, 73, 50, 37, 32,
+ // bs = 16
+ 255, 225, 196, 170, 145, 123, 102, 84, 68, 54, 43, 33, 26, 20, 17, 16,
+ // bs = 32
+ 255, 240, 225, 210, 196, 182, 169, 157, 145, 133, 122, 111, 101, 92, 83, 74,
+ 66, 59, 52, 45, 39, 34, 29, 25, 21, 17, 14, 12, 10, 9, 8, 8,
+#if CONFIG_TX64X64
+ // bs = 64
+ 255, 248, 240, 233, 225, 218, 210, 203, 196, 189, 182, 176, 169, 163, 156,
+ 150, 144, 138, 133, 127, 121, 116, 111, 106, 101, 96, 91, 86, 82, 77, 73, 69,
+ 65, 61, 57, 54, 50, 47, 44, 41, 38, 35, 32, 29, 27, 25, 22, 20, 18, 16, 15,
+ 13, 12, 10, 9, 8, 7, 6, 6, 5, 5, 4, 4, 4,
+#endif // CONFIG_TX64X64
+};
+
+#endif // _AOM_DSP_INTRAPRED_COMMON_H
diff --git a/aom_dsp/x86/intrapred_ssse3.c b/aom_dsp/x86/intrapred_ssse3.c
index 54589a8..85b8274 100644
--- a/aom_dsp/x86/intrapred_ssse3.c
+++ b/aom_dsp/x86/intrapred_ssse3.c
@@ -12,6 +12,7 @@
#include <tmmintrin.h>
#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/intrapred_common.h"
// -----------------------------------------------------------------------------
// TM_PRED
@@ -305,3 +306,594 @@
rep = _mm_add_epi16(rep, one);
}
}
+
+// -----------------------------------------------------------------------------
+// SMOOTH_PRED
+
+// pixels[0]: above and below_pred interleave vector
+// pixels[1]: left vector
+// pixels[2]: right_pred vector
+static INLINE void load_pixel_w4(const uint8_t *above, const uint8_t *left,
+ int height, __m128i *pixels) {
+ __m128i d = _mm_loadl_epi64((const __m128i *)above);
+ pixels[2] = _mm_set1_epi16((uint16_t)above[3]);
+ pixels[1] = _mm_loadl_epi64((const __m128i *)left);
+
+ const __m128i bp = _mm_set1_epi16((uint16_t)left[height - 1]);
+ const __m128i zero = _mm_setzero_si128();
+ d = _mm_unpacklo_epi8(d, zero);
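+ // Interleaving above[] with below_pred lets a single _mm_madd_epi16
+ // against the interleaved (w_h, scale - w_h) weights produce the whole
+ // vertical term w_h * above[c] + (scale - w_h) * below_pred per 32-bit lane.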
+ pixels[0] = _mm_unpacklo_epi16(d, bp);
+}
+
+// weights[0]: weights_h vector
+// weights[1]: scale - weights_h vector
+// weights[2]: weights_w and scale - weights_w interleave vector
+static INLINE void load_weight_w4(const uint8_t *weight_array, int height,
+ __m128i *weights) {
+ __m128i t = _mm_loadu_si128((const __m128i *)&weight_array[4]);
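+ // Weights for block size bs start at weight_array[bs]: the 16 bytes just
+ // loaded begin with the bs = 4 weights; the 4-byte shift below reaches the
+ // bs = 8 weights when height == 8.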
+ const __m128i zero = _mm_setzero_si128();
+
+ weights[0] = _mm_unpacklo_epi8(t, zero);
+ const __m128i d = _mm_set1_epi16((uint16_t)(1 << sm_weight_log2_scale));
+ weights[1] = _mm_sub_epi16(d, weights[0]);
+ weights[2] = _mm_unpacklo_epi16(weights[0], weights[1]);
+
+ if (height == 8) {
+ t = _mm_srli_si128(t, 4);
+ weights[0] = _mm_unpacklo_epi8(t, zero);
+ weights[1] = _mm_sub_epi16(d, weights[0]);
+ }
+}
+
+static INLINE void smooth_pred_4xh(const __m128i *pixel, const __m128i *weight,
+ int h, uint8_t *dst, ptrdiff_t stride) {
+ const __m128i round = _mm_set1_epi32((1 << sm_weight_log2_scale));
+ const __m128i one = _mm_set1_epi16(1);
+ const __m128i inc = _mm_set1_epi16(0x202);
+ const __m128i gat = _mm_set1_epi32(0xc080400);
+ __m128i rep = _mm_set1_epi16(0x8000);
+ __m128i d = _mm_set1_epi16(0x100);
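+ // Row stepping via pshufb selectors: 'd' (+0x202 per row) broadcasts the
+ // i-th 16-bit height weight, while 'rep' (+1 per row) broadcasts left[i],
+ // its 0x80 high byte zero-extending the pixel to 16 bits. 'gat' gathers
+ // byte 0 of each 32-bit result into the low dword for the 4-byte store.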
+
+ int i;
+ for (i = 0; i < h; ++i) {
+ const __m128i wg_wg = _mm_shuffle_epi8(weight[0], d);
+ const __m128i sc_sc = _mm_shuffle_epi8(weight[1], d);
+ const __m128i wh_sc = _mm_unpacklo_epi16(wg_wg, sc_sc);
+ __m128i s = _mm_madd_epi16(pixel[0], wh_sc);
+
+ __m128i b = _mm_shuffle_epi8(pixel[1], rep);
+ b = _mm_unpacklo_epi16(b, pixel[2]);
+ __m128i sum = _mm_madd_epi16(b, weight[2]);
+
+ sum = _mm_add_epi32(s, sum);
+ sum = _mm_add_epi32(sum, round);
+ sum = _mm_srai_epi32(sum, 1 + sm_weight_log2_scale);
+
+ sum = _mm_shuffle_epi8(sum, gat);
+ *(uint32_t *)dst = _mm_cvtsi128_si32(sum);
+ dst += stride;
+
+ rep = _mm_add_epi16(rep, one);
+ d = _mm_add_epi16(d, inc);
+ }
+}
+
+void aom_smooth_predictor_4x4_ssse3(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ __m128i pixels[3];
+ load_pixel_w4(above, left, 4, pixels);
+
+ __m128i weights[3];
+ load_weight_w4(sm_weight_arrays, 4, weights);
+
+ smooth_pred_4xh(pixels, weights, 4, dst, stride);
+}
+
+void aom_smooth_predictor_4x8_ssse3(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ __m128i pixels[3];
+ load_pixel_w4(above, left, 8, pixels);
+
+ __m128i weights[3];
+ load_weight_w4(sm_weight_arrays, 8, weights);
+
+ smooth_pred_4xh(pixels, weights, 8, dst, stride);
+}
+
+// pixels[0]: above and below_pred interleave vector, first half
+// pixels[1]: above and below_pred interleave vector, second half
+// pixels[2]: left vector
+// pixels[3]: right_pred vector
+static INLINE void load_pixel_w8(const uint8_t *above, const uint8_t *left,
+ int height, __m128i *pixels) {
+ __m128i d = _mm_loadl_epi64((const __m128i *)above);
+ pixels[3] = _mm_set1_epi16((uint16_t)above[7]);
+ pixels[2] = _mm_load_si128((const __m128i *)left);
+ const __m128i bp = _mm_set1_epi16((uint16_t)left[height - 1]);
+ const __m128i zero = _mm_setzero_si128();
+
+ d = _mm_unpacklo_epi8(d, zero);
+ pixels[0] = _mm_unpacklo_epi16(d, bp);
+ pixels[1] = _mm_unpackhi_epi16(d, bp);
+}
+
+// weight_h[0]: weight_h vector
+// weight_h[1]: scale - weight_h vector
+// weight_h[2]: same as [0], second half for height = 16 only
+// weight_h[3]: same as [1], second half for height = 16 only
+// weight_w[0]: weights_w and scale - weights_w interleave vector, first half
+// weight_w[1]: weights_w and scale - weights_w interleave vector, second half
+static INLINE void load_weight_w8(const uint8_t *weight_array, int height,
+ __m128i *weight_h, __m128i *weight_w) {
+ const __m128i zero = _mm_setzero_si128();
+ const int we_offset = height < 8 ? 4 : 8;
+ __m128i we = _mm_loadu_si128((const __m128i *)&weight_array[we_offset]);
+ weight_h[0] = _mm_unpacklo_epi8(we, zero);
+
+ const __m128i d = _mm_set1_epi16((uint16_t)(1 << sm_weight_log2_scale));
+ weight_h[1] = _mm_sub_epi16(d, weight_h[0]);
+
+ if (height == 4) {
+ we = _mm_srli_si128(we, 4);
+ __m128i tmp1 = _mm_unpacklo_epi8(we, zero);
+ __m128i tmp2 = _mm_sub_epi16(d, tmp1);
+ weight_w[0] = _mm_unpacklo_epi16(tmp1, tmp2);
+ weight_w[1] = _mm_unpackhi_epi16(tmp1, tmp2);
+ } else {
+ weight_w[0] = _mm_unpacklo_epi16(weight_h[0], weight_h[1]);
+ weight_w[1] = _mm_unpackhi_epi16(weight_h[0], weight_h[1]);
+ }
+
+ if (height == 16) {
+ we = _mm_loadu_si128((const __m128i *)&weight_array[16]);
+ weight_h[0] = _mm_unpacklo_epi8(we, zero);
+ weight_h[1] = _mm_sub_epi16(d, weight_h[0]);
+ weight_h[2] = _mm_unpackhi_epi8(we, zero);
+ weight_h[3] = _mm_sub_epi16(d, weight_h[2]);
+ }
+}
+
+static INLINE void smooth_pred_8xh(const __m128i *pixels, const __m128i *wh,
+ const __m128i *ww, int h, uint8_t *dst,
+ ptrdiff_t stride, int second_half) {
+ const __m128i round = _mm_set1_epi32((1 << sm_weight_log2_scale));
+ const __m128i one = _mm_set1_epi16(1);
+ const __m128i inc = _mm_set1_epi16(0x202);
+ const __m128i gat = _mm_set_epi32(0, 0, 0xe0c0a08, 0x6040200);
+
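+ // For the second half of an 8x16 block, rep starts at byte 8 so the
+ // left-pixel broadcast walks left[8..15] instead of left[0..7].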
+ __m128i rep = second_half ? _mm_set1_epi16(0x8008) : _mm_set1_epi16(0x8000);
+ __m128i d = _mm_set1_epi16(0x100);
+
+ int i;
+ for (i = 0; i < h; ++i) {
+ const __m128i wg_wg = _mm_shuffle_epi8(wh[0], d);
+ const __m128i sc_sc = _mm_shuffle_epi8(wh[1], d);
+ const __m128i wh_sc = _mm_unpacklo_epi16(wg_wg, sc_sc);
+ __m128i s0 = _mm_madd_epi16(pixels[0], wh_sc);
+ __m128i s1 = _mm_madd_epi16(pixels[1], wh_sc);
+
+ __m128i b = _mm_shuffle_epi8(pixels[2], rep);
+ b = _mm_unpacklo_epi16(b, pixels[3]);
+ __m128i sum0 = _mm_madd_epi16(b, ww[0]);
+ __m128i sum1 = _mm_madd_epi16(b, ww[1]);
+
+ s0 = _mm_add_epi32(s0, sum0);
+ s0 = _mm_add_epi32(s0, round);
+ s0 = _mm_srai_epi32(s0, 1 + sm_weight_log2_scale);
+
+ s1 = _mm_add_epi32(s1, sum1);
+ s1 = _mm_add_epi32(s1, round);
+ s1 = _mm_srai_epi32(s1, 1 + sm_weight_log2_scale);
+
+ sum0 = _mm_packus_epi16(s0, s1);
+ sum0 = _mm_shuffle_epi8(sum0, gat);
+ _mm_storel_epi64((__m128i *)dst, sum0);
+ dst += stride;
+
+ rep = _mm_add_epi16(rep, one);
+ d = _mm_add_epi16(d, inc);
+ }
+}
+
+void aom_smooth_predictor_8x4_ssse3(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ __m128i pixels[4];
+ load_pixel_w8(above, left, 4, pixels);
+
+ __m128i wh[4], ww[2];
+ load_weight_w8(sm_weight_arrays, 4, wh, ww);
+
+ smooth_pred_8xh(pixels, wh, ww, 4, dst, stride, 0);
+}
+
+void aom_smooth_predictor_8x8_ssse3(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ __m128i pixels[4];
+ load_pixel_w8(above, left, 8, pixels);
+
+ __m128i wh[4], ww[2];
+ load_weight_w8(sm_weight_arrays, 8, wh, ww);
+
+ smooth_pred_8xh(pixels, wh, ww, 8, dst, stride, 0);
+}
+
+void aom_smooth_predictor_8x16_ssse3(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above,
+ const uint8_t *left) {
+ __m128i pixels[4];
+ load_pixel_w8(above, left, 16, pixels);
+
+ __m128i wh[4], ww[2];
+ load_weight_w8(sm_weight_arrays, 16, wh, ww);
+
+ smooth_pred_8xh(pixels, wh, ww, 8, dst, stride, 0);
+ dst += stride << 3;
+ smooth_pred_8xh(pixels, &wh[2], ww, 8, dst, stride, 1);
+}
+
+// pixels[0]: above and below_pred interleave vector, 1/4
+// pixels[1]: above and below_pred interleave vector, 2/4
+// pixels[2]: above and below_pred interleave vector, 3/4
+// pixels[3]: above and below_pred interleave vector, 4/4
+// pixels[4]: left vector
+// pixels[5]: left vector, h = 32 only
+// pixels[6]: right_pred vector
+static INLINE void load_pixel_w16(const uint8_t *above, const uint8_t *left,
+ int height, __m128i *pixels) {
+ __m128i ab = _mm_load_si128((const __m128i *)above);
+ pixels[6] = _mm_set1_epi16((uint16_t)above[15]);
+ pixels[4] = _mm_load_si128((const __m128i *)left);
+ pixels[5] = _mm_load_si128((const __m128i *)(left + 16));
+ const __m128i bp = _mm_set1_epi16((uint16_t)left[height - 1]);
+ const __m128i zero = _mm_setzero_si128();
+
+ __m128i x = _mm_unpacklo_epi8(ab, zero);
+ pixels[0] = _mm_unpacklo_epi16(x, bp);
+ pixels[1] = _mm_unpackhi_epi16(x, bp);
+
+ x = _mm_unpackhi_epi8(ab, zero);
+ pixels[2] = _mm_unpacklo_epi16(x, bp);
+ pixels[3] = _mm_unpackhi_epi16(x, bp);
+}
+
+// weight_h[0]: weight_h vector
+// weight_h[1]: scale - weight_h vector
+// weight_h[2]: same as [0], second half for height = 16 or 32
+// weight_h[3]: same as [1], second half for height = 16 or 32
+// ... ...
+// weight_w[0]: weights_w and scale - weights_w interleave vector, first half
+// weight_w[1]: weights_w and scale - weights_w interleave vector, second half
+// ... ...
+static INLINE void load_weight_w16(const uint8_t *weight_array, int height,
+ __m128i *weight_h, __m128i *weight_w) {
+ const __m128i zero = _mm_setzero_si128();
+ __m128i w8 = _mm_loadu_si128((const __m128i *)&weight_array[8]);
+ __m128i w16 = _mm_loadu_si128((const __m128i *)&weight_array[16]);
+ __m128i w32_0 = _mm_loadu_si128((const __m128i *)&weight_array[32]);
+ __m128i w32_1 = _mm_loadu_si128((const __m128i *)&weight_array[32 + 16]);
+ const __m128i d = _mm_set1_epi16((uint16_t)(1 << sm_weight_log2_scale));
+
+ if (height == 8) {
+ weight_h[0] = _mm_unpacklo_epi8(w8, zero);
+ weight_h[1] = _mm_sub_epi16(d, weight_h[0]); // scale - weight_h
+
+ __m128i x = _mm_unpacklo_epi8(w16, zero);
+ __m128i y = _mm_sub_epi16(d, x);
+ weight_w[0] = _mm_unpacklo_epi16(x, y);
+ weight_w[1] = _mm_unpackhi_epi16(x, y);
+ x = _mm_unpackhi_epi8(w16, zero);
+ y = _mm_sub_epi16(d, x);
+ weight_w[2] = _mm_unpacklo_epi16(x, y);
+ weight_w[3] = _mm_unpackhi_epi16(x, y);
+ }
+
+ if (height == 16) {
+ weight_h[0] = _mm_unpacklo_epi8(w16, zero);
+ weight_h[1] = _mm_sub_epi16(d, weight_h[0]);
+ weight_h[2] = _mm_unpackhi_epi8(w16, zero);
+ weight_h[3] = _mm_sub_epi16(d, weight_h[2]);
+
+ weight_w[0] = _mm_unpacklo_epi16(weight_h[0], weight_h[1]);
+ weight_w[1] = _mm_unpackhi_epi16(weight_h[0], weight_h[1]);
+ weight_w[2] = _mm_unpacklo_epi16(weight_h[2], weight_h[3]);
+ weight_w[3] = _mm_unpackhi_epi16(weight_h[2], weight_h[3]);
+ }
+
+ if (height == 32) {
+ weight_h[0] = _mm_unpacklo_epi8(w32_0, zero);
+ weight_h[1] = _mm_sub_epi16(d, weight_h[0]);
+ weight_h[2] = _mm_unpackhi_epi8(w32_0, zero);
+ weight_h[3] = _mm_sub_epi16(d, weight_h[2]);
+
+ __m128i x = _mm_unpacklo_epi8(w16, zero);
+ __m128i y = _mm_sub_epi16(d, x);
+ weight_w[0] = _mm_unpacklo_epi16(x, y);
+ weight_w[1] = _mm_unpackhi_epi16(x, y);
+ x = _mm_unpackhi_epi8(w16, zero);
+ y = _mm_sub_epi16(d, x);
+ weight_w[2] = _mm_unpacklo_epi16(x, y);
+ weight_w[3] = _mm_unpackhi_epi16(x, y);
+
+ weight_h[4] = _mm_unpacklo_epi8(w32_1, zero);
+ weight_h[5] = _mm_sub_epi16(d, weight_h[4]);
+ weight_h[6] = _mm_unpackhi_epi8(w32_1, zero);
+ weight_h[7] = _mm_sub_epi16(d, weight_h[6]);
+ }
+}
+
+static INLINE void smooth_pred_16x8(const __m128i *pixels, const __m128i *wh,
+ const __m128i *ww, uint8_t *dst,
+ ptrdiff_t stride, int quarter) {
+ __m128i d = _mm_set1_epi16(0x100);
+ const __m128i one = _mm_set1_epi16(1);
+ const __m128i inc = _mm_set1_epi16(0x202);
+ const __m128i gat = _mm_set_epi32(0, 0, 0xe0c0a08, 0x6040200);
+ const __m128i round = _mm_set1_epi32((1 << sm_weight_log2_scale));
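+ // 'quarter' selects the 8-row slice: bit 0 picks bytes 0..7 vs. 8..15 of
+ // the left vector, bit 1 picks pixels[4] (rows 0..15) vs. pixels[5].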
+ __m128i rep =
+ (quarter % 2 == 0) ? _mm_set1_epi16(0x8000) : _mm_set1_epi16(0x8008);
+ const __m128i left = (quarter < 2) ? pixels[4] : pixels[5];
+
+ int i;
+ for (i = 0; i < 8; ++i) {
+ const __m128i wg_wg = _mm_shuffle_epi8(wh[0], d);
+ const __m128i sc_sc = _mm_shuffle_epi8(wh[1], d);
+ const __m128i wh_sc = _mm_unpacklo_epi16(wg_wg, sc_sc);
+ __m128i s0 = _mm_madd_epi16(pixels[0], wh_sc);
+ __m128i s1 = _mm_madd_epi16(pixels[1], wh_sc);
+ __m128i s2 = _mm_madd_epi16(pixels[2], wh_sc);
+ __m128i s3 = _mm_madd_epi16(pixels[3], wh_sc);
+
+ __m128i b = _mm_shuffle_epi8(left, rep);
+ b = _mm_unpacklo_epi16(b, pixels[6]);
+ __m128i sum0 = _mm_madd_epi16(b, ww[0]);
+ __m128i sum1 = _mm_madd_epi16(b, ww[1]);
+ __m128i sum2 = _mm_madd_epi16(b, ww[2]);
+ __m128i sum3 = _mm_madd_epi16(b, ww[3]);
+
+ s0 = _mm_add_epi32(s0, sum0);
+ s0 = _mm_add_epi32(s0, round);
+ s0 = _mm_srai_epi32(s0, 1 + sm_weight_log2_scale);
+
+ s1 = _mm_add_epi32(s1, sum1);
+ s1 = _mm_add_epi32(s1, round);
+ s1 = _mm_srai_epi32(s1, 1 + sm_weight_log2_scale);
+
+ s2 = _mm_add_epi32(s2, sum2);
+ s2 = _mm_add_epi32(s2, round);
+ s2 = _mm_srai_epi32(s2, 1 + sm_weight_log2_scale);
+
+ s3 = _mm_add_epi32(s3, sum3);
+ s3 = _mm_add_epi32(s3, round);
+ s3 = _mm_srai_epi32(s3, 1 + sm_weight_log2_scale);
+
+ sum0 = _mm_packus_epi16(s0, s1);
+ sum0 = _mm_shuffle_epi8(sum0, gat);
+ sum1 = _mm_packus_epi16(s2, s3);
+ sum1 = _mm_shuffle_epi8(sum1, gat);
+
+ _mm_storel_epi64((__m128i *)dst, sum0);
+ _mm_storel_epi64((__m128i *)(dst + 8), sum1);
+
+ dst += stride;
+ rep = _mm_add_epi16(rep, one);
+ d = _mm_add_epi16(d, inc);
+ }
+}
+
+void aom_smooth_predictor_16x8_ssse3(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above,
+ const uint8_t *left) {
+ __m128i pixels[7];
+ load_pixel_w16(above, left, 8, pixels);
+
+ __m128i wh[2], ww[4];
+ load_weight_w16(sm_weight_arrays, 8, wh, ww);
+
+ smooth_pred_16x8(pixels, wh, ww, dst, stride, 0);
+}
+
+void aom_smooth_predictor_16x16_ssse3(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above,
+ const uint8_t *left) {
+ __m128i pixels[7];
+ load_pixel_w16(above, left, 16, pixels);
+
+ __m128i wh[4], ww[4];
+ load_weight_w16(sm_weight_arrays, 16, wh, ww);
+
+ smooth_pred_16x8(pixels, wh, ww, dst, stride, 0);
+ dst += stride << 3;
+ smooth_pred_16x8(pixels, &wh[2], ww, dst, stride, 1);
+}
+
+void aom_smooth_predictor_16x32_ssse3(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above,
+ const uint8_t *left) {
+ __m128i pixels[7];
+ load_pixel_w16(above, left, 32, pixels);
+
+ __m128i wh[8], ww[4];
+ load_weight_w16(sm_weight_arrays, 32, wh, ww);
+
+ smooth_pred_16x8(pixels, wh, ww, dst, stride, 0);
+ dst += stride << 3;
+ smooth_pred_16x8(pixels, &wh[2], ww, dst, stride, 1);
+ dst += stride << 3;
+ smooth_pred_16x8(pixels, &wh[4], ww, dst, stride, 2);
+ dst += stride << 3;
+ smooth_pred_16x8(pixels, &wh[6], ww, dst, stride, 3);
+}
+
+static INLINE void load_pixel_w32(const uint8_t *above, const uint8_t *left,
+ int height, __m128i *pixels) {
+ __m128i ab0 = _mm_load_si128((const __m128i *)above);
+ __m128i ab1 = _mm_load_si128((const __m128i *)(above + 16));
+
+ pixels[10] = _mm_set1_epi16((uint16_t)above[31]);
+ pixels[8] = _mm_load_si128((const __m128i *)left);
+ pixels[9] = _mm_load_si128((const __m128i *)(left + 16));
+
+ const __m128i bp = _mm_set1_epi16((uint16_t)left[height - 1]);
+ const __m128i zero = _mm_setzero_si128();
+
+ __m128i x = _mm_unpacklo_epi8(ab0, zero);
+ pixels[0] = _mm_unpacklo_epi16(x, bp);
+ pixels[1] = _mm_unpackhi_epi16(x, bp);
+
+ x = _mm_unpackhi_epi8(ab0, zero);
+ pixels[2] = _mm_unpacklo_epi16(x, bp);
+ pixels[3] = _mm_unpackhi_epi16(x, bp);
+
+ x = _mm_unpacklo_epi8(ab1, zero);
+ pixels[4] = _mm_unpacklo_epi16(x, bp);
+ pixels[5] = _mm_unpackhi_epi16(x, bp);
+
+ x = _mm_unpackhi_epi8(ab1, zero);
+ pixels[6] = _mm_unpacklo_epi16(x, bp);
+ pixels[7] = _mm_unpackhi_epi16(x, bp);
+}
+
+static INLINE void load_weight_w32(const uint8_t *weight_array, int height,
+ __m128i *weight_h, __m128i *weight_w) {
+ const __m128i zero = _mm_setzero_si128();
+ __m128i w16 = _mm_loadu_si128((const __m128i *)&weight_array[16]);
+ __m128i w32_0 = _mm_loadu_si128((const __m128i *)&weight_array[32]);
+ __m128i w32_1 = _mm_loadu_si128((const __m128i *)&weight_array[32 + 16]);
+ const __m128i d = _mm_set1_epi16((uint16_t)(1 << sm_weight_log2_scale));
+
+ if (height == 16) {
+ weight_h[0] = _mm_unpacklo_epi8(w16, zero);
+ weight_h[1] = _mm_sub_epi16(d, weight_h[0]);
+ weight_h[2] = _mm_unpackhi_epi8(w16, zero);
+ weight_h[3] = _mm_sub_epi16(d, weight_h[2]);
+
+ __m128i x = _mm_unpacklo_epi8(w32_0, zero);
+ __m128i y = _mm_sub_epi16(d, x);
+ weight_w[0] = _mm_unpacklo_epi16(x, y);
+ weight_w[1] = _mm_unpackhi_epi16(x, y);
+
+ x = _mm_unpackhi_epi8(w32_0, zero);
+ y = _mm_sub_epi16(d, x);
+ weight_w[2] = _mm_unpacklo_epi16(x, y);
+ weight_w[3] = _mm_unpackhi_epi16(x, y);
+
+ x = _mm_unpacklo_epi8(w32_1, zero);
+ y = _mm_sub_epi16(d, x);
+ weight_w[4] = _mm_unpacklo_epi16(x, y);
+ weight_w[5] = _mm_unpackhi_epi16(x, y);
+
+ x = _mm_unpackhi_epi8(w32_1, zero);
+ y = _mm_sub_epi16(d, x);
+ weight_w[6] = _mm_unpacklo_epi16(x, y);
+ weight_w[7] = _mm_unpackhi_epi16(x, y);
+ }
+
+ if (height == 32) {
+ weight_h[0] = _mm_unpacklo_epi8(w32_0, zero);
+ weight_h[1] = _mm_sub_epi16(d, weight_h[0]);
+ weight_h[2] = _mm_unpackhi_epi8(w32_0, zero);
+ weight_h[3] = _mm_sub_epi16(d, weight_h[2]);
+
+ weight_h[4] = _mm_unpacklo_epi8(w32_1, zero);
+ weight_h[5] = _mm_sub_epi16(d, weight_h[4]);
+ weight_h[6] = _mm_unpackhi_epi8(w32_1, zero);
+ weight_h[7] = _mm_sub_epi16(d, weight_h[6]);
+
+ weight_w[0] = _mm_unpacklo_epi16(weight_h[0], weight_h[1]);
+ weight_w[1] = _mm_unpackhi_epi16(weight_h[0], weight_h[1]);
+ weight_w[2] = _mm_unpacklo_epi16(weight_h[2], weight_h[3]);
+ weight_w[3] = _mm_unpackhi_epi16(weight_h[2], weight_h[3]);
+
+ weight_w[4] = _mm_unpacklo_epi16(weight_h[4], weight_h[5]);
+ weight_w[5] = _mm_unpackhi_epi16(weight_h[4], weight_h[5]);
+ weight_w[6] = _mm_unpacklo_epi16(weight_h[6], weight_h[7]);
+ weight_w[7] = _mm_unpackhi_epi16(weight_h[6], weight_h[7]);
+ }
+}
+
+static INLINE void smooth_pred_32x8(const __m128i *pixels, const __m128i *wh,
+ const __m128i *ww, uint8_t *dst,
+ ptrdiff_t stride, int quarter) {
+ __m128i d = _mm_set1_epi16(0x100);
+ const __m128i one = _mm_set1_epi16(1);
+ const __m128i inc = _mm_set1_epi16(0x202);
+ const __m128i gat = _mm_set_epi32(0, 0, 0xe0c0a08, 0x6040200);
+ const __m128i round = _mm_set1_epi32((1 << sm_weight_log2_scale));
+ __m128i rep =
+ (quarter % 2 == 0) ? _mm_set1_epi16(0x8000) : _mm_set1_epi16(0x8008);
+ const __m128i left = (quarter < 2) ? pixels[8] : pixels[9];
+
+ int i;
+ for (i = 0; i < 8; ++i) {
+ const __m128i wg_wg = _mm_shuffle_epi8(wh[0], d);
+ const __m128i sc_sc = _mm_shuffle_epi8(wh[1], d);
+ const __m128i wh_sc = _mm_unpacklo_epi16(wg_wg, sc_sc);
+
+ int j;
+ __m128i s[8];
+ __m128i b = _mm_shuffle_epi8(left, rep);
+ b = _mm_unpacklo_epi16(b, pixels[10]);
+
+ for (j = 0; j < 8; ++j) {
+ s[j] = _mm_madd_epi16(pixels[j], wh_sc);
+ s[j] = _mm_add_epi32(s[j], _mm_madd_epi16(b, ww[j]));
+ s[j] = _mm_add_epi32(s[j], round);
+ s[j] = _mm_srai_epi32(s[j], 1 + sm_weight_log2_scale);
+ }
+
+ for (j = 0; j < 8; j += 2) {
+ __m128i sum = _mm_packus_epi16(s[j], s[j + 1]);
+ sum = _mm_shuffle_epi8(sum, gat);
+ _mm_storel_epi64((__m128i *)(dst + (j << 2)), sum);
+ }
+ dst += stride;
+ rep = _mm_add_epi16(rep, one);
+ d = _mm_add_epi16(d, inc);
+ }
+}
+
+void aom_smooth_predictor_32x16_ssse3(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above,
+ const uint8_t *left) {
+ __m128i pixels[11];
+ load_pixel_w32(above, left, 16, pixels);
+
+ __m128i wh[4], ww[8];
+ load_weight_w32(sm_weight_arrays, 16, wh, ww);
+
+ smooth_pred_32x8(pixels, wh, ww, dst, stride, 0);
+ dst += stride << 3;
+ smooth_pred_32x8(pixels, &wh[2], ww, dst, stride, 1);
+}
+
+void aom_smooth_predictor_32x32_ssse3(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above,
+ const uint8_t *left) {
+ __m128i pixels[11];
+ load_pixel_w32(above, left, 32, pixels);
+
+ __m128i wh[8], ww[8];
+ load_weight_w32(sm_weight_arrays, 32, wh, ww);
+
+ smooth_pred_32x8(pixels, &wh[0], ww, dst, stride, 0);
+ dst += stride << 3;
+ smooth_pred_32x8(pixels, &wh[2], ww, dst, stride, 1);
+ dst += stride << 3;
+ smooth_pred_32x8(pixels, &wh[4], ww, dst, stride, 2);
+ dst += stride << 3;
+ smooth_pred_32x8(pixels, &wh[6], ww, dst, stride, 3);
+}
diff --git a/test/intrapred_test.cc b/test/intrapred_test.cc
index 29e33ec..2bb446a 100644
--- a/test/intrapred_test.cc
+++ b/test/intrapred_test.cc
@@ -246,7 +246,7 @@
#if HAVE_SSSE3
const IntraPredFunc<IntraPred> LowbdIntraPredTestVectorSsse3[] = {
- lowbd_intrapred(paeth, ssse3),
+ lowbd_intrapred(paeth, ssse3), lowbd_intrapred(smooth, ssse3),
};
INSTANTIATE_TEST_CASE_P(SSSE3, LowbdIntraPredTest,
diff --git a/test/test_intra_pred_speed.cc b/test/test_intra_pred_speed.cc
index 47864ce..54c451e 100644
--- a/test/test_intra_pred_speed.cc
+++ b/test/test_intra_pred_speed.cc
@@ -440,10 +440,12 @@
INTRA_PRED_TEST(SSSE3_1, TestIntraPred4, "intra4x4", NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, aom_d153_predictor_4x4_ssse3,
NULL, aom_d63e_predictor_4x4_ssse3,
- aom_paeth_predictor_4x4_ssse3, NULL, NULL, NULL)
+ aom_paeth_predictor_4x4_ssse3, aom_smooth_predictor_4x4_ssse3,
+ NULL, NULL)
INTRA_PRED_TEST(SSSE3_2, TestIntraPred4, "intra4x8", NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- aom_paeth_predictor_4x8_ssse3, NULL, NULL, NULL)
+ aom_paeth_predictor_4x8_ssse3, aom_smooth_predictor_4x8_ssse3,
+ NULL, NULL)
#endif // HAVE_SSSE3
#if HAVE_DSPR2
@@ -549,13 +551,16 @@
#if HAVE_SSSE3
INTRA_PRED_TEST(SSSE3_1, TestIntraPred8, "intra8x8", NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, aom_d153_predictor_8x8_ssse3,
- NULL, NULL, aom_paeth_predictor_8x8_ssse3, NULL, NULL, NULL)
+ NULL, NULL, aom_paeth_predictor_8x8_ssse3,
+ aom_smooth_predictor_8x8_ssse3, NULL, NULL)
INTRA_PRED_TEST(SSSE3_2, TestIntraPred8, "intra8x4", NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- aom_paeth_predictor_8x4_ssse3, NULL, NULL, NULL)
+ aom_paeth_predictor_8x4_ssse3, aom_smooth_predictor_8x4_ssse3,
+ NULL, NULL)
INTRA_PRED_TEST(SSSE3_3, TestIntraPred8, "intra8x16", NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- aom_paeth_predictor_8x16_ssse3, NULL, NULL, NULL)
+ aom_paeth_predictor_8x16_ssse3, aom_smooth_predictor_8x16_ssse3,
+ NULL, NULL)
#endif // HAVE_SSSE3
#if HAVE_DSPR2
@@ -663,13 +668,16 @@
#if HAVE_SSSE3
INTRA_PRED_TEST(SSSE3_1, TestIntraPred16, "intra16x16", NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, aom_d153_predictor_16x16_ssse3,
- NULL, NULL, aom_paeth_predictor_16x16_ssse3, NULL, NULL, NULL)
+ NULL, NULL, aom_paeth_predictor_16x16_ssse3,
+ aom_smooth_predictor_16x16_ssse3, NULL, NULL)
INTRA_PRED_TEST(SSSE3_2, TestIntraPred16, "intra16x8", NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- aom_paeth_predictor_16x8_ssse3, NULL, NULL, NULL)
+ aom_paeth_predictor_16x8_ssse3, aom_smooth_predictor_16x8_ssse3,
+ NULL, NULL)
INTRA_PRED_TEST(SSSE3_3, TestIntraPred16, "intra16x32", NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- aom_paeth_predictor_16x32_ssse3, NULL, NULL, NULL)
+ aom_paeth_predictor_16x32_ssse3,
+ aom_smooth_predictor_16x32_ssse3, NULL, NULL)
#endif // HAVE_SSSE3
#if HAVE_AVX2
@@ -767,10 +775,12 @@
#if HAVE_SSSE3
INTRA_PRED_TEST(SSSE3_1, TestIntraPred32, "intra32x32", NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, aom_d153_predictor_32x32_ssse3,
- NULL, NULL, aom_paeth_predictor_32x32_ssse3, NULL, NULL, NULL)
+ NULL, NULL, aom_paeth_predictor_32x32_ssse3,
+ aom_smooth_predictor_32x32_ssse3, NULL, NULL)
INTRA_PRED_TEST(SSSE3_2, TestIntraPred32, "intra32x16", NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- aom_paeth_predictor_32x16_ssse3, NULL, NULL, NULL)
+ aom_paeth_predictor_32x16_ssse3,
+ aom_smooth_predictor_32x16_ssse3, NULL, NULL)
#endif // HAVE_SSSE3
#if HAVE_AVX2