sad4d avx2 code generic cleanup

Replace the per-block-size AVX2 kernels with a single generic
aom_sadMxNx4d_avx2() plus macro-generated per-size wrappers.

New block sizes added:
(32x16), (32x8), (64x16)

No performance change for previously existing block sizes.
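For reference, each aom_sadMxNx4d kernel computes the same result as the
scalar sketch below (parameter names mirror the kernel signature; this is
illustrative only, not the actual implementation):

  for (int k = 0; k < 4; k++) {
    uint32_t sad = 0;
    for (int i = 0; i < N; i++)
      for (int j = 0; j < M; j++)
        sad += abs(src[i * src_stride + j] - ref[k][i * ref_stride + j]);
    res[k] = sad;
  }

The AVX2 version vectorizes the inner loop 32 pixels at a time with
_mm256_sad_epu8 and keeps one running sum per ref block.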
Change-Id: Id9531aa60fbd74b2db17c8452d86bb87c01a2965
diff --git a/aom_dsp/aom_dsp_rtcd_defs.pl b/aom_dsp/aom_dsp_rtcd_defs.pl
index 9867602..d482228 100755
--- a/aom_dsp/aom_dsp_rtcd_defs.pl
+++ b/aom_dsp/aom_dsp_rtcd_defs.pl
@@ -817,23 +817,28 @@
specialize qw/aom_sad64x128x4d avx2 sse2/;
specialize qw/aom_sad64x64x4d avx2 neon msa sse2/;
specialize qw/aom_sad64x32x4d avx2 msa sse2/;
+ specialize qw/aom_sad64x16x4d avx2 sse2/;
specialize qw/aom_sad32x64x4d avx2 msa sse2/;
specialize qw/aom_sad32x32x4d avx2 neon msa sse2/;
- specialize qw/aom_sad32x16x4d msa sse2/;
+ specialize qw/aom_sad32x16x4d avx2 msa sse2/;
+ specialize qw/aom_sad32x8x4d avx2 sse2/;
+ specialize qw/aom_sad16x64x4d sse2/;
specialize qw/aom_sad16x32x4d msa sse2/;
specialize qw/aom_sad16x16x4d neon msa sse2/;
specialize qw/aom_sad16x8x4d msa sse2/;
+
specialize qw/aom_sad8x16x4d msa sse2/;
specialize qw/aom_sad8x8x4d msa sse2/;
specialize qw/aom_sad8x4x4d msa sse2/;
specialize qw/aom_sad4x8x4d msa sse2/;
specialize qw/aom_sad4x4x4d msa sse2/;
specialize qw/aom_sad4x16x4d sse2/;
specialize qw/aom_sad16x4x4d sse2/;
specialize qw/aom_sad8x32x4d sse2/;
- specialize qw/aom_sad32x8x4d sse2/;
- specialize qw/aom_sad16x64x4d sse2/;
- specialize qw/aom_sad64x16x4d sse2/;
#
diff --git a/aom_dsp/x86/sad4d_avx2.c b/aom_dsp/x86/sad4d_avx2.c
index f662b62..0771252 100644
--- a/aom_dsp/x86/sad4d_avx2.c
+++ b/aom_dsp/x86/sad4d_avx2.c
@@ -14,41 +14,43 @@
#include "aom/aom_integer.h"
-void aom_sad32x32x4d_avx2(const uint8_t *src, int src_stride,
- const uint8_t *const ref[4], int ref_stride,
- uint32_t res[4]) {
+void aom_sadMxNx4d_avx2(int M, int N, const uint8_t *src, int src_stride,
+ const uint8_t *const ref[4], int ref_stride,
+ uint32_t res[4]) {
__m256i src_reg, ref0_reg, ref1_reg, ref2_reg, ref3_reg;
__m256i sum_ref0, sum_ref1, sum_ref2, sum_ref3;
- __m256i sum_mlow, sum_mhigh;
- int i;
+ int i, j;
const uint8_t *ref0, *ref1, *ref2, *ref3;
ref0 = ref[0];
ref1 = ref[1];
ref2 = ref[2];
ref3 = ref[3];
- sum_ref0 = _mm256_set1_epi16(0);
- sum_ref1 = _mm256_set1_epi16(0);
- sum_ref2 = _mm256_set1_epi16(0);
- sum_ref3 = _mm256_set1_epi16(0);
- for (i = 0; i < 32; i++) {
- // load src and all refs
- src_reg = _mm256_loadu_si256((const __m256i *)src);
- ref0_reg = _mm256_loadu_si256((const __m256i *)ref0);
- ref1_reg = _mm256_loadu_si256((const __m256i *)ref1);
- ref2_reg = _mm256_loadu_si256((const __m256i *)ref2);
- ref3_reg = _mm256_loadu_si256((const __m256i *)ref3);
- // sum of the absolute differences between every ref-i to src
- ref0_reg = _mm256_sad_epu8(ref0_reg, src_reg);
- ref1_reg = _mm256_sad_epu8(ref1_reg, src_reg);
- ref2_reg = _mm256_sad_epu8(ref2_reg, src_reg);
- ref3_reg = _mm256_sad_epu8(ref3_reg, src_reg);
- // sum every ref-i
- sum_ref0 = _mm256_add_epi32(sum_ref0, ref0_reg);
- sum_ref1 = _mm256_add_epi32(sum_ref1, ref1_reg);
- sum_ref2 = _mm256_add_epi32(sum_ref2, ref2_reg);
- sum_ref3 = _mm256_add_epi32(sum_ref3, ref3_reg);
+ sum_ref0 = _mm256_setzero_si256();
+ sum_ref1 = _mm256_setzero_si256();
+ sum_ref2 = _mm256_setzero_si256();
+ sum_ref3 = _mm256_setzero_si256();
+ for (i = 0; i < N; i++) {
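+ // each row is processed 32 pixels at a time, so M must be a multiple of 32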
+ for (j = 0; j < M; j += 32) {
+ // load src and all refs
+ src_reg = _mm256_loadu_si256((const __m256i *)(src + j));
+ ref0_reg = _mm256_loadu_si256((const __m256i *)(ref0 + j));
+ ref1_reg = _mm256_loadu_si256((const __m256i *)(ref1 + j));
+ ref2_reg = _mm256_loadu_si256((const __m256i *)(ref2 + j));
+ ref3_reg = _mm256_loadu_si256((const __m256i *)(ref3 + j));
+
+ // sum of the absolute differences between every ref-i to src
+ ref0_reg = _mm256_sad_epu8(ref0_reg, src_reg);
+ ref1_reg = _mm256_sad_epu8(ref1_reg, src_reg);
+ ref2_reg = _mm256_sad_epu8(ref2_reg, src_reg);
+ ref3_reg = _mm256_sad_epu8(ref3_reg, src_reg);
+ // sum every ref-i
+ sum_ref0 = _mm256_add_epi32(sum_ref0, ref0_reg);
+ sum_ref1 = _mm256_add_epi32(sum_ref1, ref1_reg);
+ sum_ref2 = _mm256_add_epi32(sum_ref2, ref2_reg);
+ sum_ref3 = _mm256_add_epi32(sum_ref3, ref3_reg);
+ }
src += src_stride;
ref0 += ref_stride;
ref1 += ref_stride;
@@ -57,6 +59,7 @@
}
{
__m128i sum;
+ __m256i sum_mlow, sum_mhigh;
// in sum_ref-i the result is saved in the first 4 bytes
// the other 4 bytes are zeroed.
// sum_ref1 and sum_ref3 are shifted left by 4 bytes
@@ -80,139 +83,24 @@
_mm_storeu_si128((__m128i *)(res), sum);
}
- _mm256_zeroupper();
}
-void aom_sad64x64x4d_avx2(const uint8_t *src, int src_stride,
- const uint8_t *const ref[4], int ref_stride,
- uint32_t res[4]) {
- __m256i src_reg, srcnext_reg, ref0_reg, ref0next_reg;
- __m256i ref1_reg, ref1next_reg, ref2_reg, ref2next_reg;
- __m256i ref3_reg, ref3next_reg;
- __m256i sum_ref0, sum_ref1, sum_ref2, sum_ref3;
- __m256i sum_mlow, sum_mhigh;
- int i;
- const uint8_t *ref0, *ref1, *ref2, *ref3;
-
- ref0 = ref[0];
- ref1 = ref[1];
- ref2 = ref[2];
- ref3 = ref[3];
- sum_ref0 = _mm256_set1_epi16(0);
- sum_ref1 = _mm256_set1_epi16(0);
- sum_ref2 = _mm256_set1_epi16(0);
- sum_ref3 = _mm256_set1_epi16(0);
- for (i = 0; i < 64; i++) {
- // load 64 bytes from src and all refs
- src_reg = _mm256_loadu_si256((const __m256i *)src);
- srcnext_reg = _mm256_loadu_si256((const __m256i *)(src + 32));
- ref0_reg = _mm256_loadu_si256((const __m256i *)ref0);
- ref0next_reg = _mm256_loadu_si256((const __m256i *)(ref0 + 32));
- ref1_reg = _mm256_loadu_si256((const __m256i *)ref1);
- ref1next_reg = _mm256_loadu_si256((const __m256i *)(ref1 + 32));
- ref2_reg = _mm256_loadu_si256((const __m256i *)ref2);
- ref2next_reg = _mm256_loadu_si256((const __m256i *)(ref2 + 32));
- ref3_reg = _mm256_loadu_si256((const __m256i *)ref3);
- ref3next_reg = _mm256_loadu_si256((const __m256i *)(ref3 + 32));
- // sum of the absolute differences between every ref-i to src
- ref0_reg = _mm256_sad_epu8(ref0_reg, src_reg);
- ref1_reg = _mm256_sad_epu8(ref1_reg, src_reg);
- ref2_reg = _mm256_sad_epu8(ref2_reg, src_reg);
- ref3_reg = _mm256_sad_epu8(ref3_reg, src_reg);
- ref0next_reg = _mm256_sad_epu8(ref0next_reg, srcnext_reg);
- ref1next_reg = _mm256_sad_epu8(ref1next_reg, srcnext_reg);
- ref2next_reg = _mm256_sad_epu8(ref2next_reg, srcnext_reg);
- ref3next_reg = _mm256_sad_epu8(ref3next_reg, srcnext_reg);
-
- // sum every ref-i
- sum_ref0 = _mm256_add_epi32(sum_ref0, ref0_reg);
- sum_ref1 = _mm256_add_epi32(sum_ref1, ref1_reg);
- sum_ref2 = _mm256_add_epi32(sum_ref2, ref2_reg);
- sum_ref3 = _mm256_add_epi32(sum_ref3, ref3_reg);
- sum_ref0 = _mm256_add_epi32(sum_ref0, ref0next_reg);
- sum_ref1 = _mm256_add_epi32(sum_ref1, ref1next_reg);
- sum_ref2 = _mm256_add_epi32(sum_ref2, ref2next_reg);
- sum_ref3 = _mm256_add_epi32(sum_ref3, ref3next_reg);
- src += src_stride;
- ref0 += ref_stride;
- ref1 += ref_stride;
- ref2 += ref_stride;
- ref3 += ref_stride;
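+// Generate aom_sad<m>x<n>x4d_avx2() wrappers that forward M and N to the
+// generic kernel above.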
+#define sadMxN_avx2(m, n) \
+ void aom_sad##m##x##n##x4d_avx2(const uint8_t *src, int src_stride, \
+ const uint8_t *const ref[4], int ref_stride, \
+ uint32_t res[4]) { \
+ aom_sadMxNx4d_avx2(m, n, src, src_stride, ref, ref_stride, res); \
}
- {
- __m128i sum;
- // in sum_ref-i the result is saved in the first 4 bytes
- // the other 4 bytes are zeroed.
- // sum_ref1 and sum_ref3 are shifted left by 4 bytes
- sum_ref1 = _mm256_slli_si256(sum_ref1, 4);
- sum_ref3 = _mm256_slli_si256(sum_ref3, 4);
- // merge sum_ref0 and sum_ref1 also sum_ref2 and sum_ref3
- sum_ref0 = _mm256_or_si256(sum_ref0, sum_ref1);
- sum_ref2 = _mm256_or_si256(sum_ref2, sum_ref3);
- // merge every 64 bit from each sum_ref-i
- sum_mlow = _mm256_unpacklo_epi64(sum_ref0, sum_ref2);
- sum_mhigh = _mm256_unpackhi_epi64(sum_ref0, sum_ref2);
-
- // add the low 64 bit to the high 64 bit
- sum_mlow = _mm256_add_epi32(sum_mlow, sum_mhigh);
-
- // add the low 128 bit to the high 128 bit
- sum = _mm_add_epi32(_mm256_castsi256_si128(sum_mlow),
- _mm256_extractf128_si256(sum_mlow, 1));
-
- _mm_storeu_si128((__m128i *)(res), sum);
- }
- _mm256_zeroupper();
-}
-
-void aom_sad32x64x4d_avx2(const uint8_t *src, int src_stride,
- const uint8_t *const ref[4], int ref_stride,
- uint32_t res[4]) {
- const uint8_t *rf[4];
- uint32_t sum0[4];
- uint32_t sum1[4];
-
- rf[0] = ref[0];
- rf[1] = ref[1];
- rf[2] = ref[2];
- rf[3] = ref[3];
- aom_sad32x32x4d_avx2(src, src_stride, rf, ref_stride, sum0);
- src += src_stride << 5;
- rf[0] += ref_stride << 5;
- rf[1] += ref_stride << 5;
- rf[2] += ref_stride << 5;
- rf[3] += ref_stride << 5;
- aom_sad32x32x4d_avx2(src, src_stride, rf, ref_stride, sum1);
- res[0] = sum0[0] + sum1[0];
- res[1] = sum0[1] + sum1[1];
- res[2] = sum0[2] + sum1[2];
- res[3] = sum0[3] + sum1[3];
-}
-
-void aom_sad64x32x4d_avx2(const uint8_t *src, int src_stride,
- const uint8_t *const ref[4], int ref_stride,
- uint32_t res[4]) {
- const uint8_t *rf[4];
- uint32_t sum0[4];
- uint32_t sum1[4];
- unsigned int half_width = 32;
-
- rf[0] = ref[0];
- rf[1] = ref[1];
- rf[2] = ref[2];
- rf[3] = ref[3];
- aom_sad32x32x4d_avx2(src, src_stride, rf, ref_stride, sum0);
- src += half_width;
- rf[0] += half_width;
- rf[1] += half_width;
- rf[2] += half_width;
- rf[3] += half_width;
- aom_sad32x32x4d_avx2(src, src_stride, rf, ref_stride, sum1);
- res[0] = sum0[0] + sum1[0];
- res[1] = sum0[1] + sum1[1];
- res[2] = sum0[2] + sum1[2];
- res[3] = sum0[3] + sum1[3];
-}
+sadMxN_avx2(32, 8);
+sadMxN_avx2(32, 16);
+sadMxN_avx2(32, 32);
+sadMxN_avx2(32, 64);
+
+sadMxN_avx2(64, 16);
+sadMxN_avx2(64, 32);
+sadMxN_avx2(64, 64);
+sadMxN_avx2(64, 128);
+
+sadMxN_avx2(128, 64);
+sadMxN_avx2(128, 128);
diff --git a/aom_dsp/x86/sad_impl_avx2.c b/aom_dsp/x86/sad_impl_avx2.c
index c6fd62c..f77a585 100644
--- a/aom_dsp/x86/sad_impl_avx2.c
+++ b/aom_dsp/x86/sad_impl_avx2.c
@@ -84,81 +84,6 @@
return sum;
}
-static void sad64x64x4d(const uint8_t *src, int src_stride,
- const uint8_t *const ref[4], int ref_stride,
- __m128i *res) {
- uint32_t sum[4];
- aom_sad64x64x4d_avx2(src, src_stride, ref, ref_stride, sum);
- *res = _mm_loadu_si128((const __m128i *)sum);
-}
-
-void aom_sad64x128x4d_avx2(const uint8_t *src, int src_stride,
- const uint8_t *const ref[4], int ref_stride,
- uint32_t res[4]) {
- __m128i sum0, sum1;
- const uint8_t *rf[4];
-
- rf[0] = ref[0];
- rf[1] = ref[1];
- rf[2] = ref[2];
- rf[3] = ref[3];
- sad64x64x4d(src, src_stride, rf, ref_stride, &sum0);
- src += src_stride << 6;
- rf[0] += ref_stride << 6;
- rf[1] += ref_stride << 6;
- rf[2] += ref_stride << 6;
- rf[3] += ref_stride << 6;
- sad64x64x4d(src, src_stride, rf, ref_stride, &sum1);
- sum0 = _mm_add_epi32(sum0, sum1);
- _mm_storeu_si128((__m128i *)res, sum0);
-}
-
-void aom_sad128x64x4d_avx2(const uint8_t *src, int src_stride,
- const uint8_t *const ref[4], int ref_stride,
- uint32_t res[4]) {
- __m128i sum0, sum1;
- unsigned int half_width = 64;
- const uint8_t *rf[4];
-
- rf[0] = ref[0];
- rf[1] = ref[1];
- rf[2] = ref[2];
- rf[3] = ref[3];
- sad64x64x4d(src, src_stride, rf, ref_stride, &sum0);
- src += half_width;
- rf[0] += half_width;
- rf[1] += half_width;
- rf[2] += half_width;
- rf[3] += half_width;
- sad64x64x4d(src, src_stride, rf, ref_stride, &sum1);
- sum0 = _mm_add_epi32(sum0, sum1);
- _mm_storeu_si128((__m128i *)res, sum0);
-}
-
-void aom_sad128x128x4d_avx2(const uint8_t *src, int src_stride,
- const uint8_t *const ref[4], int ref_stride,
- uint32_t res[4]) {
- const uint8_t *rf[4];
- uint32_t sum0[4];
- uint32_t sum1[4];
-
- rf[0] = ref[0];
- rf[1] = ref[1];
- rf[2] = ref[2];
- rf[3] = ref[3];
- aom_sad128x64x4d_avx2(src, src_stride, rf, ref_stride, sum0);
- src += src_stride << 6;
- rf[0] += ref_stride << 6;
- rf[1] += ref_stride << 6;
- rf[2] += ref_stride << 6;
- rf[3] += ref_stride << 6;
- aom_sad128x64x4d_avx2(src, src_stride, rf, ref_stride, sum1);
- res[0] = sum0[0] + sum1[0];
- res[1] = sum0[1] + sum1[1];
- res[2] = sum0[2] + sum1[2];
- res[3] = sum0[3] + sum1[3];
-}
-
static unsigned int sad_w64_avg_avx2(const uint8_t *src_ptr, int src_stride,
const uint8_t *ref_ptr, int ref_stride,
const int h, const uint8_t *second_pred,
diff --git a/test/sad_test.cc b/test/sad_test.cc
index 960a95e..5712929 100644
--- a/test/sad_test.cc
+++ b/test/sad_test.cc
@@ -1703,13 +1703,16 @@
INSTANTIATE_TEST_CASE_P(AVX2, SADavgTest, ::testing::ValuesIn(avg_avx2_tests));
const SadMxNx4Param x4d_avx2_tests[] = {
- make_tuple(64, 128, &aom_sad64x128x4d_avx2, -1),
- make_tuple(128, 64, &aom_sad128x64x4d_avx2, -1),
- make_tuple(128, 128, &aom_sad128x128x4d_avx2, -1),
- make_tuple(64, 64, &aom_sad64x64x4d_avx2, -1),
make_tuple(32, 64, &aom_sad32x64x4d_avx2, -1),
- make_tuple(64, 32, &aom_sad64x32x4d_avx2, -1),
make_tuple(32, 32, &aom_sad32x32x4d_avx2, -1),
+ make_tuple(32, 16, &aom_sad32x16x4d_avx2, -1),
+ make_tuple(32, 8, &aom_sad32x8x4d_avx2, -1),
+ make_tuple(64, 128, &aom_sad64x128x4d_avx2, -1),
+ make_tuple(64, 64, &aom_sad64x64x4d_avx2, -1),
+ make_tuple(64, 32, &aom_sad64x32x4d_avx2, -1),
+ make_tuple(64, 16, &aom_sad64x16x4d_avx2, -1),
+ make_tuple(128, 128, &aom_sad128x128x4d_avx2, -1),
+ make_tuple(128, 64, &aom_sad128x64x4d_avx2, -1),
#if CONFIG_AV1_HIGHBITDEPTH
make_tuple(128, 128, &aom_highbd_sad128x128x4d_avx2, 8),
make_tuple(128, 128, &aom_highbd_sad128x128x4d_avx2, 10),