Add aom_sum_sse_2d_i16_c and associated SIMD versions

A new function, aom_sum_sse_2d_i16_c, is introduced which computes both
the sum of values and the sum of squared values of a residual block.
Corresponding SSE2 and AVX2 implementations have been added.
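
For reference, the scalar function behaves like the sketch below (a
minimal illustration inferred from the SIMD kernels in this patch; the
actual definition lives elsewhere in the tree):

    uint64_t aom_sum_sse_2d_i16_c(const int16_t *src, int src_stride,
                                  int width, int height, int *sum) {
      uint64_t sse = 0;
      int s = 0;
      for (int r = 0; r < height; ++r) {
        for (int c = 0; c < width; ++c) {
          const int16_t v = src[c];
          sse += (uint64_t)(v * v);  // sum of squared values
          s += v;                    // sum of values
        }
        src += src_stride;
      }
      // Accumulate like the nxn kernels below; callers are expected to
      // pass in *sum == 0.
      *sum += s;
      return sse;
    }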

Change-Id: Ia9e92ef5d828a47e1dee112f631a9850e7452f7b
diff --git a/aom_dsp/x86/sum_squares_avx2.c b/aom_dsp/x86/sum_squares_avx2.c
index 97d78b6..0d63db2 100644
--- a/aom_dsp/x86/sum_squares_avx2.c
+++ b/aom_dsp/x86/sum_squares_avx2.c
@@ -78,6 +78,84 @@
   }
 }
 
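+// Computes the sum of values and the sum of squared values (SSE) of a
+// width x height block of int16 residuals, 4 rows x 16 columns at a time.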
+static uint64_t aom_sum_sse_2d_i16_nxn_avx2(const int16_t *src, int stride,
+                                            int width, int height, int *sum) {
+  uint64_t result;
+  const __m256i zero_reg = _mm256_setzero_si256();
+  const __m256i one_reg = _mm256_set1_epi16(1);
+
+  __m256i v_sse_total = zero_reg;
+  __m256i v_sum_total = zero_reg;
+
+  for (int row = 0; row < height; row += 4) {
+    __m256i v_sse_row = zero_reg;
+    for (int col = 0; col < width; col += 16) {
+      const int16_t *tempsrc = src + col;
+      const __m256i v_val_0_w =
+          _mm256_loadu_si256((const __m256i *)(tempsrc + 0 * stride));
+      const __m256i v_val_1_w =
+          _mm256_loadu_si256((const __m256i *)(tempsrc + 1 * stride));
+      const __m256i v_val_2_w =
+          _mm256_loadu_si256((const __m256i *)(tempsrc + 2 * stride));
+      const __m256i v_val_3_w =
+          _mm256_loadu_si256((const __m256i *)(tempsrc + 3 * stride));
+
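+      // Accumulate the sum: add the 4 rows at 16 bits, then widen adjacent
+      // pairs to 32 bits by multiply-adding against a vector of ones.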
+      const __m256i v_sum_01 = _mm256_add_epi16(v_val_0_w, v_val_1_w);
+      const __m256i v_sum_23 = _mm256_add_epi16(v_val_2_w, v_val_3_w);
+      __m256i v_sum_0123 = _mm256_add_epi16(v_sum_01, v_sum_23);
+      v_sum_0123 = _mm256_madd_epi16(v_sum_0123, one_reg);
+      v_sum_total = _mm256_add_epi32(v_sum_total, v_sum_0123);
+
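+      // Accumulate the SSE: madd(v, v) squares and pair-adds into 32 bits.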
+      const __m256i v_sq_0_d = _mm256_madd_epi16(v_val_0_w, v_val_0_w);
+      const __m256i v_sq_1_d = _mm256_madd_epi16(v_val_1_w, v_val_1_w);
+      const __m256i v_sq_2_d = _mm256_madd_epi16(v_val_2_w, v_val_2_w);
+      const __m256i v_sq_3_d = _mm256_madd_epi16(v_val_3_w, v_val_3_w);
+      const __m256i v_sq_01_d = _mm256_add_epi32(v_sq_0_d, v_sq_1_d);
+      const __m256i v_sq_23_d = _mm256_add_epi32(v_sq_2_d, v_sq_3_d);
+      const __m256i v_sq_0123_d = _mm256_add_epi32(v_sq_01_d, v_sq_23_d);
+      v_sse_row = _mm256_add_epi32(v_sse_row, v_sq_0123_d);
+    }
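+    // Zero-extend the strip's 32-bit SSE lanes to 64 bits before adding to
+    // the running total, so wide blocks cannot overflow 32 bits.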
+    const __m256i v_sse_row_low = _mm256_unpacklo_epi32(v_sse_row, zero_reg);
+    const __m256i v_sse_row_hi = _mm256_unpackhi_epi32(v_sse_row, zero_reg);
+    v_sse_row = _mm256_add_epi64(v_sse_row_low, v_sse_row_hi);
+    v_sse_total = _mm256_add_epi64(v_sse_total, v_sse_row);
+    src += 4 * stride;
+  }
+
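+  // Horizontally reduce the eight 32-bit sum lanes to a scalar.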
+  const __m128i v_sum_total_low = _mm256_castsi256_si128(v_sum_total);
+  const __m128i v_sum_total_hi = _mm256_extracti128_si256(v_sum_total, 1);
+  __m128i sum_128bit = _mm_add_epi32(v_sum_total_hi, v_sum_total_low);
+  sum_128bit = _mm_add_epi32(sum_128bit, _mm_srli_si128(sum_128bit, 8));
+  sum_128bit = _mm_add_epi32(sum_128bit, _mm_srli_si128(sum_128bit, 4));
+  *sum += _mm_cvtsi128_si32(sum_128bit);
+
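+  // Horizontally reduce the four 64-bit SSE lanes to a scalar.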
+  __m128i v_sse_total_lo = _mm256_castsi256_si128(v_sse_total);
+  __m128i v_sse_total_hi = _mm256_extracti128_si256(v_sse_total, 1);
+  __m128i sse_128bit = _mm_add_epi64(v_sse_total_lo, v_sse_total_hi);
+
+  sse_128bit =
+      _mm_add_epi64(sse_128bit, _mm_unpackhi_epi64(sse_128bit, sse_128bit));
+
+  xx_storel_64(&result, sse_128bit);
+
+  return result;
+}
+
+uint64_t aom_sum_sse_2d_i16_avx2(const int16_t *src, int src_stride, int width,
+                                 int height, int *sum) {
+  if (LIKELY(width == 4 && height == 4)) {
+    return aom_sum_sse_2d_i16_4x4_sse2(src, src_stride, sum);
+  } else if (LIKELY(width == 4 && (height & 3) == 0)) {
+    return aom_sum_sse_2d_i16_4xn_sse2(src, src_stride, height, sum);
+  } else if (LIKELY(width == 8 && (height & 3) == 0)) {
+    return aom_sum_sse_2d_i16_nxn_sse2(src, src_stride, width, height, sum);
+  } else if (LIKELY(((width & 15) == 0) && ((height & 3) == 0))) {
+    return aom_sum_sse_2d_i16_nxn_avx2(src, src_stride, width, height, sum);
+  } else {
+    return aom_sum_sse_2d_i16_c(src, src_stride, width, height, sum);
+  }
+}
+
 // Accumulate sum of 16-bit elements in the vector
 static AOM_INLINE int32_t mm256_accumulate_epi16(__m256i vec_a) {
   __m128i vtmp1 = _mm256_extracti128_si256(vec_a, 1);
diff --git a/aom_dsp/x86/sum_squares_sse2.c b/aom_dsp/x86/sum_squares_sse2.c
index 85b301a..0bdeee9 100644
--- a/aom_dsp/x86/sum_squares_sse2.c
+++ b/aom_dsp/x86/sum_squares_sse2.c
@@ -53,6 +53,27 @@
   return (uint64_t)_mm_cvtsi128_si32(v_sum_d);
 }
 
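+// Computes the sum and SSE of a 4x4 block, packing two rows per XMM register.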
+uint64_t aom_sum_sse_2d_i16_4x4_sse2(const int16_t *src, int stride, int *sum) {
+  const __m128i one_reg = _mm_set1_epi16(1);
+  const __m128i v_val_0_w = xx_loadl_64(src + 0 * stride);
+  const __m128i v_val_2_w = xx_loadl_64(src + 2 * stride);
+  __m128i v_val_01_w = xx_loadh_64(v_val_0_w, src + 1 * stride);
+  __m128i v_val_23_w = xx_loadh_64(v_val_2_w, src + 3 * stride);
+
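+  // Sum of values: pair rows at 16 bits, widen with madd(x, 1), then reduce.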
+  __m128i v_sum_0123_d = _mm_add_epi16(v_val_01_w, v_val_23_w);
+  v_sum_0123_d = _mm_madd_epi16(v_sum_0123_d, one_reg);
+  v_sum_0123_d = _mm_add_epi32(v_sum_0123_d, _mm_srli_si128(v_sum_0123_d, 8));
+  v_sum_0123_d = _mm_add_epi32(v_sum_0123_d, _mm_srli_si128(v_sum_0123_d, 4));
+  *sum = _mm_cvtsi128_si32(v_sum_0123_d);
+
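+  // SSE: madd(v, v) pair-adds the squares into 32-bit lanes, then reduce.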
+  const __m128i v_sq_01_d = _mm_madd_epi16(v_val_01_w, v_val_01_w);
+  const __m128i v_sq_23_d = _mm_madd_epi16(v_val_23_w, v_val_23_w);
+  __m128i v_sq_0123_d = _mm_add_epi32(v_sq_01_d, v_sq_23_d);
+  v_sq_0123_d = _mm_add_epi32(v_sq_0123_d, _mm_srli_si128(v_sq_0123_d, 8));
+  v_sq_0123_d = _mm_add_epi32(v_sq_0123_d, _mm_srli_si128(v_sq_0123_d, 4));
+  return (uint64_t)_mm_cvtsi128_si32(v_sq_0123_d);
+}
+
 uint64_t aom_sum_squares_2d_i16_4xn_sse2(const int16_t *src, int stride,
                                          int height) {
   int r = 0;
@@ -70,6 +91,20 @@
   return xx_cvtsi128_si64(v_acc_64);
 }
 
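+// Handles a 4-wide column of any multiple-of-4 height as a chain of 4x4s.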
+uint64_t aom_sum_sse_2d_i16_4xn_sse2(const int16_t *src, int stride, int height,
+                                     int *sum) {
+  int r = 0;
+  uint64_t sse = 0;
+  do {
+    int curr_sum = 0;
+    sse += aom_sum_sse_2d_i16_4x4_sse2(src, stride, &curr_sum);
+    *sum += curr_sum;
+    src += stride << 2;
+    r += 4;
+  } while (r < height);
+  return sse;
+}
+
 #ifdef __GNUC__
 // This prevents GCC/Clang from inlining this function into
 // aom_sum_squares_2d_i16_sse2, which in turn saves some stack
@@ -120,6 +155,69 @@
   return xx_cvtsi128_si64(v_acc_q);
 }
 
+#ifdef __GNUC__
+// This prevents GCC/Clang from inlining this function into
+// aom_sum_sse_2d_i16_sse2, which in turn saves some stack
+// maintenance instructions in the common case of 4x4.
+__attribute__((noinline))
+#endif
+uint64_t
+aom_sum_sse_2d_i16_nxn_sse2(const int16_t *src, int stride, int width,
+                            int height, int *sum) {
+  int r = 0;
+  uint64_t result;
+  const __m128i zero_reg = _mm_setzero_si128();
+  const __m128i one_reg = _mm_set1_epi16(1);
+
+  __m128i v_sse_total = zero_reg;
+  __m128i v_sum_total = zero_reg;
+
+  do {
+    int c = 0;
+    __m128i v_sse_row = zero_reg;
+    do {
+      const int16_t *b = src + c;
+
+      __m128i v_val_0_w = xx_load_128(b + 0 * stride);
+      __m128i v_val_1_w = xx_load_128(b + 1 * stride);
+      __m128i v_val_2_w = xx_load_128(b + 2 * stride);
+      __m128i v_val_3_w = xx_load_128(b + 3 * stride);
+
+      const __m128i v_sq_0_d = _mm_madd_epi16(v_val_0_w, v_val_0_w);
+      const __m128i v_sq_1_d = _mm_madd_epi16(v_val_1_w, v_val_1_w);
+      const __m128i v_sq_2_d = _mm_madd_epi16(v_val_2_w, v_val_2_w);
+      const __m128i v_sq_3_d = _mm_madd_epi16(v_val_3_w, v_val_3_w);
+      const __m128i v_sq_01_d = _mm_add_epi32(v_sq_0_d, v_sq_1_d);
+      const __m128i v_sq_23_d = _mm_add_epi32(v_sq_2_d, v_sq_3_d);
+      const __m128i v_sq_0123_d = _mm_add_epi32(v_sq_01_d, v_sq_23_d);
+      v_sse_row = _mm_add_epi32(v_sse_row, v_sq_0123_d);
+
+      const __m128i v_sum_01 = _mm_add_epi16(v_val_0_w, v_val_1_w);
+      const __m128i v_sum_23 = _mm_add_epi16(v_val_2_w, v_val_3_w);
+      __m128i v_sum_0123_d = _mm_add_epi16(v_sum_01, v_sum_23);
+      v_sum_0123_d = _mm_madd_epi16(v_sum_0123_d, one_reg);
+      v_sum_total = _mm_add_epi32(v_sum_total, v_sum_0123_d);
+
+      c += 8;
+    } while (c < width);
+
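+    // Zero-extend this strip's 32-bit SSE lanes to 64 bits before adding to
+    // the running total, so wide blocks cannot overflow 32 bits.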
+    const __m128i v_sse_row_low = _mm_unpacklo_epi32(v_sse_row, zero_reg);
+    const __m128i v_sse_row_hi = _mm_unpackhi_epi32(v_sse_row, zero_reg);
+    v_sse_row = _mm_add_epi64(v_sse_row_low, v_sse_row_hi);
+    v_sse_total = _mm_add_epi64(v_sse_total, v_sse_row);
+    src += 4 * stride;
+    r += 4;
+  } while (r < height);
+
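+  // Horizontally reduce the sum (32-bit lanes) and SSE (64-bit lanes).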
+  v_sum_total = _mm_add_epi32(v_sum_total, _mm_srli_si128(v_sum_total, 8));
+  v_sum_total = _mm_add_epi32(v_sum_total, _mm_srli_si128(v_sum_total, 4));
+  *sum += _mm_cvtsi128_si32(v_sum_total);
+
+  v_sse_total = _mm_add_epi64(v_sse_total, _mm_srli_si128(v_sse_total, 8));
+  xx_storel_64(&result, v_sse_total);
+  return result;
+}
+
 uint64_t aom_sum_squares_2d_i16_sse2(const int16_t *src, int stride, int width,
                                      int height) {
   // 4 elements per row only requires half an XMM register, so this
@@ -137,6 +235,20 @@
   }
 }
 
+uint64_t aom_sum_sse_2d_i16_sse2(const int16_t *src, int src_stride, int width,
+                                 int height, int *sum) {
+  if (LIKELY(width == 4 && height == 4)) {
+    return aom_sum_sse_2d_i16_4x4_sse2(src, src_stride, sum);
+  } else if (LIKELY(width == 4 && (height & 3) == 0)) {
+    return aom_sum_sse_2d_i16_4xn_sse2(src, src_stride, height, sum);
+  } else if (LIKELY((width & 7) == 0 && (height & 3) == 0)) {
+    // Generic case: width is a multiple of 8 and height a multiple of 4.
+    return aom_sum_sse_2d_i16_nxn_sse2(src, src_stride, width, height, sum);
+  } else {
+    return aom_sum_sse_2d_i16_c(src, src_stride, width, height, sum);
+  }
+}
+
 //////////////////////////////////////////////////////////////////////////////
 // 1D version
 //////////////////////////////////////////////////////////////////////////////
diff --git a/aom_dsp/x86/sum_squares_sse2.h b/aom_dsp/x86/sum_squares_sse2.h
index 491e31c..5ed3f2c 100644
--- a/aom_dsp/x86/sum_squares_sse2.h
+++ b/aom_dsp/x86/sum_squares_sse2.h
@@ -19,4 +19,10 @@
                                          int height);
 uint64_t aom_sum_squares_2d_i16_4x4_sse2(const int16_t *src, int stride);
 
+uint64_t aom_sum_sse_2d_i16_4x4_sse2(const int16_t *src, int stride, int *sum);
+uint64_t aom_sum_sse_2d_i16_4xn_sse2(const int16_t *src, int stride, int height,
+                                     int *sum);
+uint64_t aom_sum_sse_2d_i16_nxn_sse2(const int16_t *src, int stride, int width,
+                                     int height, int *sum);
+
 #endif  // AOM_DSP_X86_SUM_SQUARES_SSE2_H_