Add AVX2 variants for 4 tap highbd filter

Added AVX2 implementations of the following:
- aom_highbd_filter_block1d16_h4_avx2
- aom_highbd_filter_block1d16_v4_avx2
- aom_highbd_filter_block1d8_h4_avx2
- aom_highbd_filter_block1d8_v4_avx2
- aom_highbd_filter_block1d4_h4_avx2
- aom_highbd_filter_block1d4_v4_avx2

Obtained ~30% gain w.r.t. the 8-tap filter on
average at the unit test level.

When tested for 20 frames of crowd_run_360p_10 at 1 Mbps
for the speed=1 preset, observed ~2.5% reduction in encoder time.

Change-Id: I127a869ea02c12db8dc3c3cc310fcc8c310f519c
diff --git a/aom_dsp/x86/convolve_avx2.h b/aom_dsp/x86/convolve_avx2.h
index 63d453d..3cc0e23 100644
--- a/aom_dsp/x86/convolve_avx2.h
+++ b/aom_dsp/x86/convolve_avx2.h
@@ -133,6 +133,15 @@
   return res;
 }
 
+// Sums two 16-bit multiply-add results into one 32-bit accumulator per lane:
+// res = madd(s[0], coeffs[0]) + madd(s[1], coeffs[1]). With s[] holding
+// interleaved pixel pairs and coeffs[] holding interleaved tap pairs, this
+// evaluates a 4-tap filter in 32-bit precision.
+static INLINE __m256i convolve_4tap(const __m256i *const s,
+                                    const __m256i *const coeffs) {
+  const __m256i res_1 = _mm256_madd_epi16(s[0], coeffs[0]);
+  const __m256i res_2 = _mm256_madd_epi16(s[1], coeffs[1]);
+
+  const __m256i res = _mm256_add_epi32(res_1, res_2);
+  return res;
+}
+
 static INLINE __m256i convolve_lowbd_x(const __m256i data,
                                        const __m256i *const coeffs,
                                        const __m256i *const filt) {
diff --git a/aom_dsp/x86/highbd_convolve_avx2.c b/aom_dsp/x86/highbd_convolve_avx2.c
index e3b8c5a..ebcb5ac 100644
--- a/aom_dsp/x86/highbd_convolve_avx2.c
+++ b/aom_dsp/x86/highbd_convolve_avx2.c
@@ -20,6 +20,14 @@
 // -----------------------------------------------------------------------------
 // Copy and average
 
+// Byte-shuffle masks (identical in both 128-bit lanes) used by the 4-tap
+// horizontal filters to gather overlapping 16-bit pixel pairs for
+// _mm256_madd_epi16: f2f3 yields word pairs (0,1)(1,2)(2,3)(3,4) to be
+// multiplied by taps f2/f3; f4f5 yields the pairs two words further along,
+// (2,3)(3,4)(4,5)(5,6), to be multiplied by taps f4/f5.
+static const uint8_t ip_shuffle_f2f3[32] = { 0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6,
+                                             7, 6, 7, 8, 9, 0, 1, 2, 3, 2, 3,
+                                             4, 5, 4, 5, 6, 7, 6, 7, 8, 9 };
+static const uint8_t ip_shuffle_f4f5[32] = { 4, 5, 6,  7,  6,  7,  8,  9,
+                                             8, 9, 10, 11, 10, 11, 12, 13,
+                                             4, 5, 6,  7,  6,  7,  8,  9,
+                                             8, 9, 10, 11, 10, 11, 12, 13 };
+
 void aom_highbd_convolve_copy_avx2(const uint8_t *src8, ptrdiff_t src_stride,
                                    uint8_t *dst8, ptrdiff_t dst_stride,
                                    const int16_t *filter_x, int filter_x_stride,
@@ -444,6 +452,17 @@
   f[3] = _mm256_shuffle_epi8(hh, p3);
 }
 
+// Packs the middle four taps (coefficients 2..5) of an 8-tap filter for use
+// with convolve_4tap: f[0] holds (f2,f3) repeated across all word pairs and
+// f[1] holds (f4,f5) repeated. Only f[0] and f[1] are written.
+static INLINE void pack_filters_4tap(const int16_t *filter,
+                                     __m256i *f /*f[2]*/) {
+  const __m128i h = _mm_loadu_si128((const __m128i *)filter);
+  const __m256i coeff = _mm256_broadcastsi128_si256(h);
+
+  // coeffs 2 3 2 3 2 3 2 3
+  f[0] = _mm256_shuffle_epi32(coeff, 0x55);
+  // coeffs 4 5 4 5 4 5 4 5
+  f[1] = _mm256_shuffle_epi32(coeff, 0xaa);
+}
+
 static INLINE void filter_8x1_pixels(const __m256i *sig /*sig[4]*/,
                                      const __m256i *fil /*fil[4]*/,
                                      __m256i *y) {
@@ -544,6 +563,176 @@
   } while (height > 0);
 }
 
+// 4-wide horizontal 4-tap high-bitdepth filter. Processes two rows per
+// iteration (one per 128-bit lane), with a single-row tail for odd heights.
+// Uses taps f2..f5 of the 8-tap filter array (see pack_filters_4tap).
+static void aom_highbd_filter_block1d4_h4_avx2(
+    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
+    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
+  const __m256i rounding = _mm256_set1_epi32(1 << (CONV8_ROUNDING_BITS - 1));
+  __m256i ff[2], s[2];
+  uint32_t i;
+  // Clamp to the valid pixel range for the given bit depth.
+  const __m256i clip_pixel =
+      _mm256_set1_epi16(bd == 10 ? 1023 : (bd == 12 ? 4095 : 255));
+  const __m256i zero = _mm256_setzero_si256();
+
+  // Same pair-gathering pattern as ip_shuffle_f2f3: word pairs
+  // (0,1)(1,2)(2,3)(3,4) in each 128-bit lane.
+  static const uint8_t shuffle_mask[32] = { 0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6,
+                                            7, 6, 7, 8, 9, 0, 1, 2, 3, 2, 3,
+                                            4, 5, 4, 5, 6, 7, 6, 7, 8, 9 };
+
+  __m256i mask = _mm256_loadu_si256((__m256i *)shuffle_mask);
+  __m256i ip_mask_f2f3 = _mm256_loadu_si256((__m256i *)ip_shuffle_f2f3);
+  __m256i ip_mask_f4f5 = _mm256_loadu_si256((__m256i *)ip_shuffle_f4f5);
+
+  pack_filters_4tap(filter, ff);
+  // Caller passes src for an 8-tap filter; back up 3, then load at +2 below,
+  // so loads start at x-1 — the first input of taps f2..f5 for output x.
+  src_ptr -= 3;
+  for (i = 0; i <= (height - 2); i += 2) {
+    // Row i in the low lane, row i+1 in the high lane.
+    __m256i row0 = _mm256_castsi128_si256(
+        _mm_loadu_si128((__m128i *)&src_ptr[i * src_pitch + 2]));
+    __m256i row1 = _mm256_castsi128_si256(
+        _mm_loadu_si128((__m128i *)&src_ptr[(i + 1) * src_pitch + 2]));
+
+    s[0] = _mm256_inserti128_si256(row0, _mm256_castsi256_si128(row1), 1);
+    // Per-lane shift by 2 words: s[1] starts two pixels further along,
+    // providing the (f4,f5) input pairs.
+    s[1] = _mm256_alignr_epi8(s[0], s[0], 4);
+
+    s[0] = _mm256_shuffle_epi8(s[0], mask);
+    s[1] = _mm256_shuffle_epi8(s[1], mask);
+
+    __m256i res = convolve_4tap(s, ff);
+    res =
+        _mm256_srai_epi32(_mm256_add_epi32(res, rounding), CONV8_ROUNDING_BITS);
+
+    // Pack to 16 bits and clamp to [0, clip_pixel].
+    res = _mm256_packs_epi32(res, res);
+    res = _mm256_min_epi16(res, clip_pixel);
+    res = _mm256_max_epi16(res, zero);
+
+    _mm_storel_epi64((__m128i *)&dst_ptr[i * dst_pitch],
+                     _mm256_castsi256_si128(res));
+    _mm_storel_epi64((__m128i *)&dst_ptr[(i + 1) * dst_pitch],
+                     _mm256_extracti128_si256(res, 1));
+  }
+  if (height % 2 != 0) {
+    // Tail: single remaining row; both lanes read from the same row, the
+    // high lane offset by 4 pixels, then the ip_shuffle masks gather the
+    // f2/f3 and f4/f5 pair streams directly.
+    i = height - 1;
+    const __m256i row0_0 = _mm256_castsi128_si256(
+        _mm_loadu_si128((__m128i *)&src_ptr[i * src_pitch + 2]));
+    const __m256i row0_1 = _mm256_castsi128_si256(
+        _mm_loadu_si128((__m128i *)&src_ptr[i * src_pitch + 6]));
+
+    const __m256i r0 =
+        _mm256_inserti128_si256(row0_0, _mm256_castsi256_si128(row0_1), 1);
+
+    s[0] = _mm256_shuffle_epi8(r0, ip_mask_f2f3);
+    s[1] = _mm256_shuffle_epi8(r0, ip_mask_f4f5);
+
+    __m256i res = convolve_4tap(s, ff);
+    res =
+        _mm256_srai_epi32(_mm256_add_epi32(res, rounding), CONV8_ROUNDING_BITS);
+
+    res = _mm256_packs_epi32(res, res);
+    res = _mm256_min_epi16(res, clip_pixel);
+    res = _mm256_max_epi16(res, zero);
+
+    _mm_storel_epi64((__m128i *)&dst_ptr[i * dst_pitch],
+                     _mm256_castsi256_si128(res));
+  }
+}
+
+// 8-wide horizontal 4-tap high-bitdepth filter. Processes two rows per
+// iteration by filtering even and odd output pixels separately and then
+// re-interleaving them; a single-row tail handles odd heights.
+static void aom_highbd_filter_block1d8_h4_avx2(
+    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
+    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
+  const __m256i rounding = _mm256_set1_epi32(1 << (CONV8_ROUNDING_BITS - 1));
+  __m256i ff[2], s[2];
+  uint32_t i = 0;
+  // Clamp to the valid pixel range for the given bit depth.
+  const __m256i clip_pixel =
+      _mm256_set1_epi16(bd == 10 ? 1023 : (bd == 12 ? 4095 : 255));
+  const __m256i zero = _mm256_setzero_si256();
+
+  // Interleaves the packed even results (low 4 words of each lane half)
+  // with the odd results (next 4 words) back into pixel order.
+  static const uint8_t shuffle_mask[32] = { 0, 1, 8,  9,  2, 3, 10, 11,
+                                            4, 5, 12, 13, 6, 7, 14, 15,
+                                            0, 1, 8,  9,  2, 3, 10, 11,
+                                            4, 5, 12, 13, 6, 7, 14, 15 };
+
+  __m256i mask = _mm256_loadu_si256((__m256i *)shuffle_mask);
+  __m256i ip_mask_f2f3 = _mm256_loadu_si256((__m256i *)ip_shuffle_f2f3);
+  __m256i ip_mask_f4f5 = _mm256_loadu_si256((__m256i *)ip_shuffle_f4f5);
+
+  pack_filters_4tap(filter, ff);
+  // Back up 3 then load at +2 below: loads start at x-1, the first input of
+  // taps f2..f5 for output pixel x.
+  src_ptr -= 3;
+
+  /* Horizontal filter */
+
+  for (i = 0; i <= (height - 2); i += 2) {
+    const __m256i row0 =
+        _mm256_loadu_si256((__m256i *)&src_ptr[i * src_pitch + 2]);
+    __m256i row1 =
+        _mm256_loadu_si256((__m256i *)&src_ptr[(i + 1) * src_pitch + 2]);
+
+    // r0: low halves of both rows (row i in lane 0, row i+1 in lane 1);
+    // r1: high halves, used to shift continuation pixels in via alignr.
+    const __m256i r0 =
+        _mm256_inserti128_si256(row0, _mm256_castsi256_si128(row1), 1);
+    const __m256i r1 = _mm256_permute2x128_si256(row0, row1, 0x31);
+
+    // even pixels
+    // madd pairs adjacent words, so r0 as-is gives (x-1,x)(x+1,x+2)... for
+    // taps f2/f3, and a 2-word shift gives (x+1,x+2)... for taps f4/f5.
+    s[0] = r0;
+    s[1] = _mm256_alignr_epi8(r1, r0, 4);
+
+    __m256i res_even = convolve_4tap(s, ff);
+    res_even = _mm256_srai_epi32(_mm256_add_epi32(res_even, rounding),
+                                 CONV8_ROUNDING_BITS);
+
+    // odd pixels
+    // Same pattern, shifted one word (one pixel) along the row.
+    s[0] = _mm256_alignr_epi8(r1, r0, 2);
+    s[1] = _mm256_alignr_epi8(r1, r0, 6);
+
+    __m256i res_odd = convolve_4tap(s, ff);
+    res_odd = _mm256_srai_epi32(_mm256_add_epi32(res_odd, rounding),
+                                CONV8_ROUNDING_BITS);
+
+    // Pack even/odd results to 16 bits and restore pixel order.
+    __m256i res = _mm256_packs_epi32(res_even, res_odd);
+    res = _mm256_shuffle_epi8(res, mask);
+
+    res = _mm256_min_epi16(res, clip_pixel);
+    res = _mm256_max_epi16(res, zero);
+
+    _mm_storeu_si128((__m128i *)&dst_ptr[i * dst_pitch],
+                     _mm256_castsi256_si128(res));
+    _mm_storeu_si128((__m128i *)&dst_ptr[i * dst_pitch + dst_pitch],
+                     _mm256_extracti128_si256(res, 1));
+  }
+
+  if (height % 2 != 0) {
+    // Tail: single remaining row; lane 1 reads the same row offset by 4
+    // pixels, and the ip_shuffle masks gather the pair streams directly.
+    i = height - 1;
+    const __m256i row0_0 =
+        _mm256_loadu_si256((__m256i *)&src_ptr[i * src_pitch + 2]);
+    const __m256i row0_1 =
+        _mm256_loadu_si256((__m256i *)&src_ptr[i * src_pitch + 6]);
+
+    const __m256i r0 =
+        _mm256_inserti128_si256(row0_0, _mm256_castsi256_si128(row0_1), 1);
+
+    s[0] = _mm256_shuffle_epi8(r0, ip_mask_f2f3);
+    s[1] = _mm256_shuffle_epi8(r0, ip_mask_f4f5);
+
+    __m256i res = convolve_4tap(s, ff);
+    res =
+        _mm256_srai_epi32(_mm256_add_epi32(res, rounding), CONV8_ROUNDING_BITS);
+
+    res = _mm256_packs_epi32(res, res);
+    res = _mm256_min_epi16(res, clip_pixel);
+    res = _mm256_max_epi16(res, zero);
+
+    // Pixels 0-3 from the low lane, pixels 4-7 from the high lane.
+    _mm_storel_epi64((__m128i *)&dst_ptr[i * dst_pitch],
+                     _mm256_castsi256_si128(res));
+    _mm_storel_epi64((__m128i *)&dst_ptr[i * dst_pitch + 4],
+                     _mm256_extracti128_si256(res, 1));
+  }
+}
+
+// 16-wide horizontal 4-tap filter: two independent 8-wide halves.
+static void aom_highbd_filter_block1d16_h4_avx2(
+    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
+    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
+  aom_highbd_filter_block1d8_h4_avx2(src_ptr, src_pitch, dst_ptr, dst_pitch,
+                                     height, filter, bd);
+  aom_highbd_filter_block1d8_h4_avx2(src_ptr + 8, src_pitch, dst_ptr + 8,
+                                     dst_pitch, height, filter, bd);
+}
+
 // -----------------------------------------------------------------------------
 // 2-tap horizontal filtering
 
@@ -875,6 +1064,142 @@
   } while (height > 0);
 }
 
+// 4-wide vertical 4-tap high-bitdepth filter. Output row i uses source rows
+// i+2..i+5 (taps f2..f5 of the 8-tap array; the caller's src is offset for
+// an 8-tap filter). Two rows are produced per iteration, one per lane, with
+// interleaved row pairs reused across iterations.
+// NOTE(review): the loop always emits two rows per iteration with no odd
+// tail — assumes height is even; confirm against callers.
+static void aom_highbd_filter_block1d4_v4_avx2(
+    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
+    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
+  const int bits = FILTER_BITS;
+
+  const __m128i round_shift_bits = _mm_cvtsi32_si128(bits);
+  const __m256i round_const_bits = _mm256_set1_epi32((1 << bits) >> 1);
+  // 32-bit clamp bound (clamping is done before packing here).
+  const __m256i clip_pixel =
+      _mm256_set1_epi32(bd == 10 ? 1023 : (bd == 12 ? 4095 : 255));
+  const __m256i zero = _mm256_setzero_si256();
+  uint32_t i;
+  __m256i s[2], ff[2];
+
+  pack_filters_4tap(filter, ff);
+
+  const uint16_t *data = src_ptr;
+  /* Vertical filter */
+  {
+    // Prologue: interleave rows (2,3) in lane 0 and (3,4) in lane 1 so that
+    // madd pairs vertically adjacent pixels.
+    __m128i s2 = _mm_loadl_epi64((__m128i *)(data + 2 * src_pitch));
+    __m128i s3 = _mm_loadl_epi64((__m128i *)(data + 3 * src_pitch));
+
+    __m256i s23 = _mm256_inserti128_si256(_mm256_castsi128_si256(s2), s3, 1);
+
+    __m128i s4 = _mm_loadl_epi64((__m128i *)(data + 4 * src_pitch));
+
+    __m256i s34 = _mm256_inserti128_si256(_mm256_castsi128_si256(s3), s4, 1);
+
+    s[0] = _mm256_unpacklo_epi16(s23, s34);
+
+    for (i = 0; i < height; i += 2) {
+      data = &src_ptr[i * src_pitch];
+
+      // Load the two new rows needed for this row pair.
+      __m128i s5 = _mm_loadl_epi64((__m128i *)(data + 5 * src_pitch));
+      __m128i s6 = _mm_loadl_epi64((__m128i *)(data + 6 * src_pitch));
+
+      __m256i s45 = _mm256_inserti128_si256(_mm256_castsi128_si256(s4), s5, 1);
+      __m256i s56 = _mm256_inserti128_si256(_mm256_castsi128_si256(s5), s6, 1);
+
+      s[1] = _mm256_unpacklo_epi16(s45, s56);
+
+      // Lane 0: rows i+2..i+5 -> output row i;
+      // lane 1: rows i+3..i+6 -> output row i+1.
+      const __m256i res_a = convolve_4tap(s, ff);
+
+      __m256i res_a_round = _mm256_sra_epi32(
+          _mm256_add_epi32(res_a, round_const_bits), round_shift_bits);
+
+      // Clamp in 32 bits, then pack to 16 bits for the store.
+      __m256i res_16bit = _mm256_min_epi32(res_a_round, clip_pixel);
+      res_16bit = _mm256_max_epi32(res_16bit, zero);
+      res_16bit = _mm256_packs_epi32(res_16bit, res_16bit);
+
+      _mm_storel_epi64((__m128i *)&dst_ptr[i * dst_pitch],
+                       _mm256_castsi256_si128(res_16bit));
+      _mm_storel_epi64((__m128i *)&dst_ptr[i * dst_pitch + dst_pitch],
+                       _mm256_extracti128_si256(res_16bit, 1));
+
+      // Slide the two-row window down for the next iteration.
+      s[0] = s[1];
+      s4 = s6;
+    }
+  }
+}
+
+// 8-wide vertical 4-tap high-bitdepth filter. Same row scheme as the 4-wide
+// version (output row i uses source rows i+2..i+5), but processes the low
+// and high 4-pixel halves via unpacklo/unpackhi pairs s[0..1]/s[2..3].
+// NOTE(review): the loop always emits two rows per iteration with no odd
+// tail — assumes height is even; confirm against callers.
+static void aom_highbd_filter_block1d8_v4_avx2(
+    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
+    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
+  const int bits = FILTER_BITS;
+
+  const __m128i round_shift_bits = _mm_cvtsi32_si128(bits);
+  const __m256i round_const_bits = _mm256_set1_epi32((1 << bits) >> 1);
+  // Clamp to the valid pixel range for the given bit depth.
+  const __m256i clip_pixel =
+      _mm256_set1_epi16(bd == 10 ? 1023 : (bd == 12 ? 4095 : 255));
+  const __m256i zero = _mm256_setzero_si256();
+  __m256i s[4], ff[2];
+  uint32_t i;
+  pack_filters_4tap(filter, ff);
+
+  const uint16_t *data = src_ptr;
+  /* Vertical filter */
+  {
+    // Prologue: interleave rows (2,3) in lane 0 and (3,4) in lane 1;
+    // s[0] covers pixels 0-3, s[2] pixels 4-7.
+    __m128i s2 = _mm_loadu_si128((__m128i *)(data + 2 * src_pitch));
+    __m128i s3 = _mm_loadu_si128((__m128i *)(data + 3 * src_pitch));
+
+    __m256i s23 = _mm256_inserti128_si256(_mm256_castsi128_si256(s2), s3, 1);
+
+    __m128i s4 = _mm_loadu_si128((__m128i *)(data + 4 * src_pitch));
+
+    __m256i s34 = _mm256_inserti128_si256(_mm256_castsi128_si256(s3), s4, 1);
+
+    s[0] = _mm256_unpacklo_epi16(s23, s34);
+    s[2] = _mm256_unpackhi_epi16(s23, s34);
+
+    for (i = 0; i < height; i += 2) {
+      data = &src_ptr[i * src_pitch];
+
+      // Load the two new rows needed for this row pair.
+      __m128i s5 = _mm_loadu_si128((__m128i *)(data + 5 * src_pitch));
+      __m128i s6 = _mm_loadu_si128((__m128i *)(data + 6 * src_pitch));
+
+      __m256i s45 = _mm256_inserti128_si256(_mm256_castsi128_si256(s4), s5, 1);
+      __m256i s56 = _mm256_inserti128_si256(_mm256_castsi128_si256(s5), s6, 1);
+
+      s[1] = _mm256_unpacklo_epi16(s45, s56);
+      s[3] = _mm256_unpackhi_epi16(s45, s56);
+
+      // Low half (pixels 0-3): lane 0 -> row i, lane 1 -> row i+1.
+      const __m256i res_a = convolve_4tap(s, ff);
+
+      __m256i res_a_round = _mm256_sra_epi32(
+          _mm256_add_epi32(res_a, round_const_bits), round_shift_bits);
+
+      // High half (pixels 4-7).
+      const __m256i res_b = convolve_4tap(s + 2, ff);
+      __m256i res_b_round = _mm256_sra_epi32(
+          _mm256_add_epi32(res_b, round_const_bits), round_shift_bits);
+
+      // Pack both halves to 16 bits and clamp to [0, clip_pixel].
+      __m256i res_16bit = _mm256_packs_epi32(res_a_round, res_b_round);
+      res_16bit = _mm256_min_epi16(res_16bit, clip_pixel);
+      res_16bit = _mm256_max_epi16(res_16bit, zero);
+
+      _mm_storeu_si128((__m128i *)&dst_ptr[i * dst_pitch],
+                       _mm256_castsi256_si128(res_16bit));
+      _mm_storeu_si128((__m128i *)&dst_ptr[i * dst_pitch + dst_pitch],
+                       _mm256_extracti128_si256(res_16bit, 1));
+
+      // Slide the two-row window down for the next iteration.
+      s[0] = s[1];
+      s[2] = s[3];
+      s4 = s6;
+    }
+  }
+}
+
+// 16-wide vertical 4-tap filter: two independent 8-wide halves.
+static void aom_highbd_filter_block1d16_v4_avx2(
+    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
+    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
+  aom_highbd_filter_block1d8_v4_avx2(src_ptr, src_pitch, dst_ptr, dst_pitch,
+                                     height, filter, bd);
+
+  aom_highbd_filter_block1d8_v4_avx2(src_ptr + 8, src_pitch, dst_ptr + 8,
+                                     dst_pitch, height, filter, bd);
+}
+
 // -----------------------------------------------------------------------------
 // 2-tap vertical filtering
 
@@ -992,13 +1317,6 @@
 #define aom_highbd_filter_block1d4_v8_avx2 aom_highbd_filter_block1d4_v8_sse2
 #define aom_highbd_filter_block1d4_v2_avx2 aom_highbd_filter_block1d4_v2_sse2
 
-#define aom_highbd_filter_block1d16_h4_avx2 aom_highbd_filter_block1d16_h8_avx2
-#define aom_highbd_filter_block1d8_h4_avx2 aom_highbd_filter_block1d8_h8_avx2
-#define aom_highbd_filter_block1d4_h4_avx2 aom_highbd_filter_block1d4_h8_avx2
-#define aom_highbd_filter_block1d16_v4_avx2 aom_highbd_filter_block1d16_v8_avx2
-#define aom_highbd_filter_block1d8_v4_avx2 aom_highbd_filter_block1d8_v8_avx2
-#define aom_highbd_filter_block1d4_v4_avx2 aom_highbd_filter_block1d4_v8_avx2
-
 HIGH_FUN_CONV_1D(horiz, x_step_q4, filter_x, h, src, , avx2);
 HIGH_FUN_CONV_1D(vert, y_step_q4, filter_y, v, src - src_stride * 3, , avx2);