Faster AVX2 implementation of motion compensation modules

Improvements have been made to av1_convolve_y_avx2 (~1.5x faster),
av1_convolve_y_sr_avx2 (~1.8x faster), and av1_convolve_2d_sr_avx2
(~1.3x faster).

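The speed-up comes from shuffling the filter coefficients once per call
(prepare_coeffs / prepare_coeffs_y_2d) instead of once per row, producing
two output rows per loop iteration with a rotating window of interleaved
source registers, and replacing the do_average branch in the store path
with a branchless mask (add_store_aligned).
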
Change-Id: Iaed764a7c4d069a4180c3edb0b1ac57ad36dad21
diff --git a/aom_dsp/aom_dsp.cmake b/aom_dsp/aom_dsp.cmake
index f61af74..6c1d89e 100644
--- a/aom_dsp/aom_dsp.cmake
+++ b/aom_dsp/aom_dsp.cmake
@@ -81,6 +81,7 @@
     "${AOM_ROOT}/aom_dsp/x86/intrapred_avx2.c"
     "${AOM_ROOT}/aom_dsp/x86/inv_txfm_avx2.c"
     "${AOM_ROOT}/aom_dsp/x86/common_avx2.h"
+    "${AOM_ROOT}/aom_dsp/x86/convolve_avx2.h"
     "${AOM_ROOT}/aom_dsp/x86/inv_txfm_common_avx2.h"
     "${AOM_ROOT}/aom_dsp/x86/txfm_common_avx2.h")
 
diff --git a/aom_dsp/x86/convolve_avx2.h b/aom_dsp/x86/convolve_avx2.h
index f4ea91e..80a6067 100644
--- a/aom_dsp/x86/convolve_avx2.h
+++ b/aom_dsp/x86/convolve_avx2.h
@@ -33,4 +33,101 @@
   6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14
 };
 
+static INLINE void prepare_coeffs(const InterpFilterParams *const filter_params,
+                                  const int subpel_q4,
+                                  __m256i *const coeffs /* [4] */) {
+  const int16_t *const filter = av1_get_interp_filter_subpel_kernel(
+      *filter_params, subpel_q4 & SUBPEL_MASK);
+  const __m128i coeffs_8 = _mm_loadu_si128((__m128i *)filter);
+  const __m256i filter_coeffs = _mm256_broadcastsi128_si256(coeffs_8);
+
+  // Right shift all filter coefficients by 1 to reduce the bits required.
+  // This extra right shift will be taken care of at the end while rounding
+  // the result.
+  // Since all filter coefficients are even, this change will not affect the
+  // end result.
+  assert(_mm_test_all_zeros(_mm_and_si128(coeffs_8, _mm_set1_epi16(1)),
+                            _mm_set1_epi16(0xffff)));
+
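+  // Illustration: because every f[k] is even, e.g. with a final shift of
+  // FILTER_BITS, sum(f[k] * s[k]) >> FILTER_BITS is exactly equal to
+  // sum((f[k] >> 1) * s[k]) >> (FILTER_BITS - 1).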
+  const __m256i coeffs_1 = _mm256_srai_epi16(filter_coeffs, 1);
+
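+  // After the shift each coefficient fits in a signed byte; the byte
+  // shuffles below replicate coefficient pairs into the int8 layout that
+  // _mm256_maddubs_epi16() expects.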
+  // coeffs 0 1 0 1 0 1 0 1
+  coeffs[0] = _mm256_shuffle_epi8(coeffs_1, _mm256_set1_epi16(0x0200u));
+  // coeffs 2 3 2 3 2 3 2 3
+  coeffs[1] = _mm256_shuffle_epi8(coeffs_1, _mm256_set1_epi16(0x0604u));
+  // coeffs 4 5 4 5 4 5 4 5
+  coeffs[2] = _mm256_shuffle_epi8(coeffs_1, _mm256_set1_epi16(0x0a08u));
+  // coeffs 6 7 6 7 6 7 6 7
+  coeffs[3] = _mm256_shuffle_epi8(coeffs_1, _mm256_set1_epi16(0x0e0cu));
+}
+
+static INLINE void prepare_coeffs_y_2d(
+    const InterpFilterParams *const filter_params_y, const int subpel_y_q4,
+    __m256i *const coeffs /* [4] */) {
+  const int16_t *y_filter = av1_get_interp_filter_subpel_kernel(
+      *filter_params_y, subpel_y_q4 & SUBPEL_MASK);
+
+  const __m128i coeffs_y8 = _mm_loadu_si128((__m128i *)y_filter);
+  const __m256i coeffs_y = _mm256_broadcastsi128_si256(coeffs_y8);
+
+  // coeffs 0 1 0 1 0 1 0 1
+  coeffs[0] = _mm256_shuffle_epi32(coeffs_y, 0x00);
+  // coeffs 2 3 2 3 2 3 2 3
+  coeffs[1] = _mm256_shuffle_epi32(coeffs_y, 0x55);
+  // coeffs 4 5 4 5 4 5 4 5
+  coeffs[2] = _mm256_shuffle_epi32(coeffs_y, 0xaa);
+  // coeffs 6 7 6 7 6 7 6 7
+  coeffs[3] = _mm256_shuffle_epi32(coeffs_y, 0xff);
+}
+
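+// Convolve interleaved unsigned 8-bit pixel pairs in s[0..3] against the
+// signed 8-bit (halved) coefficient pairs in coeffs[0..3]; returns the
+// 16-bit sums.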
+static INLINE __m256i convolve(const __m256i *const s,
+                               const __m256i *const coeffs) {
+  const __m256i res_01 = _mm256_maddubs_epi16(s[0], coeffs[0]);
+  const __m256i res_23 = _mm256_maddubs_epi16(s[1], coeffs[1]);
+  const __m256i res_45 = _mm256_maddubs_epi16(s[2], coeffs[2]);
+  const __m256i res_67 = _mm256_maddubs_epi16(s[3], coeffs[3]);
+
+  // order: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+  const __m256i res = _mm256_add_epi16(_mm256_add_epi16(res_01, res_45),
+                                       _mm256_add_epi16(res_23, res_67));
+
+  return res;
+}
+
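+// Convolve interleaved 16-bit pixel pairs in s[0..3] against the 16-bit
+// coefficient pairs in coeffs[0..3], accumulating into 32 bits.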
+static INLINE __m256i convolve_y_2d(const __m256i *const s,
+                                    const __m256i *const coeffs) {
+  const __m256i res_0 = _mm256_madd_epi16(s[0], coeffs[0]);
+  const __m256i res_1 = _mm256_madd_epi16(s[1], coeffs[1]);
+  const __m256i res_2 = _mm256_madd_epi16(s[2], coeffs[2]);
+  const __m256i res_3 = _mm256_madd_epi16(s[3], coeffs[3]);
+
+  const __m256i res = _mm256_add_epi32(_mm256_add_epi32(res_0, res_1),
+                                       _mm256_add_epi32(res_2, res_3));
+
+  return res;
+}
+
+static INLINE __m256i convolve_x(const __m256i data,
+                                 const __m256i *const coeffs,
+                                 const __m256i *const filt) {
+  __m256i s[4];
+
+  s[0] = _mm256_shuffle_epi8(data, filt[0]);
+  s[1] = _mm256_shuffle_epi8(data, filt[1]);
+  s[2] = _mm256_shuffle_epi8(data, filt[2]);
+  s[3] = _mm256_shuffle_epi8(data, filt[3]);
+
+  return convolve(s, coeffs);
+}
+
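+// Branchless accumulate-or-overwrite: with avg_mask all ones the existing
+// dst values are kept and res is added on top; with avg_mask zero the dst
+// contribution is masked off and res is simply stored.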
+static INLINE void add_store_aligned(CONV_BUF_TYPE *const dst,
+                                     const __m256i *const res,
+                                     const __m256i *const avg_mask) {
+  __m256i d;
+  d = _mm256_load_si256((__m256i *)dst);
+  d = _mm256_and_si256(d, *avg_mask);
+  d = _mm256_add_epi32(d, *res);
+  _mm256_store_si256((__m256i *)dst, d);
+}
+
 #endif
diff --git a/av1/common/convolve.c b/av1/common/convolve.c
index 9f4ec26..6ce9700 100644
--- a/av1/common/convolve.c
+++ b/av1/common/convolve.c
@@ -401,7 +401,6 @@
     for (int x = 0; x < w; ++x) {
       int32_t sum = (1 << (bd + FILTER_BITS - 1));
       for (int k = 0; k < filter_params_x->taps; ++k) {
-        assert((x_filter[k] % 2) == 0);
         sum += x_filter[k] * src_horiz[y * src_stride + x - fo_horiz + k];
       }
       assert(0 <= sum && sum < (1 << (bd + FILTER_BITS + 1)));
@@ -419,7 +418,6 @@
     for (int x = 0; x < w; ++x) {
       CONV_BUF_TYPE sum = 1 << offset_bits;
       for (int k = 0; k < filter_params_y->taps; ++k) {
-        assert((y_filter[k] % 2) == 0);
         sum += y_filter[k] * src_vert[(y - fo_vert + k) * im_stride + x];
       }
       assert(0 <= sum && sum < (1 << (offset_bits + 2)));
@@ -456,7 +454,6 @@
     for (int x = 0; x < w; ++x) {
       CONV_BUF_TYPE res = 0;
       for (int k = 0; k < filter_params_y->taps; ++k) {
-        assert((y_filter[k] % 2) == 0);
         res += y_filter[k] * src[(y - fo_vert + k) * src_stride + x];
       }
       res *= (1 << bits);
@@ -490,7 +487,6 @@
     for (int x = 0; x < w; ++x) {
       CONV_BUF_TYPE res = 0;
       for (int k = 0; k < filter_params_x->taps; ++k) {
-        assert((x_filter[k] % 2) == 0);
         res += x_filter[k] * src[y * src_stride + x - fo_horiz + k];
       }
       res = (1 << bits) * ROUND_POWER_OF_TWO(res, conv_params->round_0);
@@ -554,7 +550,6 @@
     for (int x = 0; x < w; ++x) {
       int32_t sum = (1 << (bd + FILTER_BITS - 1));
       for (int k = 0; k < filter_params_x->taps; ++k) {
-        assert((x_filter[k] % 2) == 0);
         sum += x_filter[k] * src_horiz[y * src_stride + x - fo_horiz + k];
       }
       assert(0 <= sum && sum < (1 << (bd + FILTER_BITS + 1)));
@@ -572,7 +567,6 @@
     for (int x = 0; x < w; ++x) {
       CONV_BUF_TYPE sum = 1 << offset_bits;
       for (int k = 0; k < filter_params_y->taps; ++k) {
-        assert((y_filter[k] % 2) == 0);
         sum += y_filter[k] * src_vert[(y - fo_vert + k) * im_stride + x];
       }
       assert(0 <= sum && sum < (1 << (offset_bits + 2)));
@@ -602,7 +596,6 @@
     for (int x = 0; x < w; ++x) {
       CONV_BUF_TYPE res = 0;
       for (int k = 0; k < filter_params_y->taps; ++k) {
-        assert((y_filter[k] % 2) == 0);
         res += y_filter[k] * src[(y - fo_vert + k) * src_stride + x];
       }
       dst[y * dst_stride + x] =
@@ -630,7 +623,6 @@
     for (int x = 0; x < w; ++x) {
       CONV_BUF_TYPE res = 0;
       for (int k = 0; k < filter_params_x->taps; ++k) {
-        assert((x_filter[k] % 2) == 0);
         res += x_filter[k] * src[y * src_stride + x - fo_horiz + k];
       }
       res = ROUND_POWER_OF_TWO(res, conv_params->round_0);
diff --git a/av1/common/reconinter.c b/av1/common/reconinter.c
index 0a033f1..46d2129 100644
--- a/av1/common/reconinter.c
+++ b/av1/common/reconinter.c
@@ -740,7 +740,7 @@
 // CONFIG_HIGHBITDEPTH or just 8 otherwise.
 #define INTER_PRED_BYTES_PER_PIXEL 4
 
-  DECLARE_ALIGNED(16, uint8_t,
+  DECLARE_ALIGNED(32, uint8_t,
                   tmp_buf[INTER_PRED_BYTES_PER_PIXEL * MAX_SB_SQUARE]);
 #undef INTER_PRED_BYTES_PER_PIXEL
 
@@ -988,7 +988,7 @@
       for (idx = 0; idx < b8_w; idx += b4_w) {
         MB_MODE_INFO *this_mbmi = &xd->mi[row * xd->mi_stride + col]->mbmi;
         is_compound = has_second_ref(this_mbmi);
-        DECLARE_ALIGNED(16, int32_t, tmp_dst[8 * 8]);
+        DECLARE_ALIGNED(32, int32_t, tmp_dst[8 * 8]);
         int tmp_dst_stride = 8;
         assert(w <= 8 && h <= 8);
         ConvolveParams conv_params = get_conv_params_no_round(
@@ -1124,7 +1124,7 @@
     uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
     uint8_t *pre[2];
     SubpelParams subpel_params[2];
-    DECLARE_ALIGNED(16, int32_t, tmp_dst[MAX_SB_SIZE * MAX_SB_SIZE]);
+    DECLARE_ALIGNED(32, int32_t, tmp_dst[MAX_SB_SIZE * MAX_SB_SIZE]);
 
     for (ref = 0; ref < 1 + is_compound; ++ref) {
 #if CONFIG_INTRABC
diff --git a/av1/common/x86/convolve_2d_avx2.c b/av1/common/x86/convolve_2d_avx2.c
index 9ee6e4b..a5df334 100644
--- a/av1/common/x86/convolve_2d_avx2.c
+++ b/av1/common/x86/convolve_2d_avx2.c
@@ -39,10 +39,10 @@
   int i, j;
   const int fo_vert = filter_params_y->taps / 2 - 1;
   const int fo_horiz = filter_params_x->taps / 2 - 1;
-  const int do_average = conv_params->do_average;
   const uint8_t *const src_ptr = src - fo_vert * src_stride - fo_horiz;
+  const __m256i avg_mask = _mm256_set1_epi32(conv_params->do_average ? -1 : 0);
 
-  __m256i filt[4], s[4];
+  __m256i filt[4], s[4], coeffs[4];
 
   filt[0] = _mm256_load_si256((__m256i const *)filt1_global_avx2);
   filt[1] = _mm256_load_si256((__m256i const *)filt2_global_avx2);
@@ -51,33 +51,7 @@
 
   /* Horizontal filter */
   {
-    const int16_t *x_filter = av1_get_interp_filter_subpel_kernel(
-        *filter_params_x, subpel_x_q4 & SUBPEL_MASK);
-
-    const __m128i coeffs_x8 = _mm_loadu_si128((__m128i *)x_filter);
-    // since not all compilers yet support _mm256_set_m128i()
-    const __m256i coeffs_x = _mm256_insertf128_si256(
-        _mm256_castsi128_si256(coeffs_x8), coeffs_x8, 1);
-
-    // right shift all filter co-efficients by 1 to reduce the bits required.
-    // This extra right shift will be taken care of at the end while rounding
-    // the result.
-    // Since all filter co-efficients are even, this change will not affect the
-    // end result
-    const __m256i coeffs_x_1 = _mm256_srai_epi16(coeffs_x, 1);
-
-    // coeffs 0 1 0 1 0 1 0 1
-    const __m256i coeff_01 =
-        _mm256_shuffle_epi8(coeffs_x_1, _mm256_set1_epi16(0x0200u));
-    // coeffs 2 3 2 3 2 3 2 3
-    const __m256i coeff_23 =
-        _mm256_shuffle_epi8(coeffs_x_1, _mm256_set1_epi16(0x0604u));
-    // coeffs 4 5 4 5 4 5 4 5
-    const __m256i coeff_45 =
-        _mm256_shuffle_epi8(coeffs_x_1, _mm256_set1_epi16(0x0a08u));
-    // coeffs 6 7 6 7 6 7 6 7
-    const __m256i coeff_67 =
-        _mm256_shuffle_epi8(coeffs_x_1, _mm256_set1_epi16(0x0e0cu));
+    prepare_coeffs(filter_params_x, subpel_x_q4, coeffs);
 
     const __m256i round_const =
         _mm256_set1_epi16(((1 << (conv_params->round_0 - 1)) >> 1) +
@@ -93,52 +67,20 @@
             _mm_loadu_si128((__m128i *)&src_ptr[(i * src_stride) + (j + 8)]),
             1);
 
-        // filter the source buffer
-        s[0] = _mm256_shuffle_epi8(data, filt[0]);
-        s[1] = _mm256_shuffle_epi8(data, filt[1]);
-        s[2] = _mm256_shuffle_epi8(data, filt[2]);
-        s[3] = _mm256_shuffle_epi8(data, filt[3]);
+        __m256i res = convolve_x(data, coeffs, filt);
 
-        const __m256i res_0 = _mm256_maddubs_epi16(s[0], coeff_01);
-        const __m256i res_1 = _mm256_maddubs_epi16(s[1], coeff_23);
-        const __m256i res_2 = _mm256_maddubs_epi16(s[2], coeff_45);
-        const __m256i res_3 = _mm256_maddubs_epi16(s[3], coeff_67);
-
-        const __m256i res_a = _mm256_add_epi16(res_0, res_2);
-        const __m256i res_b = _mm256_add_epi16(res_1, res_3);
-
-        __m256i res = _mm256_add_epi16(res_a, res_b);
         res = _mm256_sra_epi16(_mm256_add_epi16(res, round_const), round_shift);
         res = _mm256_permute4x64_epi64(res, 216);
 
         // 0 1 2 3 8 9 10 11 4 5 6 7 12 13 14 15
-        _mm256_storeu_si256((__m256i *)&im_block[i * im_stride + j], res);
+        _mm256_store_si256((__m256i *)&im_block[i * im_stride + j], res);
       }
     }
   }
 
   /* Vertical filter */
   {
-    const int16_t *y_filter = av1_get_interp_filter_subpel_kernel(
-        *filter_params_y, subpel_y_q4 & SUBPEL_MASK);
-
-    const __m128i coeffs_y8 = _mm_loadu_si128((__m128i *)y_filter);
-    const __m256i coeffs_y = _mm256_insertf128_si256(
-        _mm256_castsi128_si256(coeffs_y8), coeffs_y8, 1);
-
-    // coeffs 0 1 0 1 2 3 2 3
-    const __m256i tmp_0 = _mm256_unpacklo_epi32(coeffs_y, coeffs_y);
-    // coeffs 4 5 4 5 6 7 6 7
-    const __m256i tmp_1 = _mm256_unpackhi_epi32(coeffs_y, coeffs_y);
-
-    // coeffs 0 1 0 1 0 1 0 1
-    const __m256i coeff_01 = _mm256_unpacklo_epi64(tmp_0, tmp_0);
-    // coeffs 2 3 2 3 2 3 2 3
-    const __m256i coeff_23 = _mm256_unpackhi_epi64(tmp_0, tmp_0);
-    // coeffs 4 5 4 5 4 5 4 5
-    const __m256i coeff_45 = _mm256_unpacklo_epi64(tmp_1, tmp_1);
-    // coeffs 6 7 6 7 6 7 6 7
-    const __m256i coeff_67 = _mm256_unpackhi_epi64(tmp_1, tmp_1);
+    prepare_coeffs_y_2d(filter_params_y, subpel_y_q4, coeffs);
 
     const __m256i round_const = _mm256_set1_epi32(
         ((1 << conv_params->round_1) >> 1) -
@@ -149,69 +91,38 @@
       for (j = 0; j < w; j += 16) {
         // Filter 0 1 2 3 4 5 6 7
         const int16_t *data = &im_block[i * im_stride + j];
-        const __m256i src_0 =
-            _mm256_unpacklo_epi16(*(__m256i *)(data + 0 * im_stride),
-                                  *(__m256i *)(data + 1 * im_stride));
-        const __m256i src_1 =
-            _mm256_unpacklo_epi16(*(__m256i *)(data + 2 * im_stride),
-                                  *(__m256i *)(data + 3 * im_stride));
-        const __m256i src_2 =
-            _mm256_unpacklo_epi16(*(__m256i *)(data + 4 * im_stride),
-                                  *(__m256i *)(data + 5 * im_stride));
-        const __m256i src_3 =
-            _mm256_unpacklo_epi16(*(__m256i *)(data + 6 * im_stride),
-                                  *(__m256i *)(data + 7 * im_stride));
+        s[0] = _mm256_unpacklo_epi16(*(__m256i *)(data + 0 * im_stride),
+                                     *(__m256i *)(data + 1 * im_stride));
+        s[1] = _mm256_unpacklo_epi16(*(__m256i *)(data + 2 * im_stride),
+                                     *(__m256i *)(data + 3 * im_stride));
+        s[2] = _mm256_unpacklo_epi16(*(__m256i *)(data + 4 * im_stride),
+                                     *(__m256i *)(data + 5 * im_stride));
+        s[3] = _mm256_unpacklo_epi16(*(__m256i *)(data + 6 * im_stride),
+                                     *(__m256i *)(data + 7 * im_stride));
 
-        const __m256i res_0 = _mm256_madd_epi16(src_0, coeff_01);
-        const __m256i res_1 = _mm256_madd_epi16(src_1, coeff_23);
-        const __m256i res_2 = _mm256_madd_epi16(src_2, coeff_45);
-        const __m256i res_3 = _mm256_madd_epi16(src_3, coeff_67);
-
-        const __m256i res_a = _mm256_add_epi32(_mm256_add_epi32(res_0, res_1),
-                                               _mm256_add_epi32(res_2, res_3));
+        const __m256i res_a = convolve_y_2d(s, coeffs);
 
         // Filter 8 9 10 11 12 13 14 15
-        const __m256i src_4 =
-            _mm256_unpackhi_epi16(*(__m256i *)(data + 0 * im_stride),
-                                  *(__m256i *)(data + 1 * im_stride));
-        const __m256i src_5 =
-            _mm256_unpackhi_epi16(*(__m256i *)(data + 2 * im_stride),
-                                  *(__m256i *)(data + 3 * im_stride));
-        const __m256i src_6 =
-            _mm256_unpackhi_epi16(*(__m256i *)(data + 4 * im_stride),
-                                  *(__m256i *)(data + 5 * im_stride));
-        const __m256i src_7 =
-            _mm256_unpackhi_epi16(*(__m256i *)(data + 6 * im_stride),
-                                  *(__m256i *)(data + 7 * im_stride));
+        s[0] = _mm256_unpackhi_epi16(*(__m256i *)(data + 0 * im_stride),
+                                     *(__m256i *)(data + 1 * im_stride));
+        s[1] = _mm256_unpackhi_epi16(*(__m256i *)(data + 2 * im_stride),
+                                     *(__m256i *)(data + 3 * im_stride));
+        s[2] = _mm256_unpackhi_epi16(*(__m256i *)(data + 4 * im_stride),
+                                     *(__m256i *)(data + 5 * im_stride));
+        s[3] = _mm256_unpackhi_epi16(*(__m256i *)(data + 6 * im_stride),
+                                     *(__m256i *)(data + 7 * im_stride));
 
-        const __m256i res_4 = _mm256_madd_epi16(src_4, coeff_01);
-        const __m256i res_5 = _mm256_madd_epi16(src_5, coeff_23);
-        const __m256i res_6 = _mm256_madd_epi16(src_6, coeff_45);
-        const __m256i res_7 = _mm256_madd_epi16(src_7, coeff_67);
-
-        const __m256i res_b = _mm256_add_epi32(_mm256_add_epi32(res_4, res_5),
-                                               _mm256_add_epi32(res_6, res_7));
+        const __m256i res_b = convolve_y_2d(s, coeffs);
 
         const __m256i res_a_round =
             _mm256_sra_epi32(_mm256_add_epi32(res_a, round_const), round_shift);
         const __m256i res_b_round =
             _mm256_sra_epi32(_mm256_add_epi32(res_b, round_const), round_shift);
 
-        // Accumulate values into the destination buffer
-        __m256i *const p = (__m256i *)&dst[i * dst_stride + j];
-        if (do_average) {
-          _mm256_storeu_si256(
-              p + 0, _mm256_add_epi32(_mm256_loadu_si256(p + 0), res_a_round));
-          if (w - j > 8) {
-            _mm256_storeu_si256(
-                p + 1,
-                _mm256_add_epi32(_mm256_loadu_si256(p + 1), res_b_round));
-          }
-        } else {
-          _mm256_storeu_si256(p + 0, res_a_round);
-          if (w - j > 8) {
-            _mm256_storeu_si256(p + 1, res_b_round);
-          }
+        add_store_aligned(&dst[i * dst_stride + j], &res_a_round, &avg_mask);
+        if (w - j > 8) {
+          add_store_aligned(&dst[i * dst_stride + j + 8], &res_b_round,
+                            &avg_mask);
         }
       }
     }
@@ -226,192 +137,123 @@
                              ConvolveParams *conv_params) {
   const int bd = 8;
 
-  DECLARE_ALIGNED(32, int16_t,
-                  im_block[(MAX_SB_SIZE + MAX_FILTER_TAP - 1) * MAX_SB_SIZE]);
+  DECLARE_ALIGNED(32, int16_t, im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * 8]);
   int im_h = h + filter_params_y->taps - 1;
-  int im_stride = MAX_SB_SIZE;
+  int im_stride = 8;
   int i, j;
   const int fo_vert = filter_params_y->taps / 2 - 1;
   const int fo_horiz = filter_params_x->taps / 2 - 1;
   const uint8_t *const src_ptr = src - fo_vert * src_stride - fo_horiz;
 
-  __m256i filt[4], s[4];
+  __m256i filt[4], coeffs_h[4], coeffs_v[4];
 
   filt[0] = _mm256_load_si256((__m256i const *)filt1_global_avx2);
   filt[1] = _mm256_load_si256((__m256i const *)filt2_global_avx2);
   filt[2] = _mm256_load_si256((__m256i const *)filt3_global_avx2);
   filt[3] = _mm256_load_si256((__m256i const *)filt4_global_avx2);
 
-  /* Horizontal filter */
-  {
-    const int16_t *x_filter = av1_get_interp_filter_subpel_kernel(
-        *filter_params_x, subpel_x_q4 & SUBPEL_MASK);
+  prepare_coeffs(filter_params_x, subpel_x_q4, coeffs_h);
+  prepare_coeffs_y_2d(filter_params_y, subpel_y_q4, coeffs_v);
 
-    const __m128i coeffs_x8 = _mm_loadu_si128((__m128i *)x_filter);
-    // since not all compilers yet support _mm256_set_m128i()
-    const __m256i coeffs_x = _mm256_insertf128_si256(
-        _mm256_castsi128_si256(coeffs_x8), coeffs_x8, 1);
+  const __m256i round_const_h = _mm256_set1_epi16(
+      ((1 << (conv_params->round_0 - 1)) >> 1) + (1 << (bd + FILTER_BITS - 2)));
+  const __m128i round_shift_h = _mm_cvtsi32_si128(conv_params->round_0 - 1);
 
-    // right shift all filter co-efficients by 1 to reduce the bits required.
-    // This extra right shift will be taken care of at the end while rounding
-    // the result.
-    // Since all filter co-efficients are even, this change will not affect
-    // the end result
-    const __m256i coeffs_x_1 = _mm256_srai_epi16(coeffs_x, 1);
+  const __m256i round_const_v = _mm256_set1_epi32(
+      ((1 << conv_params->round_1) >> 1) -
+      (1 << (bd + 2 * FILTER_BITS - conv_params->round_0 - 1)) +
+      ((1 << (2 * FILTER_BITS - conv_params->round_0)) >> 1));
+  const __m128i round_shift_v =
+      _mm_cvtsi32_si128(2 * FILTER_BITS - conv_params->round_0);
 
-    // coeffs 0 1 0 1 0 1 0 1
-    const __m256i coeff_01 =
-        _mm256_shuffle_epi8(coeffs_x_1, _mm256_set1_epi16(0x0200u));
-    // coeffs 2 3 2 3 2 3 2 3
-    const __m256i coeff_23 =
-        _mm256_shuffle_epi8(coeffs_x_1, _mm256_set1_epi16(0x0604u));
-    // coeffs 4 5 4 5 4 5 4 5
-    const __m256i coeff_45 =
-        _mm256_shuffle_epi8(coeffs_x_1, _mm256_set1_epi16(0x0a08u));
-    // coeffs 6 7 6 7 6 7 6 7
-    const __m256i coeff_67 =
-        _mm256_shuffle_epi8(coeffs_x_1, _mm256_set1_epi16(0x0e0cu));
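+  // Process the block in 8-wide column strips so that the intermediate
+  // buffer shrinks to im_stride == 8 and the vertical pass can consume each
+  // strip's horizontal output immediately.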
+  for (j = 0; j < w; j += 8) {
+    for (i = 0; i < im_h; i += 2) {
+      __m256i data = _mm256_castsi128_si256(
+          _mm_loadu_si128((__m128i *)&src_ptr[(i * src_stride) + j]));
 
-    const __m256i round_const =
-        _mm256_set1_epi16(((1 << (conv_params->round_0 - 1)) >> 1) +
-                          (1 << (bd + FILTER_BITS - 2)));
-    const __m128i round_shift = _mm_cvtsi32_si128(conv_params->round_0 - 1);
-
-    for (i = 0; i < im_h; ++i) {
-      for (j = 0; j < w; j += 16) {
-        // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 8 9 10 11 12 13 14 15 16 17
-        // 18 19 20 21 22 23
-        const __m256i data = _mm256_inserti128_si256(
-            _mm256_loadu_si256((__m256i *)&src_ptr[(i * src_stride) + j]),
-            _mm_loadu_si128((__m128i *)&src_ptr[(i * src_stride) + (j + 8)]),
+      // Load the next row, if any, into the upper 128 bits
+      if (i + 1 < im_h)
+        data = _mm256_inserti128_si256(
+            data, _mm_loadu_si128(
+                      (__m128i *)&src_ptr[(i * src_stride) + j + src_stride]),
             1);
 
-        // filter the source buffer
-        s[0] = _mm256_shuffle_epi8(data, filt[0]);
-        s[1] = _mm256_shuffle_epi8(data, filt[1]);
-        s[2] = _mm256_shuffle_epi8(data, filt[2]);
-        s[3] = _mm256_shuffle_epi8(data, filt[3]);
+      __m256i res = convolve_x(data, coeffs_h, filt);
 
-        const __m256i res_0 = _mm256_maddubs_epi16(s[0], coeff_01);
-        const __m256i res_1 = _mm256_maddubs_epi16(s[1], coeff_23);
-        const __m256i res_2 = _mm256_maddubs_epi16(s[2], coeff_45);
-        const __m256i res_3 = _mm256_maddubs_epi16(s[3], coeff_67);
+      res =
+          _mm256_sra_epi16(_mm256_add_epi16(res, round_const_h), round_shift_h);
 
-        const __m256i res_a = _mm256_add_epi16(res_0, res_2);
-        const __m256i res_b = _mm256_add_epi16(res_1, res_3);
-
-        __m256i res = _mm256_add_epi16(res_a, res_b);
-        res = _mm256_sra_epi16(_mm256_add_epi16(res, round_const), round_shift);
-
-        // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
-        _mm256_storeu_si256((__m256i *)&im_block[i * im_stride + j], res);
-      }
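+      // With im_stride == 8, this single 256-bit store writes the 8 results
+      // for row i and the 8 results for row i + 1.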
+      _mm256_store_si256((__m256i *)&im_block[i * im_stride], res);
     }
-  }
 
-  /* Vertical filter */
-  {
-    const int16_t *y_filter = av1_get_interp_filter_subpel_kernel(
-        *filter_params_y, subpel_y_q4 & SUBPEL_MASK);
+    /* Vertical filter */
+    {
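+      // Interleave the first six intermediate rows up front; the loop below
+      // only forms the two new row pairs per iteration and rotates the s[]
+      // window.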
+      __m256i src_0 = _mm256_loadu_si256((__m256i *)(im_block + 0 * im_stride));
+      __m256i src_1 = _mm256_loadu_si256((__m256i *)(im_block + 1 * im_stride));
+      __m256i src_2 = _mm256_loadu_si256((__m256i *)(im_block + 2 * im_stride));
+      __m256i src_3 = _mm256_loadu_si256((__m256i *)(im_block + 3 * im_stride));
+      __m256i src_4 = _mm256_loadu_si256((__m256i *)(im_block + 4 * im_stride));
+      __m256i src_5 = _mm256_loadu_si256((__m256i *)(im_block + 5 * im_stride));
 
-    const __m128i coeffs_y8 = _mm_loadu_si128((__m128i *)y_filter);
-    const __m256i coeffs_y = _mm256_insertf128_si256(
-        _mm256_castsi128_si256(coeffs_y8), coeffs_y8, 1);
+      __m256i s[8];
+      s[0] = _mm256_unpacklo_epi16(src_0, src_1);
+      s[1] = _mm256_unpacklo_epi16(src_2, src_3);
+      s[2] = _mm256_unpacklo_epi16(src_4, src_5);
 
-    // coeffs 0 1 0 1 2 3 2 3
-    const __m256i tmp_0 = _mm256_unpacklo_epi32(coeffs_y, coeffs_y);
-    // coeffs 4 5 4 5 6 7 6 7
-    const __m256i tmp_1 = _mm256_unpackhi_epi32(coeffs_y, coeffs_y);
+      s[4] = _mm256_unpackhi_epi16(src_0, src_1);
+      s[5] = _mm256_unpackhi_epi16(src_2, src_3);
+      s[6] = _mm256_unpackhi_epi16(src_4, src_5);
 
-    // coeffs 0 1 0 1 0 1 0 1
-    const __m256i coeff_01 = _mm256_unpacklo_epi64(tmp_0, tmp_0);
-    // coeffs 2 3 2 3 2 3 2 3
-    const __m256i coeff_23 = _mm256_unpackhi_epi64(tmp_0, tmp_0);
-    // coeffs 4 5 4 5 4 5 4 5
-    const __m256i coeff_45 = _mm256_unpacklo_epi64(tmp_1, tmp_1);
-    // coeffs 6 7 6 7 6 7 6 7
-    const __m256i coeff_67 = _mm256_unpackhi_epi64(tmp_1, tmp_1);
+      for (i = 0; i < h; i += 2) {
+        const int16_t *data = &im_block[i * im_stride];
 
-    const __m256i round_const = _mm256_set1_epi32(
-        ((1 << conv_params->round_1) >> 1) -
-        (1 << (bd + 2 * FILTER_BITS - conv_params->round_0 - 1)) +
-        ((1 << (2 * FILTER_BITS - conv_params->round_0)) >> 1));
-    const __m128i round_shift =
-        _mm_cvtsi32_si128(2 * FILTER_BITS - conv_params->round_0);
+        const __m256i s6 =
+            _mm256_loadu_si256((__m256i *)(data + 6 * im_stride));
+        const __m256i s7 =
+            _mm256_loadu_si256((__m256i *)(data + 7 * im_stride));
 
-    for (i = 0; i < h; ++i) {
-      for (j = 0; j < w; j += 16) {
-        // Filter 0 1 2 3 8 9 10 11
-        const int16_t *data = &im_block[i * im_stride + j];
-        const __m256i src_0 =
-            _mm256_unpacklo_epi16(*(__m256i *)(data + 0 * im_stride),
-                                  *(__m256i *)(data + 1 * im_stride));
-        const __m256i src_1 =
-            _mm256_unpacklo_epi16(*(__m256i *)(data + 2 * im_stride),
-                                  *(__m256i *)(data + 3 * im_stride));
-        const __m256i src_2 =
-            _mm256_unpacklo_epi16(*(__m256i *)(data + 4 * im_stride),
-                                  *(__m256i *)(data + 5 * im_stride));
-        const __m256i src_3 =
-            _mm256_unpacklo_epi16(*(__m256i *)(data + 6 * im_stride),
-                                  *(__m256i *)(data + 7 * im_stride));
+        s[3] = _mm256_unpacklo_epi16(s6, s7);
+        s[7] = _mm256_unpackhi_epi16(s6, s7);
 
-        const __m256i res_0 = _mm256_madd_epi16(src_0, coeff_01);
-        const __m256i res_1 = _mm256_madd_epi16(src_1, coeff_23);
-        const __m256i res_2 = _mm256_madd_epi16(src_2, coeff_45);
-        const __m256i res_3 = _mm256_madd_epi16(src_3, coeff_67);
-
-        const __m256i res_a = _mm256_add_epi32(_mm256_add_epi32(res_0, res_1),
-                                               _mm256_add_epi32(res_2, res_3));
-
-        // Filter 4 5 6 7 12 13 14 15
-        const __m256i src_4 =
-            _mm256_unpackhi_epi16(*(__m256i *)(data + 0 * im_stride),
-                                  *(__m256i *)(data + 1 * im_stride));
-        const __m256i src_5 =
-            _mm256_unpackhi_epi16(*(__m256i *)(data + 2 * im_stride),
-                                  *(__m256i *)(data + 3 * im_stride));
-        const __m256i src_6 =
-            _mm256_unpackhi_epi16(*(__m256i *)(data + 4 * im_stride),
-                                  *(__m256i *)(data + 5 * im_stride));
-        const __m256i src_7 =
-            _mm256_unpackhi_epi16(*(__m256i *)(data + 6 * im_stride),
-                                  *(__m256i *)(data + 7 * im_stride));
-
-        const __m256i res_4 = _mm256_madd_epi16(src_4, coeff_01);
-        const __m256i res_5 = _mm256_madd_epi16(src_5, coeff_23);
-        const __m256i res_6 = _mm256_madd_epi16(src_6, coeff_45);
-        const __m256i res_7 = _mm256_madd_epi16(src_7, coeff_67);
-
-        const __m256i res_b = _mm256_add_epi32(_mm256_add_epi32(res_4, res_5),
-                                               _mm256_add_epi32(res_6, res_7));
+        const __m256i res_a = convolve_y_2d(s, coeffs_v);
+        const __m256i res_b = convolve_y_2d(s + 4, coeffs_v);
 
         // Combine V round and 2F-H-V round into a single rounding
-        const __m256i res_a_round =
-            _mm256_sra_epi32(_mm256_add_epi32(res_a, round_const), round_shift);
-        const __m256i res_b_round =
-            _mm256_sra_epi32(_mm256_add_epi32(res_b, round_const), round_shift);
+        const __m256i res_a_round = _mm256_sra_epi32(
+            _mm256_add_epi32(res_a, round_const_v), round_shift_v);
+        const __m256i res_b_round = _mm256_sra_epi32(
+            _mm256_add_epi32(res_b, round_const_v), round_shift_v);
 
         /* rounding code */
         // 16 bit conversion
         const __m256i res_16bit = _mm256_packs_epi32(res_a_round, res_b_round);
         // 8 bit conversion and saturation to uint8
-        __m256i res_8b = _mm256_packus_epi16(res_16bit, res_16bit);
-        res_8b = _mm256_permute4x64_epi64(res_8b, 216);
-        // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
-        const __m128i res = _mm256_castsi256_si128(res_8b);
+        const __m256i res_8b = _mm256_packus_epi16(res_16bit, res_16bit);
+
+        const __m128i res_0 = _mm256_castsi256_si128(res_8b);
+        const __m128i res_1 = _mm256_extracti128_si256(res_8b, 1);
 
         // Store values into the destination buffer
-        __m128i *const p = (__m128i *)&dst[i * dst_stride + j];
-        if (w - j > 8) {
-          _mm_storeu_si128(p, res);
-        } else if (w - j > 4) {
-          _mm_storel_epi64(p, res);
+        __m128i *const p_0 = (__m128i *)&dst[i * dst_stride + j];
+        __m128i *const p_1 = (__m128i *)&dst[i * dst_stride + j + dst_stride];
+        if (w - j > 4) {
+          _mm_storel_epi64(p_0, res_0);
+          _mm_storel_epi64(p_1, res_1);
         } else if (w == 4) {
-          xx_storel_32(&dst[i * dst_stride + j], res);
+          xx_storel_32(p_0, res_0);
+          xx_storel_32(p_1, res_1);
         } else {
-          *(uint16_t *)p = _mm_cvtsi128_si32(res);
+          *(uint16_t *)p_0 = _mm_cvtsi128_si32(res_0);
+          *(uint16_t *)p_1 = _mm_cvtsi128_si32(res_1);
         }
+
+        s[0] = s[1];
+        s[1] = s[2];
+        s[2] = s[3];
+
+        s[4] = s[5];
+        s[5] = s[6];
+        s[6] = s[7];
       }
     }
   }
diff --git a/av1/common/x86/convolve_avx2.c b/av1/common/x86/convolve_avx2.c
index dd1025d..7fd76f8 100644
--- a/av1/common/x86/convolve_avx2.c
+++ b/av1/common/x86/convolve_avx2.c
@@ -339,142 +339,142 @@
   }
 }
 
-DECLARE_ALIGNED(32, static const uint8_t, g_shuf1[32]) = {
-  0, 8, 1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15,
-  0, 8, 1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15
-};
-
 void av1_convolve_y_avx2(const uint8_t *src, int src_stride, uint8_t *dst0,
                          int dst_stride0, int w, int h,
                          InterpFilterParams *filter_params_x,
                          InterpFilterParams *filter_params_y,
                          const int subpel_x_q4, const int subpel_y_q4,
                          ConvolveParams *conv_params) {
-  if (w < 16) {
-    av1_convolve_y_sse2(src, src_stride, dst0, dst_stride0, w, h,
-                        filter_params_x, filter_params_y, subpel_x_q4,
-                        subpel_y_q4, conv_params);
-    return;
-  }
-  {
-    CONV_BUF_TYPE *dst = conv_params->dst;
-    int dst_stride = conv_params->dst_stride;
-    int i, j;
-    const int fo_vert = filter_params_y->taps / 2 - 1;
-    const int do_average = conv_params->do_average;
-    const uint8_t *const src_ptr = src - fo_vert * src_stride;
-    const int bits =
-        FILTER_BITS - conv_params->round_0 - (conv_params->round_1 - 1);
-    const int16_t *y_filter = av1_get_interp_filter_subpel_kernel(
-        *filter_params_y, subpel_y_q4 & SUBPEL_MASK);
-    const __m128i coeffs_y8 = _mm_loadu_si128((__m128i *)y_filter);
-    const __m256i coeffs_y = _mm256_insertf128_si256(
-        _mm256_castsi128_si256(coeffs_y8), coeffs_y8, 1);
+  CONV_BUF_TYPE *dst = conv_params->dst;
+  int dst_stride = conv_params->dst_stride;
+  int i, j;
+  const int fo_vert = filter_params_y->taps / 2 - 1;
+  const uint8_t *const src_ptr = src - fo_vert * src_stride;
+  const int bits =
+      FILTER_BITS - conv_params->round_0 - (conv_params->round_1 - 1);
+  const __m256i avg_mask = _mm256_set1_epi32(conv_params->do_average ? -1 : 0);
+  __m256i coeffs[4], s[8];
 
-    (void)conv_params;
+  prepare_coeffs(filter_params_y, subpel_y_q4, coeffs);
 
-    // right shift all filter co-efficients by 1 to reduce the bits required.
-    // This extra right shift will be taken care of at the end while rounding
-    // the result. Since all filter co-efficients are even, this change will not
-    // affect the end result
-    const __m256i coeffs_y_1 = _mm256_srai_epi16(coeffs_y, 1);
+  (void)conv_params;
+  (void)filter_params_x;
+  (void)subpel_x_q4;
+  (void)dst0;
+  (void)dst_stride0;
 
-    // coeffs 0 1 0 1 0 1 0 1
-    const __m256i coeff_01 =
-        _mm256_shuffle_epi8(coeffs_y_1, _mm256_set1_epi16(0x0200u));
-    // coeffs 2 3 2 3 2 3 2 3
-    const __m256i coeff_23 =
-        _mm256_shuffle_epi8(coeffs_y_1, _mm256_set1_epi16(0x0604u));
-    // coeffs 4 5 4 5 4 5 4 5
-    const __m256i coeff_45 =
-        _mm256_shuffle_epi8(coeffs_y_1, _mm256_set1_epi16(0x0a08u));
-    // coeffs 6 7 6 7 6 7 6 7
-    const __m256i coeff_67 =
-        _mm256_shuffle_epi8(coeffs_y_1, _mm256_set1_epi16(0x0e0cu));
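+  // Pre-load and interleave source rows 0..6 once per 16-wide column strip;
+  // the vertical loop below then reads only the two new rows per iteration
+  // (src6 carries the last one forward) and rotates the s[] window.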
+  for (j = 0; j < w; j += 16) {
+    const uint8_t *data = &src_ptr[j];
+    __m256i src6;
 
-    const __m256i shuf = _mm256_load_si256((__m256i const *)g_shuf1);
+    // Load lines a and b. Line a to lower 128, line b to upper 128
+    const __m256i src_01a = _mm256_permute2x128_si256(
+        _mm256_castsi128_si256(
+            _mm_loadu_si128((__m128i *)(data + 0 * src_stride))),
+        _mm256_castsi128_si256(
+            _mm_loadu_si128((__m128i *)(data + 1 * src_stride))),
+        0x20);
 
-    (void)filter_params_x;
-    (void)subpel_x_q4;
-    (void)dst0;
-    (void)dst_stride0;
+    const __m256i src_12a = _mm256_permute2x128_si256(
+        _mm256_castsi128_si256(
+            _mm_loadu_si128((__m128i *)(data + 1 * src_stride))),
+        _mm256_castsi128_si256(
+            _mm_loadu_si128((__m128i *)(data + 2 * src_stride))),
+        0x20);
 
-    for (i = 0; i < h; ++i) {
-      for (j = 0; j < w; j += 16) {
-        const uint8_t *data = &src_ptr[i * src_stride + j];
-        // Load lines a and b. Line a to lower 128, line b to upper 128
-        const __m256i src_01a = _mm256_permute2x128_si256(
-            _mm256_castsi128_si256(
-                _mm_loadu_si128((__m128i *)(data + 0 * src_stride))),
-            _mm256_castsi128_si256(
-                _mm_loadu_si128((__m128i *)(data + 1 * src_stride))),
-            0x20);
-        const __m256i src_23a = _mm256_permute2x128_si256(
-            _mm256_castsi128_si256(
-                _mm_loadu_si128((__m128i *)(data + 2 * src_stride))),
-            _mm256_castsi128_si256(
-                _mm_loadu_si128((__m128i *)(data + 3 * src_stride))),
-            0x20);
-        const __m256i src_45a = _mm256_permute2x128_si256(
-            _mm256_castsi128_si256(
-                _mm_loadu_si128((__m128i *)(data + 4 * src_stride))),
-            _mm256_castsi128_si256(
-                _mm_loadu_si128((__m128i *)(data + 5 * src_stride))),
-            0x20);
-        const __m256i src_67a = _mm256_permute2x128_si256(
-            _mm256_castsi128_si256(
-                _mm_loadu_si128((__m128i *)(data + 6 * src_stride))),
-            _mm256_castsi128_si256(
-                _mm_loadu_si128((__m128i *)(data + 7 * src_stride))),
-            0x20);
+    const __m256i src_23a = _mm256_permute2x128_si256(
+        _mm256_castsi128_si256(
+            _mm_loadu_si128((__m128i *)(data + 2 * src_stride))),
+        _mm256_castsi128_si256(
+            _mm_loadu_si128((__m128i *)(data + 3 * src_stride))),
+        0x20);
 
-        // Permute across lanes. (a_lo a_hi b_lo b_hi -> a_lo b_lo a_hi b_hi)
-        const __m256i src_01b = _mm256_permute4x64_epi64(src_01a, 0xd8);
-        const __m256i src_23b = _mm256_permute4x64_epi64(src_23a, 0xd8);
-        const __m256i src_45b = _mm256_permute4x64_epi64(src_45a, 0xd8);
-        const __m256i src_67b = _mm256_permute4x64_epi64(src_67a, 0xd8);
-        // Interleave a and b within lanes.
-        const __m256i src_01 = _mm256_shuffle_epi8(src_01b, shuf);
-        const __m256i src_23 = _mm256_shuffle_epi8(src_23b, shuf);
-        const __m256i src_45 = _mm256_shuffle_epi8(src_45b, shuf);
-        const __m256i src_67 = _mm256_shuffle_epi8(src_67b, shuf);
+    const __m256i src_34a = _mm256_permute2x128_si256(
+        _mm256_castsi128_si256(
+            _mm_loadu_si128((__m128i *)(data + 3 * src_stride))),
+        _mm256_castsi128_si256(
+            _mm_loadu_si128((__m128i *)(data + 4 * src_stride))),
+        0x20);
 
-        // Filter source pixels
-        const __m256i res_01 = _mm256_maddubs_epi16(src_01, coeff_01);
-        const __m256i res_23 = _mm256_maddubs_epi16(src_23, coeff_23);
-        const __m256i res_45 = _mm256_maddubs_epi16(src_45, coeff_45);
-        const __m256i res_67 = _mm256_maddubs_epi16(src_67, coeff_67);
+    const __m256i src_45a = _mm256_permute2x128_si256(
+        _mm256_castsi128_si256(
+            _mm_loadu_si128((__m128i *)(data + 4 * src_stride))),
+        _mm256_castsi128_si256(
+            _mm_loadu_si128((__m128i *)(data + 5 * src_stride))),
+        0x20);
 
-        // order: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
-        __m256i res = _mm256_add_epi16(_mm256_add_epi16(res_01, res_45),
-                                       _mm256_add_epi16(res_23, res_67));
+    src6 = _mm256_castsi128_si256(
+        _mm_loadu_si128((__m128i *)(data + 6 * src_stride)));
+    const __m256i src_56a = _mm256_permute2x128_si256(
+        _mm256_castsi128_si256(
+            _mm_loadu_si128((__m128i *)(data + 5 * src_stride))),
+        src6, 0x20);
 
-        const __m256i res_01_32b =
-            _mm256_cvtepi16_epi32(_mm256_castsi256_si128(res));
-        const __m256i res_23_32b =
-            _mm256_cvtepi16_epi32(_mm256_extracti128_si256(res, 1));
+    s[0] = _mm256_unpacklo_epi8(src_01a, src_12a);
+    s[1] = _mm256_unpacklo_epi8(src_23a, src_34a);
+    s[2] = _mm256_unpacklo_epi8(src_45a, src_56a);
 
-        const __m256i res_01_shift = _mm256_slli_epi32(res_01_32b, bits);
-        const __m256i res_23_shift = _mm256_slli_epi32(res_23_32b, bits);
+    s[4] = _mm256_unpackhi_epi8(src_01a, src_12a);
+    s[5] = _mm256_unpackhi_epi8(src_23a, src_34a);
+    s[6] = _mm256_unpackhi_epi8(src_45a, src_56a);
 
-        // Accumulate values into the destination buffer
-        __m256i *const p = (__m256i *)&dst[i * dst_stride + j];
-        if (do_average) {
-          const __m256i dst_lo = _mm256_loadu_si256(p + 0);
-          const __m256i dst_hi = _mm256_loadu_si256(p + 1);
-          const __m256i res_lo = _mm256_add_epi32(dst_lo, res_01_shift);
-          const __m256i res_hi = _mm256_add_epi32(dst_hi, res_23_shift);
-          _mm256_storeu_si256(p + 0, res_lo);
-          if (w - j > 8) {
-            _mm256_storeu_si256(p + 1, res_hi);
-          }
-        } else {
-          _mm256_storeu_si256(p + 0, res_01_shift);
-          if (w - j > 8) {
-            _mm256_storeu_si256(p + 1, res_23_shift);
-          }
-        }
+    for (i = 0; i < h; i += 2) {
+      data = &src_ptr[i * src_stride + j];
+      const __m256i src_67a = _mm256_permute2x128_si256(
+          src6, _mm256_castsi128_si256(
+                    _mm_loadu_si128((__m128i *)(data + 7 * src_stride))),
+          0x20);
+
+      src6 = _mm256_castsi128_si256(
+          _mm_loadu_si128((__m128i *)(data + 8 * src_stride)));
+      const __m256i src_78a = _mm256_permute2x128_si256(
+          _mm256_castsi128_si256(
+              _mm_loadu_si128((__m128i *)(data + 7 * src_stride))),
+          src6, 0x20);
+
+      s[3] = _mm256_unpacklo_epi8(src_67a, src_78a);
+      s[7] = _mm256_unpackhi_epi8(src_67a, src_78a);
+
+      const __m256i res_lo = convolve(s, coeffs);
+
+      const __m256i res_lo_0_32b =
+          _mm256_cvtepi16_epi32(_mm256_castsi256_si128(res_lo));
+      const __m256i res_lo_0_shift = _mm256_slli_epi32(res_lo_0_32b, bits);
+
+      // Accumulate values into the destination buffer
+      add_store_aligned(&dst[i * dst_stride + j], &res_lo_0_shift, &avg_mask);
+
+      const __m256i res_lo_1_32b =
+          _mm256_cvtepi16_epi32(_mm256_extracti128_si256(res_lo, 1));
+      const __m256i res_lo_1_shift = _mm256_slli_epi32(res_lo_1_32b, bits);
+
+      add_store_aligned(&dst[i * dst_stride + j + dst_stride], &res_lo_1_shift,
+                        &avg_mask);
+
+      if (w - j > 8) {
+        const __m256i res_hi = convolve(s + 4, coeffs);
+
+        const __m256i res_hi_0_32b =
+            _mm256_cvtepi16_epi32(_mm256_castsi256_si128(res_hi));
+        const __m256i res_hi_0_shift = _mm256_slli_epi32(res_hi_0_32b, bits);
+
+        add_store_aligned(&dst[i * dst_stride + j + 8], &res_hi_0_shift,
+                          &avg_mask);
+
+        const __m256i res_hi_1_32b =
+            _mm256_cvtepi16_epi32(_mm256_extracti128_si256(res_hi, 1));
+        const __m256i res_hi_1_shift = _mm256_slli_epi32(res_hi_1_32b, bits);
+
+        add_store_aligned(&dst[i * dst_stride + j + 8 + dst_stride],
+                          &res_hi_1_shift, &avg_mask);
       }
+      s[0] = s[1];
+      s[1] = s[2];
+      s[2] = s[3];
+
+      s[4] = s[5];
+      s[5] = s[6];
+      s[6] = s[7];
     }
   }
 }
@@ -485,122 +485,148 @@
                             InterpFilterParams *filter_params_y,
                             const int subpel_x_q4, const int subpel_y_q4,
                             ConvolveParams *conv_params) {
-  if (w < 16) {
-    av1_convolve_y_sr_sse2(src, src_stride, dst, dst_stride, w, h,
-                           filter_params_x, filter_params_y, subpel_x_q4,
-                           subpel_y_q4, conv_params);
-    return;
-  }
-  {
-    int i, j;
-    const int fo_vert = filter_params_y->taps / 2 - 1;
-    const uint8_t *const src_ptr = src - fo_vert * src_stride;
-    const int16_t *y_filter = av1_get_interp_filter_subpel_kernel(
-        *filter_params_y, subpel_y_q4 & SUBPEL_MASK);
-    const __m128i coeffs_y8 = _mm_loadu_si128((__m128i *)y_filter);
-    const __m256i coeffs_y = _mm256_insertf128_si256(
-        _mm256_castsi128_si256(coeffs_y8), coeffs_y8, 1);
-    // right shift is F-1 because we are already dividing
-    // filter co-efficients by 2
-    const int right_shift_bits = (FILTER_BITS - 1);
-    const __m128i right_shift = _mm_cvtsi32_si128(right_shift_bits);
-    const __m256i right_shift_const =
-        _mm256_set1_epi16((1 << right_shift_bits) >> 1);
+  int i, j;
+  const int fo_vert = filter_params_y->taps / 2 - 1;
+  const uint8_t *const src_ptr = src - fo_vert * src_stride;
 
-    // right shift all filter co-efficients by 1 to reduce the bits required.
-    // This extra right shift will be taken care of at the end while rounding
-    // the result.
-    // Since all filter co-efficients are even, this change will not affect the
-    // end result
-    const __m256i coeffs_y_1 = _mm256_srai_epi16(coeffs_y, 1);
+  // The right shift is F - 1 because the filter coefficients have already
+  // been divided by 2.
+  const int right_shift_bits = (FILTER_BITS - 1);
+  const __m128i right_shift = _mm_cvtsi32_si128(right_shift_bits);
+  const __m256i right_shift_const =
+      _mm256_set1_epi16((1 << right_shift_bits) >> 1);
+  __m256i coeffs[4], s[8];
 
-    // coeffs 0 1 0 1 0 1 0 1
-    const __m256i coeff_01 =
-        _mm256_shuffle_epi8(coeffs_y_1, _mm256_set1_epi16(0x0200u));
-    // coeffs 2 3 2 3 2 3 2 3
-    const __m256i coeff_23 =
-        _mm256_shuffle_epi8(coeffs_y_1, _mm256_set1_epi16(0x0604u));
-    // coeffs 4 5 4 5 4 5 4 5
-    const __m256i coeff_45 =
-        _mm256_shuffle_epi8(coeffs_y_1, _mm256_set1_epi16(0x0a08u));
-    // coeffs 6 7 6 7 6 7 6 7
-    const __m256i coeff_67 =
-        _mm256_shuffle_epi8(coeffs_y_1, _mm256_set1_epi16(0x0e0cu));
+  prepare_coeffs(filter_params_y, subpel_y_q4, coeffs);
 
-    const __m256i shuf = _mm256_load_si256((__m256i const *)g_shuf1);
+  (void)filter_params_x;
+  (void)subpel_x_q4;
+  (void)conv_params;
 
-    (void)filter_params_x;
-    (void)subpel_x_q4;
+  for (j = 0; j < w; j += 16) {
+    const uint8_t *data = &src_ptr[j];
+    __m256i src6;
 
-    for (i = 0; i < h; ++i) {
-      for (j = 0; j < w; j += 16) {
-        const uint8_t *data = &src_ptr[i * src_stride + j];
-        // Load lines a and b. Line a to lower 128, line b to upper 128
-        const __m256i src_01a = _mm256_permute2x128_si256(
-            _mm256_castsi128_si256(
-                _mm_loadu_si128((__m128i *)(data + 0 * src_stride))),
-            _mm256_castsi128_si256(
-                _mm_loadu_si128((__m128i *)(data + 1 * src_stride))),
-            0x20);
-        const __m256i src_23a = _mm256_permute2x128_si256(
-            _mm256_castsi128_si256(
-                _mm_loadu_si128((__m128i *)(data + 2 * src_stride))),
-            _mm256_castsi128_si256(
-                _mm_loadu_si128((__m128i *)(data + 3 * src_stride))),
-            0x20);
-        const __m256i src_45a = _mm256_permute2x128_si256(
-            _mm256_castsi128_si256(
-                _mm_loadu_si128((__m128i *)(data + 4 * src_stride))),
-            _mm256_castsi128_si256(
-                _mm_loadu_si128((__m128i *)(data + 5 * src_stride))),
-            0x20);
-        const __m256i src_67a = _mm256_permute2x128_si256(
-            _mm256_castsi128_si256(
-                _mm_loadu_si128((__m128i *)(data + 6 * src_stride))),
-            _mm256_castsi128_si256(
-                _mm_loadu_si128((__m128i *)(data + 7 * src_stride))),
-            0x20);
+    // Load lines a and b. Line a to lower 128, line b to upper 128
+    const __m256i src_01a = _mm256_permute2x128_si256(
+        _mm256_castsi128_si256(
+            _mm_loadu_si128((__m128i *)(data + 0 * src_stride))),
+        _mm256_castsi128_si256(
+            _mm_loadu_si128((__m128i *)(data + 1 * src_stride))),
+        0x20);
 
-        // Permute across lanes. (a_lo a_hi b_lo b_hi -> a_lo b_lo a_hi b_hi)
-        const __m256i src_01b = _mm256_permute4x64_epi64(src_01a, 0xd8);
-        const __m256i src_23b = _mm256_permute4x64_epi64(src_23a, 0xd8);
-        const __m256i src_45b = _mm256_permute4x64_epi64(src_45a, 0xd8);
-        const __m256i src_67b = _mm256_permute4x64_epi64(src_67a, 0xd8);
-        // Interleave a and b within lanes.
-        const __m256i src_01 = _mm256_shuffle_epi8(src_01b, shuf);
-        const __m256i src_23 = _mm256_shuffle_epi8(src_23b, shuf);
-        const __m256i src_45 = _mm256_shuffle_epi8(src_45b, shuf);
-        const __m256i src_67 = _mm256_shuffle_epi8(src_67b, shuf);
+    const __m256i src_12a = _mm256_permute2x128_si256(
+        _mm256_castsi128_si256(
+            _mm_loadu_si128((__m128i *)(data + 1 * src_stride))),
+        _mm256_castsi128_si256(
+            _mm_loadu_si128((__m128i *)(data + 2 * src_stride))),
+        0x20);
 
-        // Filter source pixels
-        const __m256i res_01 = _mm256_maddubs_epi16(src_01, coeff_01);
-        const __m256i res_23 = _mm256_maddubs_epi16(src_23, coeff_23);
-        const __m256i res_45 = _mm256_maddubs_epi16(src_45, coeff_45);
-        const __m256i res_67 = _mm256_maddubs_epi16(src_67, coeff_67);
+    const __m256i src_23a = _mm256_permute2x128_si256(
+        _mm256_castsi128_si256(
+            _mm_loadu_si128((__m128i *)(data + 2 * src_stride))),
+        _mm256_castsi128_si256(
+            _mm_loadu_si128((__m128i *)(data + 3 * src_stride))),
+        0x20);
 
-        // order: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
-        __m256i res_16b = _mm256_add_epi16(_mm256_add_epi16(res_01, res_45),
-                                           _mm256_add_epi16(res_23, res_67));
+    const __m256i src_34a = _mm256_permute2x128_si256(
+        _mm256_castsi128_si256(
+            _mm_loadu_si128((__m128i *)(data + 3 * src_stride))),
+        _mm256_castsi128_si256(
+            _mm_loadu_si128((__m128i *)(data + 4 * src_stride))),
+        0x20);
+
+    const __m256i src_45a = _mm256_permute2x128_si256(
+        _mm256_castsi128_si256(
+            _mm_loadu_si128((__m128i *)(data + 4 * src_stride))),
+        _mm256_castsi128_si256(
+            _mm_loadu_si128((__m128i *)(data + 5 * src_stride))),
+        0x20);
+
+    src6 = _mm256_castsi128_si256(
+        _mm_loadu_si128((__m128i *)(data + 6 * src_stride)));
+    const __m256i src_56a = _mm256_permute2x128_si256(
+        _mm256_castsi128_si256(
+            _mm_loadu_si128((__m128i *)(data + 5 * src_stride))),
+        src6, 0x20);
+
+    s[0] = _mm256_unpacklo_epi8(src_01a, src_12a);
+    s[1] = _mm256_unpacklo_epi8(src_23a, src_34a);
+    s[2] = _mm256_unpacklo_epi8(src_45a, src_56a);
+
+    s[4] = _mm256_unpackhi_epi8(src_01a, src_12a);
+    s[5] = _mm256_unpackhi_epi8(src_23a, src_34a);
+    s[6] = _mm256_unpackhi_epi8(src_45a, src_56a);
+
+    for (i = 0; i < h; i += 2) {
+      data = &src_ptr[i * src_stride + j];
+      const __m256i src_67a = _mm256_permute2x128_si256(
+          src6, _mm256_castsi128_si256(
+                    _mm_loadu_si128((__m128i *)(data + 7 * src_stride))),
+          0x20);
+
+      src6 = _mm256_castsi128_si256(
+          _mm_loadu_si128((__m128i *)(data + 8 * src_stride)));
+      const __m256i src_78a = _mm256_permute2x128_si256(
+          _mm256_castsi128_si256(
+              _mm_loadu_si128((__m128i *)(data + 7 * src_stride))),
+          src6, 0x20);
+
+      s[3] = _mm256_unpacklo_epi8(src_67a, src_78a);
+      s[7] = _mm256_unpackhi_epi8(src_67a, src_78a);
+
+      const __m256i res_lo = convolve(s, coeffs);
+
+      /* rounding code */
+      // shift by F - 1
+      const __m256i res_16b_lo = _mm256_sra_epi16(
+          _mm256_add_epi16(res_lo, right_shift_const), right_shift);
+      // 8 bit conversion and saturation to uint8
+      __m256i res_8b_lo = _mm256_packus_epi16(res_16b_lo, res_16b_lo);
+
+      if (w - j > 8) {
+        const __m256i res_hi = convolve(s + 4, coeffs);
 
         /* rounding code */
         // shift by F - 1
-        __m256i res_16b_shift = _mm256_sra_epi16(
-            _mm256_add_epi16(res_16b, right_shift_const), right_shift);
+        const __m256i res_16b_hi = _mm256_sra_epi16(
+            _mm256_add_epi16(res_hi, right_shift_const), right_shift);
         // 8 bit conversion and saturation to uint8
-        __m256i res_8b = _mm256_packus_epi16(res_16b_shift, res_16b_shift);
-        res_8b = _mm256_permute4x64_epi64(res_8b, 216);
-        // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
-        __m128i res = _mm256_castsi256_si128(res_8b);
+        __m256i res_8b_hi = _mm256_packus_epi16(res_16b_hi, res_16b_hi);
 
-        // Store values into the destination buffer
-        if (w - j > 8) {
-          _mm_storeu_si128((__m128i *)&dst[i * dst_stride + j], res);
-        } else if (w - j > 4) {
-          _mm_storel_epi64((__m128i *)&dst[i * dst_stride + j], res);
+        __m256i res_a = _mm256_unpacklo_epi64(res_8b_lo, res_8b_hi);
+
+        const __m128i res_0 = _mm256_castsi256_si128(res_a);
+        const __m128i res_1 = _mm256_extracti128_si256(res_a, 1);
+
+        _mm_storeu_si128((__m128i *)&dst[i * dst_stride + j], res_0);
+        _mm_storeu_si128((__m128i *)&dst[i * dst_stride + j + dst_stride],
+                         res_1);
+      } else {
+        const __m128i res_0 = _mm256_castsi256_si128(res_8b_lo);
+        const __m128i res_1 = _mm256_extracti128_si256(res_8b_lo, 1);
+        if (w - j > 4) {
+          _mm_storel_epi64((__m128i *)&dst[i * dst_stride + j], res_0);
+          _mm_storel_epi64((__m128i *)&dst[i * dst_stride + j + dst_stride],
+                           res_1);
+        } else if (w - j > 2) {
+          xx_storel_32(&dst[i * dst_stride + j], res_0);
+          xx_storel_32(&dst[i * dst_stride + j + dst_stride], res_1);
         } else {
-          xx_storel_32(&dst[i * dst_stride + j], res);
+          __m128i *const p_0 = (__m128i *)&dst[i * dst_stride + j];
+          __m128i *const p_1 = (__m128i *)&dst[i * dst_stride + j + dst_stride];
+          *(uint16_t *)p_0 = _mm_cvtsi128_si32(res_0);
+          *(uint16_t *)p_1 = _mm_cvtsi128_si32(res_1);
         }
       }
+
+      s[0] = s[1];
+      s[1] = s[2];
+      s[2] = s[3];
+
+      s[4] = s[5];
+      s[5] = s[6];
+      s[6] = s[7];
     }
   }
 }
@@ -615,44 +641,18 @@
   int dst_stride = conv_params->dst_stride;
   int i, j;
   const int fo_horiz = filter_params_x->taps / 2 - 1;
-  const int do_average = conv_params->do_average;
   const uint8_t *const src_ptr = src - fo_horiz;
   const int bits = FILTER_BITS - conv_params->round_1;
+  const __m256i avg_mask = _mm256_set1_epi32(conv_params->do_average ? -1 : 0);
 
-  __m256i filt[4], s[4];
+  __m256i filt[4], coeffs[4];
 
-  filt[0] = _mm256_loadu_si256((__m256i const *)filt1_global_avx2);
-  filt[1] = _mm256_loadu_si256((__m256i const *)filt2_global_avx2);
-  filt[2] = _mm256_loadu_si256((__m256i const *)filt3_global_avx2);
-  filt[3] = _mm256_loadu_si256((__m256i const *)filt4_global_avx2);
+  filt[0] = _mm256_load_si256((__m256i const *)filt1_global_avx2);
+  filt[1] = _mm256_load_si256((__m256i const *)filt2_global_avx2);
+  filt[2] = _mm256_load_si256((__m256i const *)filt3_global_avx2);
+  filt[3] = _mm256_load_si256((__m256i const *)filt4_global_avx2);
 
-  const int16_t *x_filter = av1_get_interp_filter_subpel_kernel(
-      *filter_params_x, subpel_x_q4 & SUBPEL_MASK);
-
-  const __m128i coeffs_x8 = _mm_loadu_si128((__m128i *)x_filter);
-  // since not all compilers yet support _mm256_set_m128i()
-  const __m256i coeffs_x =
-      _mm256_insertf128_si256(_mm256_castsi128_si256(coeffs_x8), coeffs_x8, 1);
-
-  // right shift all filter co-efficients by 1 to reduce the bits required.
-  // This extra right shift will be taken care of at the end while rounding the
-  // result.
-  // Since all filter co-efficients are even, this change will not affect the
-  // end result
-  const __m256i coeffs_x_1 = _mm256_srai_epi16(coeffs_x, 1);
-
-  // coeffs 0 1 0 1 0 1 0 1
-  const __m256i coeff_01 =
-      _mm256_shuffle_epi8(coeffs_x_1, _mm256_set1_epi16(0x0200u));
-  // coeffs 2 3 2 3 2 3 2 3
-  const __m256i coeff_23 =
-      _mm256_shuffle_epi8(coeffs_x_1, _mm256_set1_epi16(0x0604u));
-  // coeffs 4 5 4 5 4 5 4 5
-  const __m256i coeff_45 =
-      _mm256_shuffle_epi8(coeffs_x_1, _mm256_set1_epi16(0x0a08u));
-  // coeffs 6 7 6 7 6 7 6 7
-  const __m256i coeff_67 =
-      _mm256_shuffle_epi8(coeffs_x_1, _mm256_set1_epi16(0x0e0cu));
+  prepare_coeffs(filter_params_x, subpel_x_q4, coeffs);
 
   const __m256i round_const =
       _mm256_set1_epi16((1 << (conv_params->round_0 - 1)) >> 1);
@@ -671,21 +671,8 @@
           _mm256_loadu_si256((__m256i *)&src_ptr[i * src_stride + j]),
           _MM_SHUFFLE(2, 1, 1, 0));
 
-      // filter the source buffer
-      s[0] = _mm256_shuffle_epi8(data, filt[0]);
-      s[1] = _mm256_shuffle_epi8(data, filt[1]);
-      s[2] = _mm256_shuffle_epi8(data, filt[2]);
-      s[3] = _mm256_shuffle_epi8(data, filt[3]);
+      __m256i res = convolve_x(data, coeffs, filt);
 
-      const __m256i res_0 = _mm256_maddubs_epi16(s[0], coeff_01);
-      const __m256i res_1 = _mm256_maddubs_epi16(s[1], coeff_23);
-      const __m256i res_2 = _mm256_maddubs_epi16(s[2], coeff_45);
-      const __m256i res_3 = _mm256_maddubs_epi16(s[3], coeff_67);
-
-      const __m256i res_a = _mm256_add_epi16(res_0, res_2);
-      const __m256i res_b = _mm256_add_epi16(res_1, res_3);
-
-      __m256i res = _mm256_add_epi16(res_a, res_b);
       res = _mm256_sra_epi16(_mm256_add_epi16(res, round_const), round_shift);
 
       const __m256i res_lo_round =
@@ -697,21 +684,10 @@
       const __m256i res_hi_shift = _mm256_slli_epi32(res_hi_round, bits);
 
       // Accumulate values into the destination buffer
-      __m256i *const p = (__m256i *)&dst[i * dst_stride + j];
-      if (do_average) {
-        const __m256i dst_lo = _mm256_loadu_si256(p + 0);
-        const __m256i dst_hi = _mm256_loadu_si256(p + 1);
-        const __m256i res_lo = _mm256_add_epi32(dst_lo, res_lo_shift);
-        const __m256i res_hi = _mm256_add_epi32(dst_hi, res_hi_shift);
-        _mm256_storeu_si256(p + 0, res_lo);
-        if (w - j > 8) {
-          _mm256_storeu_si256(p + 1, res_hi);
-        }
-      } else {
-        _mm256_storeu_si256(p + 0, res_lo_shift);
-        if (w - j > 8) {
-          _mm256_storeu_si256(p + 1, res_hi_shift);
-        }
+      add_store_aligned(&dst[i * dst_stride + j], &res_lo_shift, &avg_mask);
+      if (w - j > 8) {
+        add_store_aligned(&dst[i * dst_stride + j + 8], &res_hi_shift,
+                          &avg_mask);
       }
     }
   }
@@ -723,102 +699,60 @@
                             InterpFilterParams *filter_params_y,
                             const int subpel_x_q4, const int subpel_y_q4,
                             ConvolveParams *conv_params) {
-  if (w < 4) {
-    av1_convolve_x_sr_sse2(src, src_stride, dst, dst_stride, w, h,
-                           filter_params_x, filter_params_y, subpel_x_q4,
-                           subpel_y_q4, conv_params);
-    return;
-  }
-  {
-    int i, j;
-    const int fo_horiz = filter_params_x->taps / 2 - 1;
-    const uint8_t *const src_ptr = src - fo_horiz;
+  int i, j;
+  const int fo_horiz = filter_params_x->taps / 2 - 1;
+  const uint8_t *const src_ptr = src - fo_horiz;
 
-    __m256i filt[4], s[4];
+  __m256i filt[4], coeffs[4];
 
-    filt[0] = _mm256_load_si256((__m256i const *)filt1_global_avx2);
-    filt[1] = _mm256_load_si256((__m256i const *)filt2_global_avx2);
-    filt[2] = _mm256_load_si256((__m256i const *)filt3_global_avx2);
-    filt[3] = _mm256_load_si256((__m256i const *)filt4_global_avx2);
+  filt[0] = _mm256_load_si256((__m256i const *)filt1_global_avx2);
+  filt[1] = _mm256_load_si256((__m256i const *)filt2_global_avx2);
+  filt[2] = _mm256_load_si256((__m256i const *)filt3_global_avx2);
+  filt[3] = _mm256_load_si256((__m256i const *)filt4_global_avx2);
 
-    const int16_t *x_filter = av1_get_interp_filter_subpel_kernel(
-        *filter_params_x, subpel_x_q4 & SUBPEL_MASK);
+  prepare_coeffs(filter_params_x, subpel_x_q4, coeffs);
 
-    const __m128i coeffs_x8 = _mm_loadu_si128((__m128i *)x_filter);
-    // since not all compilers yet support _mm256_set_m128i()
-    const __m256i coeffs_x = _mm256_insertf128_si256(
-        _mm256_castsi128_si256(coeffs_x8), coeffs_x8, 1);
+  const __m256i round_const =
+      _mm256_set1_epi16(((1 << (conv_params->round_0 - 1)) >> 1) +
+                        ((1 << (FILTER_BITS - 1)) >> 1));
+  const __m128i round_shift = _mm_cvtsi32_si128(FILTER_BITS - 1);
 
-    // right shift all filter co-efficients by 1 to reduce the bits required.
-    // This extra right shift will be taken care of at the end while rounding
-    // the result.
-    // Since all filter co-efficients are even, this change will not affect the
-    // end result
-    const __m256i coeffs_x_1 = _mm256_srai_epi16(coeffs_x, 1);
+  (void)filter_params_y;
+  (void)subpel_y_q4;
 
-    // coeffs 0 1 0 1 0 1 0 1
-    const __m256i coeff_01 =
-        _mm256_shuffle_epi8(coeffs_x_1, _mm256_set1_epi16(0x0200u));
-    // coeffs 2 3 2 3 2 3 2 3
-    const __m256i coeff_23 =
-        _mm256_shuffle_epi8(coeffs_x_1, _mm256_set1_epi16(0x0604u));
-    // coeffs 4 5 4 5 4 5 4 5
-    const __m256i coeff_45 =
-        _mm256_shuffle_epi8(coeffs_x_1, _mm256_set1_epi16(0x0a08u));
-    // coeffs 6 7 6 7 6 7 6 7
-    const __m256i coeff_67 =
-        _mm256_shuffle_epi8(coeffs_x_1, _mm256_set1_epi16(0x0e0cu));
+  for (i = 0; i < h; ++i) {
+    for (j = 0; j < w; j += 16) {
+      // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 8 9 10 11 12 13 14 15 16 17 18
+      // 19 20 21 22 23
+      const __m256i data = _mm256_inserti128_si256(
+          _mm256_loadu_si256((__m256i *)&src_ptr[(i * src_stride) + j]),
+          _mm_loadu_si128((__m128i *)&src_ptr[(i * src_stride) + (j + 8)]), 1);
 
-    const __m256i round_const =
-        _mm256_set1_epi16(((1 << (conv_params->round_0 - 1)) >> 1) +
-                          ((1 << (FILTER_BITS - 1)) >> 1));
-    const __m128i round_shift = _mm_cvtsi32_si128(FILTER_BITS - 1);
+      __m256i res_16b = convolve_x(data, coeffs, filt);
 
-    (void)filter_params_y;
-    (void)subpel_y_q4;
+      // Combine the round_0 and final rounding stages into a single rounding
+      res_16b =
+          _mm256_sra_epi16(_mm256_add_epi16(res_16b, round_const), round_shift);
 
-    for (i = 0; i < h; ++i) {
-      for (j = 0; j < w; j += 16) {
-        // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 8 9 10 11 12 13 14 15 16 17 18
-        // 19 20 21 22 23
-        const __m256i data = _mm256_inserti128_si256(
-            _mm256_loadu_si256((__m256i *)&src_ptr[(i * src_stride) + j]),
-            _mm_loadu_si128((__m128i *)&src_ptr[(i * src_stride) + (j + 8)]),
-            1);
+      /* rounding code */
+      // 8 bit conversion and saturation to uint8
+      __m256i res_8b = _mm256_packus_epi16(res_16b, res_16b);
 
-        // filter the source buffer
-        s[0] = _mm256_shuffle_epi8(data, filt[0]);
-        s[1] = _mm256_shuffle_epi8(data, filt[1]);
-        s[2] = _mm256_shuffle_epi8(data, filt[2]);
-        s[3] = _mm256_shuffle_epi8(data, filt[3]);
-
-        const __m256i res_0 = _mm256_maddubs_epi16(s[0], coeff_01);
-        const __m256i res_1 = _mm256_maddubs_epi16(s[1], coeff_23);
-        const __m256i res_2 = _mm256_maddubs_epi16(s[2], coeff_45);
-        const __m256i res_3 = _mm256_maddubs_epi16(s[3], coeff_67);
-
-        const __m256i res_a = _mm256_add_epi16(res_0, res_2);
-        const __m256i res_b = _mm256_add_epi16(res_1, res_3);
-
-        __m256i res_16b = _mm256_add_epi16(res_a, res_b);
-        // Combine V round and 2F-H-V round into a single rounding
-        res_16b = _mm256_sra_epi16(_mm256_add_epi16(res_16b, round_const),
-                                   round_shift);
-
-        /* rounding code */
-        // 8 bit conversion and saturation to uint8
-        __m256i res_8b = _mm256_packus_epi16(res_16b, res_16b);
-        res_8b = _mm256_permute4x64_epi64(res_8b, 216);
+      // Store values into the destination buffer
+      if (w - j > 8) {
         // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+        res_8b = _mm256_permute4x64_epi64(res_8b, 216);
         __m128i res = _mm256_castsi256_si128(res_8b);
-
-        // Store values into the destination buffer
-        if (w - j > 8) {
-          _mm_storeu_si128((__m128i *)&dst[i * dst_stride + j], res);
-        } else if (w - j > 4) {
+        _mm_storeu_si128((__m128i *)&dst[i * dst_stride + j], res);
+      } else {
+        __m128i res = _mm256_castsi256_si128(res_8b);
+        if (w - j > 4) {
           _mm_storel_epi64((__m128i *)&dst[i * dst_stride + j], res);
-        } else {
+        } else if (w - j > 2) {
           xx_storel_32(&dst[i * dst_stride + j], res);
+        } else {
+          __m128i *const p = (__m128i *)&dst[i * dst_stride + j];
+          *(uint16_t *)p = _mm_cvtsi128_si32(res);
         }
       }
     }