Refactor Neon compound convolution functions 2/7

Refactor and tidy up the Armv8.0 MLA implementation of
av1_dist_wtd_convolve_x_neon:

1) Use a rounding-right-shift-and-accumulate instruction instead of
   adding the 'horiz_const' and 'round_offset' constants in two
   separate steps (see the sketch below).
2) Return unsigned types from the convolution kernel inline functions,
   removing the need for a lot of bulky casting of the result vectors.
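
A minimal standalone sketch of the rounding change in 1), not taken
from the patch itself; old_style/new_style are illustrative names and
ROUND0_BITS is defined locally to stand in for the library's constant:

  #include <arm_neon.h>

  #define ROUND0_BITS 3  // assumed to match the value in the AV1 headers

  // Before: add a shim so a non-rounding shift still rounds correctly
  // (the real code folded this shim into the accumulator's initial
  // value), then add the compound round_offset in a separate step. The
  // shift is (ROUND0_BITS - 1) because the filter taps were halved.
  static inline int16x4_t old_style(int16x4_t sum, int16x4_t round_offset) {
    const int16x4_t horiz_const = vdup_n_s16(1 << ((ROUND0_BITS - 1) - 1));
    int16x4_t res = vshr_n_s16(vadd_s16(sum, horiz_const), ROUND0_BITS - 1);
    return vadd_s16(res, round_offset);
  }

  // After: a single rounding-right-shift-and-accumulate (SRSRA) folds
  // the rounding bias and the round_offset addition into one
  // instruction; both helpers return identical results.
  static inline int16x4_t new_style(int16x4_t sum, int16x4_t round_offset) {
    return vrsra_n_s16(round_offset, sum, ROUND0_BITS - 1);
  }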

Change-Id: Ibb12f9cef1df2522b71add85bd4263c136a36178
diff --git a/aom_dsp/arm/transpose_neon.h b/aom_dsp/arm/transpose_neon.h
index 8b97638..d151c58 100644
--- a/aom_dsp/arm/transpose_neon.h
+++ b/aom_dsp/arm/transpose_neon.h
@@ -676,6 +676,39 @@
   out[7] = d3.val[1];
 }
 
+static INLINE void transpose_u16_4x4d(uint16x4_t *a0, uint16x4_t *a1,
+                                      uint16x4_t *a2, uint16x4_t *a3) {
+  // Swap 16 bit elements. Goes from:
+  // a0: 00 01 02 03
+  // a1: 10 11 12 13
+  // a2: 20 21 22 23
+  // a3: 30 31 32 33
+  // to:
+  // b0.val[0]: 00 10 02 12
+  // b0.val[1]: 01 11 03 13
+  // b1.val[0]: 20 30 22 32
+  // b1.val[1]: 21 31 23 33
+
+  const uint16x4x2_t b0 = vtrn_u16(*a0, *a1);
+  const uint16x4x2_t b1 = vtrn_u16(*a2, *a3);
+
+  // Swap 32 bit elements resulting in:
+  // c0.val[0]: 00 10 20 30
+  // c0.val[1]: 02 12 22 32
+  // c1.val[0]: 01 11 21 31
+  // c1.val[1]: 03 13 23 33
+
+  const uint32x2x2_t c0 = vtrn_u32(vreinterpret_u32_u16(b0.val[0]),
+                                   vreinterpret_u32_u16(b1.val[0]));
+  const uint32x2x2_t c1 = vtrn_u32(vreinterpret_u32_u16(b0.val[1]),
+                                   vreinterpret_u32_u16(b1.val[1]));
+
+  *a0 = vreinterpret_u16_u32(c0.val[0]);
+  *a1 = vreinterpret_u16_u32(c1.val[0]);
+  *a2 = vreinterpret_u16_u32(c0.val[1]);
+  *a3 = vreinterpret_u16_u32(c1.val[1]);
+}
+
 static INLINE void transpose_s16_4x4d(int16x4_t *a0, int16x4_t *a1,
                                       int16x4_t *a2, int16x4_t *a3) {
   // Swap 16 bit elements. Goes from:
diff --git a/av1/common/arm/jnt_convolve_neon.c b/av1/common/arm/jnt_convolve_neon.c
index 22d915f..177a837 100644
--- a/av1/common/arm/jnt_convolve_neon.c
+++ b/av1/common/arm/jnt_convolve_neon.c
@@ -1394,46 +1394,50 @@
 
 #else  // !(defined(__aarch64__) && defined(__ARM_FEATURE_DOTPROD))
 
-static INLINE int16x4_t
-convolve8_x_4x4_s16(const int16x4_t s0, const int16x4_t s1, const int16x4_t s2,
-                    const int16x4_t s3, const int16x4_t s4, const int16x4_t s5,
-                    const int16x4_t s6, const int16x4_t s7,
-                    const int16x8_t filter, const int16x4_t horiz_const) {
-  const int16x4_t filter_lo = vget_low_s16(filter);
-  const int16x4_t filter_hi = vget_high_s16(filter);
-  int16x4_t sum = horiz_const;
+static INLINE uint16x4_t convolve8_4_x(const int16x4_t s0, const int16x4_t s1,
+                                       const int16x4_t s2, const int16x4_t s3,
+                                       const int16x4_t s4, const int16x4_t s5,
+                                       const int16x4_t s6, const int16x4_t s7,
+                                       const int16x8_t x_filter,
+                                       const int16x4_t round_offset) {
+  const int16x4_t x_filter_0_3 = vget_low_s16(x_filter);
+  const int16x4_t x_filter_4_7 = vget_high_s16(x_filter);
 
-  sum = vmla_lane_s16(sum, s0, filter_lo, 0);
-  sum = vmla_lane_s16(sum, s1, filter_lo, 1);
-  sum = vmla_lane_s16(sum, s2, filter_lo, 2);
-  sum = vmla_lane_s16(sum, s3, filter_lo, 3);
-  sum = vmla_lane_s16(sum, s4, filter_hi, 0);
-  sum = vmla_lane_s16(sum, s5, filter_hi, 1);
-  sum = vmla_lane_s16(sum, s6, filter_hi, 2);
-  sum = vmla_lane_s16(sum, s7, filter_hi, 3);
+  int16x4_t sum = vmul_lane_s16(s0, x_filter_0_3, 0);
+  sum = vmla_lane_s16(sum, s1, x_filter_0_3, 1);
+  sum = vmla_lane_s16(sum, s2, x_filter_0_3, 2);
+  sum = vmla_lane_s16(sum, s3, x_filter_0_3, 3);
+  sum = vmla_lane_s16(sum, s4, x_filter_4_7, 0);
+  sum = vmla_lane_s16(sum, s5, x_filter_4_7, 1);
+  sum = vmla_lane_s16(sum, s6, x_filter_4_7, 2);
+  sum = vmla_lane_s16(sum, s7, x_filter_4_7, 3);
 
-  return sum;
+  // We halved the convolution filter values so -1 from the right shift.
+  int16x4_t res = vrsra_n_s16(round_offset, sum, ROUND0_BITS - 1);
+  return vreinterpret_u16_s16(res);
 }
 
-static INLINE int16x8_t
-convolve8_x_8x8_s16(const int16x8_t s0, const int16x8_t s1, const int16x8_t s2,
-                    const int16x8_t s3, const int16x8_t s4, const int16x8_t s5,
-                    const int16x8_t s6, const int16x8_t s7,
-                    const int16x8_t filter, const int16x8_t horiz_const) {
-  const int16x4_t filter_lo = vget_low_s16(filter);
-  const int16x4_t filter_hi = vget_high_s16(filter);
-  int16x8_t sum = horiz_const;
+static INLINE uint16x8_t convolve8_8_x(const int16x8_t s0, const int16x8_t s1,
+                                       const int16x8_t s2, const int16x8_t s3,
+                                       const int16x8_t s4, const int16x8_t s5,
+                                       const int16x8_t s6, const int16x8_t s7,
+                                       const int16x8_t x_filter,
+                                       const int16x8_t round_offset) {
+  const int16x4_t x_filter_0_3 = vget_low_s16(x_filter);
+  const int16x4_t x_filter_4_7 = vget_high_s16(x_filter);
 
-  sum = vmlaq_lane_s16(sum, s0, filter_lo, 0);
-  sum = vmlaq_lane_s16(sum, s1, filter_lo, 1);
-  sum = vmlaq_lane_s16(sum, s2, filter_lo, 2);
-  sum = vmlaq_lane_s16(sum, s3, filter_lo, 3);
-  sum = vmlaq_lane_s16(sum, s4, filter_hi, 0);
-  sum = vmlaq_lane_s16(sum, s5, filter_hi, 1);
-  sum = vmlaq_lane_s16(sum, s6, filter_hi, 2);
-  sum = vmlaq_lane_s16(sum, s7, filter_hi, 3);
+  int16x8_t sum = vmulq_lane_s16(s0, x_filter_0_3, 0);
+  sum = vmlaq_lane_s16(sum, s1, x_filter_0_3, 1);
+  sum = vmlaq_lane_s16(sum, s2, x_filter_0_3, 2);
+  sum = vmlaq_lane_s16(sum, s3, x_filter_0_3, 3);
+  sum = vmlaq_lane_s16(sum, s4, x_filter_4_7, 0);
+  sum = vmlaq_lane_s16(sum, s5, x_filter_4_7, 1);
+  sum = vmlaq_lane_s16(sum, s6, x_filter_4_7, 2);
+  sum = vmlaq_lane_s16(sum, s7, x_filter_4_7, 3);
 
-  return sum;
+  // We halved the convolution filter values so -1 from the right shift.
+  int16x8_t res = vrsraq_n_s16(round_offset, sum, ROUND0_BITS - 1);
+  return vreinterpretq_u16_s16(res);
 }
 
 void av1_dist_wtd_convolve_x_neon(const uint8_t *src, int src_stride,
@@ -1441,131 +1445,103 @@
                                   const InterpFilterParams *filter_params_x,
                                   const int subpel_x_qn,
                                   ConvolveParams *conv_params) {
-  assert(!(w % 4));
-  assert(!(h % 4));
+  assert(w % 4 == 0);
+  assert(h % 4 == 0);
 
-  CONV_BUF_TYPE *dst = conv_params->dst;
-  int dst_stride = conv_params->dst_stride;
-  const int horiz_offset = filter_params_x->taps / 2 - 1;
   const int bd = 8;
   const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS;
   const int16_t round_offset = (1 << (offset_bits - COMPOUND_ROUND1_BITS)) +
                                (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1));
   const int16x8_t round_offset_vec = vdupq_n_s16(round_offset);
+
+  const int do_average = conv_params->do_average;
+  const int use_dist_wtd_comp_avg = conv_params->use_dist_wtd_comp_avg;
   const uint16_t fwd_offset = conv_params->fwd_offset;
   const uint16_t bck_offset = conv_params->bck_offset;
-  const int use_dist_wtd_comp_avg = conv_params->use_dist_wtd_comp_avg;
 
-  // horizontal filter
+  // Horizontal filter.
   const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel(
       filter_params_x, subpel_x_qn & SUBPEL_MASK);
-
-  const uint8_t *src_ptr = src - horiz_offset;
-
   // Filter values are even, so downshift by 1 to reduce intermediate precision
   // requirements.
   const int16x8_t x_filter = vshrq_n_s16(vld1q_s16(x_filter_ptr), 1);
 
+  const int horiz_offset = filter_params_x->taps / 2 - 1;
+  const uint8_t *src_ptr = src - horiz_offset;
+  CONV_BUF_TYPE *dst_ptr = conv_params->dst;
+  uint8_t *dst8_ptr = dst8;
+  int dst_stride = conv_params->dst_stride;
   const uint8_t *s;
   uint8_t *d_u8;
-  uint8_t *dst_u8_ptr;
-  CONV_BUF_TYPE *d, *dst_ptr;
-  int width, height;
+  CONV_BUF_TYPE *d;
+  int width;
+  int height = h;
+
   uint8x8_t t0;
 #if defined(__aarch64__)
   uint8x8_t t1, t2, t3, t4, t5, t6, t7;
 #endif  // defined(__aarch64__)
-  s = src_ptr;
-  dst_ptr = dst;
-  dst_u8_ptr = dst8;
-  width = w;
-  height = h;
 
   if ((w == 4) || (h == 4)) {
-    int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, d0;
-    int16x8_t tt0;
-    uint16x4_t res4;
+    int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, s8;
+    uint16x4_t d0, dd0;
+    uint8x8_t d01;
 #if defined(__aarch64__)
-    int16x4_t s8, s9, s10, d1, d2, d3;
-    int16x8_t tt1, tt2, tt3, t01, t23;
-    uint16x4_t res5, res6, res7;
-    int16x8_t u0, u1;
-#else   // !defined(__aarch64__)
-    int16x4_t temp_0;
+    int16x4_t s9, s10;
+    uint16x4_t d1, d2, d3, dd1, dd2, dd3;
+    uint8x8_t d23;
 #endif  // defined(__aarch64__)
-    // This shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use non-rounding
-    // shifts - which are generally faster than rounding shifts on modern CPUs.
-    // The outermost -1 is needed because we halved the filter values.
-    const int16x4_t horiz_const = vdup_n_s16(1 << ((ROUND0_BITS - 1) - 1));
 
     do {
-      s = src_ptr;
       d = dst_ptr;
-      d_u8 = dst_u8_ptr;
+      d_u8 = dst8_ptr;
       width = w;
-      __builtin_prefetch(s + 0 * src_stride);
-#if defined(__aarch64__)
-      __builtin_prefetch(s + 1 * src_stride);
-      __builtin_prefetch(s + 2 * src_stride);
-      __builtin_prefetch(s + 3 * src_stride);
 
-      load_u8_8x4(s, src_stride, &t0, &t1, &t2, &t3);
+      __builtin_prefetch(src_ptr + 0 * src_stride);
+#if defined(__aarch64__)
+      __builtin_prefetch(src_ptr + 1 * src_stride);
+      __builtin_prefetch(src_ptr + 2 * src_stride);
+      __builtin_prefetch(src_ptr + 3 * src_stride);
+
+      load_u8_8x4(src_ptr, src_stride, &t0, &t1, &t2, &t3);
       transpose_u8_8x4(&t0, &t1, &t2, &t3);
-      tt0 = vreinterpretq_s16_u16(vmovl_u8(t0));
-      tt1 = vreinterpretq_s16_u16(vmovl_u8(t1));
-      tt2 = vreinterpretq_s16_u16(vmovl_u8(t2));
-      tt3 = vreinterpretq_s16_u16(vmovl_u8(t3));
-      s0 = vget_low_s16(tt0);
-      s1 = vget_low_s16(tt1);
-      s2 = vget_low_s16(tt2);
-      s3 = vget_low_s16(tt3);
-      s4 = vget_high_s16(tt0);
-      s5 = vget_high_s16(tt1);
-      s6 = vget_high_s16(tt2);
+
+      s0 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t0)));
+      s1 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t1)));
+      s2 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t2)));
+      s3 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t3)));
+      s4 = vget_high_s16(vreinterpretq_s16_u16(vmovl_u8(t0)));
+      s5 = vget_high_s16(vreinterpretq_s16_u16(vmovl_u8(t1)));
+      s6 = vget_high_s16(vreinterpretq_s16_u16(vmovl_u8(t2)));
+
       __builtin_prefetch(d + 0 * dst_stride);
       __builtin_prefetch(d + 1 * dst_stride);
       __builtin_prefetch(d + 2 * dst_stride);
       __builtin_prefetch(d + 3 * dst_stride);
-      s += 7;
+
+      s = src_ptr + 7;
+
       do {
         load_unaligned_u8_4x4(s, src_stride, &t0, &t1);
-
         transpose_u8_4x4(&t0, &t1);
-        u0 = vreinterpretq_s16_u16(vmovl_u8(t0));
-        u1 = vreinterpretq_s16_u16(vmovl_u8(t1));
 
-        s7 = vget_low_s16(u0);
-        s8 = vget_low_s16(u1);
-        s9 = vget_high_s16(u0);
-        s10 = vget_high_s16(u1);
+        s7 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t0)));
+        s8 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t1)));
+        s9 = vget_high_s16(vreinterpretq_s16_u16(vmovl_u8(t0)));
+        s10 = vget_high_s16(vreinterpretq_s16_u16(vmovl_u8(t1)));
 
-        d0 = convolve8_x_4x4_s16(s0, s1, s2, s3, s4, s5, s6, s7, x_filter,
-                                 horiz_const);
-        d1 = convolve8_x_4x4_s16(s1, s2, s3, s4, s5, s6, s7, s8, x_filter,
-                                 horiz_const);
-        d2 = convolve8_x_4x4_s16(s2, s3, s4, s5, s6, s7, s8, s9, x_filter,
-                                 horiz_const);
-        d3 = convolve8_x_4x4_s16(s3, s4, s5, s6, s7, s8, s9, s10, x_filter,
-                                 horiz_const);
+        d0 = convolve8_4_x(s0, s1, s2, s3, s4, s5, s6, s7, x_filter,
+                           vget_low_s16(round_offset_vec));
+        d1 = convolve8_4_x(s1, s2, s3, s4, s5, s6, s7, s8, x_filter,
+                           vget_low_s16(round_offset_vec));
+        d2 = convolve8_4_x(s2, s3, s4, s5, s6, s7, s8, s9, x_filter,
+                           vget_low_s16(round_offset_vec));
+        d3 = convolve8_4_x(s3, s4, s5, s6, s7, s8, s9, s10, x_filter,
+                           vget_low_s16(round_offset_vec));
 
-        t01 = vcombine_s16(d0, d1);
-        t23 = vcombine_s16(d2, d3);
+        transpose_u16_4x4d(&d0, &d1, &d2, &d3);
 
-        // We halved the convolution filter values so -1 from the right shift.
-        t01 = vshrq_n_s16(t01, ROUND0_BITS - 1);
-        t23 = vshrq_n_s16(t23, ROUND0_BITS - 1);
-
-        t01 = vaddq_s16(t01, round_offset_vec);
-        t23 = vaddq_s16(t23, round_offset_vec);
-
-        d0 = vget_low_s16(t01);
-        d1 = vget_high_s16(t01);
-        d2 = vget_low_s16(t23);
-        d3 = vget_high_s16(t23);
-
-        transpose_s16_4x4d(&d0, &d1, &d2, &d3);
-
-        if (conv_params->do_average) {
+        if (do_average) {
           __builtin_prefetch(d + 0 * dst_stride);
           __builtin_prefetch(d + 1 * dst_stride);
           __builtin_prefetch(d + 2 * dst_stride);
@@ -1576,21 +1552,18 @@
           __builtin_prefetch(d_u8 + 2 * dst8_stride);
           __builtin_prefetch(d_u8 + 3 * dst8_stride);
 
-          load_u16_4x4(d, dst_stride, &res4, &res5, &res6, &res7);
+          load_u16_4x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3);
 
-          compute_avg_4x4(res4, res5, res6, res7, vreinterpret_u16_s16(d0),
-                          vreinterpret_u16_s16(d1), vreinterpret_u16_s16(d2),
-                          vreinterpret_u16_s16(d3), fwd_offset, bck_offset,
-                          round_offset_vec, use_dist_wtd_comp_avg, &t0, &t1);
+          compute_avg_4x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, fwd_offset,
+                          bck_offset, round_offset_vec, use_dist_wtd_comp_avg,
+                          &d01, &d23);
 
-          store_u8_4x1(d_u8 + 0 * dst8_stride, t0, 0);
-          store_u8_4x1(d_u8 + 1 * dst8_stride, t0, 1);
-          store_u8_4x1(d_u8 + 2 * dst8_stride, t1, 0);
-          store_u8_4x1(d_u8 + 3 * dst8_stride, t1, 1);
+          store_u8_4x1(d_u8 + 0 * dst8_stride, d01, 0);
+          store_u8_4x1(d_u8 + 1 * dst8_stride, d01, 1);
+          store_u8_4x1(d_u8 + 2 * dst8_stride, d23, 0);
+          store_u8_4x1(d_u8 + 3 * dst8_stride, d23, 1);
         } else {
-          store_u16_4x4(d, dst_stride, vreinterpret_u16_s16(d0),
-                        vreinterpret_u16_s16(d1), vreinterpret_u16_s16(d2),
-                        vreinterpret_u16_s16(d3));
+          store_u16_4x4(d, dst_stride, d0, d1, d2, d3);
         }
 
         s0 = s4;
@@ -1604,84 +1577,77 @@
         d += 4;
         d_u8 += 4;
         width -= 4;
-      } while (width > 0);
+      } while (width != 0);
       src_ptr += 4 * src_stride;
       dst_ptr += 4 * dst_stride;
-      dst_u8_ptr += 4 * dst8_stride;
+      dst8_ptr += 4 * dst8_stride;
       height -= 4;
 #else   // !defined(__aarch64__)
-      t0 = vld1_u8(s);                            // a0 a1 a2 a3 a4 a5 a6 a7
-      tt0 = vreinterpretq_s16_u16(vmovl_u8(t0));  // a0 a1 a2 a3 a4 a5 a6 a7
-      s0 = vget_low_s16(tt0);                     // a0 a1 a2 a3
-      s4 = vget_high_s16(tt0);                    // a4 a5 a6 a7
+      t0 = vld1_u8(src_ptr);  // a0 a1 a2 a3 a4 a5 a6 a7
+      s0 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t0)));
+      s4 = vget_high_s16(vreinterpretq_s16_u16(vmovl_u8(t0)));
+
       __builtin_prefetch(d);
 
-      s += 8;
+      s = src_ptr + 8;
+
       do {
         t0 = vld1_u8(s);  // a8 a9 a10 a11
+        s8 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t0)));
 
-        // a8 a9 a10 a11
-        s7 = vget_low_s16(vreinterpretq_s16_u16(vmovl_u8(t0)));
-        temp_0 = s7;
         s1 = vext_s16(s0, s4, 1);  // a1 a2 a3 a4
         s2 = vext_s16(s0, s4, 2);  // a2 a3 a4 a5
         s3 = vext_s16(s0, s4, 3);  // a3 a4 a5 a6
-        s5 = vext_s16(s4, s7, 1);  // a5 a6 a7 a8
-        s6 = vext_s16(s4, s7, 2);  // a6 a7 a8 a9
-        s7 = vext_s16(s4, s7, 3);  // a7 a8 a9 a10
+        s5 = vext_s16(s4, s8, 1);  // a5 a6 a7 a8
+        s6 = vext_s16(s4, s8, 2);  // a6 a7 a8 a9
+        s7 = vext_s16(s4, s8, 3);  // a7 a8 a9 a10
 
-        d0 = convolve8_x_4x4_s16(s0, s1, s2, s3, s4, s5, s6, s7, x_filter,
-                                 horiz_const);
-        // We halved the convolution filter values so -1 from the right shift.
-        d0 = vshr_n_s16(d0, ROUND0_BITS - 1);
-        d0 = vadd_s16(d0, vget_low_s16(round_offset_vec));
-        s0 = s4;
-        s4 = temp_0;
-        if (conv_params->do_average) {
+        d0 = convolve8_4_x(s0, s1, s2, s3, s4, s5, s6, s7, x_filter,
+                           vget_low_s16(round_offset_vec));
+
+        if (do_average) {
           __builtin_prefetch(d);
           __builtin_prefetch(d_u8);
 
-          res4 = vld1_u16(d);
+          dd0 = vld1_u16(d);
 
-          compute_avg_4x1(res4, vreinterpret_u16_s16(d0), fwd_offset,
-                          bck_offset, vget_low_s16(round_offset_vec),
-                          use_dist_wtd_comp_avg, &t0);
+          compute_avg_4x1(dd0, d0, fwd_offset, bck_offset,
+                          vget_low_s16(round_offset_vec), use_dist_wtd_comp_avg,
+                          &d01);
 
-          store_u8_4x1(d_u8, t0, 0);
+          store_u8_4x1(d_u8, d01, 0);
         } else {
-          vst1_u16(d, vreinterpret_u16_s16(d0));
+          vst1_u16(d, d0);
         }
 
+        s0 = s4;
+        s4 = s8;
         s += 4;
         d += 4;
         d_u8 += 4;
         width -= 4;
-      } while (width > 0);
+      } while (width != 0);
       src_ptr += src_stride;
       dst_ptr += dst_stride;
-      dst_u8_ptr += dst8_stride;
+      dst8_ptr += dst8_stride;
       height--;
 #endif  // defined(__aarch64__)
-    } while (height > 0);
+    } while (height != 0);
   } else {
-    CONV_BUF_TYPE *d_tmp;
-    uint8_t *d_u8_tmp;
-    int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
-    int16x8_t res0;
-    uint16x8_t res8;
-    // This shim of 1 << ((ROUND0_BITS - 1) - 1) enables us to use non-rounding
-    // shifts - which are generally faster than rounding shifts on modern CPUs.
-    // The outermost -1 is needed because we halved the filter values.
-    const int16x8_t horiz_const = vdupq_n_s16(1 << ((ROUND0_BITS - 1) - 1));
+    int16x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8;
+    uint16x8_t d0, dd0;
+    uint8x8_t d0_u8;
 
-    d = dst_ptr = dst;
-    d_u8 = dst_u8_ptr = dst8;
     do {
+      d = dst_ptr;
+      d_u8 = dst8_ptr;
+      width = w;
+
 #if defined(__aarch64__)
-      int16x8_t s11, s12, s13, s14;
-      int16x8_t s8, s9, s10;
-      int16x8_t res1, res2, res3, res4, res5, res6, res7;
-      uint16x8_t res9, res10, res11;
+      int16x8_t s9, s10, s11, s12, s13, s14;
+      uint16x8_t d1, d2, d3, d4, d5, d6, d7, dd1, dd2, dd3, dd4, dd5, dd6, dd7;
+      uint8x8_t d1_u8, d2_u8, d3_u8, d4_u8, d5_u8, d6_u8, d7_u8;
+
       __builtin_prefetch(src_ptr + 0 * src_stride);
       __builtin_prefetch(src_ptr + 1 * src_stride);
       __builtin_prefetch(src_ptr + 2 * src_stride);
@@ -1690,8 +1656,10 @@
       __builtin_prefetch(src_ptr + 5 * src_stride);
       __builtin_prefetch(src_ptr + 6 * src_stride);
       __builtin_prefetch(src_ptr + 7 * src_stride);
+
       load_u8_8x8(src_ptr, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
       transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+
       s0 = vreinterpretq_s16_u16(vmovl_u8(t0));
       s1 = vreinterpretq_s16_u16(vmovl_u8(t1));
       s2 = vreinterpretq_s16_u16(vmovl_u8(t2));
@@ -1700,11 +1668,6 @@
       s5 = vreinterpretq_s16_u16(vmovl_u8(t5));
       s6 = vreinterpretq_s16_u16(vmovl_u8(t6));
 
-      width = w;
-      s = src_ptr + 7;
-      d = dst_ptr;
-      d_u8_tmp = dst_u8_ptr;
-
       __builtin_prefetch(dst_ptr + 0 * dst_stride);
       __builtin_prefetch(dst_ptr + 1 * dst_stride);
       __builtin_prefetch(dst_ptr + 2 * dst_stride);
@@ -1714,12 +1677,12 @@
       __builtin_prefetch(dst_ptr + 6 * dst_stride);
       __builtin_prefetch(dst_ptr + 7 * dst_stride);
 
-      do {
-        d_u8 = d_u8_tmp;
-        d_tmp = d;
+      s = src_ptr + 7;
 
+      do {
         load_u8_8x8(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
         transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+
         s7 = vreinterpretq_s16_u16(vmovl_u8(t0));
         s8 = vreinterpretq_s16_u16(vmovl_u8(t1));
         s9 = vreinterpretq_s16_u16(vmovl_u8(t2));
@@ -1729,77 +1692,44 @@
         s13 = vreinterpretq_s16_u16(vmovl_u8(t6));
         s14 = vreinterpretq_s16_u16(vmovl_u8(t7));
 
-        res0 = convolve8_x_8x8_s16(s0, s1, s2, s3, s4, s5, s6, s7, x_filter,
-                                   horiz_const);
-        res1 = convolve8_x_8x8_s16(s1, s2, s3, s4, s5, s6, s7, s8, x_filter,
-                                   horiz_const);
-        res2 = convolve8_x_8x8_s16(s2, s3, s4, s5, s6, s7, s8, s9, x_filter,
-                                   horiz_const);
-        res3 = convolve8_x_8x8_s16(s3, s4, s5, s6, s7, s8, s9, s10, x_filter,
-                                   horiz_const);
-        res4 = convolve8_x_8x8_s16(s4, s5, s6, s7, s8, s9, s10, s11, x_filter,
-                                   horiz_const);
-        res5 = convolve8_x_8x8_s16(s5, s6, s7, s8, s9, s10, s11, s12, x_filter,
-                                   horiz_const);
-        res6 = convolve8_x_8x8_s16(s6, s7, s8, s9, s10, s11, s12, s13, x_filter,
-                                   horiz_const);
-        res7 = convolve8_x_8x8_s16(s7, s8, s9, s10, s11, s12, s13, s14,
-                                   x_filter, horiz_const);
+        d0 = convolve8_8_x(s0, s1, s2, s3, s4, s5, s6, s7, x_filter,
+                           round_offset_vec);
+        d1 = convolve8_8_x(s1, s2, s3, s4, s5, s6, s7, s8, x_filter,
+                           round_offset_vec);
+        d2 = convolve8_8_x(s2, s3, s4, s5, s6, s7, s8, s9, x_filter,
+                           round_offset_vec);
+        d3 = convolve8_8_x(s3, s4, s5, s6, s7, s8, s9, s10, x_filter,
+                           round_offset_vec);
+        d4 = convolve8_8_x(s4, s5, s6, s7, s8, s9, s10, s11, x_filter,
+                           round_offset_vec);
+        d5 = convolve8_8_x(s5, s6, s7, s8, s9, s10, s11, s12, x_filter,
+                           round_offset_vec);
+        d6 = convolve8_8_x(s6, s7, s8, s9, s10, s11, s12, s13, x_filter,
+                           round_offset_vec);
+        d7 = convolve8_8_x(s7, s8, s9, s10, s11, s12, s13, s14, x_filter,
+                           round_offset_vec);
 
-        // We halved the convolution filter values so -1 from the right shift.
-        res0 = vshrq_n_s16(res0, ROUND0_BITS - 1);
-        res1 = vshrq_n_s16(res1, ROUND0_BITS - 1);
-        res2 = vshrq_n_s16(res2, ROUND0_BITS - 1);
-        res3 = vshrq_n_s16(res3, ROUND0_BITS - 1);
-        res4 = vshrq_n_s16(res4, ROUND0_BITS - 1);
-        res5 = vshrq_n_s16(res5, ROUND0_BITS - 1);
-        res6 = vshrq_n_s16(res6, ROUND0_BITS - 1);
-        res7 = vshrq_n_s16(res7, ROUND0_BITS - 1);
-
-        res0 = vaddq_s16(res0, round_offset_vec);
-        res1 = vaddq_s16(res1, round_offset_vec);
-        res2 = vaddq_s16(res2, round_offset_vec);
-        res3 = vaddq_s16(res3, round_offset_vec);
-        res4 = vaddq_s16(res4, round_offset_vec);
-        res5 = vaddq_s16(res5, round_offset_vec);
-        res6 = vaddq_s16(res6, round_offset_vec);
-        res7 = vaddq_s16(res7, round_offset_vec);
-
-        transpose_s16_8x8(&res0, &res1, &res2, &res3, &res4, &res5, &res6,
-                          &res7);
+        transpose_u16_8x8(&d0, &d1, &d2, &d3, &d4, &d5, &d6, &d7);
 
         if (conv_params->do_average) {
-          load_u16_8x4(d_tmp, dst_stride, &res8, &res9, &res10, &res11);
-          d_tmp += 4 * dst_stride;
+          load_u16_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3);
 
-          compute_avg_8x4(
-              res8, res9, res10, res11, vreinterpretq_u16_s16(res0),
-              vreinterpretq_u16_s16(res1), vreinterpretq_u16_s16(res2),
-              vreinterpretq_u16_s16(res3), fwd_offset, bck_offset,
-              round_offset_vec, use_dist_wtd_comp_avg, &t0, &t1, &t2, &t3);
+          compute_avg_8x4(dd0, dd1, dd2, dd3, d0, d1, d2, d3, fwd_offset,
+                          bck_offset, round_offset_vec, use_dist_wtd_comp_avg,
+                          &d0_u8, &d1_u8, &d2_u8, &d3_u8);
 
-          store_u8_8x4(d_u8, dst8_stride, t0, t1, t2, t3);
-          d_u8 += 4 * dst8_stride;
+          store_u8_8x4(d_u8, dst8_stride, d0_u8, d1_u8, d2_u8, d3_u8);
 
-          load_u16_8x4(d_tmp, dst_stride, &res8, &res9, &res10, &res11);
-          d_tmp += 4 * dst_stride;
+          load_u16_8x4(d + 4 * dst_stride, dst_stride, &dd4, &dd5, &dd6, &dd7);
 
-          compute_avg_8x4(
-              res8, res9, res10, res11, vreinterpretq_u16_s16(res4),
-              vreinterpretq_u16_s16(res5), vreinterpretq_u16_s16(res6),
-              vreinterpretq_u16_s16(res7), fwd_offset, bck_offset,
-              round_offset_vec, use_dist_wtd_comp_avg, &t0, &t1, &t2, &t3);
+          compute_avg_8x4(dd4, dd5, dd6, dd7, d4, d5, d6, d7, fwd_offset,
+                          bck_offset, round_offset_vec, use_dist_wtd_comp_avg,
+                          &d4_u8, &d5_u8, &d6_u8, &d7_u8);
 
-          store_u8_8x4(d_u8, dst8_stride, t0, t1, t2, t3);
-          d_u8 += 4 * dst8_stride;
+          store_u8_8x4(d_u8 + 4 * dst8_stride, dst8_stride, d4_u8, d5_u8, d6_u8,
+                       d7_u8);
         } else {
-          store_u16_8x8(
-              d_tmp, dst_stride, vreinterpretq_u16_s16(res0),
-              vreinterpretq_u16_s16(res1), vreinterpretq_u16_s16(res2),
-              vreinterpretq_u16_s16(res3), vreinterpretq_u16_s16(res4),
-              vreinterpretq_u16_s16(res5), vreinterpretq_u16_s16(res6),
-              vreinterpretq_u16_s16(res7));
-          d_tmp += 8 * dst_stride;
+          store_u16_8x8(d, dst_stride, d0, d1, d2, d3, d4, d5, d6, d7);
         }
 
         s0 = s8;
@@ -1811,75 +1741,61 @@
         s6 = s14;
         s += 8;
         d += 8;
-        d_u8_tmp += 8;
+        d_u8 += 8;
         width -= 8;
-      } while (width > 0);
+      } while (width != 0);
       src_ptr += 8 * src_stride;
       dst_ptr += 8 * dst_stride;
-      dst_u8_ptr += 8 * dst8_stride;
+      dst8_ptr += 8 * dst8_stride;
       height -= 8;
 #else   // !defined(__aarch64__)
-      int16x8_t temp_0;
       __builtin_prefetch(src_ptr);
+
       t0 = vld1_u8(src_ptr);
       s0 = vreinterpretq_s16_u16(vmovl_u8(t0));  // a0 a1 a2 a3 a4 a5 a6 a7
 
-      width = w;
-      s = src_ptr + 8;
-      d = dst_ptr;
-      d_u8_tmp = dst_u8_ptr;
-
       __builtin_prefetch(dst_ptr);
 
+      s = src_ptr + 8;
+
       do {
-        d_u8 = d_u8_tmp;
-        d_tmp = d;
-
         t0 = vld1_u8(s);  // a8 a9 a10 a11 a12 a13 a14 a15
-        s7 = vreinterpretq_s16_u16(vmovl_u8(t0));
-        temp_0 = s0;
-        s0 = s7;
+        s8 = vreinterpretq_s16_u16(vmovl_u8(t0));
 
-        s1 = vextq_s16(temp_0, s7, 1);  // a1 a2 a3 a4 a5 a6 a7 a8
-        s2 = vextq_s16(temp_0, s7, 2);  // a2 a3 a4 a5 a6 a7 a8 a9
-        s3 = vextq_s16(temp_0, s7, 3);  // a3 a4 a5 a6 a7 a8 a9 a10
-        s4 = vextq_s16(temp_0, s7, 4);  // a4 a5 a6 a7 a8 a9 a10 a11
-        s5 = vextq_s16(temp_0, s7, 5);  // a5 a6 a7 a8 a9 a10 a11 a12
-        s6 = vextq_s16(temp_0, s7, 6);  // a6 a7 a8 a9 a10 a11 a12 a13
-        s7 = vextq_s16(temp_0, s7, 7);  // a7 a8 a9 a10 a11 a12 a13 a14
+        s1 = vextq_s16(s0, s8, 1);  // a1 a2 a3 a4 a5 a6 a7 a8
+        s2 = vextq_s16(s0, s8, 2);  // a2 a3 a4 a5 a6 a7 a8 a9
+        s3 = vextq_s16(s0, s8, 3);  // a3 a4 a5 a6 a7 a8 a9 a10
+        s4 = vextq_s16(s0, s8, 4);  // a4 a5 a6 a7 a8 a9 a10 a11
+        s5 = vextq_s16(s0, s8, 5);  // a5 a6 a7 a8 a9 a10 a11 a12
+        s6 = vextq_s16(s0, s8, 6);  // a6 a7 a8 a9 a10 a11 a12 a13
+        s7 = vextq_s16(s0, s8, 7);  // a7 a8 a9 a10 a11 a12 a13 a14
 
-        res0 = convolve8_x_8x8_s16(temp_0, s1, s2, s3, s4, s5, s6, s7, x_filter,
-                                   horiz_const);
-        // We halved the convolution filter values so -1 from the right shift.
-        res0 = vshrq_n_s16(res0, ROUND0_BITS - 1);
-        res0 = vaddq_s16(res0, round_offset_vec);
+        d0 = convolve8_8_x(s0, s1, s2, s3, s4, s5, s6, s7, x_filter,
+                           round_offset_vec);
 
-        if (conv_params->do_average) {
-          res8 = vld1q_u16(d_tmp);
-          d_tmp += dst_stride;
+        if (do_average) {
+          dd0 = vld1q_u16(d);
 
-          compute_avg_8x1(res8, vreinterpretq_u16_s16(res0), fwd_offset,
-                          bck_offset, round_offset_vec, use_dist_wtd_comp_avg,
-                          &t0);
+          compute_avg_8x1(dd0, d0, fwd_offset, bck_offset, round_offset_vec,
+                          use_dist_wtd_comp_avg, &d0_u8);
 
-          vst1_u8(d_u8, t0);
-          d_u8 += dst8_stride;
+          vst1_u8(d_u8, d0_u8);
         } else {
-          vst1q_u16(d_tmp, vreinterpretq_u16_s16(res0));
-          d_tmp += dst_stride;
+          vst1q_u16(d, d0);
         }
 
+        s0 = s8;
         s += 8;
         d += 8;
+        d_u8 += 8;
         width -= 8;
-        d_u8_tmp += 8;
-      } while (width > 0);
+      } while (width != 0);
       src_ptr += src_stride;
       dst_ptr += dst_stride;
-      dst_u8_ptr += dst8_stride;
+      dst8_ptr += dst8_stride;
       height--;
 #endif  // defined(__aarch64__)
-    } while (height > 0);
+    } while (height != 0);
   }
 }