[NEON] Refactor of highbd convolve X & Y functions

Moved highbd_convolve_y_sr_8tap_neon() and highbd_convolve_x_sr_8tap_neon()
out into separate functions.
Added an offset parameter to the highbd_convolve* helper functions; it
will be used by the 2D convolution functions.

Change-Id: I9fdd51bd3d4a4337839752bd0537ace0007dcaf9
diff --git a/av1/common/arm/highbd_convolve_neon.c b/av1/common/arm/highbd_convolve_neon.c
index fcd059b..26b7df8 100644
--- a/av1/common/arm/highbd_convolve_neon.c
+++ b/av1/common/arm/highbd_convolve_neon.c
@@ -23,10 +23,11 @@
 #include "av1/common/arm/highbd_convolve_neon.h"
 
 static INLINE void highbd_convolve_y_sr_6tap_neon(
-    const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr,
-    const int dst_stride, int w, int h, const int16_t *y_filter_ptr, int bd) {
+    const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride,
+    int w, int h, const int16_t *y_filter_ptr, const int bd) {
   const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
   const int16x8_t y_filter_0_7 = vld1q_s16(y_filter_ptr);
+  const int32x4_t zero_s32 = vdupq_n_s32(0);
 
   if (w <= 4) {
     uint16x4_t t0, t1, t2, t3, t4, t5, t6, t7, t8;
@@ -52,10 +53,14 @@
       s7 = vreinterpret_s16_u16(t7);
       s8 = vreinterpret_s16_u16(t8);
 
-      d0 = highbd_convolve6_4_s32_s16(s0, s1, s2, s3, s4, s5, y_filter_0_7);
-      d1 = highbd_convolve6_4_s32_s16(s1, s2, s3, s4, s5, s6, y_filter_0_7);
-      d2 = highbd_convolve6_4_s32_s16(s2, s3, s4, s5, s6, s7, y_filter_0_7);
-      d3 = highbd_convolve6_4_s32_s16(s3, s4, s5, s6, s7, s8, y_filter_0_7);
+      d0 = highbd_convolve6_4_s32_s16(s0, s1, s2, s3, s4, s5, y_filter_0_7,
+                                      zero_s32);
+      d1 = highbd_convolve6_4_s32_s16(s1, s2, s3, s4, s5, s6, y_filter_0_7,
+                                      zero_s32);
+      d2 = highbd_convolve6_4_s32_s16(s2, s3, s4, s5, s6, s7, y_filter_0_7,
+                                      zero_s32);
+      d3 = highbd_convolve6_4_s32_s16(s3, s4, s5, s6, s7, s8, y_filter_0_7,
+                                      zero_s32);
 
       d01 = vcombine_u16(d0, d1);
       d23 = vcombine_u16(d2, d3);
@@ -114,10 +119,14 @@
         s7 = vreinterpretq_s16_u16(t7);
         s8 = vreinterpretq_s16_u16(t8);
 
-        d0 = highbd_convolve6_8_s32_s16(s0, s1, s2, s3, s4, s5, y_filter_0_7);
-        d1 = highbd_convolve6_8_s32_s16(s1, s2, s3, s4, s5, s6, y_filter_0_7);
-        d2 = highbd_convolve6_8_s32_s16(s2, s3, s4, s5, s6, s7, y_filter_0_7);
-        d3 = highbd_convolve6_8_s32_s16(s3, s4, s5, s6, s7, s8, y_filter_0_7);
+        d0 = highbd_convolve6_8_s32_s16(s0, s1, s2, s3, s4, s5, y_filter_0_7,
+                                        zero_s32);
+        d1 = highbd_convolve6_8_s32_s16(s1, s2, s3, s4, s5, s6, y_filter_0_7,
+                                        zero_s32);
+        d2 = highbd_convolve6_8_s32_s16(s2, s3, s4, s5, s6, s7, y_filter_0_7,
+                                        zero_s32);
+        d3 = highbd_convolve6_8_s32_s16(s3, s4, s5, s6, s7, s8, y_filter_0_7,
+                                        zero_s32);
 
         d0 = vminq_u16(d0, max);
         d1 = vminq_u16(d1, max);
@@ -147,12 +156,155 @@
   }
 }
 
+static INLINE void highbd_convolve_y_sr_8tap_neon(
+    const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride,
+    int w, int h, const int16_t *y_filter_ptr, int bd) {
+  const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+  const int16x8_t y_filter = vld1q_s16(y_filter_ptr);
+  const int32x4_t zero_s32 = vdupq_n_s32(0);
+
+  if (w <= 4) {
+    uint16x4_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10;
+    int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10;
+    uint16x4_t d0, d1, d2, d3;
+    uint16x8_t d01, d23;
+
+    const uint16_t *s = src_ptr;
+    uint16_t *d = dst_ptr;
+
+    load_u16_4x7(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6);
+    s0 = vreinterpret_s16_u16(t0);
+    s1 = vreinterpret_s16_u16(t1);
+    s2 = vreinterpret_s16_u16(t2);
+    s3 = vreinterpret_s16_u16(t3);
+    s4 = vreinterpret_s16_u16(t4);
+    s5 = vreinterpret_s16_u16(t5);
+    s6 = vreinterpret_s16_u16(t6);
+
+    s += 7 * src_stride;
+
+    do {
+      load_u16_4x4(s, src_stride, &t7, &t8, &t9, &t10);
+      s7 = vreinterpret_s16_u16(t7);
+      s8 = vreinterpret_s16_u16(t8);
+      s9 = vreinterpret_s16_u16(t9);
+      s10 = vreinterpret_s16_u16(t10);
+
+      d0 = highbd_convolve8_4_s32_s16(s0, s1, s2, s3, s4, s5, s6, s7, y_filter,
+                                      zero_s32);
+      d1 = highbd_convolve8_4_s32_s16(s1, s2, s3, s4, s5, s6, s7, s8, y_filter,
+                                      zero_s32);
+      d2 = highbd_convolve8_4_s32_s16(s2, s3, s4, s5, s6, s7, s8, s9, y_filter,
+                                      zero_s32);
+      d3 = highbd_convolve8_4_s32_s16(s3, s4, s5, s6, s7, s8, s9, s10, y_filter,
+                                      zero_s32);
+
+      d01 = vcombine_u16(d0, d1);
+      d23 = vcombine_u16(d2, d3);
+
+      d01 = vminq_u16(d01, max);
+      d23 = vminq_u16(d23, max);
+
+      if (w == 2) {
+        store_u16q_2x1(d + 0 * dst_stride, d01, 0);
+        store_u16q_2x1(d + 1 * dst_stride, d01, 2);
+        if (h != 2) {
+          store_u16q_2x1(d + 2 * dst_stride, d23, 0);
+          store_u16q_2x1(d + 3 * dst_stride, d23, 2);
+        }
+      } else {
+        vst1_u16(d + 0 * dst_stride, vget_low_u16(d01));
+        vst1_u16(d + 1 * dst_stride, vget_high_u16(d01));
+        if (h != 2) {
+          vst1_u16(d + 2 * dst_stride, vget_low_u16(d23));
+          vst1_u16(d + 3 * dst_stride, vget_high_u16(d23));
+        }
+      }
+
+      s0 = s4;
+      s1 = s5;
+      s2 = s6;
+      s3 = s7;
+      s4 = s8;
+      s5 = s9;
+      s6 = s10;
+      s += 4 * src_stride;
+      d += 4 * dst_stride;
+      h -= 4;
+    } while (h > 0);
+  } else {
+    int height;
+    uint16x8_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10;
+    int16x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10;
+    uint16x8_t d0, d1, d2, d3;
+    do {
+      const uint16_t *s = src_ptr;
+      uint16_t *d = dst_ptr;
+
+      load_u16_8x7(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6);
+      s0 = vreinterpretq_s16_u16(t0);
+      s1 = vreinterpretq_s16_u16(t1);
+      s2 = vreinterpretq_s16_u16(t2);
+      s3 = vreinterpretq_s16_u16(t3);
+      s4 = vreinterpretq_s16_u16(t4);
+      s5 = vreinterpretq_s16_u16(t5);
+      s6 = vreinterpretq_s16_u16(t6);
+
+      s += 7 * src_stride;
+      height = h;
+
+      do {
+        load_u16_8x4(s, src_stride, &t7, &t8, &t9, &t10);
+        s7 = vreinterpretq_s16_u16(t7);
+        s8 = vreinterpretq_s16_u16(t8);
+        s9 = vreinterpretq_s16_u16(t9);
+        s10 = vreinterpretq_s16_u16(t10);
+
+        d0 = highbd_convolve8_8_s32_s16(s0, s1, s2, s3, s4, s5, s6, s7,
+                                        y_filter, zero_s32);
+        d1 = highbd_convolve8_8_s32_s16(s1, s2, s3, s4, s5, s6, s7, s8,
+                                        y_filter, zero_s32);
+        d2 = highbd_convolve8_8_s32_s16(s2, s3, s4, s5, s6, s7, s8, s9,
+                                        y_filter, zero_s32);
+        d3 = highbd_convolve8_8_s32_s16(s3, s4, s5, s6, s7, s8, s9, s10,
+                                        y_filter, zero_s32);
+
+        d0 = vminq_u16(d0, max);
+        d1 = vminq_u16(d1, max);
+        d2 = vminq_u16(d2, max);
+        d3 = vminq_u16(d3, max);
+
+        if (h == 2) {
+          store_u16_8x2(d, dst_stride, d0, d1);
+        } else {
+          store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+        }
+
+        s0 = s4;
+        s1 = s5;
+        s2 = s6;
+        s3 = s7;
+        s4 = s8;
+        s5 = s9;
+        s6 = s10;
+        s += 4 * src_stride;
+        d += 4 * dst_stride;
+        height -= 4;
+      } while (height > 0);
+      src_ptr += 8;
+      dst_ptr += 8;
+      w -= 8;
+    } while (w > 0);
+  }
+}
+
 static INLINE void highbd_convolve_y_sr_12tap_neon(
     const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride,
     int w, int h, const int16_t *y_filter_ptr, int bd) {
   const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
   const int16x8_t y_filter_0_7 = vld1q_s16(y_filter_ptr);
   const int16x4_t y_filter_8_11 = vld1_s16(y_filter_ptr + 8);
+  const int32x4_t zero_s32 = vdupq_n_s32(0);
 
   if (w <= 4) {
     uint16x4_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14;
@@ -186,18 +338,18 @@
       s13 = vreinterpret_s16_u16(t13);
       s14 = vreinterpret_s16_u16(t14);
 
-      d0 = highbd_convolve12_y_4x4_s32_s16(s0, s1, s2, s3, s4, s5, s6, s7, s8,
-                                           s9, s10, s11, y_filter_0_7,
-                                           y_filter_8_11);
-      d1 = highbd_convolve12_y_4x4_s32_s16(s1, s2, s3, s4, s5, s6, s7, s8, s9,
-                                           s10, s11, s12, y_filter_0_7,
-                                           y_filter_8_11);
-      d2 = highbd_convolve12_y_4x4_s32_s16(s2, s3, s4, s5, s6, s7, s8, s9, s10,
-                                           s11, s12, s13, y_filter_0_7,
-                                           y_filter_8_11);
-      d3 = highbd_convolve12_y_4x4_s32_s16(s3, s4, s5, s6, s7, s8, s9, s10, s11,
-                                           s12, s13, s14, y_filter_0_7,
-                                           y_filter_8_11);
+      d0 = highbd_convolve12_y_4_s32_s16(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9,
+                                         s10, s11, y_filter_0_7, y_filter_8_11,
+                                         zero_s32);
+      d1 = highbd_convolve12_y_4_s32_s16(s1, s2, s3, s4, s5, s6, s7, s8, s9,
+                                         s10, s11, s12, y_filter_0_7,
+                                         y_filter_8_11, zero_s32);
+      d2 = highbd_convolve12_y_4_s32_s16(s2, s3, s4, s5, s6, s7, s8, s9, s10,
+                                         s11, s12, s13, y_filter_0_7,
+                                         y_filter_8_11, zero_s32);
+      d3 = highbd_convolve12_y_4_s32_s16(s3, s4, s5, s6, s7, s8, s9, s10, s11,
+                                         s12, s13, s14, y_filter_0_7,
+                                         y_filter_8_11, zero_s32);
 
       d01 = vcombine_u16(d0, d1);
       d23 = vcombine_u16(d2, d3);
@@ -269,18 +421,18 @@
         s13 = vreinterpretq_s16_u16(t13);
         s14 = vreinterpretq_s16_u16(t14);
 
-        d0 = highbd_convolve12_y_8x4_s32_s16(s0, s1, s2, s3, s4, s5, s6, s7, s8,
-                                             s9, s10, s11, y_filter_0_7,
-                                             y_filter_8_11);
-        d1 = highbd_convolve12_y_8x4_s32_s16(s1, s2, s3, s4, s5, s6, s7, s8, s9,
-                                             s10, s11, s12, y_filter_0_7,
-                                             y_filter_8_11);
-        d2 = highbd_convolve12_y_8x4_s32_s16(s2, s3, s4, s5, s6, s7, s8, s9,
-                                             s10, s11, s12, s13, y_filter_0_7,
-                                             y_filter_8_11);
-        d3 = highbd_convolve12_y_8x4_s32_s16(s3, s4, s5, s6, s7, s8, s9, s10,
-                                             s11, s12, s13, s14, y_filter_0_7,
-                                             y_filter_8_11);
+        d0 = highbd_convolve12_y_8_s32_s16(s0, s1, s2, s3, s4, s5, s6, s7, s8,
+                                           s9, s10, s11, y_filter_0_7,
+                                           y_filter_8_11, zero_s32);
+        d1 = highbd_convolve12_y_8_s32_s16(s1, s2, s3, s4, s5, s6, s7, s8, s9,
+                                           s10, s11, s12, y_filter_0_7,
+                                           y_filter_8_11, zero_s32);
+        d2 = highbd_convolve12_y_8_s32_s16(s2, s3, s4, s5, s6, s7, s8, s9, s10,
+                                           s11, s12, s13, y_filter_0_7,
+                                           y_filter_8_11, zero_s32);
+        d3 = highbd_convolve12_y_8_s32_s16(s3, s4, s5, s6, s7, s8, s9, s10, s11,
+                                           s12, s13, s14, y_filter_0_7,
+                                           y_filter_8_11, zero_s32);
 
         d0 = vminq_u16(d0, max);
         d1 = vminq_u16(d1, max);
@@ -321,13 +473,11 @@
                                    const InterpFilterParams *filter_params_y,
                                    const int subpel_y_qn, int bd) {
   const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn);
   const int vert_offset = filter_params_y->taps / 2 - 1;
-  const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
-
   const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel(
       filter_params_y, subpel_y_qn & SUBPEL_MASK);
 
   src -= vert_offset * src_stride;
 
   if (y_filter_taps > 8) {
     highbd_convolve_y_sr_12tap_neon(src, src_stride, dst, dst_stride, w, h,
@@ -340,110 +490,95 @@
     return;
   }
 
-  const int16x8_t y_filter = vld1q_s16(y_filter_ptr);
+  highbd_convolve_y_sr_8tap_neon(src, src_stride, dst, dst_stride, w, h,
+                                 y_filter_ptr, bd);
+}
+
+static INLINE void highbd_convolve_x_sr_8tap_neon(
+    const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride,
+    int w, int h, const int16_t *x_filter_ptr, ConvolveParams *conv_params,
+    int bd) {
+  const int16x8_t x_filter = vld1q_s16(x_filter_ptr);
+  const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+  const int32x4_t shift_s32 = vdupq_n_s32(-conv_params->round_0);
+  const int bits = FILTER_BITS - conv_params->round_0;
+  const int16x8_t bits_s16 = vdupq_n_s16(-bits);
+  const int32x4_t zero_s32 = vdupq_n_s32(0);
 
   if (w <= 4) {
-    uint16x4_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10;
-    int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10;
-    uint16x4_t d0, d1, d2, d3;
-    uint16x8_t d01, d23;
+    uint16x8_t t0, t1, t2, t3;
+    int16x8_t s0, s1, s2, s3;
+    uint16x4_t d0, d1;
+    uint16x8_t d01;
 
-    const uint16_t *s = src;
-    uint16_t *d = dst;
-
-    load_u16_4x7(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6);
-    s0 = vreinterpret_s16_u16(t0);
-    s1 = vreinterpret_s16_u16(t1);
-    s2 = vreinterpret_s16_u16(t2);
-    s3 = vreinterpret_s16_u16(t3);
-    s4 = vreinterpret_s16_u16(t4);
-    s5 = vreinterpret_s16_u16(t5);
-    s6 = vreinterpret_s16_u16(t6);
-
-    s += 7 * src_stride;
+    const uint16_t *s = src_ptr;
+    uint16_t *d = dst_ptr;
 
     do {
-      load_u16_4x4(s, src_stride, &t7, &t8, &t9, &t10);
-      s7 = vreinterpret_s16_u16(t7);
-      s8 = vreinterpret_s16_u16(t8);
-      s9 = vreinterpret_s16_u16(t9);
-      s10 = vreinterpret_s16_u16(t10);
-
-      d0 = highbd_convolve8_4_s32_s16(s0, s1, s2, s3, s4, s5, s6, s7, y_filter);
-      d1 = highbd_convolve8_4_s32_s16(s1, s2, s3, s4, s5, s6, s7, s8, y_filter);
-      d2 = highbd_convolve8_4_s32_s16(s2, s3, s4, s5, s6, s7, s8, s9, y_filter);
-      d3 =
-          highbd_convolve8_4_s32_s16(s3, s4, s5, s6, s7, s8, s9, s10, y_filter);
-
-      d01 = vcombine_u16(d0, d1);
-      d23 = vcombine_u16(d2, d3);
-
-      d01 = vminq_u16(d01, max);
-      d23 = vminq_u16(d23, max);
-
-      if (w == 2) {
-        store_u16q_2x1(d + 0 * dst_stride, d01, 0);
-        store_u16q_2x1(d + 1 * dst_stride, d01, 2);
-        if (h != 2) {
-          store_u16q_2x1(d + 2 * dst_stride, d23, 0);
-          store_u16q_2x1(d + 3 * dst_stride, d23, 2);
-        }
-      } else {
-        vst1_u16(d + 0 * dst_stride, vget_low_u16(d01));
-        vst1_u16(d + 1 * dst_stride, vget_high_u16(d01));
-        if (h != 2) {
-          vst1_u16(d + 2 * dst_stride, vget_low_u16(d23));
-          vst1_u16(d + 3 * dst_stride, vget_high_u16(d23));
-        }
-      }
-
-      s0 = s4;
-      s1 = s5;
-      s2 = s6;
-      s3 = s7;
-      s4 = s8;
-      s5 = s9;
-      s6 = s10;
-      s += 4 * src_stride;
-      d += 4 * dst_stride;
-      h -= 4;
-    } while (h > 0);
-  } else {
-    int height;
-    uint16x8_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10;
-    int16x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10;
-    uint16x8_t d0, d1, d2, d3;
-    do {
-      const uint16_t *s = src;
-      uint16_t *d = dst;
-
-      load_u16_8x7(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6);
+      load_u16_8x2(s, src_stride, &t0, &t2);
+      load_u16_8x2(s + 8, src_stride, &t1, &t3);
       s0 = vreinterpretq_s16_u16(t0);
       s1 = vreinterpretq_s16_u16(t1);
       s2 = vreinterpretq_s16_u16(t2);
       s3 = vreinterpretq_s16_u16(t3);
+
+      d0 = highbd_convolve8_horiz4_s32_s16(s0, s1, x_filter, shift_s32,
+                                           zero_s32);
+      d1 = highbd_convolve8_horiz4_s32_s16(s2, s3, x_filter, shift_s32,
+                                           zero_s32);
+
+      d01 = vcombine_u16(d0, d1);
+      d01 = vqrshlq_u16(d01, bits_s16);
+      d01 = vminq_u16(d01, max);
+
+      if (w == 2) {
+        store_u16q_2x1(d + 0 * dst_stride, d01, 0);
+        store_u16q_2x1(d + 1 * dst_stride, d01, 2);
+      } else {
+        vst1_u16(d + 0 * dst_stride, vget_low_u16(d01));
+        vst1_u16(d + 1 * dst_stride, vget_high_u16(d01));
+      }
+
+      s += 2 * src_stride;
+      d += 2 * dst_stride;
+      h -= 2;
+    } while (h > 0);
+  } else {
+    int height = h;
+    uint16x8_t t0, t1, t2, t3, t4, t5, t6, t7;
+    int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
+    uint16x8_t d0, d1, d2, d3;
+    do {
+      int width = w;
+      const uint16_t *s = src_ptr;
+      uint16_t *d = dst_ptr;
+      load_u16_8x4(s, src_stride, &t0, &t2, &t4, &t6);
+      s0 = vreinterpretq_s16_u16(t0);
+      s2 = vreinterpretq_s16_u16(t2);
       s4 = vreinterpretq_s16_u16(t4);
-      s5 = vreinterpretq_s16_u16(t5);
       s6 = vreinterpretq_s16_u16(t6);
 
-      s += 7 * src_stride;
-      height = h;
-
+      s += 8;
       do {
-        load_u16_8x4(s, src_stride, &t7, &t8, &t9, &t10);
+        load_u16_8x4(s, src_stride, &t1, &t3, &t5, &t7);
+        s1 = vreinterpretq_s16_u16(t1);
+        s3 = vreinterpretq_s16_u16(t3);
+        s5 = vreinterpretq_s16_u16(t5);
         s7 = vreinterpretq_s16_u16(t7);
-        s8 = vreinterpretq_s16_u16(t8);
-        s9 = vreinterpretq_s16_u16(t9);
-        s10 = vreinterpretq_s16_u16(t10);
 
-        d0 = highbd_convolve8_8_s32_s16(s0, s1, s2, s3, s4, s5, s6, s7,
-                                        y_filter);
-        d1 = highbd_convolve8_8_s32_s16(s1, s2, s3, s4, s5, s6, s7, s8,
-                                        y_filter);
-        d2 = highbd_convolve8_8_s32_s16(s2, s3, s4, s5, s6, s7, s8, s9,
-                                        y_filter);
-        d3 = highbd_convolve8_8_s32_s16(s3, s4, s5, s6, s7, s8, s9, s10,
-                                        y_filter);
+        d0 = highbd_convolve8_horiz8_s32_s16(s0, s1, x_filter, shift_s32,
+                                             zero_s32);
+        d1 = highbd_convolve8_horiz8_s32_s16(s2, s3, x_filter, shift_s32,
+                                             zero_s32);
+        d2 = highbd_convolve8_horiz8_s32_s16(s4, s5, x_filter, shift_s32,
+                                             zero_s32);
+        d3 = highbd_convolve8_horiz8_s32_s16(s6, s7, x_filter, shift_s32,
+                                             zero_s32);
+
+        d0 = vqrshlq_u16(d0, bits_s16);
+        d1 = vqrshlq_u16(d1, bits_s16);
+        d2 = vqrshlq_u16(d2, bits_s16);
+        d3 = vqrshlq_u16(d3, bits_s16);
 
         d0 = vminq_u16(d0, max);
         d1 = vminq_u16(d1, max);
@@ -456,21 +591,18 @@
           store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
         }
 
-        s0 = s4;
-        s1 = s5;
-        s2 = s6;
-        s3 = s7;
-        s4 = s8;
-        s5 = s9;
-        s6 = s10;
-        s += 4 * src_stride;
-        d += 4 * dst_stride;
-        height -= 4;
-      } while (height > 0);
-      src += 8;
-      dst += 8;
-      w -= 8;
-    } while (w > 0);
+        s0 = s1;
+        s2 = s3;
+        s4 = s5;
+        s6 = s7;
+        s += 8;
+        d += 8;
+        width -= 8;
+      } while (width > 0);
+      src_ptr += 4 * src_stride;
+      dst_ptr += 4 * dst_stride;
+      height -= 4;
+    } while (height > 0);
   }
 }
 
@@ -484,6 +616,7 @@
   const int16x8_t bits_s16 = vdupq_n_s16(-bits);
   const int16x8_t x_filter_0_7 = vld1q_s16(x_filter_ptr);
   const int16x4_t x_filter_8_11 = vld1_s16(x_filter_ptr + 8);
+  const int32x4_t zero_s32 = vdupq_n_s32(0);
 
   if (w <= 4) {
     uint16x8_t t0, t1, t2, t3;
@@ -503,9 +636,9 @@
       s3 = vreinterpretq_s16_u16(t3);
 
       d0 = highbd_convolve12_horiz4_s32_s16(s0, s1, x_filter_0_7, x_filter_8_11,
-                                            shift_s32);
+                                            shift_s32, zero_s32);
       d1 = highbd_convolve12_horiz4_s32_s16(s2, s3, x_filter_0_7, x_filter_8_11,
-                                            shift_s32);
+                                            shift_s32, zero_s32);
 
       d01 = vcombine_u16(d0, d1);
       d01 = vqrshlq_u16(d01, bits_s16);
@@ -551,14 +684,14 @@
         s10 = vreinterpretq_s16_u16(t10);
         s11 = vreinterpretq_s16_u16(t11);
 
-        d0 = highbd_convolve12_horiz8_s32_s16(s0, s1, s2, x_filter_0_7,
-                                              x_filter_8_11, shift_s32);
-        d1 = highbd_convolve12_horiz8_s32_s16(s3, s4, s5, x_filter_0_7,
-                                              x_filter_8_11, shift_s32);
-        d2 = highbd_convolve12_horiz8_s32_s16(s6, s7, s8, x_filter_0_7,
-                                              x_filter_8_11, shift_s32);
-        d3 = highbd_convolve12_horiz8_s32_s16(s9, s10, s11, x_filter_0_7,
-                                              x_filter_8_11, shift_s32);
+        d0 = highbd_convolve12_horiz8_s32_s16(
+            s0, s1, s2, x_filter_0_7, x_filter_8_11, shift_s32, zero_s32);
+        d1 = highbd_convolve12_horiz8_s32_s16(
+            s3, s4, s5, x_filter_0_7, x_filter_8_11, shift_s32, zero_s32);
+        d2 = highbd_convolve12_horiz8_s32_s16(
+            s6, s7, s8, x_filter_0_7, x_filter_8_11, shift_s32, zero_s32);
+        d3 = highbd_convolve12_horiz8_s32_s16(
+            s9, s10, s11, x_filter_0_7, x_filter_8_11, shift_s32, zero_s32);
 
         d0 = vqrshlq_u16(d0, bits_s16);
         d1 = vqrshlq_u16(d1, bits_s16);
@@ -601,11 +734,11 @@
                                    const int subpel_x_qn,
                                    ConvolveParams *conv_params, int bd) {
   const int x_filter_taps = get_filter_tap(filter_params_x, subpel_x_qn);
   const int horiz_offset = filter_params_x->taps / 2 - 1;
   const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel(
       filter_params_x, subpel_x_qn & SUBPEL_MASK);
 
   src -= horiz_offset;
 
   if (x_filter_taps > 8) {
     highbd_convolve_x_sr_12tap_neon(src, src_stride, dst, dst_stride, w, h,
@@ -613,103 +746,6 @@
     return;
   }
 
-  const int16x8_t x_filter = vld1q_s16(x_filter_ptr);
-  const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
-  const int32x4_t shift_s32 = vdupq_n_s32(-conv_params->round_0);
-  const int bits = FILTER_BITS - conv_params->round_0;
-  const int16x8_t bits_s16 = vdupq_n_s16(-bits);
-
-  if (w <= 4) {
-    uint16x8_t t0, t1, t2, t3;
-    int16x8_t s0, s1, s2, s3;
-    uint16x4_t d0, d1;
-    uint16x8_t d01;
-
-    const uint16_t *s = src;
-    uint16_t *d = dst;
-
-    do {
-      load_u16_8x2(s, src_stride, &t0, &t2);
-      load_u16_8x2(s + 8, src_stride, &t1, &t3);
-      s0 = vreinterpretq_s16_u16(t0);
-      s1 = vreinterpretq_s16_u16(t1);
-      s2 = vreinterpretq_s16_u16(t2);
-      s3 = vreinterpretq_s16_u16(t3);
-
-      d0 = highbd_convolve8_horiz4_s32_s16(s0, s1, x_filter, shift_s32);
-      d1 = highbd_convolve8_horiz4_s32_s16(s2, s3, x_filter, shift_s32);
-
-      d01 = vcombine_u16(d0, d1);
-      d01 = vqrshlq_u16(d01, bits_s16);
-      d01 = vminq_u16(d01, max);
-
-      if (w == 2) {
-        store_u16q_2x1(d + 0 * dst_stride, d01, 0);
-        store_u16q_2x1(d + 1 * dst_stride, d01, 2);
-      } else {
-        vst1_u16(d + 0 * dst_stride, vget_low_u16(d01));
-        vst1_u16(d + 1 * dst_stride, vget_high_u16(d01));
-      }
-
-      s += 2 * src_stride;
-      d += 2 * dst_stride;
-      h -= 2;
-    } while (h > 0);
-  } else {
-    int height = h;
-    uint16x8_t t0, t1, t2, t3, t4, t5, t6, t7;
-    int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
-    uint16x8_t d0, d1, d2, d3;
-    do {
-      int width = w;
-      const uint16_t *s = src;
-      uint16_t *d = dst;
-      load_u16_8x4(s, src_stride, &t0, &t2, &t4, &t6);
-      s0 = vreinterpretq_s16_u16(t0);
-      s2 = vreinterpretq_s16_u16(t2);
-      s4 = vreinterpretq_s16_u16(t4);
-      s6 = vreinterpretq_s16_u16(t6);
-
-      s += 8;
-      do {
-        load_u16_8x4(s, src_stride, &t1, &t3, &t5, &t7);
-        s1 = vreinterpretq_s16_u16(t1);
-        s3 = vreinterpretq_s16_u16(t3);
-        s5 = vreinterpretq_s16_u16(t5);
-        s7 = vreinterpretq_s16_u16(t7);
-
-        d0 = highbd_convolve8_horiz8_s32_s16(s0, s1, x_filter, shift_s32);
-        d1 = highbd_convolve8_horiz8_s32_s16(s2, s3, x_filter, shift_s32);
-        d2 = highbd_convolve8_horiz8_s32_s16(s4, s5, x_filter, shift_s32);
-        d3 = highbd_convolve8_horiz8_s32_s16(s6, s7, x_filter, shift_s32);
-
-        d0 = vqrshlq_u16(d0, bits_s16);
-        d1 = vqrshlq_u16(d1, bits_s16);
-        d2 = vqrshlq_u16(d2, bits_s16);
-        d3 = vqrshlq_u16(d3, bits_s16);
-
-        d0 = vminq_u16(d0, max);
-        d1 = vminq_u16(d1, max);
-        d2 = vminq_u16(d2, max);
-        d3 = vminq_u16(d3, max);
-
-        if (h == 2) {
-          store_u16_8x2(d, dst_stride, d0, d1);
-        } else {
-          store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
-        }
-
-        s0 = s1;
-        s2 = s3;
-        s4 = s5;
-        s6 = s7;
-        s += 8;
-        d += 8;
-        width -= 8;
-      } while (width > 0);
-      src += 4 * src_stride;
-      dst += 4 * dst_stride;
-      height -= 4;
-    } while (height > 0);
-  }
+  highbd_convolve_x_sr_8tap_neon(src, src_stride, dst, dst_stride, w, h,
+                                 x_filter_ptr, conv_params, bd);
 }
diff --git a/av1/common/arm/highbd_convolve_neon.h b/av1/common/arm/highbd_convolve_neon.h
index 831421a..fde881c 100644
--- a/av1/common/arm/highbd_convolve_neon.h
+++ b/av1/common/arm/highbd_convolve_neon.h
@@ -17,11 +17,11 @@
 static INLINE int32x4_t highbd_convolve6_4_s32(
     const int16x4_t s0, const int16x4_t s1, const int16x4_t s2,
     const int16x4_t s3, const int16x4_t s4, const int16x4_t s5,
-    const int16x8_t y_filter) {
+    const int16x8_t y_filter, const int32x4_t offset) {
   const int16x4_t y_filter_lo = vget_low_s16(y_filter);
   const int16x4_t y_filter_hi = vget_high_s16(y_filter);
 
-  int32x4_t sum = vmull_lane_s16(s0, y_filter_lo, 1);
+  int32x4_t sum = vmlal_lane_s16(offset, s0, y_filter_lo, 1);
   sum = vmlal_lane_s16(sum, s1, y_filter_lo, 2);
   sum = vmlal_lane_s16(sum, s2, y_filter_lo, 3);
   sum = vmlal_lane_s16(sum, s3, y_filter_hi, 0);
@@ -34,8 +34,9 @@
 static INLINE uint16x4_t highbd_convolve6_4_s32_s16(
     const int16x4_t s0, const int16x4_t s1, const int16x4_t s2,
     const int16x4_t s3, const int16x4_t s4, const int16x4_t s5,
-    const int16x8_t y_filter) {
-  int32x4_t sum = highbd_convolve6_4_s32(s0, s1, s2, s3, s4, s5, y_filter);
+    const int16x8_t y_filter, const int32x4_t offset) {
+  int32x4_t sum =
+      highbd_convolve6_4_s32(s0, s1, s2, s3, s4, s5, y_filter, offset);
 
   return vqrshrun_n_s32(sum, COMPOUND_ROUND1_BITS);
 }
@@ -43,18 +44,19 @@
 static INLINE void highbd_convolve6_8_s32(
     const int16x8_t s0, const int16x8_t s1, const int16x8_t s2,
     const int16x8_t s3, const int16x8_t s4, const int16x8_t s5,
-    const int16x8_t y_filter, int32x4_t *sum0, int32x4_t *sum1) {
+    const int16x8_t y_filter, const int32x4_t offset, int32x4_t *sum0,
+    int32x4_t *sum1) {
   const int16x4_t y_filter_lo = vget_low_s16(y_filter);
   const int16x4_t y_filter_hi = vget_high_s16(y_filter);
 
-  *sum0 = vmull_lane_s16(vget_low_s16(s0), y_filter_lo, 1);
+  *sum0 = vmlal_lane_s16(offset, vget_low_s16(s0), y_filter_lo, 1);
   *sum0 = vmlal_lane_s16(*sum0, vget_low_s16(s1), y_filter_lo, 2);
   *sum0 = vmlal_lane_s16(*sum0, vget_low_s16(s2), y_filter_lo, 3);
   *sum0 = vmlal_lane_s16(*sum0, vget_low_s16(s3), y_filter_hi, 0);
   *sum0 = vmlal_lane_s16(*sum0, vget_low_s16(s4), y_filter_hi, 1);
   *sum0 = vmlal_lane_s16(*sum0, vget_low_s16(s5), y_filter_hi, 2);
 
-  *sum1 = vmull_lane_s16(vget_high_s16(s0), y_filter_lo, 1);
+  *sum1 = vmlal_lane_s16(offset, vget_high_s16(s0), y_filter_lo, 1);
   *sum1 = vmlal_lane_s16(*sum1, vget_high_s16(s1), y_filter_lo, 2);
   *sum1 = vmlal_lane_s16(*sum1, vget_high_s16(s2), y_filter_lo, 3);
   *sum1 = vmlal_lane_s16(*sum1, vget_high_s16(s3), y_filter_hi, 0);
@@ -65,10 +67,11 @@
 static INLINE uint16x8_t highbd_convolve6_8_s32_s16(
     const int16x8_t s0, const int16x8_t s1, const int16x8_t s2,
     const int16x8_t s3, const int16x8_t s4, const int16x8_t s5,
-    const int16x8_t y_filter) {
+    const int16x8_t y_filter, const int32x4_t offset) {
   int32x4_t sum0;
   int32x4_t sum1;
-  highbd_convolve6_8_s32(s0, s1, s2, s3, s4, s5, y_filter, &sum0, &sum1);
+  highbd_convolve6_8_s32(s0, s1, s2, s3, s4, s5, y_filter, offset, &sum0,
+                         &sum1);
 
   return vcombine_u16(vqrshrun_n_s32(sum0, COMPOUND_ROUND1_BITS),
                       vqrshrun_n_s32(sum1, COMPOUND_ROUND1_BITS));
@@ -77,11 +80,12 @@
 static INLINE int32x4_t highbd_convolve8_4_s32(
     const int16x4_t s0, const int16x4_t s1, const int16x4_t s2,
     const int16x4_t s3, const int16x4_t s4, const int16x4_t s5,
-    const int16x4_t s6, const int16x4_t s7, const int16x8_t y_filter) {
+    const int16x4_t s6, const int16x4_t s7, const int16x8_t y_filter,
+    const int32x4_t offset) {
   const int16x4_t y_filter_lo = vget_low_s16(y_filter);
   const int16x4_t y_filter_hi = vget_high_s16(y_filter);
 
-  int32x4_t sum = vmull_lane_s16(s0, y_filter_lo, 0);
+  int32x4_t sum = vmlal_lane_s16(offset, s0, y_filter_lo, 0);
   sum = vmlal_lane_s16(sum, s1, y_filter_lo, 1);
   sum = vmlal_lane_s16(sum, s2, y_filter_lo, 2);
   sum = vmlal_lane_s16(sum, s3, y_filter_lo, 3);
@@ -96,9 +100,10 @@
 static INLINE uint16x4_t highbd_convolve8_4_s32_s16(
     const int16x4_t s0, const int16x4_t s1, const int16x4_t s2,
     const int16x4_t s3, const int16x4_t s4, const int16x4_t s5,
-    const int16x4_t s6, const int16x4_t s7, const int16x8_t y_filter) {
+    const int16x4_t s6, const int16x4_t s7, const int16x8_t y_filter,
+    const int32x4_t offset) {
   int32x4_t sum =
-      highbd_convolve8_4_s32(s0, s1, s2, s3, s4, s5, s6, s7, y_filter);
+      highbd_convolve8_4_s32(s0, s1, s2, s3, s4, s5, s6, s7, y_filter, offset);
 
   return vqrshrun_n_s32(sum, COMPOUND_ROUND1_BITS);
 }
@@ -107,11 +112,11 @@
     const int16x8_t s0, const int16x8_t s1, const int16x8_t s2,
     const int16x8_t s3, const int16x8_t s4, const int16x8_t s5,
     const int16x8_t s6, const int16x8_t s7, const int16x8_t y_filter,
-    int32x4_t *sum0, int32x4_t *sum1) {
+    const int32x4_t offset, int32x4_t *sum0, int32x4_t *sum1) {
   const int16x4_t y_filter_lo = vget_low_s16(y_filter);
   const int16x4_t y_filter_hi = vget_high_s16(y_filter);
 
-  *sum0 = vmull_lane_s16(vget_low_s16(s0), y_filter_lo, 0);
+  *sum0 = vmlal_lane_s16(offset, vget_low_s16(s0), y_filter_lo, 0);
   *sum0 = vmlal_lane_s16(*sum0, vget_low_s16(s1), y_filter_lo, 1);
   *sum0 = vmlal_lane_s16(*sum0, vget_low_s16(s2), y_filter_lo, 2);
   *sum0 = vmlal_lane_s16(*sum0, vget_low_s16(s3), y_filter_lo, 3);
@@ -120,7 +125,7 @@
   *sum0 = vmlal_lane_s16(*sum0, vget_low_s16(s6), y_filter_hi, 2);
   *sum0 = vmlal_lane_s16(*sum0, vget_low_s16(s7), y_filter_hi, 3);
 
-  *sum1 = vmull_lane_s16(vget_high_s16(s0), y_filter_lo, 0);
+  *sum1 = vmlal_lane_s16(offset, vget_high_s16(s0), y_filter_lo, 0);
   *sum1 = vmlal_lane_s16(*sum1, vget_high_s16(s1), y_filter_lo, 1);
   *sum1 = vmlal_lane_s16(*sum1, vget_high_s16(s2), y_filter_lo, 2);
   *sum1 = vmlal_lane_s16(*sum1, vget_high_s16(s3), y_filter_lo, 3);
@@ -133,26 +138,28 @@
 static INLINE uint16x8_t highbd_convolve8_8_s32_s16(
     const int16x8_t s0, const int16x8_t s1, const int16x8_t s2,
     const int16x8_t s3, const int16x8_t s4, const int16x8_t s5,
-    const int16x8_t s6, const int16x8_t s7, const int16x8_t y_filter) {
+    const int16x8_t s6, const int16x8_t s7, const int16x8_t y_filter,
+    const int32x4_t offset) {
   int32x4_t sum0;
   int32x4_t sum1;
-  highbd_convolve8_8_s32(s0, s1, s2, s3, s4, s5, s6, s7, y_filter, &sum0,
-                         &sum1);
+  highbd_convolve8_8_s32(s0, s1, s2, s3, s4, s5, s6, s7, y_filter, offset,
+                         &sum0, &sum1);
 
   return vcombine_u16(vqrshrun_n_s32(sum0, COMPOUND_ROUND1_BITS),
                       vqrshrun_n_s32(sum1, COMPOUND_ROUND1_BITS));
 }
 
-static INLINE int32x4_t highbd_convolve12_y_4x4_s32(
+static INLINE int32x4_t highbd_convolve12_y_4_s32(
     const int16x4_t s0, const int16x4_t s1, const int16x4_t s2,
     const int16x4_t s3, const int16x4_t s4, const int16x4_t s5,
     const int16x4_t s6, const int16x4_t s7, const int16x4_t s8,
     const int16x4_t s9, const int16x4_t s10, const int16x4_t s11,
-    const int16x8_t y_filter_0_7, const int16x4_t y_filter_8_11) {
+    const int16x8_t y_filter_0_7, const int16x4_t y_filter_8_11,
+    const int32x4_t offset) {
   const int16x4_t y_filter_0_3 = vget_low_s16(y_filter_0_7);
   const int16x4_t y_filter_4_7 = vget_high_s16(y_filter_0_7);
 
-  int32x4_t sum = vmull_lane_s16(s0, y_filter_0_3, 0);
+  int32x4_t sum = vmlal_lane_s16(offset, s0, y_filter_0_3, 0);
   sum = vmlal_lane_s16(sum, s1, y_filter_0_3, 1);
   sum = vmlal_lane_s16(sum, s2, y_filter_0_3, 2);
   sum = vmlal_lane_s16(sum, s3, y_filter_0_3, 3);
@@ -168,30 +175,31 @@
   return sum;
 }
 
-static INLINE uint16x4_t highbd_convolve12_y_4x4_s32_s16(
+static INLINE uint16x4_t highbd_convolve12_y_4_s32_s16(
     const int16x4_t s0, const int16x4_t s1, const int16x4_t s2,
     const int16x4_t s3, const int16x4_t s4, const int16x4_t s5,
     const int16x4_t s6, const int16x4_t s7, const int16x4_t s8,
     const int16x4_t s9, const int16x4_t s10, const int16x4_t s11,
-    const int16x8_t y_filter_0_7, const int16x4_t y_filter_8_11) {
+    const int16x8_t y_filter_0_7, const int16x4_t y_filter_8_11,
+    const int32x4_t offset) {
   int32x4_t sum =
-      highbd_convolve12_y_4x4_s32(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10,
-                                  s11, y_filter_0_7, y_filter_8_11);
+      highbd_convolve12_y_4_s32(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10,
+                                s11, y_filter_0_7, y_filter_8_11, offset);
 
-  return vqrshrun_n_s32(sum, FILTER_BITS);
+  return vqrshrun_n_s32(sum, COMPOUND_ROUND1_BITS);
 }
 
-static INLINE void highbd_convolve12_y_8x4_s32(
+static INLINE void highbd_convolve12_y_8_s32(
     const int16x8_t s0, const int16x8_t s1, const int16x8_t s2,
     const int16x8_t s3, const int16x8_t s4, const int16x8_t s5,
     const int16x8_t s6, const int16x8_t s7, const int16x8_t s8,
     const int16x8_t s9, const int16x8_t s10, const int16x8_t s11,
     const int16x8_t y_filter_0_7, const int16x4_t y_filter_8_11,
-    int32x4_t *sum0, int32x4_t *sum1) {
+    const int32x4_t offset, int32x4_t *sum0, int32x4_t *sum1) {
   const int16x4_t y_filter_0_3 = vget_low_s16(y_filter_0_7);
   const int16x4_t y_filter_4_7 = vget_high_s16(y_filter_0_7);
 
-  *sum0 = vmull_lane_s16(vget_low_s16(s0), y_filter_0_3, 0);
+  *sum0 = vmlal_lane_s16(offset, vget_low_s16(s0), y_filter_0_3, 0);
   *sum0 = vmlal_lane_s16(*sum0, vget_low_s16(s1), y_filter_0_3, 1);
   *sum0 = vmlal_lane_s16(*sum0, vget_low_s16(s2), y_filter_0_3, 2);
   *sum0 = vmlal_lane_s16(*sum0, vget_low_s16(s3), y_filter_0_3, 3);
@@ -204,7 +212,7 @@
   *sum0 = vmlal_lane_s16(*sum0, vget_low_s16(s10), y_filter_8_11, 2);
   *sum0 = vmlal_lane_s16(*sum0, vget_low_s16(s11), y_filter_8_11, 3);
 
-  *sum1 = vmull_lane_s16(vget_high_s16(s0), y_filter_0_3, 0);
+  *sum1 = vmlal_lane_s16(offset, vget_high_s16(s0), y_filter_0_3, 0);
   *sum1 = vmlal_lane_s16(*sum1, vget_high_s16(s1), y_filter_0_3, 1);
   *sum1 = vmlal_lane_s16(*sum1, vget_high_s16(s2), y_filter_0_3, 2);
   *sum1 = vmlal_lane_s16(*sum1, vget_high_s16(s3), y_filter_0_3, 3);
@@ -218,23 +226,25 @@
   *sum1 = vmlal_lane_s16(*sum1, vget_high_s16(s11), y_filter_8_11, 3);
 }
 
-static INLINE uint16x8_t highbd_convolve12_y_8x4_s32_s16(
+static INLINE uint16x8_t highbd_convolve12_y_8_s32_s16(
     const int16x8_t s0, const int16x8_t s1, const int16x8_t s2,
     const int16x8_t s3, const int16x8_t s4, const int16x8_t s5,
     const int16x8_t s6, const int16x8_t s7, const int16x8_t s8,
     const int16x8_t s9, const int16x8_t s10, const int16x8_t s11,
-    const int16x8_t y_filter_0_7, const int16x4_t y_filter_8_11) {
+    const int16x8_t y_filter_0_7, const int16x4_t y_filter_8_11,
+    const int32x4_t offset) {
   int32x4_t sum0;
   int32x4_t sum1;
-  highbd_convolve12_y_8x4_s32(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11,
-                              y_filter_0_7, y_filter_8_11, &sum0, &sum1);
+  highbd_convolve12_y_8_s32(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11,
+                            y_filter_0_7, y_filter_8_11, offset, &sum0, &sum1);
 
-  return vcombine_u16(vqrshrun_n_s32(sum0, FILTER_BITS),
-                      vqrshrun_n_s32(sum1, FILTER_BITS));
+  return vcombine_u16(vqrshrun_n_s32(sum0, COMPOUND_ROUND1_BITS),
+                      vqrshrun_n_s32(sum1, COMPOUND_ROUND1_BITS));
 }
 
 static INLINE int32x4_t highbd_convolve8_horiz4_s32(
-    const int16x8_t s0, const int16x8_t s1, const int16x8_t x_filter_0_7) {
+    const int16x8_t s0, const int16x8_t s1, const int16x8_t x_filter_0_7,
+    const int32x4_t offset) {
   const int16x8_t s2 = vextq_s16(s0, s1, 1);
   const int16x8_t s3 = vextq_s16(s0, s1, 2);
   const int16x8_t s4 = vextq_s16(s0, s1, 3);
@@ -248,23 +258,21 @@
   const int16x4_t s7_lo = vget_high_s16(s4);
 
   return highbd_convolve8_4_s32(s0_lo, s1_lo, s2_lo, s3_lo, s4_lo, s5_lo, s6_lo,
-                                s7_lo, x_filter_0_7);
+                                s7_lo, x_filter_0_7, offset);
 }
 
 static INLINE uint16x4_t highbd_convolve8_horiz4_s32_s16(
     const int16x8_t s0, const int16x8_t s1, const int16x8_t x_filter_0_7,
-    const int32x4_t shift_s32) {
-  int32x4_t sum = highbd_convolve8_horiz4_s32(s0, s1, x_filter_0_7);
+    const int32x4_t shift_s32, const int32x4_t offset) {
+  int32x4_t sum = highbd_convolve8_horiz4_s32(s0, s1, x_filter_0_7, offset);
 
   sum = vqrshlq_s32(sum, shift_s32);
   return vqmovun_s32(sum);
 }
 
-static INLINE void highbd_convolve8_horiz8_s32(const int16x8_t s0,
-                                               const int16x8_t s0_hi,
-                                               const int16x8_t x_filter_0_7,
-                                               int32x4_t *sum0,
-                                               int32x4_t *sum1) {
+static INLINE void highbd_convolve8_horiz8_s32(
+    const int16x8_t s0, const int16x8_t s0_hi, const int16x8_t x_filter_0_7,
+    const int32x4_t offset, int32x4_t *sum0, int32x4_t *sum1) {
   const int16x8_t s1 = vextq_s16(s0, s0_hi, 1);
   const int16x8_t s2 = vextq_s16(s0, s0_hi, 2);
   const int16x8_t s3 = vextq_s16(s0, s0_hi, 3);
@@ -273,15 +281,15 @@
   const int16x8_t s6 = vextq_s16(s0, s0_hi, 6);
   const int16x8_t s7 = vextq_s16(s0, s0_hi, 7);
 
-  highbd_convolve8_8_s32(s0, s1, s2, s3, s4, s5, s6, s7, x_filter_0_7, sum0,
-                         sum1);
+  highbd_convolve8_8_s32(s0, s1, s2, s3, s4, s5, s6, s7, x_filter_0_7, offset,
+                         sum0, sum1);
 }
 
 static INLINE uint16x8_t highbd_convolve8_horiz8_s32_s16(
     const int16x8_t s0, const int16x8_t s1, const int16x8_t x_filter_0_7,
-    const int32x4_t shift_s32) {
+    const int32x4_t shift_s32, const int32x4_t offset) {
   int32x4_t sum0, sum1;
-  highbd_convolve8_horiz8_s32(s0, s1, x_filter_0_7, &sum0, &sum1);
+  highbd_convolve8_horiz8_s32(s0, s1, x_filter_0_7, offset, &sum0, &sum1);
 
   sum0 = vqrshlq_s32(sum0, shift_s32);
   sum1 = vqrshlq_s32(sum1, shift_s32);
@@ -291,7 +299,7 @@
 
 static INLINE int32x4_t highbd_convolve12_horiz4_s32(
     const int16x8_t s0, const int16x8_t s1, const int16x8_t x_filter_0_7,
-    const int16x4_t x_filter_8_11) {
+    const int16x4_t x_filter_8_11, const int32x4_t offset) {
   const int16x8_t s2 = vextq_s16(s0, s1, 1);
   const int16x8_t s3 = vextq_s16(s0, s1, 2);
   const int16x8_t s4 = vextq_s16(s0, s1, 3);
@@ -312,16 +320,17 @@
   const int16x4_t s10_lo = vget_high_s16(s7);
   const int16x4_t s11_lo = vget_high_s16(s8);
 
-  return highbd_convolve12_y_4x4_s32(s0_lo, s1_lo, s2_lo, s3_lo, s4_lo, s5_lo,
-                                     s6_lo, s7_lo, s8_lo, s9_lo, s10_lo, s11_lo,
-                                     x_filter_0_7, x_filter_8_11);
+  return highbd_convolve12_y_4_s32(s0_lo, s1_lo, s2_lo, s3_lo, s4_lo, s5_lo,
+                                   s6_lo, s7_lo, s8_lo, s9_lo, s10_lo, s11_lo,
+                                   x_filter_0_7, x_filter_8_11, offset);
 }
 
 static INLINE uint16x4_t highbd_convolve12_horiz4_s32_s16(
     const int16x8_t s0, const int16x8_t s1, const int16x8_t x_filter_0_7,
-    const int16x4_t x_filter_8_11, const int32x4_t shift_s32) {
+    const int16x4_t x_filter_8_11, const int32x4_t shift_s32,
+    const int32x4_t offset) {
   int32x4_t sum =
-      highbd_convolve12_horiz4_s32(s0, s1, x_filter_0_7, x_filter_8_11);
+      highbd_convolve12_horiz4_s32(s0, s1, x_filter_0_7, x_filter_8_11, offset);
 
   sum = vqrshlq_s32(sum, shift_s32);
   return vqmovun_s32(sum);
@@ -330,7 +339,7 @@
 static INLINE void highbd_convolve12_horiz8_s32(
     const int16x8_t s0_0, const int16x8_t s0_1, const int16x8_t s0_2,
     const int16x8_t x_filter_0_7, const int16x4_t x_filter_8_11,
-    int32x4_t *sum0, int32x4_t *sum1) {
+    const int32x4_t offset, int32x4_t *sum0, int32x4_t *sum1) {
   const int16x8_t s1 = vextq_s16(s0_0, s0_1, 1);
   const int16x8_t s2 = vextq_s16(s0_0, s0_1, 2);
   const int16x8_t s3 = vextq_s16(s0_0, s0_1, 3);
@@ -343,17 +352,17 @@
   const int16x8_t s10 = vextq_s16(s0_1, s0_2, 2);
   const int16x8_t s11 = vextq_s16(s0_1, s0_2, 3);
 
-  highbd_convolve12_y_8x4_s32(s0_0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10,
-                              s11, x_filter_0_7, x_filter_8_11, sum0, sum1);
+  highbd_convolve12_y_8_s32(s0_0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11,
+                            x_filter_0_7, x_filter_8_11, offset, sum0, sum1);
 }
 
 static INLINE uint16x8_t highbd_convolve12_horiz8_s32_s16(
     const int16x8_t s0, const int16x8_t s1, const int16x8_t s2,
     const int16x8_t x_filter_0_7, const int16x4_t x_filter_8_11,
-    const int32x4_t shift_s32) {
+    const int32x4_t shift_s32, const int32x4_t offset) {
   int32x4_t sum0, sum1;
-  highbd_convolve12_horiz8_s32(s0, s1, s2, x_filter_0_7, x_filter_8_11, &sum0,
-                               &sum1);
+  highbd_convolve12_horiz8_s32(s0, s1, s2, x_filter_0_7, x_filter_8_11, offset,
+                               &sum0, &sum1);
 
   sum0 = vqrshlq_s32(sum0, shift_s32);
   sum1 = vqrshlq_s32(sum1, shift_s32);