[NEON] highbd implementation of av1_highbd_convolve_x_sr_neon 2/2

Approximately 4x faster than the C fallback, for a total gain of ~1%. This is the second part, covering the 12-tap filter.
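
For reference, each output pixel of the new path is a plain 12-tap horizontal
filter followed by two rounding shifts and a clamp to the bit-depth maximum.
A rough scalar sketch of that per-pixel operation (a hypothetical helper, not
the library API; it ignores the intermediate saturation the NEON code gets
from vqrshl/vqmovun and assumes round_0 >= 1 and FILTER_BITS - round_0 >= 1):

  #include <stdint.h>

  // src points at the first of the 12 input samples, filter at the 12
  // coefficients; FILTER_BITS is 7 in libaom.
  static uint16_t convolve_x_12tap_px(const uint16_t *src,
                                      const int16_t *filter, int round_0,
                                      int bd) {
    int32_t sum = 0;
    for (int k = 0; k < 12; ++k) sum += (int32_t)src[k] * filter[k];
    sum = (sum + (1 << (round_0 - 1))) >> round_0;  // first rounding shift
    const int bits = 7 - round_0;                   // FILTER_BITS - round_0
    sum = (sum + (1 << (bits - 1))) >> bits;        // second rounding shift
    const int32_t max = (1 << bd) - 1;              // clamp to bit-depth range
    return (uint16_t)(sum < 0 ? 0 : (sum > max ? max : sum));
  }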

Change-Id: I617bbe7032a0be48b723e5d3a34cb47b6e937d7d
diff --git a/av1/common/arm/highbd_convolve_neon.c b/av1/common/arm/highbd_convolve_neon.c
index 68c1358..fcd059b 100644
--- a/av1/common/arm/highbd_convolve_neon.c
+++ b/av1/common/arm/highbd_convolve_neon.c
@@ -474,30 +474,150 @@
   }
 }
 
+static INLINE void highbd_convolve_x_sr_12tap_neon(
+    const uint16_t *src_ptr, int src_stride, uint16_t *dst_ptr, int dst_stride,
+    int w, int h, const int16_t *x_filter_ptr, ConvolveParams *conv_params,
+    int bd) {
+  const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+  const int32x4_t shift_s32 = vdupq_n_s32(-conv_params->round_0);
+  const int bits = FILTER_BITS - conv_params->round_0;
+  const int16x8_t bits_s16 = vdupq_n_s16(-bits);
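+  // Results are rounded in two stages: by round_0 inside the convolve helpers
+  // and by the remaining FILTER_BITS - round_0 bits here.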
+  const int16x8_t x_filter_0_7 = vld1q_s16(x_filter_ptr);
+  const int16x4_t x_filter_8_11 = vld1_s16(x_filter_ptr + 8);
+
+  if (w <= 4) {
+    uint16x8_t t0, t1, t2, t3;
+    int16x8_t s0, s1, s2, s3;
+    uint16x4_t d0, d1;
+    uint16x8_t d01;
+
+    const uint16_t *s = src_ptr;
+    uint16_t *d = dst_ptr;
+
+    do {
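+      // t0/t1 and t2/t3 each hold 16 contiguous samples of one row - enough
+      // input for four outputs of the 12-tap filter on two rows per iteration.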
+      load_u16_8x2(s, src_stride, &t0, &t2);
+      load_u16_8x2(s + 8, src_stride, &t1, &t3);
+      s0 = vreinterpretq_s16_u16(t0);
+      s1 = vreinterpretq_s16_u16(t1);
+      s2 = vreinterpretq_s16_u16(t2);
+      s3 = vreinterpretq_s16_u16(t3);
+
+      d0 = highbd_convolve12_horiz4_s32_s16(s0, s1, x_filter_0_7, x_filter_8_11,
+                                            shift_s32);
+      d1 = highbd_convolve12_horiz4_s32_s16(s2, s3, x_filter_0_7, x_filter_8_11,
+                                            shift_s32);
+
+      d01 = vcombine_u16(d0, d1);
+      d01 = vqrshlq_u16(d01, bits_s16);
+      d01 = vminq_u16(d01, max);
+
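+      // 2-wide blocks store only the first two lanes of each half of d01.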
+      if (w == 2) {
+        store_u16q_2x1(d + 0 * dst_stride, d01, 0);
+        store_u16q_2x1(d + 1 * dst_stride, d01, 2);
+      } else {
+        vst1_u16(d + 0 * dst_stride, vget_low_u16(d01));
+        vst1_u16(d + 1 * dst_stride, vget_high_u16(d01));
+      }
+
+      s += 2 * src_stride;
+      d += 2 * dst_stride;
+      h -= 2;
+    } while (h > 0);
+  } else {
+    int height = h;
+    uint16x8_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11;
+    int16x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11;
+    uint16x8_t d0, d1, d2, d3;
+    do {
+      int width = w;
+      const uint16_t *s = src_ptr;
+      uint16_t *d = dst_ptr;
+      load_u16_8x4(s, src_stride, &t0, &t3, &t6, &t9);
+      s0 = vreinterpretq_s16_u16(t0);
+      s3 = vreinterpretq_s16_u16(t3);
+      s6 = vreinterpretq_s16_u16(t6);
+      s9 = vreinterpretq_s16_u16(t9);
+
+      s += 8;
+      do {
+        load_u16_8x4(s, src_stride, &t1, &t4, &t7, &t10);
+        load_u16_8x4(s + 8, src_stride, &t2, &t5, &t8, &t11);
+        s1 = vreinterpretq_s16_u16(t1);
+        s2 = vreinterpretq_s16_u16(t2);
+        s4 = vreinterpretq_s16_u16(t4);
+        s5 = vreinterpretq_s16_u16(t5);
+        s7 = vreinterpretq_s16_u16(t7);
+        s8 = vreinterpretq_s16_u16(t8);
+        s10 = vreinterpretq_s16_u16(t10);
+        s11 = vreinterpretq_s16_u16(t11);
+
+        d0 = highbd_convolve12_horiz8_s32_s16(s0, s1, s2, x_filter_0_7,
+                                              x_filter_8_11, shift_s32);
+        d1 = highbd_convolve12_horiz8_s32_s16(s3, s4, s5, x_filter_0_7,
+                                              x_filter_8_11, shift_s32);
+        d2 = highbd_convolve12_horiz8_s32_s16(s6, s7, s8, x_filter_0_7,
+                                              x_filter_8_11, shift_s32);
+        d3 = highbd_convolve12_horiz8_s32_s16(s9, s10, s11, x_filter_0_7,
+                                              x_filter_8_11, shift_s32);
+
+        d0 = vqrshlq_u16(d0, bits_s16);
+        d1 = vqrshlq_u16(d1, bits_s16);
+        d2 = vqrshlq_u16(d2, bits_s16);
+        d3 = vqrshlq_u16(d3, bits_s16);
+
+        d0 = vminq_u16(d0, max);
+        d1 = vminq_u16(d1, max);
+        d2 = vminq_u16(d2, max);
+        d3 = vminq_u16(d3, max);
+
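+        // Blocks only two rows tall store d0/d1 and discard d2/d3.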
+        if (h == 2) {
+          store_u16_8x2(d, dst_stride, d0, d1);
+        } else {
+          store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+        }
+
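+        // Slide each row's sample window along by 8 columns so the blocks
+        // already in registers feed the next iteration.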
+        s0 = s1;
+        s1 = s2;
+        s3 = s4;
+        s4 = s5;
+        s6 = s7;
+        s7 = s8;
+        s9 = s10;
+        s10 = s11;
+        s += 8;
+        d += 8;
+        width -= 8;
+      } while (width > 0);
+      src_ptr += 4 * src_stride;
+      dst_ptr += 4 * dst_stride;
+      height -= 4;
+    } while (height > 0);
+  }
+}
+
 void av1_highbd_convolve_x_sr_neon(const uint16_t *src, int src_stride,
                                    uint16_t *dst, int dst_stride, int w, int h,
                                    const InterpFilterParams *filter_params_x,
                                    const int subpel_x_qn,
                                    ConvolveParams *conv_params, int bd) {
   const int x_filter_taps = get_filter_tap(filter_params_x, subpel_x_qn);
+  const int horiz_offset = filter_params_x->taps / 2 - 1;
+  const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel(
+      filter_params_x, subpel_x_qn & SUBPEL_MASK);
+
+  src -= horiz_offset;
 
   if (x_filter_taps > 8) {
-    av1_highbd_convolve_x_sr_c(src, src_stride, dst, dst_stride, w, h,
-                               filter_params_x, subpel_x_qn, conv_params, bd);
+    highbd_convolve_x_sr_12tap_neon(src, src_stride, dst, dst_stride, w, h,
+                                    x_filter_ptr, conv_params, bd);
     return;
   }
 
-  const int horiz_offset = filter_params_x->taps / 2 - 1;
+  const int16x8_t x_filter = vld1q_s16(x_filter_ptr);
   const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
   const int32x4_t shift_s32 = vdupq_n_s32(-conv_params->round_0);
   const int bits = FILTER_BITS - conv_params->round_0;
   const int16x8_t bits_s16 = vdupq_n_s16(-bits);
-  const int16_t *x_filter_ptr = av1_get_interp_filter_subpel_kernel(
-      filter_params_x, subpel_x_qn & SUBPEL_MASK);
-
-  const int16x8_t x_filter = vld1q_s16(x_filter_ptr);
-
-  src -= horiz_offset;
 
   if (w <= 4) {
     uint16x8_t t0, t1, t2, t3;
diff --git a/av1/common/arm/highbd_convolve_neon.h b/av1/common/arm/highbd_convolve_neon.h
index f5b4508..831421a 100644
--- a/av1/common/arm/highbd_convolve_neon.h
+++ b/av1/common/arm/highbd_convolve_neon.h
@@ -289,4 +289,76 @@
   return vcombine_u16(vqmovun_s32(sum0), vqmovun_s32(sum1));
 }
 
+static INLINE int32x4_t highbd_convolve12_horiz4_s32(
+    const int16x8_t s0, const int16x8_t s1, const int16x8_t x_filter_0_7,
+    const int16x4_t x_filter_8_11) {
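+  // s0 and s1 hold 16 consecutive samples. The vext copies below provide the
+  // samples at offsets 1..7; taking low/high halves of them yields the twelve
+  // per-tap input vectors expected by the shared 12-tap helper.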
+  const int16x8_t s2 = vextq_s16(s0, s1, 1);
+  const int16x8_t s3 = vextq_s16(s0, s1, 2);
+  const int16x8_t s4 = vextq_s16(s0, s1, 3);
+  const int16x8_t s5 = vextq_s16(s0, s1, 4);
+  const int16x8_t s6 = vextq_s16(s0, s1, 5);
+  const int16x8_t s7 = vextq_s16(s0, s1, 6);
+  const int16x8_t s8 = vextq_s16(s0, s1, 7);
+  const int16x4_t s0_lo = vget_low_s16(s0);
+  const int16x4_t s1_lo = vget_low_s16(s2);
+  const int16x4_t s2_lo = vget_low_s16(s3);
+  const int16x4_t s3_lo = vget_low_s16(s4);
+  const int16x4_t s4_lo = vget_high_s16(s0);
+  const int16x4_t s5_lo = vget_high_s16(s2);
+  const int16x4_t s6_lo = vget_high_s16(s3);
+  const int16x4_t s7_lo = vget_high_s16(s4);
+  const int16x4_t s8_lo = vget_high_s16(s5);
+  const int16x4_t s9_lo = vget_high_s16(s6);
+  const int16x4_t s10_lo = vget_high_s16(s7);
+  const int16x4_t s11_lo = vget_high_s16(s8);
+
+  return highbd_convolve12_y_4x4_s32(s0_lo, s1_lo, s2_lo, s3_lo, s4_lo, s5_lo,
+                                     s6_lo, s7_lo, s8_lo, s9_lo, s10_lo, s11_lo,
+                                     x_filter_0_7, x_filter_8_11);
+}
+
+static INLINE uint16x4_t highbd_convolve12_horiz4_s32_s16(
+    const int16x8_t s0, const int16x8_t s1, const int16x8_t x_filter_0_7,
+    const int16x4_t x_filter_8_11, const int32x4_t shift_s32) {
+  int32x4_t sum =
+      highbd_convolve12_horiz4_s32(s0, s1, x_filter_0_7, x_filter_8_11);
+
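+  // Rounding right shift by round_0 (vqrshl with a negative shift), then
+  // narrow with unsigned saturation.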
+  sum = vqrshlq_s32(sum, shift_s32);
+  return vqmovun_s32(sum);
+}
+
+static INLINE void highbd_convolve12_horiz8_s32(
+    const int16x8_t s0_0, const int16x8_t s0_1, const int16x8_t s0_2,
+    const int16x8_t x_filter_0_7, const int16x4_t x_filter_8_11,
+    int32x4_t *sum0, int32x4_t *sum1) {
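+  // s0_0..s0_2 hold 24 consecutive samples; the shifted copies below form the
+  // twelve per-tap input vectors for eight output pixels.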
+  const int16x8_t s1 = vextq_s16(s0_0, s0_1, 1);
+  const int16x8_t s2 = vextq_s16(s0_0, s0_1, 2);
+  const int16x8_t s3 = vextq_s16(s0_0, s0_1, 3);
+  const int16x8_t s4 = vextq_s16(s0_0, s0_1, 4);
+  const int16x8_t s5 = vextq_s16(s0_0, s0_1, 5);
+  const int16x8_t s6 = vextq_s16(s0_0, s0_1, 6);
+  const int16x8_t s7 = vextq_s16(s0_0, s0_1, 7);
+  const int16x8_t s8 = s0_1;
+  const int16x8_t s9 = vextq_s16(s0_1, s0_2, 1);
+  const int16x8_t s10 = vextq_s16(s0_1, s0_2, 2);
+  const int16x8_t s11 = vextq_s16(s0_1, s0_2, 3);
+
+  highbd_convolve12_y_8x4_s32(s0_0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10,
+                              s11, x_filter_0_7, x_filter_8_11, sum0, sum1);
+}
+
+static INLINE uint16x8_t highbd_convolve12_horiz8_s32_s16(
+    const int16x8_t s0, const int16x8_t s1, const int16x8_t s2,
+    const int16x8_t x_filter_0_7, const int16x4_t x_filter_8_11,
+    const int32x4_t shift_s32) {
+  int32x4_t sum0, sum1;
+  highbd_convolve12_horiz8_s32(s0, s1, s2, x_filter_0_7, x_filter_8_11, &sum0,
+                               &sum1);
+
+  sum0 = vqrshlq_s32(sum0, shift_s32);
+  sum1 = vqrshlq_s32(sum1, shift_s32);
+
+  return vcombine_u16(vqmovun_s32(sum0), vqmovun_s32(sum1));
+}
+
 #endif  // AOM_AV1_COMMON_ARM_HIGHBD_CONVOLVE_NEON_H_