Add Neon low bit-depth implementation for SADSkip

Performance (speedup over the C implementation at each encoder speed
preset):

Speed  Improvement over C
0      3.2x
1      2.5x
2      2.53x
3      1.8x
4      1.78x
5      1.7x

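For reference, a skip SAD samples every other row and doubles the
result to approximate the full-height SAD. A minimal C sketch of that
behaviour (sad_skip_c is an illustrative name, not an aom function):

  #include <stdint.h>
  #include <stdlib.h>

  static unsigned int sad_skip_c(const uint8_t *src, int src_stride,
                                 const uint8_t *ref, int ref_stride,
                                 int w, int h) {
    unsigned int sad = 0;
    for (int y = 0; y < h; y += 2) {  // visit even rows only
      for (int x = 0; x < w; x++) sad += abs(src[x] - ref[x]);
      src += 2 * src_stride;
      ref += 2 * ref_stride;
    }
    return 2 * sad;  // scale up to approximate the full SAD
  }
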
Change-Id: I97d27c984295624d7856cdfaab9e25221803b92e
diff --git a/aom_dsp/aom_dsp_rtcd_defs.pl b/aom_dsp/aom_dsp_rtcd_defs.pl
index 5295156..0d6cf35 100755
--- a/aom_dsp/aom_dsp_rtcd_defs.pl
+++ b/aom_dsp/aom_dsp_rtcd_defs.pl
@@ -667,26 +667,26 @@
   specialize qw/aom_sad16x64                    sse2/;
   specialize qw/aom_sad64x16                    sse2/;
 
-  specialize qw/aom_sad_skip_128x128    avx2          sse2/;
-  specialize qw/aom_sad_skip_128x64     avx2          sse2/;
-  specialize qw/aom_sad_skip_64x128     avx2          sse2/;
-  specialize qw/aom_sad_skip_64x64      avx2          sse2/;
-  specialize qw/aom_sad_skip_64x32      avx2          sse2/;
-  specialize qw/aom_sad_skip_32x64      avx2          sse2/;
-  specialize qw/aom_sad_skip_32x32      avx2          sse2/;
-  specialize qw/aom_sad_skip_32x16      avx2          sse2/;
-  specialize qw/aom_sad_skip_16x32                    sse2/;
-  specialize qw/aom_sad_skip_16x16                    sse2/;
-  specialize qw/aom_sad_skip_16x8                     sse2/;
-  specialize qw/aom_sad_skip_8x16                     sse2/;
-  specialize qw/aom_sad_skip_8x8                      sse2/;
-  specialize qw/aom_sad_skip_4x8                      sse2/;
+  specialize qw/aom_sad_skip_128x128    avx2          sse2  neon/;
+  specialize qw/aom_sad_skip_128x64     avx2          sse2  neon/;
+  specialize qw/aom_sad_skip_64x128     avx2          sse2  neon/;
+  specialize qw/aom_sad_skip_64x64      avx2          sse2  neon/;
+  specialize qw/aom_sad_skip_64x32      avx2          sse2  neon/;
+  specialize qw/aom_sad_skip_32x64      avx2          sse2  neon/;
+  specialize qw/aom_sad_skip_32x32      avx2          sse2  neon/;
+  specialize qw/aom_sad_skip_32x16      avx2          sse2  neon/;
+  specialize qw/aom_sad_skip_16x32                    sse2  neon/;
+  specialize qw/aom_sad_skip_16x16                    sse2  neon/;
+  specialize qw/aom_sad_skip_16x8                     sse2  neon/;
+  specialize qw/aom_sad_skip_8x16                     sse2  neon/;
+  specialize qw/aom_sad_skip_8x8                      sse2  neon/;
+  specialize qw/aom_sad_skip_4x8                      sse2  neon/;
 
-  specialize qw/aom_sad_skip_4x16                     sse2/;
-  specialize qw/aom_sad_skip_8x32                     sse2/;
-  specialize qw/aom_sad_skip_32x8                     sse2/;
-  specialize qw/aom_sad_skip_16x64                    sse2/;
-  specialize qw/aom_sad_skip_64x16                    sse2/;
+  specialize qw/aom_sad_skip_4x16                     sse2  neon/;
+  specialize qw/aom_sad_skip_8x32                     sse2  neon/;
+  specialize qw/aom_sad_skip_32x8                     sse2  neon/;
+  specialize qw/aom_sad_skip_16x64                    sse2  neon/;
+  specialize qw/aom_sad_skip_64x16                    sse2  neon/;
 
   specialize qw/aom_sad128x128_avg avx2     sse2/;
   specialize qw/aom_sad128x64_avg  avx2     sse2/;
@@ -907,30 +907,30 @@
   specialize qw/aom_sad32x8x4d  sse2/;
   specialize qw/aom_sad64x16x4d sse2/;
 
-  specialize qw/aom_sad_skip_128x128x4d avx2 sse2/;
-  specialize qw/aom_sad_skip_128x64x4d  avx2 sse2/;
-  specialize qw/aom_sad_skip_64x128x4d  avx2 sse2/;
-  specialize qw/aom_sad_skip_64x64x4d   avx2 sse2/;
-  specialize qw/aom_sad_skip_64x32x4d   avx2 sse2/;
-  specialize qw/aom_sad_skip_64x16x4d   avx2 sse2/;
-  specialize qw/aom_sad_skip_32x64x4d   avx2 sse2/;
-  specialize qw/aom_sad_skip_32x32x4d   avx2 sse2/;
-  specialize qw/aom_sad_skip_32x16x4d   avx2 sse2/;
-  specialize qw/aom_sad_skip_32x8x4d    avx2 sse2/;
+  specialize qw/aom_sad_skip_128x128x4d avx2 sse2 neon/;
+  specialize qw/aom_sad_skip_128x64x4d  avx2 sse2 neon/;
+  specialize qw/aom_sad_skip_64x128x4d  avx2 sse2 neon/;
+  specialize qw/aom_sad_skip_64x64x4d   avx2 sse2 neon/;
+  specialize qw/aom_sad_skip_64x32x4d   avx2 sse2 neon/;
+  specialize qw/aom_sad_skip_64x16x4d   avx2 sse2 neon/;
+  specialize qw/aom_sad_skip_32x64x4d   avx2 sse2 neon/;
+  specialize qw/aom_sad_skip_32x32x4d   avx2 sse2 neon/;
+  specialize qw/aom_sad_skip_32x16x4d   avx2 sse2 neon/;
+  specialize qw/aom_sad_skip_32x8x4d    avx2 sse2 neon/;
 
-  specialize qw/aom_sad_skip_16x64x4d        sse2/;
-  specialize qw/aom_sad_skip_16x32x4d        sse2/;
-  specialize qw/aom_sad_skip_16x16x4d        sse2/;
-  specialize qw/aom_sad_skip_16x8x4d         sse2/;
-  specialize qw/aom_sad_skip_8x16x4d         sse2/;
-  specialize qw/aom_sad_skip_8x8x4d          sse2/;
-  specialize qw/aom_sad_skip_4x16x4d         sse2/;
-  specialize qw/aom_sad_skip_4x8x4d          sse2/;
-  specialize qw/aom_sad_skip_4x32x4d         sse2/;
-  specialize qw/aom_sad_skip_4x16x4d         sse2/;
-  specialize qw/aom_sad_skip_8x32x4d         sse2/;
-  specialize qw/aom_sad_skip_32x8x4d         sse2/;
-  specialize qw/aom_sad_skip_64x16x4d        sse2/;
+  specialize qw/aom_sad_skip_16x64x4d        sse2 neon/;
+  specialize qw/aom_sad_skip_16x32x4d        sse2 neon/;
+  specialize qw/aom_sad_skip_16x16x4d        sse2 neon/;
+  specialize qw/aom_sad_skip_16x8x4d         sse2 neon/;
+  specialize qw/aom_sad_skip_8x16x4d         sse2 neon/;
+  specialize qw/aom_sad_skip_8x8x4d          sse2 neon/;
+  specialize qw/aom_sad_skip_4x16x4d         sse2 neon/;
+  specialize qw/aom_sad_skip_4x8x4d          sse2 neon/;
+  specialize qw/aom_sad_skip_4x32x4d         sse2 neon/;
+  specialize qw/aom_sad_skip_4x16x4d         sse2 neon/;
+  specialize qw/aom_sad_skip_8x32x4d         sse2 neon/;
+  specialize qw/aom_sad_skip_32x8x4d         sse2 neon/;
+  specialize qw/aom_sad_skip_64x16x4d        sse2 neon/;
 
   specialize qw/aom_sad128x128x4d_avg sse2/;
   specialize qw/aom_sad128x64x4d_avg  sse2/;
diff --git a/aom_dsp/arm/sad4d_neon.c b/aom_dsp/arm/sad4d_neon.c
index 606950a..0e633b5 100644
--- a/aom_dsp/arm/sad4d_neon.c
+++ b/aom_dsp/arm/sad4d_neon.c
@@ -224,3 +224,368 @@
   res[2] = horizontal_long_add_16x8(vec_sum_ref2_lo, vec_sum_ref2_hi);
   res[3] = horizontal_long_add_16x8(vec_sum_ref3_lo, vec_sum_ref3_hi);
 }
+
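+// Reduce a 4-lane 16-bit vector to a single 32-bit sum using pairwise
+// widening adds.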
+static INLINE unsigned int horizontal_add_16x4(const uint16x4_t vec_16x4) {
+  const uint32x2_t a = vpaddl_u16(vec_16x4);
+  const uint64x1_t b = vpaddl_u32(a);
+  return vget_lane_u32(vreinterpret_u32_u64(b), 0);
+}
+
+static INLINE unsigned int horizontal_add_16x8(const uint16x8_t vec_16x8) {
+  const uint32x4_t a = vpaddlq_u16(vec_16x8);
+  const uint64x2_t b = vpaddlq_u32(a);
+  const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
+                                vreinterpret_u32_u64(vget_high_u64(b)));
+  return vget_lane_u32(c, 0);
+}
+
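+// Each 8-byte vector holds two adjacent 4-wide rows; accumulate |src - ref|
+// into four 16-bit lanes.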
+static INLINE void sad_row4_neon(uint16x4_t *vec_src, const uint8x8_t q0,
+                                 const uint8x8_t ref) {
+  uint8x8_t q2 = vabd_u8(q0, ref);
+  *vec_src = vpadal_u8(*vec_src, q2);
+}
+
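+// Accumulate |src - ref| for one 8-wide row into four 16-bit lanes.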
+static INLINE void sad_row8_neon(uint16x4_t *vec_src, const uint8x8_t *q0,
+                                 const uint8_t *ref_ptr) {
+  uint8x8_t q1 = vld1_u8(ref_ptr);
+  uint8x8_t q2 = vabd_u8(*q0, q1);
+  *vec_src = vpadal_u8(*vec_src, q2);
+}
+
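+// Accumulate |src - ref| for one 16-byte chunk into eight 16-bit lanes.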
+static INLINE void sad_row16_neon(uint16x8_t *vec_src, const uint8x16_t *q0,
+                                  const uint8_t *ref_ptr) {
+  uint8x16_t q1 = vld1q_u8(ref_ptr);
+  uint8x16_t q2 = vabdq_u8(*q0, q1);
+  *vec_src = vpadalq_u8(*vec_src, q2);
+}
+
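+// Compute the SAD of one source block against four reference blocks at
+// once, dispatching on block width. The 16-bit accumulators are reduced
+// every row (or row pair for width 4), so they cannot overflow.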
+void aom_sadMxNx4d_neon(int width, int height, const uint8_t *src,
+                        int src_stride, const uint8_t *const ref[4],
+                        int ref_stride, uint32_t res[4]) {
+  const uint8_t *ref0, *ref1, *ref2, *ref3;
+
+  ref0 = ref[0];
+  ref1 = ref[1];
+  ref2 = ref[2];
+  ref3 = ref[3];
+
+  res[0] = 0;
+  res[1] = 0;
+  res[2] = 0;
+  res[3] = 0;
+
+  switch (width) {
+    case 4: {
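+      // memcpy gives safe unaligned 4-byte loads; two rows are packed into
+      // each 8-byte vector before computing the absolute differences.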
+      uint32_t src4, ref40, ref41, ref42, ref43;
+      uint32x2_t q8 = vdup_n_u32(0);
+      uint32x2_t q4 = vdup_n_u32(0);
+      uint32x2_t q5 = vdup_n_u32(0);
+      uint32x2_t q6 = vdup_n_u32(0);
+      uint32x2_t q7 = vdup_n_u32(0);
+
+      for (int i = 0; i < height / 2; i++) {
+        uint16x4_t q0 = vdup_n_u16(0);
+        uint16x4_t q1 = vdup_n_u16(0);
+        uint16x4_t q2 = vdup_n_u16(0);
+        uint16x4_t q3 = vdup_n_u16(0);
+
+        memcpy(&src4, src, 4);
+        memcpy(&ref40, ref0, 4);
+        memcpy(&ref41, ref1, 4);
+        memcpy(&ref42, ref2, 4);
+        memcpy(&ref43, ref3, 4);
+
+        src += src_stride;
+        ref0 += ref_stride;
+        ref1 += ref_stride;
+        ref2 += ref_stride;
+        ref3 += ref_stride;
+
+        q8 = vset_lane_u32(src4, q8, 0);
+        q4 = vset_lane_u32(ref40, q4, 0);
+        q5 = vset_lane_u32(ref41, q5, 0);
+        q6 = vset_lane_u32(ref42, q6, 0);
+        q7 = vset_lane_u32(ref43, q7, 0);
+
+        memcpy(&src4, src, 4);
+        memcpy(&ref40, ref0, 4);
+        memcpy(&ref41, ref1, 4);
+        memcpy(&ref42, ref2, 4);
+        memcpy(&ref43, ref3, 4);
+
+        src += src_stride;
+        ref0 += ref_stride;
+        ref1 += ref_stride;
+        ref2 += ref_stride;
+        ref3 += ref_stride;
+
+        q8 = vset_lane_u32(src4, q8, 1);
+        q4 = vset_lane_u32(ref40, q4, 1);
+        q5 = vset_lane_u32(ref41, q5, 1);
+        q6 = vset_lane_u32(ref42, q6, 1);
+        q7 = vset_lane_u32(ref43, q7, 1);
+
+        sad_row4_neon(&q0, vreinterpret_u8_u32(q8), vreinterpret_u8_u32(q4));
+        sad_row4_neon(&q1, vreinterpret_u8_u32(q8), vreinterpret_u8_u32(q5));
+        sad_row4_neon(&q2, vreinterpret_u8_u32(q8), vreinterpret_u8_u32(q6));
+        sad_row4_neon(&q3, vreinterpret_u8_u32(q8), vreinterpret_u8_u32(q7));
+
+        res[0] += horizontal_add_16x4(q0);
+        res[1] += horizontal_add_16x4(q1);
+        res[2] += horizontal_add_16x4(q2);
+        res[3] += horizontal_add_16x4(q3);
+      }
+      break;
+    }
+    case 8: {
+      for (int i = 0; i < height; i++) {
+        uint16x4_t q0 = vdup_n_u16(0);
+        uint16x4_t q1 = vdup_n_u16(0);
+        uint16x4_t q2 = vdup_n_u16(0);
+        uint16x4_t q3 = vdup_n_u16(0);
+
+        uint8x8_t q5 = vld1_u8(src);
+
+        sad_row8_neon(&q0, &q5, ref0);
+        sad_row8_neon(&q1, &q5, ref1);
+        sad_row8_neon(&q2, &q5, ref2);
+        sad_row8_neon(&q3, &q5, ref3);
+
+        src += src_stride;
+        ref0 += ref_stride;
+        ref1 += ref_stride;
+        ref2 += ref_stride;
+        ref3 += ref_stride;
+
+        res[0] += horizontal_add_16x4(q0);
+        res[1] += horizontal_add_16x4(q1);
+        res[2] += horizontal_add_16x4(q2);
+        res[3] += horizontal_add_16x4(q3);
+      }
+      break;
+    }
+    case 16: {
+      for (int i = 0; i < height; i++) {
+        uint16x8_t q0 = vdupq_n_u16(0);
+        uint16x8_t q1 = vdupq_n_u16(0);
+        uint16x8_t q2 = vdupq_n_u16(0);
+        uint16x8_t q3 = vdupq_n_u16(0);
+
+        uint8x16_t q4 = vld1q_u8(src);
+
+        sad_row16_neon(&q0, &q4, ref0);
+        sad_row16_neon(&q1, &q4, ref1);
+        sad_row16_neon(&q2, &q4, ref2);
+        sad_row16_neon(&q3, &q4, ref3);
+
+        src += src_stride;
+        ref0 += ref_stride;
+        ref1 += ref_stride;
+        ref2 += ref_stride;
+        ref3 += ref_stride;
+
+        res[0] += horizontal_add_16x8(q0);
+        res[1] += horizontal_add_16x8(q1);
+        res[2] += horizontal_add_16x8(q2);
+        res[3] += horizontal_add_16x8(q3);
+      }
+      break;
+    }
+    case 32: {
+      for (int i = 0; i < height; i++) {
+        uint16x8_t q0 = vdupq_n_u16(0);
+        uint16x8_t q1 = vdupq_n_u16(0);
+        uint16x8_t q2 = vdupq_n_u16(0);
+        uint16x8_t q3 = vdupq_n_u16(0);
+
+        uint8x16_t q4 = vld1q_u8(src);
+
+        sad_row16_neon(&q0, &q4, ref0);
+        sad_row16_neon(&q1, &q4, ref1);
+        sad_row16_neon(&q2, &q4, ref2);
+        sad_row16_neon(&q3, &q4, ref3);
+
+        q4 = vld1q_u8(src + 16);
+
+        sad_row16_neon(&q0, &q4, ref0 + 16);
+        sad_row16_neon(&q1, &q4, ref1 + 16);
+        sad_row16_neon(&q2, &q4, ref2 + 16);
+        sad_row16_neon(&q3, &q4, ref3 + 16);
+
+        src += src_stride;
+        ref0 += ref_stride;
+        ref1 += ref_stride;
+        ref2 += ref_stride;
+        ref3 += ref_stride;
+
+        res[0] += horizontal_add_16x8(q0);
+        res[1] += horizontal_add_16x8(q1);
+        res[2] += horizontal_add_16x8(q2);
+        res[3] += horizontal_add_16x8(q3);
+      }
+      break;
+    }
+    case 64: {
+      for (int i = 0; i < height; i++) {
+        uint16x8_t q0 = vdupq_n_u16(0);
+        uint16x8_t q1 = vdupq_n_u16(0);
+        uint16x8_t q2 = vdupq_n_u16(0);
+        uint16x8_t q3 = vdupq_n_u16(0);
+
+        uint8x16_t q4 = vld1q_u8(src);
+
+        sad_row16_neon(&q0, &q4, ref0);
+        sad_row16_neon(&q1, &q4, ref1);
+        sad_row16_neon(&q2, &q4, ref2);
+        sad_row16_neon(&q3, &q4, ref3);
+
+        q4 = vld1q_u8(src + 16);
+
+        sad_row16_neon(&q0, &q4, ref0 + 16);
+        sad_row16_neon(&q1, &q4, ref1 + 16);
+        sad_row16_neon(&q2, &q4, ref2 + 16);
+        sad_row16_neon(&q3, &q4, ref3 + 16);
+
+        q4 = vld1q_u8(src + 32);
+
+        sad_row16_neon(&q0, &q4, ref0 + 32);
+        sad_row16_neon(&q1, &q4, ref1 + 32);
+        sad_row16_neon(&q2, &q4, ref2 + 32);
+        sad_row16_neon(&q3, &q4, ref3 + 32);
+
+        q4 = vld1q_u8(src + 48);
+
+        sad_row16_neon(&q0, &q4, ref0 + 48);
+        sad_row16_neon(&q1, &q4, ref1 + 48);
+        sad_row16_neon(&q2, &q4, ref2 + 48);
+        sad_row16_neon(&q3, &q4, ref3 + 48);
+
+        src += src_stride;
+        ref0 += ref_stride;
+        ref1 += ref_stride;
+        ref2 += ref_stride;
+        ref3 += ref_stride;
+
+        res[0] += horizontal_add_16x8(q0);
+        res[1] += horizontal_add_16x8(q1);
+        res[2] += horizontal_add_16x8(q2);
+        res[3] += horizontal_add_16x8(q3);
+      }
+      break;
+    }
+    case 128: {
+      for (int i = 0; i < height; i++) {
+        uint16x8_t q0 = vdupq_n_u16(0);
+        uint16x8_t q1 = vdupq_n_u16(0);
+        uint16x8_t q2 = vdupq_n_u16(0);
+        uint16x8_t q3 = vdupq_n_u16(0);
+
+        uint8x16_t q4 = vld1q_u8(src);
+
+        sad_row16_neon(&q0, &q4, ref0);
+        sad_row16_neon(&q1, &q4, ref1);
+        sad_row16_neon(&q2, &q4, ref2);
+        sad_row16_neon(&q3, &q4, ref3);
+
+        q4 = vld1q_u8(src + 16);
+
+        sad_row16_neon(&q0, &q4, ref0 + 16);
+        sad_row16_neon(&q1, &q4, ref1 + 16);
+        sad_row16_neon(&q2, &q4, ref2 + 16);
+        sad_row16_neon(&q3, &q4, ref3 + 16);
+
+        q4 = vld1q_u8(src + 32);
+
+        sad_row16_neon(&q0, &q4, ref0 + 32);
+        sad_row16_neon(&q1, &q4, ref1 + 32);
+        sad_row16_neon(&q2, &q4, ref2 + 32);
+        sad_row16_neon(&q3, &q4, ref3 + 32);
+
+        q4 = vld1q_u8(src + 48);
+
+        sad_row16_neon(&q0, &q4, ref0 + 48);
+        sad_row16_neon(&q1, &q4, ref1 + 48);
+        sad_row16_neon(&q2, &q4, ref2 + 48);
+        sad_row16_neon(&q3, &q4, ref3 + 48);
+
+        q4 = vld1q_u8(src + 64);
+
+        sad_row16_neon(&q0, &q4, ref0 + 64);
+        sad_row16_neon(&q1, &q4, ref1 + 64);
+        sad_row16_neon(&q2, &q4, ref2 + 64);
+        sad_row16_neon(&q3, &q4, ref3 + 64);
+
+        q4 = vld1q_u8(src + 80);
+
+        sad_row16_neon(&q0, &q4, ref0 + 80);
+        sad_row16_neon(&q1, &q4, ref1 + 80);
+        sad_row16_neon(&q2, &q4, ref2 + 80);
+        sad_row16_neon(&q3, &q4, ref3 + 80);
+
+        q4 = vld1q_u8(src + 96);
+
+        sad_row16_neon(&q0, &q4, ref0 + 96);
+        sad_row16_neon(&q1, &q4, ref1 + 96);
+        sad_row16_neon(&q2, &q4, ref2 + 96);
+        sad_row16_neon(&q3, &q4, ref3 + 96);
+
+        q4 = vld1q_u8(src + 112);
+
+        sad_row16_neon(&q0, &q4, ref0 + 112);
+        sad_row16_neon(&q1, &q4, ref1 + 112);
+        sad_row16_neon(&q2, &q4, ref2 + 112);
+        sad_row16_neon(&q3, &q4, ref3 + 112);
+
+        src += src_stride;
+        ref0 += ref_stride;
+        ref1 += ref_stride;
+        ref2 += ref_stride;
+        ref3 += ref_stride;
+
+        res[0] += horizontal_add_16x8(q0);
+        res[1] += horizontal_add_16x8(q1);
+        res[2] += horizontal_add_16x8(q2);
+        res[3] += horizontal_add_16x8(q3);
+      }
+      break;
+    }
+  }
+}
+
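+// Skip-mode 4D SAD: sample every other row by doubling the strides and
+// halving the height, then double the partial SADs to approximate the
+// full-block result.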
+#define sad_skip_MxN_neon(m, n)                                             \
+  void aom_sad_skip_##m##x##n##x4d_neon(const uint8_t *src, int src_stride, \
+                                        const uint8_t *const ref[4],        \
+                                        int ref_stride, uint32_t res[4]) {  \
+    aom_sadMxNx4d_neon(m, ((n) >> 1), src, 2 * src_stride, ref,             \
+                       2 * ref_stride, res);                                \
+    res[0] <<= 1;                                                           \
+    res[1] <<= 1;                                                           \
+    res[2] <<= 1;                                                           \
+    res[3] <<= 1;                                                           \
+  }
+
+sad_skip_MxN_neon(4, 8);
+sad_skip_MxN_neon(4, 16);
+sad_skip_MxN_neon(4, 32);
+
+sad_skip_MxN_neon(8, 8);
+sad_skip_MxN_neon(8, 16);
+sad_skip_MxN_neon(8, 32);
+
+sad_skip_MxN_neon(16, 8);
+sad_skip_MxN_neon(16, 16);
+sad_skip_MxN_neon(16, 32);
+sad_skip_MxN_neon(16, 64);
+
+sad_skip_MxN_neon(32, 8);
+sad_skip_MxN_neon(32, 16);
+sad_skip_MxN_neon(32, 32);
+sad_skip_MxN_neon(32, 64);
+
+sad_skip_MxN_neon(64, 16);
+sad_skip_MxN_neon(64, 32);
+sad_skip_MxN_neon(64, 64);
+sad_skip_MxN_neon(64, 128);
+
+sad_skip_MxN_neon(128, 64);
+sad_skip_MxN_neon(128, 128);
+#undef sad_skip_MxN_neon
diff --git a/aom_dsp/arm/sad_neon.c b/aom_dsp/arm/sad_neon.c
index 18581f2..17d9994 100644
--- a/aom_dsp/arm/sad_neon.c
+++ b/aom_dsp/arm/sad_neon.c
@@ -10,13 +10,12 @@
  */
 
 #include <arm_neon.h>
-
 #include "config/aom_config.h"
-
+#include "config/aom_dsp_rtcd.h"
 #include "aom/aom_integer.h"
 
-unsigned int aom_sad8x16_neon(unsigned char *src_ptr, int src_stride,
-                              unsigned char *ref_ptr, int ref_stride) {
+unsigned int aom_sad8x16_neon(const uint8_t *src_ptr, int src_stride,
+                              const uint8_t *ref_ptr, int ref_stride) {
   uint8x8_t d0, d8;
   uint16x8_t q12;
   uint32x4_t q1;
@@ -46,8 +45,8 @@
   return vget_lane_u32(d5, 0);
 }
 
-unsigned int aom_sad4x4_neon(unsigned char *src_ptr, int src_stride,
-                             unsigned char *ref_ptr, int ref_stride) {
+unsigned int aom_sad4x4_neon(const uint8_t *src_ptr, int src_stride,
+                             const uint8_t *ref_ptr, int ref_stride) {
   uint8x8_t d0, d8;
   uint16x8_t q12;
   uint32x2_t d1;
@@ -74,8 +73,8 @@
   return vget_lane_u32(vreinterpret_u32_u64(d3), 0);
 }
 
-unsigned int aom_sad16x8_neon(unsigned char *src_ptr, int src_stride,
-                              unsigned char *ref_ptr, int ref_stride) {
+unsigned int aom_sad16x8_neon(const uint8_t *src_ptr, int src_stride,
+                              const uint8_t *ref_ptr, int ref_stride) {
   uint8x16_t q0, q4;
   uint16x8_t q12, q13;
   uint32x4_t q1;
@@ -293,3 +292,262 @@
   }
   return horizontal_add_16x8(vec_accum);
 }
+
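+// Row-wise SAD helpers for wide blocks. Each iteration processes one row in
+// 16-byte chunks and reduces the 16-bit accumulator per row, so the lanes
+// cannot overflow.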
+static INLINE unsigned int sad128xh_neon(const uint8_t *src_ptr, int src_stride,
+                                         const uint8_t *ref_ptr, int ref_stride,
+                                         int h) {
+  unsigned int sum = 0;
+  for (int i = 0; i < h; i++) {
+    uint16x8_t q3 = vdupq_n_u16(0);
+
+    uint8x16_t q0 = vld1q_u8(src_ptr);
+    uint8x16_t q1 = vld1q_u8(ref_ptr);
+    uint8x16_t q2 = vabdq_u8(q0, q1);
+    q3 = vpadalq_u8(q3, q2);
+
+    q0 = vld1q_u8(src_ptr + 16);
+    q1 = vld1q_u8(ref_ptr + 16);
+    q2 = vabdq_u8(q0, q1);
+    q3 = vpadalq_u8(q3, q2);
+
+    q0 = vld1q_u8(src_ptr + 32);
+    q1 = vld1q_u8(ref_ptr + 32);
+    q2 = vabdq_u8(q0, q1);
+    q3 = vpadalq_u8(q3, q2);
+
+    q0 = vld1q_u8(src_ptr + 48);
+    q1 = vld1q_u8(ref_ptr + 48);
+    q2 = vabdq_u8(q0, q1);
+    q3 = vpadalq_u8(q3, q2);
+
+    q0 = vld1q_u8(src_ptr + 64);
+    q1 = vld1q_u8(ref_ptr + 64);
+    q2 = vabdq_u8(q0, q1);
+    q3 = vpadalq_u8(q3, q2);
+
+    q0 = vld1q_u8(src_ptr + 80);
+    q1 = vld1q_u8(ref_ptr + 80);
+    q2 = vabdq_u8(q0, q1);
+    q3 = vpadalq_u8(q3, q2);
+
+    q0 = vld1q_u8(src_ptr + 96);
+    q1 = vld1q_u8(ref_ptr + 96);
+    q2 = vabdq_u8(q0, q1);
+    q3 = vpadalq_u8(q3, q2);
+
+    q0 = vld1q_u8(src_ptr + 112);
+    q1 = vld1q_u8(ref_ptr + 112);
+    q2 = vabdq_u8(q0, q1);
+    q3 = vpadalq_u8(q3, q2);
+
+    src_ptr += src_stride;
+    ref_ptr += ref_stride;
+
+    sum += horizontal_add_16x8(q3);
+  }
+
+  return sum;
+}
+
+static INLINE unsigned int sad64xh_neon(const uint8_t *src_ptr, int src_stride,
+                                        const uint8_t *ref_ptr, int ref_stride,
+                                        int h) {
+  unsigned int sum = 0;
+  for (int i = 0; i < h; i++) {
+    uint16x8_t q3 = vdupq_n_u16(0);
+
+    uint8x16_t q0 = vld1q_u8(src_ptr);
+    uint8x16_t q1 = vld1q_u8(ref_ptr);
+    uint8x16_t q2 = vabdq_u8(q0, q1);
+    q3 = vpadalq_u8(q3, q2);
+
+    q0 = vld1q_u8(src_ptr + 16);
+    q1 = vld1q_u8(ref_ptr + 16);
+    q2 = vabdq_u8(q0, q1);
+    q3 = vpadalq_u8(q3, q2);
+
+    q0 = vld1q_u8(src_ptr + 32);
+    q1 = vld1q_u8(ref_ptr + 32);
+    q2 = vabdq_u8(q0, q1);
+    q3 = vpadalq_u8(q3, q2);
+
+    q0 = vld1q_u8(src_ptr + 48);
+    q1 = vld1q_u8(ref_ptr + 48);
+    q2 = vabdq_u8(q0, q1);
+    q3 = vpadalq_u8(q3, q2);
+
+    src_ptr += src_stride;
+    ref_ptr += ref_stride;
+
+    sum += horizontal_add_16x8(q3);
+  }
+
+  return sum;
+}
+
+static INLINE unsigned int sad32xh_neon(const uint8_t *src_ptr, int src_stride,
+                                        const uint8_t *ref_ptr, int ref_stride,
+                                        int h) {
+  unsigned int sum = 0;
+  for (int i = 0; i < h; i++) {
+    uint16x8_t q3 = vdupq_n_u16(0);
+
+    uint8x16_t q0 = vld1q_u8(src_ptr);
+    uint8x16_t q1 = vld1q_u8(ref_ptr);
+    uint8x16_t q2 = vabdq_u8(q0, q1);
+    q3 = vpadalq_u8(q3, q2);
+
+    q0 = vld1q_u8(src_ptr + 16);
+    q1 = vld1q_u8(ref_ptr + 16);
+    q2 = vabdq_u8(q0, q1);
+    q3 = vpadalq_u8(q3, q2);
+
+    sum += horizontal_add_16x8(q3);
+
+    src_ptr += src_stride;
+    ref_ptr += ref_stride;
+  }
+
+  return sum;
+}
+
+static INLINE unsigned int sad16xh_neon(const uint8_t *src_ptr, int src_stride,
+                                        const uint8_t *ref_ptr, int ref_stride,
+                                        int h) {
+  uint16x8_t q3 = vdupq_n_u16(0);
+  for (int i = 0; i < h; i++) {
+    uint8x16_t q0 = vld1q_u8(src_ptr);
+    uint8x16_t q1 = vld1q_u8(ref_ptr);
+    // Widening absolute-difference accumulate; 16 bytes per row fit in the
+    // 16-bit lanes for all block heights used here.
+    q3 = vpadalq_u8(q3, vabdq_u8(q0, q1));
+
+    src_ptr += src_stride;
+    ref_ptr += ref_stride;
+  }
+
+  return horizontal_add_16x8(q3);
+}
+
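+// For 8- and 4-wide blocks the whole-height accumulation fits in 16-bit
+// lanes, so a single reduction at the end suffices.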
+static INLINE unsigned int sad8xh_neon(const uint8_t *src_ptr, int src_stride,
+                                       const uint8_t *ref_ptr, int ref_stride,
+                                       int h) {
+  uint16x8_t q3 = vdupq_n_u16(0);
+  for (int y = 0; y < h; y++) {
+    uint8x8_t q0 = vld1_u8(src_ptr);
+    uint8x8_t q1 = vld1_u8(ref_ptr);
+    src_ptr += src_stride;
+    ref_ptr += ref_stride;
+    q3 = vabal_u8(q3, q0, q1);
+  }
+  return horizontal_add_16x8(q3);
+}
+
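+// memcpy gives safe unaligned 4-byte loads; two rows are packed into each
+// 8-byte vector before the widening absolute-difference accumulate.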
+static INLINE unsigned int sad4xh_neon(const uint8_t *src_ptr, int src_stride,
+                                       const uint8_t *ref_ptr, int ref_stride,
+                                       int h) {
+  uint16x8_t q3 = vdupq_n_u16(0);
+  uint32x2_t q0 = vdup_n_u32(0);
+  uint32x2_t q1 = vdup_n_u32(0);
+  uint32_t src4, ref4;
+  for (int y = 0; y < h / 2; y++) {
+    memcpy(&src4, src_ptr, 4);
+    memcpy(&ref4, ref_ptr, 4);
+    src_ptr += src_stride;
+    ref_ptr += ref_stride;
+    q0 = vset_lane_u32(src4, q0, 0);
+    q1 = vset_lane_u32(ref4, q1, 0);
+
+    memcpy(&src4, src_ptr, 4);
+    memcpy(&ref4, ref_ptr, 4);
+    src_ptr += src_stride;
+    ref_ptr += ref_stride;
+    q0 = vset_lane_u32(src4, q0, 1);
+    q1 = vset_lane_u32(ref4, q1, 1);
+
+    q3 = vabal_u8(q3, vreinterpret_u8_u32(q0), vreinterpret_u8_u32(q1));
+  }
+  return horizontal_add_16x8(q3);
+}
+
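+// Skip-mode wrappers: double the strides, halve the height and double the
+// resulting SAD.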
+#define FSADS128_H(h)                                                    \
+  unsigned int aom_sad_skip_128x##h##_neon(                              \
+      const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr,    \
+      int ref_stride) {                                                  \
+    const uint32_t sum = sad128xh_neon(src_ptr, 2 * src_stride, ref_ptr, \
+                                       2 * ref_stride, h / 2);           \
+    return 2 * sum;                                                      \
+  }
+FSADS128_H(128);
+FSADS128_H(64);
+#undef FSADS128_H
+
+#define FSADS64_H(h)                                                          \
+  unsigned int aom_sad_skip_64x##h##_neon(                                    \
+      const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr,         \
+      int ref_stride) {                                                       \
+    return 2 * sad64xh_neon(src_ptr, src_stride * 2, ref_ptr, ref_stride * 2, \
+                            h / 2);                                           \
+  }
+
+FSADS64_H(128);
+FSADS64_H(64);
+FSADS64_H(32);
+FSADS64_H(16);
+#undef FSADS64_H
+
+#define FSADS32_H(h)                                                          \
+  unsigned int aom_sad_skip_32x##h##_neon(                                    \
+      const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr,         \
+      int ref_stride) {                                                       \
+    return 2 * sad32xh_neon(src_ptr, src_stride * 2, ref_ptr, ref_stride * 2, \
+                            h / 2);                                           \
+  }
+
+FSADS32_H(64);
+FSADS32_H(32);
+FSADS32_H(16);
+FSADS32_H(8);
+#undef FSADS32_H
+
+#define FSADS16_H(h)                                                          \
+  unsigned int aom_sad_skip_16x##h##_neon(                                    \
+      const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr,         \
+      int ref_stride) {                                                       \
+    return 2 * sad16xh_neon(src_ptr, src_stride * 2, ref_ptr, ref_stride * 2, \
+                            h / 2);                                           \
+  }
+
+FSADS16_H(64);
+FSADS16_H(32);
+FSADS16_H(16);
+FSADS16_H(8);
+#undef FSADS16_H
+
+#define FSADS8_H(h)                                                          \
+  unsigned int aom_sad_skip_8x##h##_neon(                                    \
+      const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr,        \
+      int ref_stride) {                                                      \
+    return 2 * sad8xh_neon(src_ptr, src_stride * 2, ref_ptr, ref_stride * 2, \
+                           h / 2);                                           \
+  }
+
+FSADS8_H(32);
+FSADS8_H(16);
+FSADS8_H(8);
+#undef FSADS8_H
+
+#define FSADS4_H(h)                                                          \
+  unsigned int aom_sad_skip_4x##h##_neon(                                    \
+      const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr,        \
+      int ref_stride) {                                                      \
+    return 2 * sad4xh_neon(src_ptr, src_stride * 2, ref_ptr, ref_stride * 2, \
+                           h / 2);                                           \
+  }
+
+FSADS4_H(16);
+FSADS4_H(8);
+#undef FSADS4_H
diff --git a/test/sad_test.cc b/test/sad_test.cc
index fdcf49f..e561859 100644
--- a/test/sad_test.cc
+++ b/test/sad_test.cc
@@ -1780,6 +1780,53 @@
   make_tuple(16, 16, &aom_sad16x16x4d_neon, -1),
 };
 INSTANTIATE_TEST_SUITE_P(NEON, SADx4Test, ::testing::ValuesIn(x4d_neon_tests));
+const SadSkipMxNParam skip_neon_tests[] = {
+  make_tuple(128, 128, &aom_sad_skip_128x128_neon, -1),
+  make_tuple(128, 64, &aom_sad_skip_128x64_neon, -1),
+  make_tuple(64, 128, &aom_sad_skip_64x128_neon, -1),
+  make_tuple(64, 64, &aom_sad_skip_64x64_neon, -1),
+  make_tuple(64, 32, &aom_sad_skip_64x32_neon, -1),
+  make_tuple(64, 16, &aom_sad_skip_64x16_neon, -1),
+  make_tuple(32, 64, &aom_sad_skip_32x64_neon, -1),
+  make_tuple(32, 32, &aom_sad_skip_32x32_neon, -1),
+  make_tuple(32, 16, &aom_sad_skip_32x16_neon, -1),
+  make_tuple(32, 8, &aom_sad_skip_32x8_neon, -1),
+  make_tuple(16, 64, &aom_sad_skip_16x64_neon, -1),
+  make_tuple(16, 32, &aom_sad_skip_16x32_neon, -1),
+  make_tuple(16, 16, &aom_sad_skip_16x16_neon, -1),
+  make_tuple(16, 8, &aom_sad_skip_16x8_neon, -1),
+  make_tuple(8, 32, &aom_sad_skip_8x32_neon, -1),
+  make_tuple(8, 16, &aom_sad_skip_8x16_neon, -1),
+  make_tuple(8, 8, &aom_sad_skip_8x8_neon, -1),
+  make_tuple(4, 16, &aom_sad_skip_4x16_neon, -1),
+  make_tuple(4, 8, &aom_sad_skip_4x8_neon, -1),
+};
+INSTANTIATE_TEST_SUITE_P(NEON, SADSkipTest,
+                         ::testing::ValuesIn(skip_neon_tests));
+
+const SadSkipMxNx4Param skip_x4d_neon_tests[] = {
+  make_tuple(128, 128, &aom_sad_skip_128x128x4d_neon, -1),
+  make_tuple(128, 64, &aom_sad_skip_128x64x4d_neon, -1),
+  make_tuple(64, 128, &aom_sad_skip_64x128x4d_neon, -1),
+  make_tuple(64, 64, &aom_sad_skip_64x64x4d_neon, -1),
+  make_tuple(64, 32, &aom_sad_skip_64x32x4d_neon, -1),
+  make_tuple(64, 16, &aom_sad_skip_64x16x4d_neon, -1),
+  make_tuple(32, 64, &aom_sad_skip_32x64x4d_neon, -1),
+  make_tuple(32, 32, &aom_sad_skip_32x32x4d_neon, -1),
+  make_tuple(32, 16, &aom_sad_skip_32x16x4d_neon, -1),
+  make_tuple(32, 8, &aom_sad_skip_32x8x4d_neon, -1),
+  make_tuple(16, 64, &aom_sad_skip_16x64x4d_neon, -1),
+  make_tuple(16, 32, &aom_sad_skip_16x32x4d_neon, -1),
+  make_tuple(16, 16, &aom_sad_skip_16x16x4d_neon, -1),
+  make_tuple(16, 8, &aom_sad_skip_16x8x4d_neon, -1),
+  make_tuple(8, 8, &aom_sad_skip_8x8x4d_neon, -1),
+  make_tuple(8, 16, &aom_sad_skip_8x16x4d_neon, -1),
+  make_tuple(8, 32, &aom_sad_skip_8x32x4d_neon, -1),
+  make_tuple(4, 8, &aom_sad_skip_4x8x4d_neon, -1),
+  make_tuple(4, 16, &aom_sad_skip_4x16x4d_neon, -1),
+};
+INSTANTIATE_TEST_SUITE_P(NEON, SADSkipx4Test,
+                         ::testing::ValuesIn(skip_x4d_neon_tests));
 #endif  // HAVE_NEON
 
 //------------------------------------------------------------------------------