Add NEON versions of a few quantize functions

Speedup:
av1_quantize_fp_32x32_c vs av1_quantize_fp_32x32_neon: avg gain 4.4x
av1_quantize_fp_64x64_c vs av1_quantize_fp_64x64_neon: avg gain 3.7x
aom_quantize_b_c vs aom_quantize_b_neon: avg gain 5.2x
aom_quantize_b_32x32_c vs aom_quantize_b_32x32_neon (via
aom_quantize_b_helper_neon): avg gain 5.0x
aom_quantize_b_64x64_c vs aom_quantize_b_64x64_neon (via
aom_quantize_b_helper_neon): avg gain 4.2x

Tested via NEON/QuantizeTest.DISABLED_Speed
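
The speed test can be run with, e.g. (assuming the standard libaom
test binary and stock GoogleTest flags):
  ./test_libaom --gtest_also_run_disabled_tests \
      --gtest_filter="NEON/QuantizeTest.DISABLED_Speed*"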

Change-Id: I829578b6a11c0bac69d4514d54c1652eb50144f3
diff --git a/aom_dsp/aom_dsp_rtcd_defs.pl b/aom_dsp/aom_dsp_rtcd_defs.pl
index b7d5a41..2481f2f 100755
--- a/aom_dsp/aom_dsp_rtcd_defs.pl
+++ b/aom_dsp/aom_dsp_rtcd_defs.pl
@@ -529,19 +529,19 @@
 #
 if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
   add_proto qw/void aom_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-  specialize qw/aom_quantize_b sse2/, "$ssse3_x86_64", "$avx_x86_64";
+  specialize qw/aom_quantize_b sse2 neon/, "$ssse3_x86_64", "$avx_x86_64";
 
   add_proto qw/void aom_quantize_b_adaptive/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
   specialize qw/aom_quantize_b_adaptive sse2 avx2/;
 
   add_proto qw/void aom_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-  specialize qw/aom_quantize_b_32x32/, "$ssse3_x86_64", "$avx_x86_64";
+  specialize qw/aom_quantize_b_32x32 neon/, "$ssse3_x86_64", "$avx_x86_64";
 
   add_proto qw/void aom_quantize_b_32x32_adaptive/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
   specialize qw/aom_quantize_b_32x32_adaptive sse2/;
 
   add_proto qw/void aom_quantize_b_64x64/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-  specialize qw/aom_quantize_b_64x64 ssse3/;
+  specialize qw/aom_quantize_b_64x64 neon ssse3/;
 
   add_proto qw/void aom_quantize_b_64x64_adaptive/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
   specialize qw/aom_quantize_b_64x64_adaptive sse2/;
diff --git a/av1/common/av1_rtcd_defs.pl b/av1/common/av1_rtcd_defs.pl
index 296c6c5..760f667 100644
--- a/av1/common/av1_rtcd_defs.pl
+++ b/av1/common/av1_rtcd_defs.pl
@@ -225,10 +225,13 @@
 
 
   add_proto qw/void av1_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-  specialize qw/av1_quantize_fp_32x32 avx2/;
+  specialize qw/av1_quantize_fp_32x32 neon avx2/;
 
   add_proto qw/void av1_quantize_fp_64x64/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-  specialize qw/av1_quantize_fp_64x64 avx2/;
+  specialize qw/av1_quantize_fp_64x64 neon avx2/;
+
+  add_proto qw/void aom_quantize_b_helper/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr,	const int log_scale";
+  specialize qw/aom_quantize_b_helper neon/;
 
   # fdct functions
 
diff --git a/av1/encoder/arm/neon/quantize_neon.c b/av1/encoder/arm/neon/quantize_neon.c
index c2f50a2..0a8bf1a 100644
--- a/av1/encoder/arm/neon/quantize_neon.c
+++ b/av1/encoder/arm/neon/quantize_neon.c
@@ -213,3 +213,892 @@
   }
 #endif  // __aarch64__
 }
+
+void av1_quantize_fp_32x32_neon(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                                const int16_t *zbin_ptr,
+                                const int16_t *round_ptr,
+                                const int16_t *quant_ptr,
+                                const int16_t *quant_shift_ptr,
+                                tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                                const int16_t *dequant_ptr, uint16_t *eob_ptr,
+                                const int16_t *scan, const int16_t *iscan) {
+  const int log_scale = 1;
+  const int rounding[2] = { ROUND_POWER_OF_TWO(round_ptr[0], log_scale),
+                            ROUND_POWER_OF_TWO(round_ptr[1], log_scale) };
+
+  (void)zbin_ptr;
+  (void)quant_shift_ptr;
+  (void)scan;
+
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+
+  const int16x8_t zero = vdupq_n_s16(0);
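+  // vceqq of a value with itself gives all ones, i.e. -1 in every s16 lane;
+  // the running eob max therefore starts at -1 (no nonzero coeff found yet).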
+  int16x8_t v_eobmax_76543210 = vreinterpretq_s16_u16(vceqq_s16(zero, zero));
+  int16x8_t round = vdupq_n_s16(rounding[1]);
+  int16x8_t quant = vdupq_n_s16(quant_ptr[1]);
+  int16x8_t dequant = vdupq_n_s16(dequant_ptr[1]);
+  dequant = vsetq_lane_s16(dequant_ptr[0], dequant, 0);
+
+  int16x8_t coeff = load_tran_low_to_s16q(&coeff_ptr[0]);
+
+  int16x8_t abs = vabsq_s16(coeff);
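+  // Skip lanes where |coeff| < dequant / 4: at log_scale = 1 such
+  // coefficients always quantize to zero.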
+  uint16x8_t check = vcgeq_s16(abs, vshrq_n_s16(dequant, 2));
+  uint64_t nz_check = vget_lane_u64(vreinterpret_u64_u8(vmovn_u16(check)), 0);
+  if (nz_check) {
+    const int16x8_t coeff_sign = vshrq_n_s16(coeff, 15);
+    const int16x8_t v_iscan = vld1q_s16(&iscan[0]);
+    round = vsetq_lane_s16(rounding[0], round, 0);
+    quant = vsetq_lane_s16(quant_ptr[0], quant, 0);
+
+    abs = vqaddq_s16(abs, round);
+    int16x8_t temp = vqdmulhq_s16(abs, quant);
+    int16x8_t qcoeff_temp = vsubq_s16(veorq_s16(temp, coeff_sign), coeff_sign);
+    abs = vreinterpretq_s16_u16(
+        vshrq_n_u16(vreinterpretq_u16_s16(vmulq_s16(temp, dequant)), 1));
+    int16x8_t dqcoeff_temp = vsubq_s16(veorq_s16(abs, coeff_sign), coeff_sign);
+
+    int16x8_t coeff_nz_mask =
+        vbslq_s16(check, qcoeff_temp, load_tran_low_to_s16q(&qcoeff_ptr[0]));
+    store_s16q_to_tran_low(&qcoeff_ptr[0], coeff_nz_mask);
+    coeff_nz_mask =
+        vbslq_s16(check, dqcoeff_temp, load_tran_low_to_s16q(&dqcoeff_ptr[0]));
+    store_s16q_to_tran_low(&dqcoeff_ptr[0], coeff_nz_mask);
+
+    round = vsetq_lane_s16(rounding[1], round, 0);
+    quant = vsetq_lane_s16(quant_ptr[1], quant, 0);
+
+    uint16x8_t vtmp_mask = vcgtq_s16(abs, zero);
+    const uint16x8_t v_nz_mask = vandq_u16(vtmp_mask, check);
+    check = vandq_u16(v_nz_mask, vcgtq_s16(v_iscan, v_eobmax_76543210));
+    v_eobmax_76543210 = vbslq_s16(check, v_iscan, v_eobmax_76543210);
+  }
+
+  dequant = vsetq_lane_s16(dequant_ptr[1], dequant, 0);
+
+  for (int i = 8; i < n_coeffs; i += 8) {
+    coeff = load_tran_low_to_s16q(&coeff_ptr[i]);
+    abs = vabsq_s16(coeff);
+    check = vcgeq_s16(abs, vshrq_n_s16(dequant, 2));
+
+    nz_check = vget_lane_u64(vreinterpret_u64_u8(vmovn_u16(check)), 0);
+    if (nz_check) {
+      const int16x8_t coeff_sign = vshrq_n_s16(coeff, 15);
+      const int16x8_t v_iscan = vld1q_s16(&iscan[i]);
+
+      abs = vqaddq_s16(abs, round);
+      int16x8_t temp = vqdmulhq_s16(abs, quant);
+      int16x8_t qcoeff_temp =
+          vsubq_s16(veorq_s16(temp, coeff_sign), coeff_sign);
+      abs = vreinterpretq_s16_u16(
+          vshrq_n_u16(vreinterpretq_u16_s16(vmulq_s16(temp, dequant)), 1));
+      int16x8_t dqcoeff_temp =
+          vsubq_s16(veorq_s16(abs, coeff_sign), coeff_sign);
+
+      int16x8_t coeff_nz_mask =
+          vbslq_s16(check, qcoeff_temp, load_tran_low_to_s16q(&qcoeff_ptr[i]));
+      store_s16q_to_tran_low(&qcoeff_ptr[i], coeff_nz_mask);
+      coeff_nz_mask = vbslq_s16(check, dqcoeff_temp,
+                                load_tran_low_to_s16q(&dqcoeff_ptr[i]));
+      store_s16q_to_tran_low(&dqcoeff_ptr[i], coeff_nz_mask);
+
+      uint16x8_t vtmp_mask = vcgtq_s16(abs, zero);
+      const uint16x8_t v_nz_mask = vandq_u16(vtmp_mask, check);
+      check = vandq_u16(v_nz_mask, vcgtq_s16(v_iscan, v_eobmax_76543210));
+      v_eobmax_76543210 = vbslq_s16(check, v_iscan, v_eobmax_76543210);
+    }
+  }
+#ifdef __aarch64__
+  *eob_ptr = vmaxvq_s16(v_eobmax_76543210) + 1;
+#else
+  {
+    const int16x4_t v_eobmax_3210 = vmax_s16(vget_low_s16(v_eobmax_76543210),
+                                             vget_high_s16(v_eobmax_76543210));
+    const int64x1_t v_eobmax_xx32 =
+        vshr_n_s64(vreinterpret_s64_s16(v_eobmax_3210), 32);
+    const int16x4_t v_eobmax_tmp =
+        vmax_s16(v_eobmax_3210, vreinterpret_s16_s64(v_eobmax_xx32));
+    const int64x1_t v_eobmax_xxx3 =
+        vshr_n_s64(vreinterpret_s64_s16(v_eobmax_tmp), 16);
+    const int16x4_t v_eobmax_final =
+        vmax_s16(v_eobmax_tmp, vreinterpret_s16_s64(v_eobmax_xxx3));
+
+    *eob_ptr = (uint16_t)vget_lane_s16(v_eobmax_final, 0) + 1;
+  }
+#endif  // __aarch64__
+}
+
+void av1_quantize_fp_64x64_neon(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                                const int16_t *zbin_ptr,
+                                const int16_t *round_ptr,
+                                const int16_t *quant_ptr,
+                                const int16_t *quant_shift_ptr,
+                                tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                                const int16_t *dequant_ptr, uint16_t *eob_ptr,
+                                const int16_t *scan, const int16_t *iscan) {
+  const int log_scale = 2;
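+  // -2 in every s16 lane: vshlq with a negative count shifts right, so this
+  // implements the >> log_scale of the dequantized values.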
+  const int16x8_t v_log_scale =
+      vreinterpretq_s16_s64(vdupq_n_s64(0xFFFEFFFEFFFEFFFE));
+
+  const int rounding[2] = { ROUND_POWER_OF_TWO(round_ptr[0], log_scale),
+                            ROUND_POWER_OF_TWO(round_ptr[1], log_scale) };
+
+  (void)zbin_ptr;
+  (void)quant_shift_ptr;
+  (void)scan;
+
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+
+  const int16x8_t zero = vdupq_n_s16(0);
+  int16x8_t v_eobmax_76543210 = vreinterpretq_s16_u16(vceqq_s16(zero, zero));
+
+  int16x8_t round = vdupq_n_s16(rounding[1]);
+  int16x8_t quant = vdupq_n_s16(quant_ptr[1]);
+  int16x8_t dequant = vdupq_n_s16(dequant_ptr[1]);
+  dequant = vsetq_lane_s16(dequant_ptr[0], dequant, 0);
+
+  int16x8_t coeff = load_tran_low_to_s16q(&coeff_ptr[0]);
+  int16x8_t abs = vabsq_s16(coeff);
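+  // At log_scale = 2 the skip threshold is |coeff| < dequant / 8; compare
+  // 2 * |coeff| against dequant / 4 to keep the arithmetic in 16 bits.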
+  uint16x8_t check = vcgeq_u16(vshlq_n_u16(vreinterpretq_u16_s16(abs), 1),
+                               vshrq_n_u16(vreinterpretq_u16_s16(dequant), 2));
+  uint64_t nz_check = vget_lane_u64(vreinterpret_u64_u8(vmovn_u16(check)), 0);
+  if (nz_check) {
+    const int16x8_t coeff_sign = vshrq_n_s16(coeff, 15);
+    const int16x8_t v_iscan = vld1q_s16(&iscan[0]);
+    round = vsetq_lane_s16(rounding[0], round, 0);
+    quant = vsetq_lane_s16(quant_ptr[0], quant, 0);
+    abs = vqaddq_s16(abs, round);
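+    // (abs * quant) >> 14, assembled from the saturating-doubling high half
+    // (bits 15..30 of the product) and bits 14..15 of the low half.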
+    int16x8_t temp =
+        vorrq_s16(vshlq_n_s16(vqdmulhq_s16(abs, quant), 1),
+                  vreinterpretq_s16_u16(vshrq_n_u16(
+                      vreinterpretq_u16_s16(vmulq_s16(abs, quant)), 14)));
+    int16x8_t qcoeff_temp = vsubq_s16(veorq_s16(temp, coeff_sign), coeff_sign);
+
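+    // Dequantize: (qcoeff * dequant) >> log_scale, again split across the
+    // low half (shifted right by 2) and the high half (shifted left by 13).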
+    abs = vreinterpretq_s16_u16(vshlq_u16(
+        vreinterpretq_u16_s16(vmulq_s16(temp, dequant)), v_log_scale));
+    abs = vorrq_s16(vshlq_n_s16(vqdmulhq_s16(temp, dequant), 13), abs);
+    int16x8_t dqcoeff_temp = vsubq_s16(veorq_s16(abs, coeff_sign), coeff_sign);
+    int16x8_t coeff_nz_mask =
+        vbslq_s16(check, qcoeff_temp, load_tran_low_to_s16q(&qcoeff_ptr[0]));
+    store_s16q_to_tran_low(&qcoeff_ptr[0], coeff_nz_mask);
+    coeff_nz_mask =
+        vbslq_s16(check, dqcoeff_temp, load_tran_low_to_s16q(&dqcoeff_ptr[0]));
+    store_s16q_to_tran_low(&dqcoeff_ptr[0], coeff_nz_mask);
+
+    round = vsetq_lane_s16(rounding[1], round, 0);
+    quant = vsetq_lane_s16(quant_ptr[1], quant, 0);
+
+    uint16x8_t vtmp_mask = vcgtq_s16(abs, zero);
+    const uint16x8_t v_nz_mask = vandq_u16(vtmp_mask, check);
+    check = vandq_u16(v_nz_mask, vcgtq_s16(v_iscan, v_eobmax_76543210));
+    v_eobmax_76543210 = vbslq_s16(check, v_iscan, v_eobmax_76543210);
+  }
+
+  dequant = vsetq_lane_s16(dequant_ptr[1], dequant, 0);
+
+  for (int i = 8; i < n_coeffs; i += 8) {
+    coeff = load_tran_low_to_s16q(&coeff_ptr[i]);
+    abs = vabsq_s16(coeff);
+    check = vcgeq_u16(vshlq_n_u16(vreinterpretq_u16_s16(abs), 1),
+                      vshrq_n_u16(vreinterpretq_u16_s16(dequant), 2));
+    nz_check = vget_lane_u64(vreinterpret_u64_u8(vmovn_u16(check)), 0);
+    if (nz_check) {
+      const int16x8_t coeff_sign = vshrq_n_s16(coeff, 15);
+      const int16x8_t v_iscan = vld1q_s16(&iscan[i]);
+      abs = vqaddq_s16(abs, round);
+      int16x8_t temp =
+          vorrq_s16(vshlq_n_s16(vqdmulhq_s16(abs, quant), 1),
+                    vreinterpretq_s16_u16(vshrq_n_u16(
+                        vreinterpretq_u16_s16(vmulq_s16(abs, quant)), 14)));
+
+      int16x8_t qcoeff_temp =
+          vsubq_s16(veorq_s16(temp, coeff_sign), coeff_sign);
+
+      abs = vreinterpretq_s16_u16(vshlq_u16(
+          vreinterpretq_u16_s16(vmulq_s16(temp, dequant)), v_log_scale));
+      abs = vorrq_s16(vshlq_n_s16(vqdmulhq_s16(temp, dequant), 13), abs);
+
+      int16x8_t dqcoeff_temp =
+          vsubq_s16(veorq_s16(abs, coeff_sign), coeff_sign);
+      int16x8_t coeff_nz_mask =
+          vbslq_s16(check, qcoeff_temp, load_tran_low_to_s16q(&qcoeff_ptr[i]));
+      store_s16q_to_tran_low(&qcoeff_ptr[i], coeff_nz_mask);
+      coeff_nz_mask = vbslq_s16(check, dqcoeff_temp,
+                                load_tran_low_to_s16q(&dqcoeff_ptr[i]));
+      store_s16q_to_tran_low(&dqcoeff_ptr[i], coeff_nz_mask);
+
+      uint16x8_t vtmp_mask = vcgtq_s16(abs, zero);
+      const uint16x8_t v_nz_mask = vandq_u16(vtmp_mask, check);
+
+      check = vandq_u16(v_nz_mask, vcgtq_s16(v_iscan, v_eobmax_76543210));
+      v_eobmax_76543210 = vbslq_s16(check, v_iscan, v_eobmax_76543210);
+    }
+  }
+#ifdef __aarch64__
+  *eob_ptr = vmaxvq_s16(v_eobmax_76543210) + 1;
+#else
+  {
+    const int16x4_t v_eobmax_3210 = vmax_s16(vget_low_s16(v_eobmax_76543210),
+                                             vget_high_s16(v_eobmax_76543210));
+    const int64x1_t v_eobmax_xx32 =
+        vshr_n_s64(vreinterpret_s64_s16(v_eobmax_3210), 32);
+    const int16x4_t v_eobmax_tmp =
+        vmax_s16(v_eobmax_3210, vreinterpret_s16_s64(v_eobmax_xx32));
+    const int64x1_t v_eobmax_xxx3 =
+        vshr_n_s64(vreinterpret_s64_s16(v_eobmax_tmp), 16);
+    const int16x4_t v_eobmax_final =
+        vmax_s16(v_eobmax_tmp, vreinterpret_s16_s64(v_eobmax_xxx3));
+
+    *eob_ptr = (uint16_t)vget_lane_s16(v_eobmax_final, 0) + 1;
+  }
+#endif  // __aarch64__
+}
+
+void aom_quantize_b_neon(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                         const int16_t *zbin_ptr, const int16_t *round_ptr,
+                         const int16_t *quant_ptr,
+                         const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
+                         tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+                         uint16_t *eob_ptr, const int16_t *scan,
+                         const int16_t *iscan) {
+  (void)scan;
+
+  const int zbins[2] = { zbin_ptr[0], zbin_ptr[1] };
+
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+
+  const int16x8_t zero = vdupq_n_s16(0);
+  int16x8_t v_eobmax_76543210 = vreinterpretq_s16_u16(vceqq_s16(zero, zero));
+
+  int16x8_t vzbins = vdupq_n_s16(zbins[1]), vround = vdupq_n_s16(round_ptr[1]);
+  int16x8_t vdequant = vdupq_n_s16(dequant_ptr[1]);
+  int16x8_t vquant = vdupq_n_s16(quant_ptr[1]);
+  int16x8_t vquant_shift = vdupq_n_s16(quant_shift_ptr[1]);
+
+  int16x8_t v_coeff = load_tran_low_to_s16q(&coeff_ptr[0]);
+  int16x8_t v_coeff_sign = vshrq_n_s16(v_coeff, 15);
+  int16x8_t v_abs = vabsq_s16(v_coeff);
+
+  vzbins = vsetq_lane_s16(zbins[0], vzbins, 0);
+
+  uint16x8_t vcond = vcgeq_s16(v_abs, vzbins);
+  uint64_t nz_check = vget_lane_u64(vreinterpret_u64_u8(vmovn_u16(vcond)), 0);
+  if (nz_check) {
+    vround = vsetq_lane_s16(round_ptr[0], vround, 0);
+    vquant = vsetq_lane_s16(quant_ptr[0], vquant, 0);
+    vdequant = vsetq_lane_s16(dequant_ptr[0], vdequant, 0);
+    vquant_shift = vsetq_lane_s16(quant_shift_ptr[0], vquant_shift, 0);
+
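+    // As in aom_quantize_b_c: tmp = saturate(abs + round); qcoeff =
+    // ((tmp + ((tmp * quant) >> 16)) * quant_shift) >> 16.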
+    int16x8_t vtmp = vqaddq_s16(v_abs, vround);
+    int16x8_t vtmp2 = vsraq_n_s16(vtmp, vqdmulhq_s16(vtmp, vquant), 1);
+    vtmp2 = vshrq_n_s16(vqdmulhq_s16(vtmp2, vquant_shift), 1);
+
+    int16x8_t vdest = vsubq_s16(veorq_s16(vtmp2, v_coeff_sign), v_coeff_sign);
+    int16x8_t coeff_nz_mask =
+        vbslq_s16(vcond, vdest, load_tran_low_to_s16q(&qcoeff_ptr[0]));
+    store_s16q_to_tran_low(&qcoeff_ptr[0], coeff_nz_mask);
+    int16x8_t v_deq_abs = vmulq_s16(vtmp2, vdequant);
+
+    vdest = vsubq_s16(veorq_s16(v_deq_abs, v_coeff_sign), v_coeff_sign);
+    coeff_nz_mask =
+        vbslq_s16(vcond, vdest, load_tran_low_to_s16q(&dqcoeff_ptr[0]));
+    store_s16q_to_tran_low(&dqcoeff_ptr[0], coeff_nz_mask);
+
+    vround = vsetq_lane_s16(round_ptr[1], vround, 0);
+    vquant = vsetq_lane_s16(quant_ptr[1], vquant, 0);
+    vdequant = vsetq_lane_s16(dequant_ptr[1], vdequant, 0);
+    vquant_shift = vsetq_lane_s16(quant_shift_ptr[1], vquant_shift, 0);
+
+    uint16x8_t vtmp_mask = vcgtq_s16(vtmp2, zero);
+    const uint16x8_t v_nz_mask = vandq_u16(vtmp_mask, vcond);
+    int16x8_t v_iscan = vld1q_s16(&iscan[0]);
+    vcond = vandq_u16(v_nz_mask, vcgtq_s16(v_iscan, v_eobmax_76543210));
+    v_eobmax_76543210 = vbslq_s16(vcond, v_iscan, v_eobmax_76543210);
+  }
+  vzbins = vsetq_lane_s16(zbins[1], vzbins, 0);
+
+  for (int i = 8; i < n_coeffs; i += 8) {
+    v_coeff = load_tran_low_to_s16q(&coeff_ptr[i]);
+    v_coeff_sign = vshrq_n_s16(v_coeff, 15);
+    v_abs = vabsq_s16(v_coeff);
+    vcond = vcgeq_s16(v_abs, vzbins);
+
+    nz_check = vget_lane_u64(vreinterpret_u64_u8(vmovn_u16(vcond)), 0);
+    if (nz_check) {
+      int16x8_t vtmp = vqaddq_s16(v_abs, vround);
+      int16x8_t vtmp2 = vsraq_n_s16(vtmp, vqdmulhq_s16(vtmp, vquant), 1);
+
+      vtmp2 = vshrq_n_s16(vqdmulhq_s16(vtmp2, vquant_shift), 1);
+      int16x8_t vdest = vsubq_s16(veorq_s16(vtmp2, v_coeff_sign), v_coeff_sign);
+      int16x8_t coeff_nz_mask =
+          vbslq_s16(vcond, vdest, load_tran_low_to_s16q(&qcoeff_ptr[i]));
+      store_s16q_to_tran_low(&qcoeff_ptr[i], coeff_nz_mask);
+      int16x8_t v_deq_abs = vmulq_s16(vtmp2, vdequant);
+      vdest = vsubq_s16(veorq_s16(v_deq_abs, v_coeff_sign), v_coeff_sign);
+      coeff_nz_mask =
+          vbslq_s16(vcond, vdest, load_tran_low_to_s16q(&dqcoeff_ptr[i]));
+      store_s16q_to_tran_low(&dqcoeff_ptr[i], coeff_nz_mask);
+
+      uint16x8_t vtmp_mask = vcgtq_s16(vtmp2, zero);
+      const uint16x8_t v_nz_mask = vandq_u16(vtmp_mask, vcond);
+      int16x8_t v_iscan = vld1q_s16(&iscan[i]);
+      vcond = vandq_u16(v_nz_mask, vcgtq_s16(v_iscan, v_eobmax_76543210));
+      v_eobmax_76543210 = vbslq_s16(vcond, v_iscan, v_eobmax_76543210);
+    }
+  }
+
+#ifdef __aarch64__
+  *eob_ptr = vmaxvq_s16(v_eobmax_76543210) + 1;
+#else
+  {
+    const int16x4_t v_eobmax_3210 = vmax_s16(vget_low_s16(v_eobmax_76543210),
+                                             vget_high_s16(v_eobmax_76543210));
+    const int64x1_t v_eobmax_xx32 =
+        vshr_n_s64(vreinterpret_s64_s16(v_eobmax_3210), 32);
+    const int16x4_t v_eobmax_tmp =
+        vmax_s16(v_eobmax_3210, vreinterpret_s16_s64(v_eobmax_xx32));
+    const int64x1_t v_eobmax_xxx3 =
+        vshr_n_s64(vreinterpret_s64_s16(v_eobmax_tmp), 16);
+    const int16x4_t v_eobmax_final =
+        vmax_s16(v_eobmax_tmp, vreinterpret_s16_s64(v_eobmax_xxx3));
+
+    *eob_ptr = (uint16_t)vget_lane_s16(v_eobmax_final, 0) + 1;
+  }
+#endif  // __aarch64__
+}
+
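+// Computes (x0 * x1) >> AOM_QM_BITS, with the 16-bit result assembled from
+// the saturating-doubling high half and the low half of the 32-bit product.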
+#define QM_MULL_SHIFT(x0, x1)                                              \
+  vreinterpretq_s16_u16(vorrq_u16(                                         \
+      vreinterpretq_u16_s16(vshlq_n_s16(                                   \
+          vqdmulhq_s16(x0, vreinterpretq_s16_u16(x1)), 15 - AOM_QM_BITS)), \
+      vshrq_n_u16(vmulq_u16(vreinterpretq_u16_s16(x0), x1), AOM_QM_BITS)))
+
+static void aom_quantize_b_helper_16x16_neon(
+    const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *zbin_ptr,
+    const int16_t *round_ptr, const int16_t *quant_ptr,
+    const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
+    tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr,
+    const int16_t *scan, const int16_t *iscan, const qm_val_t *qm_ptr,
+    const qm_val_t *iqm_ptr) {
+  (void)scan;
+
+  uint16x8_t vwt, viwt;
+  const int zbins[2] = { zbin_ptr[0], zbin_ptr[1] };
+
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+
+  const int16x8_t zero = vdupq_n_s16(0);
+  int16x8_t v_eobmax_76543210 = vreinterpretq_s16_u16(vceqq_s16(zero, zero));
+
+  int16x8_t vzbins = vdupq_n_s16(zbins[1]), vround = vdupq_n_s16(round_ptr[1]);
+  int16x8_t vdequant = vdupq_n_s16(dequant_ptr[1]);
+  int16x8_t vquant = vdupq_n_s16(quant_ptr[1]);
+  int16x8_t vquant_shift = vdupq_n_s16(quant_shift_ptr[1]);
+
+  int16x8_t v_coeff = load_tran_low_to_s16q(&coeff_ptr[0]);
+  int16x8_t v_coeff_sign = vshrq_n_s16(v_coeff, 15);
+  int16x8_t v_abs = vabsq_s16(v_coeff);
+  vzbins = vsetq_lane_s16(zbins[0], vzbins, 0);
+  uint16x8_t vcond;
+  if (qm_ptr == NULL) {
+    vcond = vcgeq_s16(v_abs, vzbins);
+  } else {
+    vwt = vmovl_u8(vld1_u8(&qm_ptr[0]));
+    vcond = vcgeq_s16(QM_MULL_SHIFT(v_abs, vwt), vzbins);
+  }
+  uint64_t nz_check = vget_lane_u64(vreinterpret_u64_u8(vmovn_u16(vcond)), 0);
+  if (nz_check) {
+    vround = vsetq_lane_s16(round_ptr[0], vround, 0);
+    vquant = vsetq_lane_s16(quant_ptr[0], vquant, 0);
+    vdequant = vsetq_lane_s16(dequant_ptr[0], vdequant, 0);
+    vquant_shift = vsetq_lane_s16(quant_shift_ptr[0], vquant_shift, 0);
+
+    int16x8_t vtmp = vqaddq_s16(v_abs, vround);
+
+    int16x8_t vtmp2;
+    if (qm_ptr == NULL) {
+      vtmp2 = vsraq_n_s16(vtmp, vqdmulhq_s16(vtmp, vquant), 1);
+    } else {
+      vtmp2 = QM_MULL_SHIFT(vtmp, vwt);
+      vtmp2 = vsraq_n_s16(vtmp2, vqdmulhq_s16(vtmp2, vquant), 1);
+    }
+
+    vtmp2 = vshrq_n_s16(vqdmulhq_s16(vtmp2, vquant_shift), 1);
+    int16x8_t vdest = vsubq_s16(veorq_s16(vtmp2, v_coeff_sign), v_coeff_sign);
+    int16x8_t coeff_nz_mask =
+        vbslq_s16(vcond, vdest, load_tran_low_to_s16q(&qcoeff_ptr[0]));
+    store_s16q_to_tran_low(&qcoeff_ptr[0], coeff_nz_mask);
+
+    int16x8_t vdequant_iqm = vdequant;
+    if (iqm_ptr != NULL) {
+      viwt = vmovl_u8(vld1_u8(&iqm_ptr[0]));
+      // Weight into a temporary so vdequant itself stays unweighted.
+      vdequant_iqm = QM_MULL_SHIFT(vdequant, viwt);
+    }
+    int16x8_t v_deq_abs = vmulq_s16(vtmp2, vdequant_iqm);
+    vdest = vsubq_s16(veorq_s16(v_deq_abs, v_coeff_sign), v_coeff_sign);
+    coeff_nz_mask =
+        vbslq_s16(vcond, vdest, load_tran_low_to_s16q(&dqcoeff_ptr[0]));
+    store_s16q_to_tran_low(&dqcoeff_ptr[0], coeff_nz_mask);
+
+    vround = vsetq_lane_s16(round_ptr[1], vround, 0);
+    vquant = vsetq_lane_s16(quant_ptr[1], vquant, 0);
+    vdequant = vsetq_lane_s16(dequant_ptr[1], vdequant, 0);
+    vquant_shift = vsetq_lane_s16(quant_shift_ptr[1], vquant_shift, 0);
+
+    uint16x8_t vtmp_mask = vcgtq_s16(vtmp2, zero);
+    const uint16x8_t v_nz_mask = vandq_u16(vtmp_mask, vcond);
+    int16x8_t v_iscan = vld1q_s16(&iscan[0]);
+    vcond = vandq_u16(v_nz_mask, vcgtq_s16(v_iscan, v_eobmax_76543210));
+    v_eobmax_76543210 = vbslq_s16(vcond, v_iscan, v_eobmax_76543210);
+  }
+  vzbins = vsetq_lane_s16(zbins[1], vzbins, 0);
+
+  for (int i = 8; i < n_coeffs; i += 8) {
+    v_coeff = load_tran_low_to_s16q(&coeff_ptr[i]);
+    v_coeff_sign = vshrq_n_s16(v_coeff, 15);
+    v_abs = vabsq_s16(v_coeff);
+
+    if (qm_ptr == NULL) {
+      vcond = vcgeq_s16(v_abs, vzbins);
+    } else {
+      vwt = vmovl_u8(vld1_u8(&qm_ptr[i]));
+      vcond = vcgeq_s16(QM_MULL_SHIFT(v_abs, vwt), vzbins);
+    }
+    nz_check = vget_lane_u64(vreinterpret_u64_u8(vmovn_u16(vcond)), 0);
+    if (nz_check) {
+      int16x8_t vtmp = vqaddq_s16(v_abs, vround);
+
+      int16x8_t vtmp2;
+      if (qm_ptr == NULL) {
+        vtmp2 = vsraq_n_s16(vtmp, vqdmulhq_s16(vtmp, vquant), 1);
+      } else {
+        vtmp2 = QM_MULL_SHIFT(vtmp, vwt);
+        vtmp2 = vsraq_n_s16(vtmp2, vqdmulhq_s16(vtmp2, vquant), 1);
+      }
+
+      vtmp2 = vshrq_n_s16(vqdmulhq_s16(vtmp2, vquant_shift), 1);
+      int16x8_t vdest = vsubq_s16(veorq_s16(vtmp2, v_coeff_sign), v_coeff_sign);
+      int16x8_t coeff_nz_mask =
+          vbslq_s16(vcond, vdest, load_tran_low_to_s16q(&qcoeff_ptr[i]));
+      store_s16q_to_tran_low(&qcoeff_ptr[i], coeff_nz_mask);
+
+      int16x8_t vdequant_iqm = vdequant;
+      if (iqm_ptr != NULL) {
+        viwt = vmovl_u8(vld1_u8(&iqm_ptr[i]));
+        vdequant_iqm = QM_MULL_SHIFT(vdequant, viwt);
+      }
+      int16x8_t v_deq_abs = vmulq_s16(vtmp2, vdequant_iqm);
+      vdest = vsubq_s16(veorq_s16(v_deq_abs, v_coeff_sign), v_coeff_sign);
+      coeff_nz_mask =
+          vbslq_s16(vcond, vdest, load_tran_low_to_s16q(&dqcoeff_ptr[i]));
+      store_s16q_to_tran_low(&dqcoeff_ptr[i], coeff_nz_mask);
+
+      uint16x8_t vtmp_mask = vcgtq_s16(vtmp2, zero);
+      const uint16x8_t v_nz_mask = vandq_u16(vtmp_mask, vcond);
+      int16x8_t v_iscan = vld1q_s16(&iscan[i]);
+      vcond = vandq_u16(v_nz_mask, vcgtq_s16(v_iscan, v_eobmax_76543210));
+      v_eobmax_76543210 = vbslq_s16(vcond, v_iscan, v_eobmax_76543210);
+    }
+  }
+
+#ifdef __aarch64__
+  *eob_ptr = vmaxvq_s16(v_eobmax_76543210) + 1;
+#else
+  {
+    const int16x4_t v_eobmax_3210 = vmax_s16(vget_low_s16(v_eobmax_76543210),
+                                             vget_high_s16(v_eobmax_76543210));
+    const int64x1_t v_eobmax_xx32 =
+        vshr_n_s64(vreinterpret_s64_s16(v_eobmax_3210), 32);
+    const int16x4_t v_eobmax_tmp =
+        vmax_s16(v_eobmax_3210, vreinterpret_s16_s64(v_eobmax_xx32));
+    const int64x1_t v_eobmax_xxx3 =
+        vshr_n_s64(vreinterpret_s64_s16(v_eobmax_tmp), 16);
+    const int16x4_t v_eobmax_final =
+        vmax_s16(v_eobmax_tmp, vreinterpret_s16_s64(v_eobmax_xxx3));
+
+    *eob_ptr = (uint16_t)vget_lane_s16(v_eobmax_final, 0) + 1;
+  }
+#endif  // __aarch64__
+}
+
+static void aom_quantize_b_helper_32x32_neon(
+    const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *zbin_ptr,
+    const int16_t *round_ptr, const int16_t *quant_ptr,
+    const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
+    tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr,
+    const int16_t *scan, const int16_t *iscan, const qm_val_t *qm_ptr,
+    const qm_val_t *iqm_ptr) {
+  (void)scan;
+
+  uint16x8_t vwt, viwt;
+  const int log_scale = 1;
+  const int zbins[2] = { ROUND_POWER_OF_TWO(zbin_ptr[0], log_scale),
+                         ROUND_POWER_OF_TWO(zbin_ptr[1], log_scale) };
+
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+
+  const int16x8_t zero = vdupq_n_s16(0);
+  int16x8_t v_eobmax_76543210 = vreinterpretq_s16_u16(vceqq_s16(zero, zero));
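+  // Reuse the all-ones (-1) vector as a per-lane shift count of -1, i.e. a
+  // right shift by log_scale.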
+  const int16x8_t v_log_scale = v_eobmax_76543210;
+
+  int16x8_t vzbins = vdupq_n_s16(zbins[1]),
+            vround = vdupq_n_s16(ROUND_POWER_OF_TWO(round_ptr[1], log_scale));
+  int16x8_t vdequant = vdupq_n_s16(dequant_ptr[1]);
+  int16x8_t vquant = vdupq_n_s16(quant_ptr[1]);
+  int16x8_t vquant_shift = vdupq_n_s16(quant_shift_ptr[1]);
+
+  int16x8_t v_coeff = load_tran_low_to_s16q(&coeff_ptr[0]);
+  int16x8_t v_coeff_sign = vshrq_n_s16(v_coeff, 15);
+  int16x8_t v_abs = vabsq_s16(v_coeff);
+  vzbins = vsetq_lane_s16(zbins[0], vzbins, 0);
+  uint16x8_t vcond;
+  if (qm_ptr == NULL) {
+    vcond = vcgeq_s16(v_abs, vzbins);
+  } else {
+    vwt = vmovl_u8(vld1_u8(&qm_ptr[0]));
+    vcond = vcgeq_s16(QM_MULL_SHIFT(v_abs, vwt), vzbins);
+  }
+  uint64_t nz_check = vget_lane_u64(vreinterpret_u64_u8(vmovn_u16(vcond)), 0);
+  if (nz_check) {
+    vround =
+        vsetq_lane_s16(ROUND_POWER_OF_TWO(round_ptr[0], log_scale), vround, 0);
+    vquant = vsetq_lane_s16(quant_ptr[0], vquant, 0);
+    vdequant = vsetq_lane_s16(dequant_ptr[0], vdequant, 0);
+    vquant_shift = vsetq_lane_s16(quant_shift_ptr[0], vquant_shift, 0);
+
+    int16x8_t vtmp = vqaddq_s16(v_abs, vround);
+
+    int16x8_t vtmp2;
+    if (qm_ptr == NULL) {
+      vtmp2 = vsraq_n_s16(vtmp, vqdmulhq_s16(vtmp, vquant), 1);
+    } else {
+      vtmp2 = QM_MULL_SHIFT(vtmp, vwt);
+      vtmp2 = vsraq_n_s16(vtmp2, vqdmulhq_s16(vtmp2, vquant), 1);
+    }
+
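+    // log_scale = 1: qcoeff = (tmp2 * quant_shift) >> 15.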
+    vtmp2 = vqdmulhq_s16(vtmp2, vquant_shift);
+    int16x8_t vdest = vsubq_s16(veorq_s16(vtmp2, v_coeff_sign), v_coeff_sign);
+    int16x8_t coeff_nz_mask =
+        vbslq_s16(vcond, vdest, load_tran_low_to_s16q(&qcoeff_ptr[0]));
+    store_s16q_to_tran_low(&qcoeff_ptr[0], coeff_nz_mask);
+
+    int16x8_t vdequant_iqm = vdequant;
+    if (iqm_ptr != NULL) {
+      viwt = vmovl_u8(vld1_u8(&iqm_ptr[0]));
+      vdequant_iqm = QM_MULL_SHIFT(vdequant, viwt);
+    }
+    int16x8_t v_deq_abs = vreinterpretq_s16_u16(vshlq_u16(
+        vreinterpretq_u16_s16(vmulq_s16(vtmp2, vdequant_iqm)), v_log_scale));
+    vdest = vsubq_s16(veorq_s16(v_deq_abs, v_coeff_sign), v_coeff_sign);
+    coeff_nz_mask =
+        vbslq_s16(vcond, vdest, load_tran_low_to_s16q(&dqcoeff_ptr[0]));
+    store_s16q_to_tran_low(&dqcoeff_ptr[0], coeff_nz_mask);
+
+    vround =
+        vsetq_lane_s16(ROUND_POWER_OF_TWO(round_ptr[1], log_scale), vround, 0);
+    vquant = vsetq_lane_s16(quant_ptr[1], vquant, 0);
+    vdequant = vsetq_lane_s16(dequant_ptr[1], vdequant, 0);
+    vquant_shift = vsetq_lane_s16(quant_shift_ptr[1], vquant_shift, 0);
+
+    uint16x8_t vtmp_mask = vcgtq_s16(vtmp2, zero);
+    const uint16x8_t v_nz_mask = vandq_u16(vtmp_mask, vcond);
+    int16x8_t v_iscan = vld1q_s16(&iscan[0]);
+    vcond = vandq_u16(v_nz_mask, vcgtq_s16(v_iscan, v_eobmax_76543210));
+    v_eobmax_76543210 = vbslq_s16(vcond, v_iscan, v_eobmax_76543210);
+  }
+  vzbins = vsetq_lane_s16(zbins[1], vzbins, 0);
+
+  for (int i = 8; i < n_coeffs; i += 8) {
+    v_coeff = load_tran_low_to_s16q(&coeff_ptr[i]);
+    v_coeff_sign = vshrq_n_s16(v_coeff, 15);
+    v_abs = vabsq_s16(v_coeff);
+
+    if (qm_ptr == NULL) {
+      vcond = vcgeq_s16(v_abs, vzbins);
+    } else {
+      vwt = vmovl_u8(vld1_u8(&qm_ptr[i]));
+      vcond = vcgeq_s16(QM_MULL_SHIFT(v_abs, vwt), vzbins);
+    }
+    nz_check = vget_lane_u64(vreinterpret_u64_u8(vmovn_u16(vcond)), 0);
+    if (nz_check) {
+      int16x8_t vtmp = vqaddq_s16(v_abs, vround);
+
+      int16x8_t vtmp2;
+      if (qm_ptr == NULL) {
+        vtmp2 = vsraq_n_s16(vtmp, vqdmulhq_s16(vtmp, vquant), 1);
+      } else {
+        vtmp2 = QM_MULL_SHIFT(vtmp, vwt);
+        vtmp2 = vsraq_n_s16(vtmp2, vqdmulhq_s16(vtmp2, vquant), 1);
+      }
+      vtmp2 = vqdmulhq_s16(vtmp2, vquant_shift);
+
+      int16x8_t vdest = vsubq_s16(veorq_s16(vtmp2, v_coeff_sign), v_coeff_sign);
+      int16x8_t coeff_nz_mask =
+          vbslq_s16(vcond, vdest, load_tran_low_to_s16q(&qcoeff_ptr[i]));
+      store_s16q_to_tran_low(&qcoeff_ptr[i], coeff_nz_mask);
+
+      int16x8_t vdequant_iqm = vdequant;
+      if (iqm_ptr != NULL) {
+        viwt = vmovl_u8(vld1_u8(&iqm_ptr[i]));
+        vdequant_iqm = QM_MULL_SHIFT(vdequant, viwt);
+      }
+      int16x8_t v_deq_abs = vreinterpretq_s16_u16(vshlq_u16(
+          vreinterpretq_u16_s16(vmulq_s16(vtmp2, vdequant_iqm)), v_log_scale));
+      vdest = vsubq_s16(veorq_s16(v_deq_abs, v_coeff_sign), v_coeff_sign);
+      coeff_nz_mask =
+          vbslq_s16(vcond, vdest, load_tran_low_to_s16q(&dqcoeff_ptr[i]));
+      store_s16q_to_tran_low(&dqcoeff_ptr[i], coeff_nz_mask);
+
+      uint16x8_t vtmp_mask = vcgtq_s16(vtmp2, zero);
+      const uint16x8_t v_nz_mask = vandq_u16(vtmp_mask, vcond);
+      int16x8_t v_iscan = vld1q_s16(&iscan[i]);
+      vcond = vandq_u16(v_nz_mask, vcgtq_s16(v_iscan, v_eobmax_76543210));
+      v_eobmax_76543210 = vbslq_s16(vcond, v_iscan, v_eobmax_76543210);
+    }
+  }
+
+#ifdef __aarch64__
+  *eob_ptr = vmaxvq_s16(v_eobmax_76543210) + 1;
+#else
+  {
+    const int16x4_t v_eobmax_3210 = vmax_s16(vget_low_s16(v_eobmax_76543210),
+                                             vget_high_s16(v_eobmax_76543210));
+    const int64x1_t v_eobmax_xx32 =
+        vshr_n_s64(vreinterpret_s64_s16(v_eobmax_3210), 32);
+    const int16x4_t v_eobmax_tmp =
+        vmax_s16(v_eobmax_3210, vreinterpret_s16_s64(v_eobmax_xx32));
+    const int64x1_t v_eobmax_xxx3 =
+        vshr_n_s64(vreinterpret_s64_s16(v_eobmax_tmp), 16);
+    const int16x4_t v_eobmax_final =
+        vmax_s16(v_eobmax_tmp, vreinterpret_s16_s64(v_eobmax_xxx3));
+
+    *eob_ptr = (uint16_t)vget_lane_s16(v_eobmax_final, 0) + 1;
+  }
+#endif  // __aarch64__
+}
+
+static void aom_quantize_b_helper_64x64_neon(
+    const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *zbin_ptr,
+    const int16_t *round_ptr, const int16_t *quant_ptr,
+    const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
+    tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr,
+    const int16_t *scan, const int16_t *iscan, const qm_val_t *qm_ptr,
+    const qm_val_t *iqm_ptr) {
+  (void)scan;
+
+  uint16x8_t vwt, viwt;
+  const int log_scale = 2;
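+  // -2 in every s16 lane: a negative vshlq count shifts right by log_scale.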
+  const int16x8_t v_log_scale =
+      vreinterpretq_s16_s64(vdupq_n_s64(0xFFFEFFFEFFFEFFFE));
+
+  const int zbins[2] = { ROUND_POWER_OF_TWO(zbin_ptr[0], log_scale),
+                         ROUND_POWER_OF_TWO(zbin_ptr[1], log_scale) };
+
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+
+  const int16x8_t zero = vdupq_n_s16(0);
+  int16x8_t v_eobmax_76543210 = vreinterpretq_s16_u16(vceqq_s16(zero, zero));
+  int16x8_t v_ones = vnegq_s16(v_eobmax_76543210);
+
+  int16x8_t vzbins = vdupq_n_s16(zbins[1]),
+            vround = vdupq_n_s16(ROUND_POWER_OF_TWO(round_ptr[1], log_scale));
+  int16x8_t vdequant = vdupq_n_s16(dequant_ptr[1]);
+  int16x8_t vquant = vdupq_n_s16(quant_ptr[1]);
+  int16x8_t vquant_shift = vdupq_n_s16(quant_shift_ptr[1]);
+
+  int16x8_t v_coeff = load_tran_low_to_s16q(&coeff_ptr[0]);
+  int16x8_t v_coeff_sign = vshrq_n_s16(v_coeff, 15);
+  int16x8_t v_abs = vabsq_s16(v_coeff);
+  vzbins = vsetq_lane_s16(zbins[0], vzbins, 0);
+  uint16x8_t vcond;
+  if (qm_ptr == NULL) {
+    vcond = vcgeq_s16(v_abs, vzbins);
+  } else {
+    vwt = vmovl_u8(vld1_u8(&qm_ptr[0]));
+    vcond = vcgeq_s16(QM_MULL_SHIFT(v_abs, vwt), vzbins);
+  }
+  uint64_t nz_check = vget_lane_u64(vreinterpret_u64_u8(vmovn_u16(vcond)), 0);
+  if (nz_check) {
+    vround =
+        vsetq_lane_s16(ROUND_POWER_OF_TWO(round_ptr[0], log_scale), vround, 0);
+    vquant = vsetq_lane_s16(quant_ptr[0], vquant, 0);
+    vdequant = vsetq_lane_s16(dequant_ptr[0], vdequant, 0);
+    vquant_shift = vsetq_lane_s16(quant_shift_ptr[0], vquant_shift, 0);
+    int16x8_t vtmp = vqaddq_s16(v_abs, vround);
+
+    int16x8_t vtmp2;
+    if (qm_ptr == NULL) {
+      vtmp2 = vsraq_n_s16(vtmp, vqdmulhq_s16(vtmp, vquant), 1);
+    } else {
+      vtmp2 = QM_MULL_SHIFT(vtmp, vwt);
+      vtmp2 = vsraq_n_s16(vtmp2, vqdmulhq_s16(vtmp2, vquant), 1);
+    }
+
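+    // qcoeff = (tmp2 * quant_shift) >> 14: the high half shifted left once,
+    // plus bit 14 of the low half of the product.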
+    int16x8_t ones =
+        vandq_s16(vshrq_n_s16(vmulq_s16(vtmp2, vquant_shift), 14), v_ones);
+    vtmp2 =
+        vaddq_s16(vshlq_s16(vqdmulhq_s16(vtmp2, vquant_shift), v_ones), ones);
+    int16x8_t vdest = vsubq_s16(veorq_s16(vtmp2, v_coeff_sign), v_coeff_sign);
+    int16x8_t coeff_nz_mask =
+        vbslq_s16(vcond, vdest, load_tran_low_to_s16q(&qcoeff_ptr[0]));
+    store_s16q_to_tran_low(&qcoeff_ptr[0], coeff_nz_mask);
+
+    int16x8_t vdequant_iqm = vdequant;
+    if (iqm_ptr != NULL) {
+      viwt = vmovl_u8(vld1_u8(&iqm_ptr[0]));
+      vdequant_iqm = QM_MULL_SHIFT(vdequant, viwt);
+    }
+    int16x8_t v_deq_abs = vreinterpretq_s16_u16(vshlq_u16(
+        vreinterpretq_u16_s16(vmulq_s16(vtmp2, vdequant_iqm)), v_log_scale));
+    v_deq_abs = vorrq_s16(
+        vshlq_n_s16(vqdmulhq_s16(vtmp2, vdequant_iqm), 13), v_deq_abs);
+    vdest = vsubq_s16(veorq_s16(v_deq_abs, v_coeff_sign), v_coeff_sign);
+    coeff_nz_mask =
+        vbslq_s16(vcond, vdest, load_tran_low_to_s16q(&dqcoeff_ptr[0]));
+    store_s16q_to_tran_low(&dqcoeff_ptr[0], coeff_nz_mask);
+
+    vround =
+        vsetq_lane_s16(ROUND_POWER_OF_TWO(round_ptr[1], log_scale), vround, 0);
+    vquant = vsetq_lane_s16(quant_ptr[1], vquant, 0);
+    vdequant = vsetq_lane_s16(dequant_ptr[1], vdequant, 0);
+    vquant_shift = vsetq_lane_s16(quant_shift_ptr[1], vquant_shift, 0);
+
+    uint16x8_t vtmp_mask = vcgtq_s16(vtmp2, zero);
+    const uint16x8_t v_nz_mask = vandq_u16(vtmp_mask, vcond);
+    int16x8_t v_iscan = vld1q_s16(&iscan[0]);
+    vcond = vandq_u16(v_nz_mask, vcgtq_s16(v_iscan, v_eobmax_76543210));
+    v_eobmax_76543210 = vbslq_s16(vcond, v_iscan, v_eobmax_76543210);
+  }
+  vzbins = vsetq_lane_s16(zbins[1], vzbins, 0);
+
+  for (int i = 8; i < n_coeffs; i += 8) {
+    v_coeff = load_tran_low_to_s16q(&coeff_ptr[i]);
+    v_coeff_sign = vshrq_n_s16(v_coeff, 15);
+    v_abs = vabsq_s16(v_coeff);
+
+    if (qm_ptr == NULL) {
+      vcond = vcgeq_s16(v_abs, vzbins);
+    } else {
+      vwt = vmovl_u8(vld1_u8(&qm_ptr[i]));
+      vcond = vcgeq_s16(QM_MULL_SHIFT(v_abs, vwt), vzbins);
+    }
+    nz_check = vget_lane_u64(vreinterpret_u64_u8(vmovn_u16(vcond)), 0);
+    if (nz_check) {
+      int16x8_t vtmp = vqaddq_s16(v_abs, vround);
+
+      int16x8_t vtmp2;
+      if (qm_ptr == NULL) {
+        vtmp2 = vsraq_n_s16(vtmp, vqdmulhq_s16(vtmp, vquant), 1);
+      } else {
+        vtmp2 = QM_MULL_SHIFT(vtmp, vwt);
+        vtmp2 = vsraq_n_s16(vtmp2, vqdmulhq_s16(vtmp2, vquant), 1);
+      }
+
+      int16x8_t ones =
+          vandq_s16(vshrq_n_s16(vmulq_s16(vtmp2, vquant_shift), 14), v_ones);
+      vtmp2 =
+          vaddq_s16(vshlq_s16(vqdmulhq_s16(vtmp2, vquant_shift), v_ones), ones);
+      int16x8_t vdest = vsubq_s16(veorq_s16(vtmp2, v_coeff_sign), v_coeff_sign);
+      int16x8_t coeff_nz_mask =
+          vbslq_s16(vcond, vdest, load_tran_low_to_s16q(&qcoeff_ptr[i]));
+      store_s16q_to_tran_low(&qcoeff_ptr[i], coeff_nz_mask);
+
+      int16x8_t vdequant_iqm = vdequant;
+      if (iqm_ptr != NULL) {
+        viwt = vmovl_u8(vld1_u8(&iqm_ptr[i]));
+        vdequant_iqm = QM_MULL_SHIFT(vdequant, viwt);
+      }
+      int16x8_t v_deq_abs = vreinterpretq_s16_u16(vshlq_u16(
+          vreinterpretq_u16_s16(vmulq_s16(vtmp2, vdequant_iqm)), v_log_scale));
+      v_deq_abs = vorrq_s16(
+          vshlq_n_s16(vqdmulhq_s16(vtmp2, vdequant_iqm), 13), v_deq_abs);
+      vdest = vsubq_s16(veorq_s16(v_deq_abs, v_coeff_sign), v_coeff_sign);
+      coeff_nz_mask =
+          vbslq_s16(vcond, vdest, load_tran_low_to_s16q(&dqcoeff_ptr[i]));
+      store_s16q_to_tran_low(&dqcoeff_ptr[i], coeff_nz_mask);
+
+      uint16x8_t vtmp_mask = vcgtq_s16(vtmp2, zero);
+      const uint16x8_t v_nz_mask = vandq_u16(vtmp_mask, vcond);
+      int16x8_t v_iscan = vld1q_s16(&iscan[i]);
+      vcond = vandq_u16(v_nz_mask, vcgtq_s16(v_iscan, v_eobmax_76543210));
+      v_eobmax_76543210 = vbslq_s16(vcond, v_iscan, v_eobmax_76543210);
+    }
+  }
+
+#ifdef __aarch64__
+  *eob_ptr = vmaxvq_s16(v_eobmax_76543210) + 1;
+#else
+  {
+    const int16x4_t v_eobmax_3210 = vmax_s16(vget_low_s16(v_eobmax_76543210),
+                                             vget_high_s16(v_eobmax_76543210));
+    const int64x1_t v_eobmax_xx32 =
+        vshr_n_s64(vreinterpret_s64_s16(v_eobmax_3210), 32);
+    const int16x4_t v_eobmax_tmp =
+        vmax_s16(v_eobmax_3210, vreinterpret_s16_s64(v_eobmax_xx32));
+    const int64x1_t v_eobmax_xxx3 =
+        vshr_n_s64(vreinterpret_s64_s16(v_eobmax_tmp), 16);
+    const int16x4_t v_eobmax_final =
+        vmax_s16(v_eobmax_tmp, vreinterpret_s16_s64(v_eobmax_xxx3));
+
+    *eob_ptr = (uint16_t)vget_lane_s16(v_eobmax_final, 0) + 1;
+  }
+#endif  // __aarch64__
+}
+
+void aom_quantize_b_helper_neon(
+    const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *zbin_ptr,
+    const int16_t *round_ptr, const int16_t *quant_ptr,
+    const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
+    tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr,
+    const int16_t *scan, const int16_t *iscan, const qm_val_t *qm_ptr,
+    const qm_val_t *iqm_ptr, const int log_scale) {
+  switch (log_scale) {  // log_scale can only be 0, 1 or 2 in the AV1 encoder
+    case 0:
+      aom_quantize_b_helper_16x16_neon(coeff_ptr, n_coeffs, zbin_ptr, round_ptr,
+                                       quant_ptr, quant_shift_ptr, qcoeff_ptr,
+                                       dqcoeff_ptr, dequant_ptr, eob_ptr, scan,
+                                       iscan, qm_ptr, iqm_ptr);
+      break;
+    case 1:
+      aom_quantize_b_helper_32x32_neon(coeff_ptr, n_coeffs, zbin_ptr, round_ptr,
+                                       quant_ptr, quant_shift_ptr, qcoeff_ptr,
+                                       dqcoeff_ptr, dequant_ptr, eob_ptr, scan,
+                                       iscan, qm_ptr, iqm_ptr);
+      break;
+    case 2:
+      aom_quantize_b_helper_64x64_neon(coeff_ptr, n_coeffs, zbin_ptr, round_ptr,
+                                       quant_ptr, quant_shift_ptr, qcoeff_ptr,
+                                       dqcoeff_ptr, dequant_ptr, eob_ptr, scan,
+                                       iscan, qm_ptr, iqm_ptr);
+      break;
+  }
+}
+
+void aom_quantize_b_32x32_neon(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                               const int16_t *zbin_ptr,
+                               const int16_t *round_ptr,
+                               const int16_t *quant_ptr,
+                               const int16_t *quant_shift_ptr,
+                               tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                               const int16_t *dequant_ptr, uint16_t *eob_ptr,
+                               const int16_t *scan, const int16_t *iscan) {
+  aom_quantize_b_helper_neon(coeff_ptr, n_coeffs, zbin_ptr, round_ptr,
+                             quant_ptr, quant_shift_ptr, qcoeff_ptr,
+                             dqcoeff_ptr, dequant_ptr, eob_ptr, scan, iscan,
+                             NULL, NULL, 1);
+}
+
+void aom_quantize_b_64x64_neon(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                               const int16_t *zbin_ptr,
+                               const int16_t *round_ptr,
+                               const int16_t *quant_ptr,
+                               const int16_t *quant_shift_ptr,
+                               tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                               const int16_t *dequant_ptr, uint16_t *eob_ptr,
+                               const int16_t *scan, const int16_t *iscan) {
+  aom_quantize_b_helper_neon(coeff_ptr, n_coeffs, zbin_ptr, round_ptr,
+                             quant_ptr, quant_shift_ptr, qcoeff_ptr,
+                             dqcoeff_ptr, dequant_ptr, eob_ptr, scan, iscan,
+                             NULL, NULL, 2);
+}
diff --git a/test/quantize_func_test.cc b/test/quantize_func_test.cc
index b40b38d..d69820e 100644
--- a/test/quantize_func_test.cc
+++ b/test/quantize_func_test.cc
@@ -514,7 +514,17 @@
   make_tuple(&av1_quantize_fp_c, &av1_quantize_fp_neon,
              static_cast<TX_SIZE>(TX_8X32), TYPE_FP, AOM_BITS_8),
   make_tuple(&av1_quantize_fp_c, &av1_quantize_fp_neon,
-             static_cast<TX_SIZE>(TX_32X8), TYPE_FP, AOM_BITS_8)
+             static_cast<TX_SIZE>(TX_32X8), TYPE_FP, AOM_BITS_8),
+  make_tuple(&av1_quantize_fp_32x32_c, &av1_quantize_fp_32x32_neon,
+             static_cast<TX_SIZE>(TX_32X32), TYPE_FP, AOM_BITS_8),
+  make_tuple(&av1_quantize_fp_64x64_c, &av1_quantize_fp_64x64_neon,
+             static_cast<TX_SIZE>(TX_64X64), TYPE_FP, AOM_BITS_8),
+  make_tuple(&aom_quantize_b_c, &aom_quantize_b_neon,
+             static_cast<TX_SIZE>(TX_16X16), TYPE_B, AOM_BITS_8),
+  make_tuple(&aom_quantize_b_32x32_c, &aom_quantize_b_32x32_neon,
+             static_cast<TX_SIZE>(TX_32X32), TYPE_B, AOM_BITS_8),
+  make_tuple(&aom_quantize_b_64x64_c, &aom_quantize_b_64x64_neon,
+             static_cast<TX_SIZE>(TX_64X64), TYPE_B, AOM_BITS_8)
 };
 
 INSTANTIATE_TEST_SUITE_P(NEON, QuantizeTest,