Add Neon implementation of Hadamard function for 32x32 case
Add a Neon implementation of the aom_hadamard_32x32 function,
which previously only had a scalar C implementation on Arm.
Also add test coverage for the new Neon function.
Change-Id: Ic09e060f7153d8066102b6501bd29f0d89396fa8
diff --git a/aom_dsp/aom_dsp_rtcd_defs.pl b/aom_dsp/aom_dsp_rtcd_defs.pl
index 5e34fd0..8b72746 100755
--- a/aom_dsp/aom_dsp_rtcd_defs.pl
+++ b/aom_dsp/aom_dsp_rtcd_defs.pl
@@ -1247,7 +1247,7 @@
specialize qw/aom_hadamard_16x16 avx2 sse2 neon/;
add_proto qw/void aom_hadamard_32x32/, "const int16_t *src_diff, ptrdiff_t src_stride, tran_low_t *coeff";
- specialize qw/aom_hadamard_32x32 avx2 sse2/;
+ specialize qw/aom_hadamard_32x32 avx2 sse2 neon/;
add_proto qw/void aom_hadamard_lp_8x8/, "const int16_t *src_diff, ptrdiff_t src_stride, int16_t *coeff";
specialize qw/aom_hadamard_lp_8x8 sse2 neon/;
diff --git a/aom_dsp/arm/hadamard_neon.c b/aom_dsp/arm/hadamard_neon.c
index 646d981..ec9fc2c 100644
--- a/aom_dsp/arm/hadamard_neon.c
+++ b/aom_dsp/arm/hadamard_neon.c
@@ -227,3 +227,42 @@
coeff += (4 + (((i >> 3) & 1) << 3));
}
}
+
+void aom_hadamard_32x32_neon(const int16_t *src_diff, ptrdiff_t src_stride,
+ tran_low_t *coeff) {
+ /* Top left first. */
+ aom_hadamard_16x16_neon(src_diff + 0 + 0 * src_stride, src_stride, coeff + 0);
+ /* Top right. */
+ aom_hadamard_16x16_neon(src_diff + 16 + 0 * src_stride, src_stride,
+ coeff + 256);
+ /* Bottom left. */
+ aom_hadamard_16x16_neon(src_diff + 0 + 16 * src_stride, src_stride,
+ coeff + 512);
+ /* Bottom right. */
+ aom_hadamard_16x16_neon(src_diff + 16 + 16 * src_stride, src_stride,
+ coeff + 768);
+
+ for (int i = 0; i < 256; i += 8) {
+ const int16x8_t a0 = load_tran_low_to_s16q(coeff);
+ const int16x8_t a1 = load_tran_low_to_s16q(coeff + 256);
+ const int16x8_t a2 = load_tran_low_to_s16q(coeff + 512);
+ const int16x8_t a3 = load_tran_low_to_s16q(coeff + 768);
+
+ const int16x8_t b0 = vshrq_n_s16(vaddq_s16(a0, a1), 2);
+ const int16x8_t b1 = vshrq_n_s16(vsubq_s16(a0, a1), 2);
+ const int16x8_t b2 = vshrq_n_s16(vaddq_s16(a2, a3), 2);
+ const int16x8_t b3 = vshrq_n_s16(vsubq_s16(a2, a3), 2);
+
+ const int16x8_t c0 = vaddq_s16(b0, b2);
+ const int16x8_t c1 = vaddq_s16(b1, b3);
+ const int16x8_t c2 = vsubq_s16(b0, b2);
+ const int16x8_t c3 = vsubq_s16(b1, b3);
+
+ store_s16q_to_tran_low(coeff + 0, c0);
+ store_s16q_to_tran_low(coeff + 256, c1);
+ store_s16q_to_tran_low(coeff + 512, c2);
+ store_s16q_to_tran_low(coeff + 768, c3);
+
+ coeff += 8;
+ }
+}
diff --git a/test/hadamard_test.cc b/test/hadamard_test.cc
index a15231f..8c4810a 100644
--- a/test/hadamard_test.cc
+++ b/test/hadamard_test.cc
@@ -351,7 +351,8 @@
NEON, HadamardLowbdTest,
::testing::Values(HadamardFuncWithSize(&aom_hadamard_4x4_neon, 4, 4),
HadamardFuncWithSize(&aom_hadamard_8x8_neon, 8, 8),
- HadamardFuncWithSize(&aom_hadamard_16x16_neon, 16, 16)));
+ HadamardFuncWithSize(&aom_hadamard_16x16_neon, 16, 16),
+ HadamardFuncWithSize(&aom_hadamard_32x32_neon, 32, 32)));
#endif // HAVE_NEON
// Tests for low precision