Add Neon implementation of high bitdepth variance functions

Add Neon implementations of the 8-bit and 12-bit high bitdepth variance
functions. Also refactor the 10-bit implementation in the same way:
only widen the data-processing element types when absolutely necessary.

The implementation is mostly a backport of this libvpx change [1].

Add the corresponding tests as well.

[1] https://chromium-review.googlesource.com/c/webm/libvpx/+/4277441

Change-Id: I0d36fdac665f5a561eca660d6de241ada8280fe6
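
As a sketch of the "widen only when necessary" strategy described above
(illustrative only, not part of the patch; the function name is made up),
the computation the Neon kernels vectorize looks like this in scalar form:
differences stay in 16 bits, squares in 32 bits, and only the running
totals use wider storage.

  #include <stdint.h>

  // Hypothetical scalar model of the Neon kernels in this patch. For
  // bitdepth <= 12 the per-pixel difference fits in int16_t and
  // diff * diff fits comfortably in 32 bits.
  static void scalar_variance_model(const uint16_t *src, int src_stride,
                                    const uint16_t *ref, int ref_stride,
                                    int w, int h, uint64_t *sse,
                                    int64_t *sum) {
    uint64_t sse_acc = 0;
    int64_t sum_acc = 0;
    for (int i = 0; i < h; i++) {
      for (int j = 0; j < w; j++) {
        const int16_t diff = (int16_t)(src[j] - ref[j]);
        sum_acc += diff;                              // widen at the sum
        sse_acc += (uint32_t)((int32_t)diff * diff);  // 32-bit square
      }
      src += src_stride;
      ref += ref_stride;
    }
    *sse = sse_acc;
    *sum = sum_acc;
  }

The vector code additionally has to reason about when the packed 16- and
32-bit accumulator lanes can overflow, which is what the comments in
highbd_variance_neon.c below quantify.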
diff --git a/aom_dsp/aom_dsp_rtcd_defs.pl b/aom_dsp/aom_dsp_rtcd_defs.pl
index d71ea2b..e05c276 100755
--- a/aom_dsp/aom_dsp_rtcd_defs.pl
+++ b/aom_dsp/aom_dsp_rtcd_defs.pl
@@ -1578,47 +1578,52 @@
if (aom_config("CONFIG_AV1_HIGHBITDEPTH") eq "yes") {
add_proto qw/unsigned int aom_highbd_12_variance128x128/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/aom_highbd_12_variance128x128 sse2/;
+ specialize qw/aom_highbd_12_variance128x128 sse2 neon/;
add_proto qw/unsigned int aom_highbd_12_variance128x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/aom_highbd_12_variance128x64 sse2/;
+ specialize qw/aom_highbd_12_variance128x64 sse2 neon/;
add_proto qw/unsigned int aom_highbd_12_variance64x128/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/aom_highbd_12_variance64x128 sse2/;
+ specialize qw/aom_highbd_12_variance64x128 sse2 neon/;
add_proto qw/unsigned int aom_highbd_12_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/aom_highbd_12_variance64x64 sse2/;
+ specialize qw/aom_highbd_12_variance64x64 sse2 neon/;
add_proto qw/unsigned int aom_highbd_12_variance64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/aom_highbd_12_variance64x32 sse2/;
+ specialize qw/aom_highbd_12_variance64x32 sse2 neon/;
add_proto qw/unsigned int aom_highbd_12_variance32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/aom_highbd_12_variance32x64 sse2/;
+ specialize qw/aom_highbd_12_variance32x64 sse2 neon/;
add_proto qw/unsigned int aom_highbd_12_variance32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/aom_highbd_12_variance32x32 sse2/;
+ specialize qw/aom_highbd_12_variance32x32 sse2 neon/;
add_proto qw/unsigned int aom_highbd_12_variance32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/aom_highbd_12_variance32x16 sse2/;
+ specialize qw/aom_highbd_12_variance32x16 sse2 neon/;
add_proto qw/unsigned int aom_highbd_12_variance16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/aom_highbd_12_variance16x32 sse2/;
+ specialize qw/aom_highbd_12_variance16x32 sse2 neon/;
add_proto qw/unsigned int aom_highbd_12_variance16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/aom_highbd_12_variance16x16 sse2/;
+ specialize qw/aom_highbd_12_variance16x16 sse2 neon/;
add_proto qw/unsigned int aom_highbd_12_variance16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/aom_highbd_12_variance16x8 sse2/;
+ specialize qw/aom_highbd_12_variance16x8 sse2 neon/;
add_proto qw/unsigned int aom_highbd_12_variance8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/aom_highbd_12_variance8x16 sse2/;
+ specialize qw/aom_highbd_12_variance8x16 sse2 neon/;
add_proto qw/unsigned int aom_highbd_12_variance8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/aom_highbd_12_variance8x8 sse2/;
+ specialize qw/aom_highbd_12_variance8x8 sse2 neon/;
add_proto qw/unsigned int aom_highbd_12_variance8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_12_variance8x4 neon/;
+
add_proto qw/unsigned int aom_highbd_12_variance4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_12_variance4x8 neon/;
+
add_proto qw/unsigned int aom_highbd_12_variance4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_12_variance4x4 neon/;
add_proto qw/unsigned int aom_highbd_10_variance128x128/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/aom_highbd_10_variance128x128 sse2 avx2 neon/;
@@ -1660,51 +1665,83 @@
specialize qw/aom_highbd_10_variance8x8 sse2 avx2 neon/;
add_proto qw/unsigned int aom_highbd_10_variance8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_10_variance8x4 neon/;
+
add_proto qw/unsigned int aom_highbd_10_variance4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_10_variance4x8 neon/;
+
add_proto qw/unsigned int aom_highbd_10_variance4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_10_variance4x4 neon/;
add_proto qw/unsigned int aom_highbd_8_variance128x128/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/aom_highbd_8_variance128x128 sse2/;
+ specialize qw/aom_highbd_8_variance128x128 sse2 neon/;
add_proto qw/unsigned int aom_highbd_8_variance128x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/aom_highbd_8_variance128x64 sse2/;
+ specialize qw/aom_highbd_8_variance128x64 sse2 neon/;
add_proto qw/unsigned int aom_highbd_8_variance64x128/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/aom_highbd_8_variance64x128 sse2/;
+ specialize qw/aom_highbd_8_variance64x128 sse2 neon/;
add_proto qw/unsigned int aom_highbd_8_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/aom_highbd_8_variance64x64 sse2/;
+ specialize qw/aom_highbd_8_variance64x64 sse2 neon/;
add_proto qw/unsigned int aom_highbd_8_variance64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/aom_highbd_8_variance64x32 sse2/;
+ specialize qw/aom_highbd_8_variance64x32 sse2 neon/;
add_proto qw/unsigned int aom_highbd_8_variance32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/aom_highbd_8_variance32x64 sse2/;
+ specialize qw/aom_highbd_8_variance32x64 sse2 neon/;
add_proto qw/unsigned int aom_highbd_8_variance32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/aom_highbd_8_variance32x32 sse2/;
+ specialize qw/aom_highbd_8_variance32x32 sse2 neon/;
add_proto qw/unsigned int aom_highbd_8_variance32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/aom_highbd_8_variance32x16 sse2/;
+ specialize qw/aom_highbd_8_variance32x16 sse2 neon/;
add_proto qw/unsigned int aom_highbd_8_variance16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/aom_highbd_8_variance16x32 sse2/;
+ specialize qw/aom_highbd_8_variance16x32 sse2 neon/;
add_proto qw/unsigned int aom_highbd_8_variance16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/aom_highbd_8_variance16x16 sse2/;
+ specialize qw/aom_highbd_8_variance16x16 sse2 neon/;
add_proto qw/unsigned int aom_highbd_8_variance16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/aom_highbd_8_variance16x8 sse2/;
+ specialize qw/aom_highbd_8_variance16x8 sse2 neon/;
add_proto qw/unsigned int aom_highbd_8_variance8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/aom_highbd_8_variance8x16 sse2/;
+ specialize qw/aom_highbd_8_variance8x16 sse2 neon/;
add_proto qw/unsigned int aom_highbd_8_variance8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
- specialize qw/aom_highbd_8_variance8x8 sse2/;
+ specialize qw/aom_highbd_8_variance8x8 sse2 neon/;
add_proto qw/unsigned int aom_highbd_8_variance8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_8_variance8x4 neon/;
+
add_proto qw/unsigned int aom_highbd_8_variance4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_8_variance4x8 neon/;
+
add_proto qw/unsigned int aom_highbd_8_variance4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize qw/aom_highbd_8_variance4x4 neon/;
+
+ if (aom_config("CONFIG_REALTIME_ONLY") ne "yes") {
+ foreach $bd (8, 10, 12) {
+ add_proto qw/unsigned int/, "aom_highbd_${bd}_variance64x16", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize "aom_highbd_${bd}_variance64x16" , qw/neon/;
+
+ add_proto qw/unsigned int/, "aom_highbd_${bd}_variance32x8", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize "aom_highbd_${bd}_variance32x8" , qw/neon/;
+
+ add_proto qw/unsigned int/, "aom_highbd_${bd}_variance16x64", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize "aom_highbd_${bd}_variance16x64" , qw/neon/;
+
+ add_proto qw/unsigned int/, "aom_highbd_${bd}_variance16x4", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize "aom_highbd_${bd}_variance16x4" , qw/neon/;
+
+ add_proto qw/unsigned int/, "aom_highbd_${bd}_variance8x32", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize "aom_highbd_${bd}_variance8x32" , qw/neon/;
+
+ add_proto qw/unsigned int/, "aom_highbd_${bd}_variance4x16", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
+ specialize "aom_highbd_${bd}_variance4x16" , qw/neon/;
+ }
+ }
add_proto qw/unsigned int aom_highbd_8_mse16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
specialize qw/aom_highbd_8_mse16x16 sse2 neon/;
diff --git a/aom_dsp/arm/highbd_variance_neon.c b/aom_dsp/arm/highbd_variance_neon.c
index 3b88430..948f2f7 100644
--- a/aom_dsp/arm/highbd_variance_neon.c
+++ b/aom_dsp/arm/highbd_variance_neon.c
@@ -17,159 +17,368 @@
#include "aom_dsp/variance.h"
#include "aom_dsp/aom_filter.h"
+#include "aom_dsp/arm/mem_neon.h"
#include "aom_dsp/arm/sum_neon.h"
-typedef void (*high_variance_fn_t)(const uint16_t *src, int src_stride,
- const uint16_t *ref, int ref_stride,
- uint32_t *sse, int *sum);
+// Process a block of width 4 two rows at a time.
+static INLINE void highbd_variance_4xh_neon(const uint16_t *src_ptr,
+ int src_stride,
+ const uint16_t *ref_ptr,
+ int ref_stride, int h,
+ uint64_t *sse, int64_t *sum) {
+ int16x8_t sum_s16 = vdupq_n_s16(0);
+ int32x4_t sse_s32 = vdupq_n_s32(0);
-void aom_highbd_calc16x16var_neon(const uint16_t *src, int src_stride,
- const uint16_t *ref, int ref_stride,
- uint32_t *sse, int *sum) {
- int i, j;
- int16x8_t v_sum = vdupq_n_s16(0);
- int32x4_t v_sse_lo = vdupq_n_s32(0);
- int32x4_t v_sse_hi = vdupq_n_s32(0);
+ int i = h;
+ do {
+ const uint16x8_t s = load_unaligned_u16_4x2(src_ptr, src_stride);
+ const uint16x8_t r = load_unaligned_u16_4x2(ref_ptr, ref_stride);
- for (i = 0; i < 16; ++i) {
- for (j = 0; j < 16; j += 8) {
- const uint16x8_t v_a = vld1q_u16(&src[j]);
- const uint16x8_t v_b = vld1q_u16(&ref[j]);
- const int16x8_t sv_diff = vreinterpretq_s16_u16(vsubq_u16(v_a, v_b));
- v_sum = vaddq_s16(v_sum, sv_diff);
- v_sse_lo =
- vmlal_s16(v_sse_lo, vget_low_s16(sv_diff), vget_low_s16(sv_diff));
- v_sse_hi =
- vmlal_s16(v_sse_hi, vget_high_s16(sv_diff), vget_high_s16(sv_diff));
- }
- src += src_stride;
- ref += ref_stride;
- }
+ int16x8_t diff = vreinterpretq_s16_u16(vsubq_u16(s, r));
+ sum_s16 = vaddq_s16(sum_s16, diff);
- *sum = horizontal_add_s16x8(v_sum);
- *sse = (unsigned int)horizontal_add_s32x4(vaddq_s32(v_sse_lo, v_sse_hi));
+ sse_s32 = vmlal_s16(sse_s32, vget_low_s16(diff), vget_low_s16(diff));
+ sse_s32 = vmlal_s16(sse_s32, vget_high_s16(diff), vget_high_s16(diff));
+
+ src_ptr += 2 * src_stride;
+ ref_ptr += 2 * ref_stride;
+ i -= 2;
+ } while (i != 0);
+
+ *sum = horizontal_add_s16x8(sum_s16);
+ *sse = horizontal_add_s32x4(sse_s32);
}
-void aom_highbd_calc8x8var_neon(const uint16_t *src, int src_stride,
- const uint16_t *ref, int ref_stride,
- uint32_t *sse, int *sum) {
- int i;
- int16x8_t v_sum = vdupq_n_s16(0);
- int32x4_t v_sse_lo = vdupq_n_s32(0);
- int32x4_t v_sse_hi = vdupq_n_s32(0);
+// For 8-bit and 10-bit data, since we're using two int32x4 accumulators, all
+// block sizes can be processed in 32-bit elements (1023*1023*128*32 =
+// 4286582784 for a 128x128 block).
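+// (Worked bound: the squares are spread across the eight lanes of two
+// int32x4 accumulators, so a lane holds at most 128*128/8 = 2048 squares of
+// at most 1023*1023 = 1046529, peaking at 2143291392 < INT32_MAX; the final
+// vaddq_u32 lanes peak at twice that, 4286582784, which still fits in a
+// uint32_t.)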
+static INLINE void highbd_variance_large_neon(const uint16_t *src_ptr,
+ int src_stride,
+ const uint16_t *ref_ptr,
+ int ref_stride, int w, int h,
+ uint64_t *sse, int64_t *sum) {
+ int32x4_t sum_s32 = vdupq_n_s32(0);
+ int32x4_t sse_s32[2] = { vdupq_n_s32(0), vdupq_n_s32(0) };
- for (i = 0; i < 8; ++i) {
- const uint16x8_t v_a = vld1q_u16(&src[0]);
- const uint16x8_t v_b = vld1q_u16(&ref[0]);
- const int16x8_t sv_diff = vreinterpretq_s16_u16(vsubq_u16(v_a, v_b));
- v_sum = vaddq_s16(v_sum, sv_diff);
- v_sse_lo =
- vmlal_s16(v_sse_lo, vget_low_s16(sv_diff), vget_low_s16(sv_diff));
- v_sse_hi =
- vmlal_s16(v_sse_hi, vget_high_s16(sv_diff), vget_high_s16(sv_diff));
- src += src_stride;
- ref += ref_stride;
- }
+ int i = h;
+ do {
+ int j = 0;
+ do {
+ const uint16x8_t s = vld1q_u16(src_ptr + j);
+ const uint16x8_t r = vld1q_u16(ref_ptr + j);
- *sum = horizontal_add_s16x8(v_sum);
- *sse = (unsigned int)horizontal_add_s32x4(vaddq_s32(v_sse_lo, v_sse_hi));
+ const int16x8_t diff = vreinterpretq_s16_u16(vsubq_u16(s, r));
+ sum_s32 = vpadalq_s16(sum_s32, diff);
+
+ sse_s32[0] =
+ vmlal_s16(sse_s32[0], vget_low_s16(diff), vget_low_s16(diff));
+ sse_s32[1] =
+ vmlal_s16(sse_s32[1], vget_high_s16(diff), vget_high_s16(diff));
+
+ j += 8;
+ } while (j < w);
+
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
+ } while (--i != 0);
+
+ *sum = horizontal_add_s32x4(sum_s32);
+ *sse = horizontal_long_add_u32x4(vaddq_u32(
+ vreinterpretq_u32_s32(sse_s32[0]), vreinterpretq_u32_s32(sse_s32[1])));
}
-void aom_highbd_calc4x4var_neon(const uint16_t *src, int src_stride,
- const uint16_t *ref, int ref_stride,
- uint32_t *sse, int *sum) {
- int i;
- int16x8_t v_sum = vdupq_n_s16(0);
- int32x4_t v_sse_lo = vdupq_n_s32(0);
- int32x4_t v_sse_hi = vdupq_n_s32(0);
-
- for (i = 0; i < 4; i += 2) {
- const uint16x4_t v_a_r0 = vld1_u16(&src[0]);
- const uint16x4_t v_b_r0 = vld1_u16(&ref[0]);
- const uint16x4_t v_a_r1 = vld1_u16(&src[src_stride]);
- const uint16x4_t v_b_r1 = vld1_u16(&ref[ref_stride]);
- const uint16x8_t v_a = vcombine_u16(v_a_r0, v_a_r1);
- const uint16x8_t v_b = vcombine_u16(v_b_r0, v_b_r1);
- const int16x8_t sv_diff = vreinterpretq_s16_u16(vsubq_u16(v_a, v_b));
- v_sum = vaddq_s16(v_sum, sv_diff);
- v_sse_lo =
- vmlal_s16(v_sse_lo, vget_low_s16(sv_diff), vget_low_s16(sv_diff));
- v_sse_hi =
- vmlal_s16(v_sse_hi, vget_high_s16(sv_diff), vget_high_s16(sv_diff));
- src += src_stride << 1;
- ref += ref_stride << 1;
- }
-
- *sum = horizontal_add_s16x8(v_sum);
- *sse = (unsigned int)horizontal_add_s32x4(vaddq_s32(v_sse_lo, v_sse_hi));
+static INLINE void highbd_variance_8xh_neon(const uint16_t *src,
+ int src_stride,
+ const uint16_t *ref,
+ int ref_stride, int h,
+ uint64_t *sse, int64_t *sum) {
+ highbd_variance_large_neon(src, src_stride, ref, ref_stride, 8, h, sse, sum);
}
-static void highbd_10_variance_neon(const uint16_t *src, int src_stride,
- const uint16_t *ref, int ref_stride, int w,
- int h, uint32_t *sse, int *sum,
- high_variance_fn_t var_fn, int block_size) {
- int i, j;
- uint64_t sse_long = 0;
- int32_t sum_long = 0;
-
- for (i = 0; i < h; i += block_size) {
- for (j = 0; j < w; j += block_size) {
- unsigned int sse0;
- int sum0;
- var_fn(src + src_stride * i + j, src_stride, ref + ref_stride * i + j,
- ref_stride, &sse0, &sum0);
- sse_long += sse0;
- sum_long += sum0;
- }
- }
- *sum = ROUND_POWER_OF_TWO(sum_long, 2);
- *sse = (uint32_t)ROUND_POWER_OF_TWO(sse_long, 4);
+static INLINE void highbd_variance_16xh_neon(const uint16_t *src,
+ int src_stride,
+ const uint16_t *ref,
+ int ref_stride, int h,
+ uint64_t *sse, int64_t *sum) {
+ highbd_variance_large_neon(src, src_stride, ref, ref_stride, 16, h, sse, sum);
}
-#define VAR_FN(w, h, block_size, shift) \
- uint32_t aom_highbd_10_variance##w##x##h##_neon( \
- const uint8_t *src8, int src_stride, const uint8_t *ref8, \
- int ref_stride, uint32_t *sse) { \
- int sum; \
- int64_t var; \
- uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
- uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \
- highbd_10_variance_neon( \
- src, src_stride, ref, ref_stride, w, h, sse, &sum, \
- aom_highbd_calc##block_size##x##block_size##var_neon, block_size); \
- var = (int64_t)(*sse) - (((int64_t)sum * sum) >> shift); \
- return (var >= 0) ? (uint32_t)var : 0; \
+static INLINE void highbd_variance_32xh_neon(const uint16_t *src,
+ int src_stride,
+ const uint16_t *ref,
+ int ref_stride, int h,
+ uint64_t *sse, int64_t *sum) {
+ highbd_variance_large_neon(src, src_stride, ref, ref_stride, 32, h, sse, sum);
+}
+
+static INLINE void highbd_variance_64xh_neon(const uint16_t *src,
+ int src_stride,
+ const uint16_t *ref,
+ int ref_stride, int h,
+ uint64_t *sse, int64_t *sum) {
+ highbd_variance_large_neon(src, src_stride, ref, ref_stride, 64, h, sse, sum);
+}
+
+static INLINE void highbd_variance_128xh_neon(const uint16_t *src,
+ int src_stride,
+ const uint16_t *ref,
+ int ref_stride, int h,
+ uint64_t *sse, int64_t *sum) {
+ highbd_variance_large_neon(src, src_stride, ref, ref_stride, 128, h, sse,
+ sum);
+}
+
+// For 12-bit data, we can only accumulate up to 128 elements in the sum of
+// squares (4095*4095*128 = 2146435200), and because we're using two int32x4
+// accumulators, we can only process up to 32 32-element rows (32*32/8 = 128)
+// or 16 64-element rows before we have to accumulate into 64-bit elements.
+// Therefore blocks of size 32x64, 64x32, 64x64, 64x128, 128x64, 128x128 are
+// processed in a different helper function.
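+//
+// In each case the 32-bit accumulators are folded into 64-bit elements
+// every h_limit = 1024 / w rows (32 rows at width 32, 16 at width 64, 8 at
+// width 128), i.e. every 1024 elements, or 128 squares per accumulator lane.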
+
+// Process a block of any size where the width is divisible by 8, with
+// accumulation into 64-bit elements.
+static INLINE void highbd_variance_xlarge_neon(
+ const uint16_t *src_ptr, int src_stride, const uint16_t *ref_ptr,
+ int ref_stride, int w, int h, int h_limit, uint64_t *sse, int64_t *sum) {
+ int32x4_t sum_s32 = vdupq_n_s32(0);
+ int64x2_t sse_s64 = vdupq_n_s64(0);
+
+ // 'h_limit' is the number of 'w'-width rows we can process before our 32-bit
+ // accumulator overflows. After hitting this limit we accumulate into 64-bit
+ // elements.
+ int h_tmp = h > h_limit ? h_limit : h;
+
+ int i = 0;
+ do {
+ int32x4_t sse_s32[2] = { vdupq_n_s32(0), vdupq_n_s32(0) };
+ do {
+ int j = 0;
+ do {
+ const uint16x8_t s0 = vld1q_u16(src_ptr + j);
+ const uint16x8_t r0 = vld1q_u16(ref_ptr + j);
+
+ const int16x8_t diff = vreinterpretq_s16_u16(vsubq_u16(s0, r0));
+ sum_s32 = vpadalq_s16(sum_s32, diff);
+
+ sse_s32[0] =
+ vmlal_s16(sse_s32[0], vget_low_s16(diff), vget_low_s16(diff));
+ sse_s32[1] =
+ vmlal_s16(sse_s32[1], vget_high_s16(diff), vget_high_s16(diff));
+
+ j += 8;
+ } while (j < w);
+
+ src_ptr += src_stride;
+ ref_ptr += ref_stride;
+ i++;
+ } while (i < h_tmp);
+
+ sse_s64 = vpadalq_s32(sse_s64, sse_s32[0]);
+ sse_s64 = vpadalq_s32(sse_s64, sse_s32[1]);
+ h_tmp += h_limit;
+ } while (i < h);
+
+ *sum = horizontal_add_s32x4(sum_s32);
+ *sse = (uint64_t)horizontal_add_s64x2(sse_s64);
+}
+
+static INLINE void highbd_variance_32xh_xlarge_neon(
+ const uint16_t *src, int src_stride, const uint16_t *ref, int ref_stride,
+ int h, uint64_t *sse, int64_t *sum) {
+ highbd_variance_xlarge_neon(src, src_stride, ref, ref_stride, 32, h, 32, sse,
+ sum);
+}
+
+static INLINE void highbd_variance_64xh_xlarge_neon(
+ const uint16_t *src, int src_stride, const uint16_t *ref, int ref_stride,
+ int h, uint64_t *sse, int64_t *sum) {
+ highbd_variance_xlarge_neon(src, src_stride, ref, ref_stride, 64, h, 16, sse,
+ sum);
+}
+
+static INLINE void highbd_variance_128xh_xlarge_neon(
+ const uint16_t *src, int src_stride, const uint16_t *ref, int ref_stride,
+ int h, uint64_t *sse, int64_t *sum) {
+ highbd_variance_xlarge_neon(src, src_stride, ref, ref_stride, 128, h, 8, sse,
+ sum);
+}
+
+#define HBD_VARIANCE_WXH_8_NEON(w, h) \
+ uint32_t aom_highbd_8_variance##w##x##h##_neon( \
+ const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, \
+ int ref_stride, uint32_t *sse) { \
+ int sum; \
+ uint64_t sse_long = 0; \
+ int64_t sum_long = 0; \
+ uint16_t *src = CONVERT_TO_SHORTPTR(src_ptr); \
+ uint16_t *ref = CONVERT_TO_SHORTPTR(ref_ptr); \
+ highbd_variance_##w##xh_neon(src, src_stride, ref, ref_stride, h, \
+ &sse_long, &sum_long); \
+ *sse = (uint32_t)sse_long; \
+ sum = (int)sum_long; \
+ return *sse - (uint32_t)(((int64_t)sum * sum) / (w * h)); \
}
-VAR_FN(128, 128, 16, 14)
-VAR_FN(128, 64, 16, 13)
-VAR_FN(64, 128, 16, 13)
-VAR_FN(64, 64, 16, 12)
-VAR_FN(64, 32, 16, 11)
-VAR_FN(32, 64, 16, 11)
-VAR_FN(32, 32, 16, 10)
-VAR_FN(32, 16, 16, 9)
-VAR_FN(16, 32, 16, 9)
-VAR_FN(16, 16, 16, 8)
-VAR_FN(16, 8, 8, 7)
-VAR_FN(8, 16, 8, 7)
-VAR_FN(8, 8, 8, 6)
+#define HBD_VARIANCE_WXH_10_NEON(w, h) \
+ uint32_t aom_highbd_10_variance##w##x##h##_neon( \
+ const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, \
+ int ref_stride, uint32_t *sse) { \
+ int sum; \
+ int64_t var; \
+ uint64_t sse_long = 0; \
+ int64_t sum_long = 0; \
+ uint16_t *src = CONVERT_TO_SHORTPTR(src_ptr); \
+ uint16_t *ref = CONVERT_TO_SHORTPTR(ref_ptr); \
+ highbd_variance_##w##xh_neon(src, src_stride, ref, ref_stride, h, \
+ &sse_long, &sum_long); \
+ *sse = (uint32_t)ROUND_POWER_OF_TWO(sse_long, 4); \
+ sum = (int)ROUND_POWER_OF_TWO(sum_long, 2); \
+ var = (int64_t)(*sse) - (((int64_t)sum * sum) / (w * h)); \
+ return (var >= 0) ? (uint32_t)var : 0; \
+ }
-VAR_FN(16, 4, 4, 6)
-VAR_FN(4, 16, 4, 6)
+#define HBD_VARIANCE_WXH_12_NEON(w, h) \
+ uint32_t aom_highbd_12_variance##w##x##h##_neon( \
+ const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, \
+ int ref_stride, uint32_t *sse) { \
+ int sum; \
+ int64_t var; \
+ uint64_t sse_long = 0; \
+ int64_t sum_long = 0; \
+ uint16_t *src = CONVERT_TO_SHORTPTR(src_ptr); \
+ uint16_t *ref = CONVERT_TO_SHORTPTR(ref_ptr); \
+ highbd_variance_##w##xh_neon(src, src_stride, ref, ref_stride, h, \
+ &sse_long, &sum_long); \
+ *sse = (uint32_t)ROUND_POWER_OF_TWO(sse_long, 8); \
+ sum = (int)ROUND_POWER_OF_TWO(sum_long, 4); \
+ var = (int64_t)(*sse) - (((int64_t)sum * sum) / (w * h)); \
+ return (var >= 0) ? (uint32_t)var : 0; \
+ }
-VAR_FN(8, 4, 4, 5)
-VAR_FN(4, 8, 4, 5)
-VAR_FN(4, 4, 4, 4)
+#define HBD_VARIANCE_WXH_12_XLARGE_NEON(w, h) \
+ uint32_t aom_highbd_12_variance##w##x##h##_neon( \
+ const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, \
+ int ref_stride, uint32_t *sse) { \
+ int sum; \
+ int64_t var; \
+ uint64_t sse_long = 0; \
+ int64_t sum_long = 0; \
+ uint16_t *src = CONVERT_TO_SHORTPTR(src_ptr); \
+ uint16_t *ref = CONVERT_TO_SHORTPTR(ref_ptr); \
+ highbd_variance_##w##xh_xlarge_neon(src, src_stride, ref, ref_stride, h, \
+ &sse_long, &sum_long); \
+ *sse = (uint32_t)ROUND_POWER_OF_TWO(sse_long, 8); \
+ sum = (int)ROUND_POWER_OF_TWO(sum_long, 4); \
+ var = (int64_t)(*sse) - (((int64_t)sum * sum) / (w * h)); \
+ return (var >= 0) ? (uint32_t)var : 0; \
+ }
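+
+// The 8-bit variant uses sse and sum as-is, while the 10-bit and 12-bit
+// variants first scale them back to the 8-bit range (right-shifting sse and
+// sum by 4 and 2, or by 8 and 4, bits respectively), matching the generic C
+// implementations in aom_dsp/variance.c.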
+
+// 8-bit
+HBD_VARIANCE_WXH_8_NEON(4, 4)
+HBD_VARIANCE_WXH_8_NEON(4, 8)
+
+HBD_VARIANCE_WXH_8_NEON(8, 4)
+HBD_VARIANCE_WXH_8_NEON(8, 8)
+HBD_VARIANCE_WXH_8_NEON(8, 16)
+
+HBD_VARIANCE_WXH_8_NEON(16, 8)
+HBD_VARIANCE_WXH_8_NEON(16, 16)
+HBD_VARIANCE_WXH_8_NEON(16, 32)
+
+HBD_VARIANCE_WXH_8_NEON(32, 16)
+HBD_VARIANCE_WXH_8_NEON(32, 32)
+HBD_VARIANCE_WXH_8_NEON(32, 64)
+
+HBD_VARIANCE_WXH_8_NEON(64, 32)
+HBD_VARIANCE_WXH_8_NEON(64, 64)
+HBD_VARIANCE_WXH_8_NEON(64, 128)
+
+HBD_VARIANCE_WXH_8_NEON(128, 64)
+HBD_VARIANCE_WXH_8_NEON(128, 128)
+
+// 10-bit
+HBD_VARIANCE_WXH_10_NEON(4, 4)
+HBD_VARIANCE_WXH_10_NEON(4, 8)
+
+HBD_VARIANCE_WXH_10_NEON(8, 4)
+HBD_VARIANCE_WXH_10_NEON(8, 8)
+HBD_VARIANCE_WXH_10_NEON(8, 16)
+
+HBD_VARIANCE_WXH_10_NEON(16, 8)
+HBD_VARIANCE_WXH_10_NEON(16, 16)
+HBD_VARIANCE_WXH_10_NEON(16, 32)
+
+HBD_VARIANCE_WXH_10_NEON(32, 16)
+HBD_VARIANCE_WXH_10_NEON(32, 32)
+HBD_VARIANCE_WXH_10_NEON(32, 64)
+
+HBD_VARIANCE_WXH_10_NEON(64, 32)
+HBD_VARIANCE_WXH_10_NEON(64, 64)
+HBD_VARIANCE_WXH_10_NEON(64, 128)
+
+HBD_VARIANCE_WXH_10_NEON(128, 64)
+HBD_VARIANCE_WXH_10_NEON(128, 128)
+
+// 12-bit
+HBD_VARIANCE_WXH_12_NEON(4, 4)
+HBD_VARIANCE_WXH_12_NEON(4, 8)
+
+HBD_VARIANCE_WXH_12_NEON(8, 4)
+HBD_VARIANCE_WXH_12_NEON(8, 8)
+HBD_VARIANCE_WXH_12_NEON(8, 16)
+
+HBD_VARIANCE_WXH_12_NEON(16, 8)
+HBD_VARIANCE_WXH_12_NEON(16, 16)
+HBD_VARIANCE_WXH_12_NEON(16, 32)
+
+HBD_VARIANCE_WXH_12_NEON(32, 16)
+HBD_VARIANCE_WXH_12_NEON(32, 32)
+HBD_VARIANCE_WXH_12_XLARGE_NEON(32, 64)
+
+HBD_VARIANCE_WXH_12_XLARGE_NEON(64, 32)
+HBD_VARIANCE_WXH_12_XLARGE_NEON(64, 64)
+HBD_VARIANCE_WXH_12_XLARGE_NEON(64, 128)
+
+HBD_VARIANCE_WXH_12_XLARGE_NEON(128, 64)
+HBD_VARIANCE_WXH_12_XLARGE_NEON(128, 128)
#if !CONFIG_REALTIME_ONLY
-VAR_FN(64, 16, 16, 10)
-VAR_FN(16, 64, 16, 10)
-VAR_FN(8, 32, 8, 8)
-VAR_FN(32, 8, 8, 8)
-#endif // !CONFIG_REALTIME_ONLY
+// 8-bit
+HBD_VARIANCE_WXH_8_NEON(4, 16)
-#undef VAR_FN
+HBD_VARIANCE_WXH_8_NEON(8, 32)
+
+HBD_VARIANCE_WXH_8_NEON(16, 4)
+HBD_VARIANCE_WXH_8_NEON(16, 64)
+
+HBD_VARIANCE_WXH_8_NEON(32, 8)
+
+HBD_VARIANCE_WXH_8_NEON(64, 16)
+
+// 10-bit
+HBD_VARIANCE_WXH_10_NEON(4, 16)
+
+HBD_VARIANCE_WXH_10_NEON(8, 32)
+
+HBD_VARIANCE_WXH_10_NEON(16, 4)
+HBD_VARIANCE_WXH_10_NEON(16, 64)
+
+HBD_VARIANCE_WXH_10_NEON(32, 8)
+
+HBD_VARIANCE_WXH_10_NEON(64, 16)
+
+// 12-bit
+HBD_VARIANCE_WXH_12_NEON(4, 16)
+
+HBD_VARIANCE_WXH_12_NEON(8, 32)
+
+HBD_VARIANCE_WXH_12_NEON(16, 4)
+HBD_VARIANCE_WXH_12_NEON(16, 64)
+
+HBD_VARIANCE_WXH_12_NEON(32, 8)
+
+HBD_VARIANCE_WXH_12_NEON(64, 16)
+
+#endif // !CONFIG_REALTIME_ONLY
static INLINE uint32_t highbd_mse_wxh_neon(const uint16_t *src_ptr,
int src_stride,
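
For reference, a minimal usage sketch (illustrative only; the buffer fill
and strides are made up, but aom_highbd_12_variance32x32_neon is defined by
the macros above, and CONVERT_TO_BYTEPTR comes from
aom_dsp/aom_dsp_common.h): high bitdepth planes are stored as uint16_t,
while the DSP prototypes take uint8_t pointers.

  #include <stdint.h>
  #include <stdio.h>

  #include "aom_dsp/aom_dsp_common.h"  // CONVERT_TO_BYTEPTR
  #include "config/aom_dsp_rtcd.h"

  static void variance_example(void) {
    uint16_t src[32 * 32], ref[32 * 32];
    for (int i = 0; i < 32 * 32; i++) {
      src[i] = (uint16_t)(i & 0xfff);  // 12-bit samples
      ref[i] = (uint16_t)((i + 7) & 0xfff);
    }
    unsigned int sse;
    // Strides are in uint16_t elements. The 12-bit kernel scales sse and
    // sum down to the 8-bit range internally before computing the variance.
    const unsigned int var = aom_highbd_12_variance32x32_neon(
        CONVERT_TO_BYTEPTR(src), 32, CONVERT_TO_BYTEPTR(ref), 32, &sse);
    printf("sse = %u, variance = %u\n", sse, var);
  }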
diff --git a/aom_dsp/arm/mem_neon.h b/aom_dsp/arm/mem_neon.h
index fb92d9b..16d44c5 100644
--- a/aom_dsp/arm/mem_neon.h
+++ b/aom_dsp/arm/mem_neon.h
@@ -917,8 +917,8 @@
*s7 = vld1q_u16(s + 8);
}
-static INLINE void load_unaligned_u16_4x4(const uint16_t *buf, uint32_t stride,
- uint16x8_t *tu0, uint16x8_t *tu1) {
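+// Load four uint16_t values from each of two (possibly unaligned) rows and
+// return them concatenated in one vector: { buf[0..3], (buf + stride)[0..3] }.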
+static INLINE uint16x8_t load_unaligned_u16_4x2(const uint16_t *buf,
+ uint32_t stride) {
uint64_t a;
uint64x2_t a_u64;
@@ -929,13 +929,14 @@
memcpy(&a, buf, 8);
buf += stride;
a_u64 = vsetq_lane_u64(a, a_u64, 1);
- *tu0 = vreinterpretq_u16_u64(a_u64);
- memcpy(&a, buf, 8);
- buf += stride;
- a_u64 = vdupq_n_u64(a);
- memcpy(&a, buf, 8);
- a_u64 = vsetq_lane_u64(a, a_u64, 1);
- *tu1 = vreinterpretq_u16_u64(a_u64);
+ return vreinterpretq_u16_u64(a_u64);
+}
+
+static INLINE void load_unaligned_u16_4x4(const uint16_t *buf, uint32_t stride,
+ uint16x8_t *tu0, uint16x8_t *tu1) {
+ *tu0 = load_unaligned_u16_4x2(buf, stride);
+ buf += 2 * stride;
+ *tu1 = load_unaligned_u16_4x2(buf, stride);
}
static INLINE void load_s32_4x4(int32_t *s, int32_t p, int32x4_t *s1,
diff --git a/test/variance_test.cc b/test/variance_test.cc
index 8db54fc..ba9c9bb 100644
--- a/test/variance_test.cc
+++ b/test/variance_test.cc
@@ -3390,6 +3390,22 @@
#if CONFIG_AV1_HIGHBITDEPTH
const VarianceParams kArrayHBDVariance_neon[] = {
+ VarianceParams(7, 7, &aom_highbd_12_variance128x128_neon, 12),
+ VarianceParams(7, 6, &aom_highbd_12_variance128x64_neon, 12),
+ VarianceParams(6, 7, &aom_highbd_12_variance64x128_neon, 12),
+ VarianceParams(6, 6, &aom_highbd_12_variance64x64_neon, 12),
+ VarianceParams(6, 5, &aom_highbd_12_variance64x32_neon, 12),
+ VarianceParams(5, 6, &aom_highbd_12_variance32x64_neon, 12),
+ VarianceParams(5, 5, &aom_highbd_12_variance32x32_neon, 12),
+ VarianceParams(5, 4, &aom_highbd_12_variance32x16_neon, 12),
+ VarianceParams(4, 5, &aom_highbd_12_variance16x32_neon, 12),
+ VarianceParams(4, 4, &aom_highbd_12_variance16x16_neon, 12),
+ VarianceParams(4, 3, &aom_highbd_12_variance16x8_neon, 12),
+ VarianceParams(3, 4, &aom_highbd_12_variance8x16_neon, 12),
+ VarianceParams(3, 3, &aom_highbd_12_variance8x8_neon, 12),
+ VarianceParams(3, 2, &aom_highbd_12_variance8x4_neon, 12),
+ VarianceParams(2, 3, &aom_highbd_12_variance4x8_neon, 12),
+ VarianceParams(2, 2, &aom_highbd_12_variance4x4_neon, 12),
VarianceParams(7, 7, &aom_highbd_10_variance128x128_neon, 10),
VarianceParams(7, 6, &aom_highbd_10_variance128x64_neon, 10),
VarianceParams(6, 7, &aom_highbd_10_variance64x128_neon, 10),
@@ -3406,13 +3422,41 @@
VarianceParams(3, 2, &aom_highbd_10_variance8x4_neon, 10),
VarianceParams(2, 3, &aom_highbd_10_variance4x8_neon, 10),
VarianceParams(2, 2, &aom_highbd_10_variance4x4_neon, 10),
+ VarianceParams(7, 7, &aom_highbd_8_variance128x128_neon, 8),
+ VarianceParams(7, 6, &aom_highbd_8_variance128x64_neon, 8),
+ VarianceParams(6, 7, &aom_highbd_8_variance64x128_neon, 8),
+ VarianceParams(6, 6, &aom_highbd_8_variance64x64_neon, 8),
+ VarianceParams(6, 5, &aom_highbd_8_variance64x32_neon, 8),
+ VarianceParams(5, 6, &aom_highbd_8_variance32x64_neon, 8),
+ VarianceParams(5, 5, &aom_highbd_8_variance32x32_neon, 8),
+ VarianceParams(5, 4, &aom_highbd_8_variance32x16_neon, 8),
+ VarianceParams(4, 5, &aom_highbd_8_variance16x32_neon, 8),
+ VarianceParams(4, 4, &aom_highbd_8_variance16x16_neon, 8),
+ VarianceParams(4, 3, &aom_highbd_8_variance16x8_neon, 8),
+ VarianceParams(3, 4, &aom_highbd_8_variance8x16_neon, 8),
+ VarianceParams(3, 3, &aom_highbd_8_variance8x8_neon, 8),
+ VarianceParams(3, 2, &aom_highbd_8_variance8x4_neon, 8),
+ VarianceParams(2, 3, &aom_highbd_8_variance4x8_neon, 8),
+ VarianceParams(2, 2, &aom_highbd_8_variance4x4_neon, 8),
#if !CONFIG_REALTIME_ONLY
+ VarianceParams(6, 4, &aom_highbd_12_variance64x16_neon, 12),
+ VarianceParams(4, 6, &aom_highbd_12_variance16x64_neon, 12),
+ VarianceParams(5, 3, &aom_highbd_12_variance32x8_neon, 12),
+ VarianceParams(3, 5, &aom_highbd_12_variance8x32_neon, 12),
+ VarianceParams(4, 2, &aom_highbd_12_variance16x4_neon, 12),
+ VarianceParams(2, 4, &aom_highbd_12_variance4x16_neon, 12),
VarianceParams(6, 4, &aom_highbd_10_variance64x16_neon, 10),
VarianceParams(4, 6, &aom_highbd_10_variance16x64_neon, 10),
VarianceParams(5, 3, &aom_highbd_10_variance32x8_neon, 10),
VarianceParams(3, 5, &aom_highbd_10_variance8x32_neon, 10),
VarianceParams(4, 2, &aom_highbd_10_variance16x4_neon, 10),
VarianceParams(2, 4, &aom_highbd_10_variance4x16_neon, 10),
+ VarianceParams(6, 4, &aom_highbd_8_variance64x16_neon, 8),
+ VarianceParams(4, 6, &aom_highbd_8_variance16x64_neon, 8),
+ VarianceParams(5, 3, &aom_highbd_8_variance32x8_neon, 8),
+ VarianceParams(3, 5, &aom_highbd_8_variance8x32_neon, 8),
+ VarianceParams(4, 2, &aom_highbd_8_variance16x4_neon, 8),
+ VarianceParams(2, 4, &aom_highbd_8_variance4x16_neon, 8),
#endif
};