Add Neon DotProd implementation for av1_convolve_2d_scale
Add an Armv8.4 DotProd implementation for the horizontal filtering
part of av1_convolve_2d_scale.
The vertical pass operates on 16-bit intermediate values, which the
8-bit DotProd instructions cannot accelerate, so the Armv8.0
implementation must still be used. Move the functions associated with
the vertical pass into a header file so that they can be shared by
both the Armv8.0 and Armv8.4 DotProd paths.
Add the corresponding tests as well.
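
For reference, the identity the DotProd kernels rely on is sketched
below (an illustrative scalar model, not part of the patch; the
helper name is hypothetical). SDOT consumes signed 8-bit inputs, so
each unsigned sample is shifted into [-128, 127] and the resulting
bias of 128 * sum(filter) is added back through the accumulator:

  /* Scalar sketch: with taps summing to 1 << FILTER_BITS, the bias
   * term is exactly 128 << FILTER_BITS (dotprod_offset below). */
  static int32_t convolve8_scalar(const uint8_t *s, const int16_t *f) {
    int32_t sum = 128 << FILTER_BITS; /* bias correction */
    for (int k = 0; k < 8; ++k) {
      sum += (s[k] - 128) * f[k]; /* equals dot(s, f) after correction */
    }
    return sum;
  }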
Change-Id: I4865953a1aff7bde9b786d65a140da2501894cc7
diff --git a/av1/av1.cmake b/av1/av1.cmake
index 232e00f..f67778a 100644
--- a/av1/av1.cmake
+++ b/av1/av1.cmake
@@ -404,6 +404,7 @@
"${AOM_ROOT}/av1/common/arm/wiener_convolve_neon.c")
list(APPEND AOM_AV1_COMMON_INTRIN_NEON_DOTPROD
+ "${AOM_ROOT}/av1/common/arm/av1_convolve_scale_neon_dotprod.c"
"${AOM_ROOT}/av1/common/arm/compound_convolve_neon_dotprod.c"
"${AOM_ROOT}/av1/common/arm/convolve_neon_dotprod.c")
diff --git a/av1/common/arm/av1_convolve_scale_neon.c b/av1/common/arm/av1_convolve_scale_neon.c
index b8d934e..d9c2967 100644
--- a/av1/common/arm/av1_convolve_scale_neon.c
+++ b/av1/common/arm/av1_convolve_scale_neon.c
@@ -17,463 +17,7 @@
#include "aom_dsp/arm/mem_neon.h"
#include "aom_dsp/arm/transpose_neon.h"
-
-static INLINE int16x4_t compound_convolve8_4_v(
- const int16x4_t s0, const int16x4_t s1, const int16x4_t s2,
- const int16x4_t s3, const int16x4_t s4, const int16x4_t s5,
- const int16x4_t s6, const int16x4_t s7, const int16x8_t filter,
- const int32x4_t offset_const) {
- const int16x4_t filter_0_3 = vget_low_s16(filter);
- const int16x4_t filter_4_7 = vget_high_s16(filter);
-
- int32x4_t sum = offset_const;
- sum = vmlal_lane_s16(sum, s0, filter_0_3, 0);
- sum = vmlal_lane_s16(sum, s1, filter_0_3, 1);
- sum = vmlal_lane_s16(sum, s2, filter_0_3, 2);
- sum = vmlal_lane_s16(sum, s3, filter_0_3, 3);
- sum = vmlal_lane_s16(sum, s4, filter_4_7, 0);
- sum = vmlal_lane_s16(sum, s5, filter_4_7, 1);
- sum = vmlal_lane_s16(sum, s6, filter_4_7, 2);
- sum = vmlal_lane_s16(sum, s7, filter_4_7, 3);
-
- return vshrn_n_s32(sum, COMPOUND_ROUND1_BITS);
-}
-
-static INLINE int16x8_t compound_convolve8_8_v(
- const int16x8_t s0, const int16x8_t s1, const int16x8_t s2,
- const int16x8_t s3, const int16x8_t s4, const int16x8_t s5,
- const int16x8_t s6, const int16x8_t s7, const int16x8_t filter,
- const int32x4_t offset_const) {
- const int16x4_t filter_0_3 = vget_low_s16(filter);
- const int16x4_t filter_4_7 = vget_high_s16(filter);
-
- int32x4_t sum0 = offset_const;
- sum0 = vmlal_lane_s16(sum0, vget_low_s16(s0), filter_0_3, 0);
- sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), filter_0_3, 1);
- sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), filter_0_3, 2);
- sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), filter_0_3, 3);
- sum0 = vmlal_lane_s16(sum0, vget_low_s16(s4), filter_4_7, 0);
- sum0 = vmlal_lane_s16(sum0, vget_low_s16(s5), filter_4_7, 1);
- sum0 = vmlal_lane_s16(sum0, vget_low_s16(s6), filter_4_7, 2);
- sum0 = vmlal_lane_s16(sum0, vget_low_s16(s7), filter_4_7, 3);
-
- int32x4_t sum1 = offset_const;
- sum1 = vmlal_lane_s16(sum1, vget_high_s16(s0), filter_0_3, 0);
- sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), filter_0_3, 1);
- sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), filter_0_3, 2);
- sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), filter_0_3, 3);
- sum1 = vmlal_lane_s16(sum1, vget_high_s16(s4), filter_4_7, 0);
- sum1 = vmlal_lane_s16(sum1, vget_high_s16(s5), filter_4_7, 1);
- sum1 = vmlal_lane_s16(sum1, vget_high_s16(s6), filter_4_7, 2);
- sum1 = vmlal_lane_s16(sum1, vget_high_s16(s7), filter_4_7, 3);
-
- int16x4_t res0 = vshrn_n_s32(sum0, COMPOUND_ROUND1_BITS);
- int16x4_t res1 = vshrn_n_s32(sum1, COMPOUND_ROUND1_BITS);
-
- return vcombine_s16(res0, res1);
-}
-
-static INLINE void compound_convolve_vert_scale_neon(
- const int16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w,
- int h, const int16_t *y_filter, int subpel_y_qn, int y_step_qn) {
- const int bd = 8;
- const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS;
- // A shim of 1 << (COMPOUND_ROUND1_BITS - 1) enables us to use
- // non-rounding shifts - which are generally faster than rounding shifts on
- // modern CPUs.
- const int32x4_t vert_offset =
- vdupq_n_s32((1 << offset_bits) + (1 << (COMPOUND_ROUND1_BITS - 1)));
-
- int y_qn = subpel_y_qn;
-
- if (w == 4) {
- do {
- const int16_t *s = &src[(y_qn >> SCALE_SUBPEL_BITS) * src_stride];
-
- const ptrdiff_t filter_offset =
- SUBPEL_TAPS * ((y_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS);
- const int16x8_t filter = vld1q_s16(y_filter + filter_offset);
-
- int16x4_t s0, s1, s2, s3, s4, s5, s6, s7;
- load_s16_4x8(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);
-
- int16x4_t d0 = compound_convolve8_4_v(s0, s1, s2, s3, s4, s5, s6, s7,
- filter, vert_offset);
-
- vst1_u16(dst, vreinterpret_u16_s16(d0));
-
- dst += dst_stride;
- y_qn += y_step_qn;
- } while (--h != 0);
- } else {
- do {
- const int16_t *s = &src[(y_qn >> SCALE_SUBPEL_BITS) * src_stride];
-
- const ptrdiff_t filter_offset =
- SUBPEL_TAPS * ((y_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS);
- const int16x8_t filter = vld1q_s16(y_filter + filter_offset);
-
- int width = w;
- uint16_t *d = dst;
-
- do {
- int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
- load_s16_8x8(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);
-
- int16x8_t d0 = compound_convolve8_8_v(s0, s1, s2, s3, s4, s5, s6, s7,
- filter, vert_offset);
-
- vst1q_u16(d, vreinterpretq_u16_s16(d0));
-
- s += 8;
- d += 8;
- width -= 8;
- } while (width != 0);
-
- dst += dst_stride;
- y_qn += y_step_qn;
- } while (--h != 0);
- }
-}
-
-static INLINE void compound_avg_convolve_vert_scale_neon(
- const int16_t *src, int src_stride, uint8_t *dst8, int dst8_stride,
- uint16_t *dst16, int dst16_stride, int w, int h, const int16_t *y_filter,
- int subpel_y_qn, int y_step_qn) {
- const int bd = 8;
- const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS;
- // A shim of 1 << (COMPOUND_ROUND1_BITS - 1) enables us to use
- // non-rounding shifts - which are generally faster than rounding shifts
- // on modern CPUs.
- const int32_t vert_offset_bits =
- (1 << offset_bits) + (1 << (COMPOUND_ROUND1_BITS - 1));
- // For the averaging code path substract round offset and convolve round.
- const int32_t avg_offset_bits = (1 << (offset_bits + 1)) + (1 << offset_bits);
- const int32x4_t vert_offset = vdupq_n_s32(vert_offset_bits - avg_offset_bits);
-
- int y_qn = subpel_y_qn;
-
- if (w == 4) {
- do {
- const int16_t *s = &src[(y_qn >> SCALE_SUBPEL_BITS) * src_stride];
-
- const ptrdiff_t filter_offset =
- SUBPEL_TAPS * ((y_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS);
- const int16x8_t filter = vld1q_s16(y_filter + filter_offset);
-
- int16x4_t s0, s1, s2, s3, s4, s5, s6, s7;
- load_s16_4x8(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);
-
- int16x4_t d0 = compound_convolve8_4_v(s0, s1, s2, s3, s4, s5, s6, s7,
- filter, vert_offset);
-
- int16x4_t dd0 = vreinterpret_s16_u16(vld1_u16(dst16));
-
- int16x4_t avg = vhadd_s16(dd0, d0);
- int16x8_t d0_s16 = vcombine_s16(avg, vdup_n_s16(0));
-
- uint8x8_t d0_u8 = vqrshrun_n_s16(
- d0_s16, (2 * FILTER_BITS - ROUND0_BITS - COMPOUND_ROUND1_BITS));
-
- store_u8_4x1(dst8, d0_u8);
-
- dst16 += dst16_stride;
- dst8 += dst8_stride;
- y_qn += y_step_qn;
- } while (--h != 0);
- } else {
- do {
- const int16_t *s = &src[(y_qn >> SCALE_SUBPEL_BITS) * src_stride];
-
- const ptrdiff_t filter_offset =
- SUBPEL_TAPS * ((y_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS);
- const int16x8_t filter = vld1q_s16(y_filter + filter_offset);
-
- int width = w;
- uint8_t *dst8_ptr = dst8;
- uint16_t *dst16_ptr = dst16;
-
- do {
- int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
- load_s16_8x8(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);
-
- int16x8_t d0 = compound_convolve8_8_v(s0, s1, s2, s3, s4, s5, s6, s7,
- filter, vert_offset);
-
- int16x8_t dd0 = vreinterpretq_s16_u16(vld1q_u16(dst16_ptr));
-
- int16x8_t avg = vhaddq_s16(dd0, d0);
-
- uint8x8_t d0_u8 = vqrshrun_n_s16(
- avg, (2 * FILTER_BITS - ROUND0_BITS - COMPOUND_ROUND1_BITS));
-
- vst1_u8(dst8_ptr, d0_u8);
-
- s += 8;
- dst8_ptr += 8;
- dst16_ptr += 8;
- width -= 8;
- } while (width != 0);
-
- dst16 += dst16_stride;
- dst8 += dst8_stride;
- y_qn += y_step_qn;
- } while (--h != 0);
- }
-}
-
-static INLINE void compound_dist_wtd_convolve_vert_scale_neon(
- const int16_t *src, int src_stride, uint8_t *dst8, int dst8_stride,
- uint16_t *dst16, int dst16_stride, int w, int h, const int16_t *y_filter,
- ConvolveParams *conv_params, int subpel_y_qn, int y_step_qn) {
- const int bd = 8;
- const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS;
- int y_qn = subpel_y_qn;
- // A shim of 1 << (COMPOUND_ROUND1_BITS - 1) enables us to use
- // non-rounding shifts - which are generally faster than rounding shifts on
- // modern CPUs.
- const int32x4_t vert_offset =
- vdupq_n_s32((1 << offset_bits) + (1 << (COMPOUND_ROUND1_BITS - 1)));
- // For the weighted averaging code path we have to substract round offset and
- // convolve round. The shim of 1 << (2 * FILTER_BITS - ROUND0_BITS -
- // COMPOUND_ROUND1_BITS - 1) enables us to use non-rounding shifts. The
- // additional shift by DIST_PRECISION_BITS is needed in order to merge two
- // shift calculations into one.
- const int32x4_t dist_wtd_offset = vdupq_n_s32(
- (1 << (2 * FILTER_BITS - ROUND0_BITS - COMPOUND_ROUND1_BITS - 1 +
- DIST_PRECISION_BITS)) -
- (1 << (offset_bits - COMPOUND_ROUND1_BITS + DIST_PRECISION_BITS)) -
- (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1 + DIST_PRECISION_BITS)));
- const int16x4_t bck_offset = vdup_n_s16(conv_params->bck_offset);
- const int16x4_t fwd_offset = vdup_n_s16(conv_params->fwd_offset);
-
- if (w == 4) {
- do {
- const int16_t *s = &src[(y_qn >> SCALE_SUBPEL_BITS) * src_stride];
-
- const ptrdiff_t filter_offset =
- SUBPEL_TAPS * ((y_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS);
- const int16x8_t filter = vld1q_s16(y_filter + filter_offset);
-
- int16x4_t s0, s1, s2, s3, s4, s5, s6, s7;
- load_s16_4x8(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);
-
- int16x4_t d0 = compound_convolve8_4_v(s0, s1, s2, s3, s4, s5, s6, s7,
- filter, vert_offset);
-
- int16x4_t dd0 = vreinterpret_s16_u16(vld1_u16(dst16));
-
- int32x4_t dst_wtd_avg = vmlal_s16(dist_wtd_offset, bck_offset, d0);
- dst_wtd_avg = vmlal_s16(dst_wtd_avg, fwd_offset, dd0);
-
- int16x4_t d0_s16 = vshrn_n_s32(
- dst_wtd_avg, 2 * FILTER_BITS - ROUND0_BITS - COMPOUND_ROUND1_BITS +
- DIST_PRECISION_BITS);
-
- uint8x8_t d0_u8 = vqmovun_s16(vcombine_s16(d0_s16, vdup_n_s16(0)));
-
- store_u8_4x1(dst8, d0_u8);
-
- dst16 += dst16_stride;
- dst8 += dst8_stride;
- y_qn += y_step_qn;
- } while (--h != 0);
- } else {
- do {
- const int16_t *s = &src[(y_qn >> SCALE_SUBPEL_BITS) * src_stride];
-
- const ptrdiff_t filter_offset =
- SUBPEL_TAPS * ((y_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS);
- const int16x8_t filter = vld1q_s16(y_filter + filter_offset);
-
- int width = w;
- uint8_t *dst8_ptr = dst8;
- uint16_t *dst16_ptr = dst16;
-
- do {
- int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
- load_s16_8x8(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);
-
- int16x8_t d0 = compound_convolve8_8_v(s0, s1, s2, s3, s4, s5, s6, s7,
- filter, vert_offset);
-
- int16x8_t dd0 = vreinterpretq_s16_u16(vld1q_u16(dst16_ptr));
-
- int32x4_t dst_wtd_avg0 =
- vmlal_s16(dist_wtd_offset, bck_offset, vget_low_s16(d0));
- int32x4_t dst_wtd_avg1 =
- vmlal_s16(dist_wtd_offset, bck_offset, vget_high_s16(d0));
-
- dst_wtd_avg0 = vmlal_s16(dst_wtd_avg0, fwd_offset, vget_low_s16(dd0));
- dst_wtd_avg1 = vmlal_s16(dst_wtd_avg1, fwd_offset, vget_high_s16(dd0));
-
- int16x4_t d0_s16_0 = vshrn_n_s32(
- dst_wtd_avg0, 2 * FILTER_BITS - ROUND0_BITS - COMPOUND_ROUND1_BITS +
- DIST_PRECISION_BITS);
- int16x4_t d0_s16_1 = vshrn_n_s32(
- dst_wtd_avg1, 2 * FILTER_BITS - ROUND0_BITS - COMPOUND_ROUND1_BITS +
- DIST_PRECISION_BITS);
-
- uint8x8_t d0_u8 = vqmovun_s16(vcombine_s16(d0_s16_0, d0_s16_1));
-
- vst1_u8(dst8_ptr, d0_u8);
-
- s += 8;
- dst8_ptr += 8;
- dst16_ptr += 8;
- width -= 8;
- } while (width != 0);
-
- dst16 += dst16_stride;
- dst8 += dst8_stride;
- y_qn += y_step_qn;
- } while (--h != 0);
- }
-}
-
-static INLINE uint8x8_t convolve8_4_v(const int16x4_t s0, const int16x4_t s1,
- const int16x4_t s2, const int16x4_t s3,
- const int16x4_t s4, const int16x4_t s5,
- const int16x4_t s6, const int16x4_t s7,
- const int16x8_t filter,
- const int32x4_t offset_const) {
- const int16x4_t filter_0_3 = vget_low_s16(filter);
- const int16x4_t filter_4_7 = vget_high_s16(filter);
-
- int32x4_t sum = offset_const;
- sum = vmlal_lane_s16(sum, s0, filter_0_3, 0);
- sum = vmlal_lane_s16(sum, s1, filter_0_3, 1);
- sum = vmlal_lane_s16(sum, s2, filter_0_3, 2);
- sum = vmlal_lane_s16(sum, s3, filter_0_3, 3);
- sum = vmlal_lane_s16(sum, s4, filter_4_7, 0);
- sum = vmlal_lane_s16(sum, s5, filter_4_7, 1);
- sum = vmlal_lane_s16(sum, s6, filter_4_7, 2);
- sum = vmlal_lane_s16(sum, s7, filter_4_7, 3);
-
- int16x4_t res = vshrn_n_s32(sum, 2 * FILTER_BITS - ROUND0_BITS);
-
- return vqmovun_s16(vcombine_s16(res, vdup_n_s16(0)));
-}
-
-static INLINE uint8x8_t convolve8_8_v(const int16x8_t s0, const int16x8_t s1,
- const int16x8_t s2, const int16x8_t s3,
- const int16x8_t s4, const int16x8_t s5,
- const int16x8_t s6, const int16x8_t s7,
- const int16x8_t filter,
- const int32x4_t offset_const) {
- const int16x4_t filter_0_3 = vget_low_s16(filter);
- const int16x4_t filter_4_7 = vget_high_s16(filter);
-
- int32x4_t sum0 = offset_const;
- sum0 = vmlal_lane_s16(sum0, vget_low_s16(s0), filter_0_3, 0);
- sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), filter_0_3, 1);
- sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), filter_0_3, 2);
- sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), filter_0_3, 3);
- sum0 = vmlal_lane_s16(sum0, vget_low_s16(s4), filter_4_7, 0);
- sum0 = vmlal_lane_s16(sum0, vget_low_s16(s5), filter_4_7, 1);
- sum0 = vmlal_lane_s16(sum0, vget_low_s16(s6), filter_4_7, 2);
- sum0 = vmlal_lane_s16(sum0, vget_low_s16(s7), filter_4_7, 3);
-
- int32x4_t sum1 = offset_const;
- sum1 = vmlal_lane_s16(sum1, vget_high_s16(s0), filter_0_3, 0);
- sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), filter_0_3, 1);
- sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), filter_0_3, 2);
- sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), filter_0_3, 3);
- sum1 = vmlal_lane_s16(sum1, vget_high_s16(s4), filter_4_7, 0);
- sum1 = vmlal_lane_s16(sum1, vget_high_s16(s5), filter_4_7, 1);
- sum1 = vmlal_lane_s16(sum1, vget_high_s16(s6), filter_4_7, 2);
- sum1 = vmlal_lane_s16(sum1, vget_high_s16(s7), filter_4_7, 3);
-
- int16x4_t res0 = vshrn_n_s32(sum0, 2 * FILTER_BITS - ROUND0_BITS);
- int16x4_t res1 = vshrn_n_s32(sum1, 2 * FILTER_BITS - ROUND0_BITS);
-
- return vqmovun_s16(vcombine_s16(res0, res1));
-}
-
-static INLINE void convolve_vert_scale_neon(const int16_t *src, int src_stride,
- uint8_t *dst, int dst_stride, int w,
- int h, const int16_t *y_filter,
- int subpel_y_qn, int y_step_qn) {
- const int bd = 8;
- const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS;
- const int round_1 = 2 * FILTER_BITS - ROUND0_BITS;
- // The shim of 1 << (round_1 - 1) enables us to use non-rounding shifts.
- int32x4_t vert_offset =
- vdupq_n_s32((1 << (round_1 - 1)) - (1 << (offset_bits - 1)));
-
- int y_qn = subpel_y_qn;
- if (w == 4) {
- do {
- const int16_t *s = &src[(y_qn >> SCALE_SUBPEL_BITS) * src_stride];
-
- const ptrdiff_t filter_offset =
- SUBPEL_TAPS * ((y_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS);
- const int16x8_t filter = vld1q_s16(y_filter + filter_offset);
-
- int16x4_t s0, s1, s2, s3, s4, s5, s6, s7;
- load_s16_4x8(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);
-
- uint8x8_t d =
- convolve8_4_v(s0, s1, s2, s3, s4, s5, s6, s7, filter, vert_offset);
-
- store_u8_4x1(dst, d);
-
- dst += dst_stride;
- y_qn += y_step_qn;
- } while (--h != 0);
- } else if (w == 8) {
- do {
- const int16_t *s = &src[(y_qn >> SCALE_SUBPEL_BITS) * src_stride];
-
- const ptrdiff_t filter_offset =
- SUBPEL_TAPS * ((y_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS);
- const int16x8_t filter = vld1q_s16(y_filter + filter_offset);
-
- int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
- load_s16_8x8(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);
-
- uint8x8_t d =
- convolve8_8_v(s0, s1, s2, s3, s4, s5, s6, s7, filter, vert_offset);
-
- vst1_u8(dst, d);
-
- dst += dst_stride;
- y_qn += y_step_qn;
- } while (--h != 0);
- } else {
- do {
- const int16_t *s = &src[(y_qn >> SCALE_SUBPEL_BITS) * src_stride];
- uint8_t *d = dst;
- int width = w;
-
- const ptrdiff_t filter_offset =
- SUBPEL_TAPS * ((y_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS);
- const int16x8_t filter = vld1q_s16(y_filter + filter_offset);
-
- do {
- int16x8_t s0[2], s1[2], s2[2], s3[2], s4[2], s5[2], s6[2], s7[2];
- load_s16_8x8(s, src_stride, &s0[0], &s1[0], &s2[0], &s3[0], &s4[0],
- &s5[0], &s6[0], &s7[0]);
- load_s16_8x8(s + 8, src_stride, &s0[1], &s1[1], &s2[1], &s3[1], &s4[1],
- &s5[1], &s6[1], &s7[1]);
-
- uint8x8_t d0 = convolve8_8_v(s0[0], s1[0], s2[0], s3[0], s4[0], s5[0],
- s6[0], s7[0], filter, vert_offset);
- uint8x8_t d1 = convolve8_8_v(s0[1], s1[1], s2[1], s3[1], s4[1], s5[1],
- s6[1], s7[1], filter, vert_offset);
-
- vst1q_u8(d, vcombine_u8(d0, d1));
-
- s += 16;
- d += 16;
- width -= 16;
- } while (width != 0);
-
- dst += dst_stride;
- y_qn += y_step_qn;
- } while (--h != 0);
- }
-}
+#include "av1/common/arm/convolve_scale_neon.h"
static INLINE int16x4_t convolve8_4_h(const int16x4_t s0, const int16x4_t s1,
const int16x4_t s2, const int16x4_t s3,
diff --git a/av1/common/arm/av1_convolve_scale_neon_dotprod.c b/av1/common/arm/av1_convolve_scale_neon_dotprod.c
new file mode 100644
index 0000000..619efa6
--- /dev/null
+++ b/av1/common/arm/av1_convolve_scale_neon_dotprod.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2024, Alliance for Open Media. All rights reserved.
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <arm_neon.h>
+
+#include "config/aom_config.h"
+#include "config/av1_rtcd.h"
+
+#include "aom_dsp/arm/mem_neon.h"
+#include "aom_dsp/arm/transpose_neon.h"
+#include "av1/common/arm/convolve_scale_neon.h"
+
+static INLINE int16x4_t convolve8_4_h(const uint8x8_t s0, const uint8x8_t s1,
+ const uint8x8_t s2, const uint8x8_t s3,
+ const int8x8_t filter,
+ const int32x4_t horiz_const) {
+ const int8x16_t filters = vcombine_s8(filter, filter);
+
+ uint8x16_t s01 = vcombine_u8(s0, s1);
+ uint8x16_t s23 = vcombine_u8(s2, s3);
+
+ // Transform sample range to [-128, 127] for 8-bit signed dot product.
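+  // The 128 * sum(filter) bias this introduces is compensated by the
+  // dotprod_offset term that the caller folds into horiz_const.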
+ int8x16_t s01_128 = vreinterpretq_s8_u8(vsubq_u8(s01, vdupq_n_u8(128)));
+ int8x16_t s23_128 = vreinterpretq_s8_u8(vsubq_u8(s23, vdupq_n_u8(128)));
+
+ int32x4_t sum01 = vdotq_s32(horiz_const, s01_128, filters);
+ int32x4_t sum23 = vdotq_s32(horiz_const, s23_128, filters);
+
+ int32x4_t sum = vpaddq_s32(sum01, sum23);
+
+ // We halved the filter values so -1 from right shift.
+ return vshrn_n_s32(sum, ROUND0_BITS - 1);
+}
+
+static INLINE int16x8_t convolve8_8_h(const uint8x8_t s0, const uint8x8_t s1,
+ const uint8x8_t s2, const uint8x8_t s3,
+ const uint8x8_t s4, const uint8x8_t s5,
+ const uint8x8_t s6, const uint8x8_t s7,
+ const int8x8_t filter,
+ const int32x4_t horiz_const) {
+ const int8x16_t filters = vcombine_s8(filter, filter);
+
+ uint8x16_t s01 = vcombine_u8(s0, s1);
+ uint8x16_t s23 = vcombine_u8(s2, s3);
+ uint8x16_t s45 = vcombine_u8(s4, s5);
+ uint8x16_t s67 = vcombine_u8(s6, s7);
+
+ // Transform sample range to [-128, 127] for 8-bit signed dot product.
+ int8x16_t s01_128 = vreinterpretq_s8_u8(vsubq_u8(s01, vdupq_n_u8(128)));
+ int8x16_t s23_128 = vreinterpretq_s8_u8(vsubq_u8(s23, vdupq_n_u8(128)));
+ int8x16_t s45_128 = vreinterpretq_s8_u8(vsubq_u8(s45, vdupq_n_u8(128)));
+ int8x16_t s67_128 = vreinterpretq_s8_u8(vsubq_u8(s67, vdupq_n_u8(128)));
+
+ int32x4_t sum01 = vdotq_s32(horiz_const, s01_128, filters);
+ int32x4_t sum23 = vdotq_s32(horiz_const, s23_128, filters);
+ int32x4_t sum45 = vdotq_s32(horiz_const, s45_128, filters);
+ int32x4_t sum67 = vdotq_s32(horiz_const, s67_128, filters);
+
+ int32x4_t sum0123 = vpaddq_s32(sum01, sum23);
+ int32x4_t sum4567 = vpaddq_s32(sum45, sum67);
+
+ // We halved the filter values so -1 from right shift.
+ return vcombine_s16(vshrn_n_s32(sum0123, ROUND0_BITS - 1),
+ vshrn_n_s32(sum4567, ROUND0_BITS - 1));
+}
+
+static INLINE void convolve_horiz_scale_neon_dotprod(
+ const uint8_t *src, int src_stride, int16_t *dst, int dst_stride, int w,
+ int h, const int16_t *x_filter, const int subpel_x_qn,
+ const int x_step_qn) {
+ DECLARE_ALIGNED(16, int16_t, temp[8 * 8]);
+ const int bd = 8;
+ // A shim of 1 << (ROUND0_BITS - 1) enables us to use non-rounding
+ // shifts - which are generally faster than rounding shifts on modern CPUs.
+ const int32_t horiz_offset =
+ (1 << (bd + FILTER_BITS - 1)) + (1 << (ROUND0_BITS - 1));
+ // The shim of 128 << FILTER_BITS is needed because we are subtracting 128
+ // from every source value.
+ const int32_t dotprod_offset = 128 << FILTER_BITS;
+ // Divide the total by 4: we halved the filter values and will use a pairwise
+ // add in the convolution kernel.
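+  // Halving the taps scales each product by 1/2, and the pairwise add sums
+  // two lanes that each carry one copy of this accumulator, so the combined
+  // offset is pre-divided by 2 * 2 = 4.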
+ const int32x4_t horiz_offset_vec =
+ vdupq_n_s32((horiz_offset + dotprod_offset) >> 2);
+
+ if (w == 4) {
+ do {
+ int x_qn = subpel_x_qn;
+
+ // Process a 4x4 tile.
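+      // Each inner iteration filters one output x position across four rows,
+      // so results land in temp column by column; the transpose below
+      // restores row order before storing.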
+ for (int r = 0; r < 4; r++) {
+ const uint8_t *const s = &src[x_qn >> SCALE_SUBPEL_BITS];
+
+ const ptrdiff_t filter_offset =
+ SUBPEL_TAPS * ((x_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS);
+ // Filter values are all even so halve them to fit in int8_t.
+ const int8x8_t filter =
+ vshrn_n_s16(vld1q_s16(x_filter + filter_offset), 1);
+
+ uint8x8_t t0, t1, t2, t3;
+ load_u8_8x4(s, src_stride, &t0, &t1, &t2, &t3);
+
+ int16x4_t d0 = convolve8_4_h(t0, t1, t2, t3, filter, horiz_offset_vec);
+
+ vst1_s16(&temp[r * 4], d0);
+
+ x_qn += x_step_qn;
+ }
+
+ // Transpose the 4x4 result tile and store.
+ int16x4_t d0, d1, d2, d3;
+ load_s16_4x4(temp, 4, &d0, &d1, &d2, &d3);
+
+ transpose_elems_inplace_s16_4x4(&d0, &d1, &d2, &d3);
+
+ store_s16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+ dst += 4 * dst_stride;
+ src += 4 * src_stride;
+ h -= 4;
+ } while (h > 0);
+ } else {
+ do {
+ int x_qn = subpel_x_qn;
+ int16_t *d = dst;
+ int width = w;
+
+ do {
+ // Process an 8x8 tile.
+ for (int r = 0; r < 8; r++) {
+          const uint8_t *const s = &src[x_qn >> SCALE_SUBPEL_BITS];
+
+ const ptrdiff_t filter_offset =
+ SUBPEL_TAPS * ((x_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS);
+ // Filter values are all even so halve them to fit in int8_t.
+ int8x8_t filter = vshrn_n_s16(vld1q_s16(x_filter + filter_offset), 1);
+
+ uint8x8_t t0, t1, t2, t3, t4, t5, t6, t7;
+ load_u8_8x8(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
+
+ int16x8_t d0 = convolve8_8_h(t0, t1, t2, t3, t4, t5, t6, t7, filter,
+ horiz_offset_vec);
+
+ vst1q_s16(&temp[r * 8], d0);
+
+ x_qn += x_step_qn;
+ }
+
+ // Transpose the 8x8 result tile and store.
+ int16x8_t d0, d1, d2, d3, d4, d5, d6, d7;
+ load_s16_8x8(temp, 8, &d0, &d1, &d2, &d3, &d4, &d5, &d6, &d7);
+
+ transpose_elems_inplace_s16_8x8(&d0, &d1, &d2, &d3, &d4, &d5, &d6, &d7);
+
+ store_s16_8x8(d, dst_stride, d0, d1, d2, d3, d4, d5, d6, d7);
+
+ d += 8;
+ width -= 8;
+ } while (width != 0);
+
+ dst += 8 * dst_stride;
+ src += 8 * src_stride;
+ h -= 8;
+ } while (h > 0);
+ }
+}
+
+void av1_convolve_2d_scale_neon_dotprod(
+ const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w,
+ int h, const InterpFilterParams *filter_params_x,
+ const InterpFilterParams *filter_params_y, const int subpel_x_qn,
+ const int x_step_qn, const int subpel_y_qn, const int y_step_qn,
+ ConvolveParams *conv_params) {
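+  // The Neon paths below operate on blocks of at least 4x4 pixels, so defer
+  // anything smaller to the scalar C implementation.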
+ if (w < 4 || h < 4) {
+ av1_convolve_2d_scale_c(src, src_stride, dst, dst_stride, w, h,
+ filter_params_x, filter_params_y, subpel_x_qn,
+ x_step_qn, subpel_y_qn, y_step_qn, conv_params);
+ return;
+ }
+
+  // 8-tap filters are used for the interpolation.
+ assert(filter_params_y->taps <= 8 && filter_params_x->taps <= 8);
+
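+  // The intermediate block must cover every source row the scaled vertical
+  // filter can touch: the last output row starts reading at source row
+  // ((h - 1) * y_step_qn + subpel_y_qn) >> SCALE_SUBPEL_BITS and needs the
+  // full filter support of taps rows below that.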
+ DECLARE_ALIGNED(32, int16_t,
+ im_block[(2 * MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]);
+ int im_h = (((h - 1) * y_step_qn + subpel_y_qn) >> SCALE_SUBPEL_BITS) +
+ filter_params_y->taps;
+ int im_stride = MAX_SB_SIZE;
+ CONV_BUF_TYPE *dst16 = conv_params->dst;
+ const int dst16_stride = conv_params->dst_stride;
+
+ // Account for needing filter_taps / 2 - 1 lines prior and filter_taps / 2
+ // lines post both horizontally and vertically.
+ const ptrdiff_t horiz_offset = filter_params_x->taps / 2 - 1;
+ const ptrdiff_t vert_offset = (filter_params_y->taps / 2 - 1) * src_stride;
+
+ // Horizontal filter
+ convolve_horiz_scale_neon_dotprod(
+ src - horiz_offset - vert_offset, src_stride, im_block, im_stride, w,
+ im_h, filter_params_x->filter_ptr, subpel_x_qn, x_step_qn);
+
+ // Vertical filter
+ if (UNLIKELY(conv_params->is_compound)) {
+ if (conv_params->do_average) {
+ if (conv_params->use_dist_wtd_comp_avg) {
+ compound_dist_wtd_convolve_vert_scale_neon(
+ im_block, im_stride, dst, dst_stride, dst16, dst16_stride, w, h,
+ filter_params_y->filter_ptr, conv_params, subpel_y_qn, y_step_qn);
+ } else {
+ compound_avg_convolve_vert_scale_neon(
+ im_block, im_stride, dst, dst_stride, dst16, dst16_stride, w, h,
+ filter_params_y->filter_ptr, subpel_y_qn, y_step_qn);
+ }
+ } else {
+ compound_convolve_vert_scale_neon(
+ im_block, im_stride, dst16, dst16_stride, w, h,
+ filter_params_y->filter_ptr, subpel_y_qn, y_step_qn);
+ }
+ } else {
+ convolve_vert_scale_neon(im_block, im_stride, dst, dst_stride, w, h,
+ filter_params_y->filter_ptr, subpel_y_qn,
+ y_step_qn);
+ }
+}
diff --git a/av1/common/arm/convolve_scale_neon.h b/av1/common/arm/convolve_scale_neon.h
new file mode 100644
index 0000000..c000e44
--- /dev/null
+++ b/av1/common/arm/convolve_scale_neon.h
@@ -0,0 +1,480 @@
+/*
+ * Copyright (c) 2024, Alliance for Open Media. All rights reserved.
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef AOM_AV1_COMMON_ARM_CONVOLVE_SCALE_NEON_H_
+#define AOM_AV1_COMMON_ARM_CONVOLVE_SCALE_NEON_H_
+
+#include <assert.h>
+#include <arm_neon.h>
+
+#include "config/aom_config.h"
+#include "config/av1_rtcd.h"
+
+#include "aom_dsp/arm/mem_neon.h"
+#include "aom_dsp/arm/transpose_neon.h"
+
+static INLINE int16x4_t compound_convolve8_4_v(
+ const int16x4_t s0, const int16x4_t s1, const int16x4_t s2,
+ const int16x4_t s3, const int16x4_t s4, const int16x4_t s5,
+ const int16x4_t s6, const int16x4_t s7, const int16x8_t filter,
+ const int32x4_t offset_const) {
+ const int16x4_t filter_0_3 = vget_low_s16(filter);
+ const int16x4_t filter_4_7 = vget_high_s16(filter);
+
+ int32x4_t sum = offset_const;
+ sum = vmlal_lane_s16(sum, s0, filter_0_3, 0);
+ sum = vmlal_lane_s16(sum, s1, filter_0_3, 1);
+ sum = vmlal_lane_s16(sum, s2, filter_0_3, 2);
+ sum = vmlal_lane_s16(sum, s3, filter_0_3, 3);
+ sum = vmlal_lane_s16(sum, s4, filter_4_7, 0);
+ sum = vmlal_lane_s16(sum, s5, filter_4_7, 1);
+ sum = vmlal_lane_s16(sum, s6, filter_4_7, 2);
+ sum = vmlal_lane_s16(sum, s7, filter_4_7, 3);
+
+ return vshrn_n_s32(sum, COMPOUND_ROUND1_BITS);
+}
+
+static INLINE int16x8_t compound_convolve8_8_v(
+ const int16x8_t s0, const int16x8_t s1, const int16x8_t s2,
+ const int16x8_t s3, const int16x8_t s4, const int16x8_t s5,
+ const int16x8_t s6, const int16x8_t s7, const int16x8_t filter,
+ const int32x4_t offset_const) {
+ const int16x4_t filter_0_3 = vget_low_s16(filter);
+ const int16x4_t filter_4_7 = vget_high_s16(filter);
+
+ int32x4_t sum0 = offset_const;
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s0), filter_0_3, 0);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), filter_0_3, 1);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), filter_0_3, 2);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), filter_0_3, 3);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s4), filter_4_7, 0);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s5), filter_4_7, 1);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s6), filter_4_7, 2);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s7), filter_4_7, 3);
+
+ int32x4_t sum1 = offset_const;
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s0), filter_0_3, 0);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), filter_0_3, 1);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), filter_0_3, 2);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), filter_0_3, 3);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s4), filter_4_7, 0);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s5), filter_4_7, 1);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s6), filter_4_7, 2);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s7), filter_4_7, 3);
+
+ int16x4_t res0 = vshrn_n_s32(sum0, COMPOUND_ROUND1_BITS);
+ int16x4_t res1 = vshrn_n_s32(sum1, COMPOUND_ROUND1_BITS);
+
+ return vcombine_s16(res0, res1);
+}
+
+static INLINE void compound_convolve_vert_scale_neon(
+ const int16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w,
+ int h, const int16_t *y_filter, int subpel_y_qn, int y_step_qn) {
+ const int bd = 8;
+ const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS;
+ // A shim of 1 << (COMPOUND_ROUND1_BITS - 1) enables us to use
+ // non-rounding shifts - which are generally faster than rounding shifts on
+ // modern CPUs.
+ const int32x4_t vert_offset =
+ vdupq_n_s32((1 << offset_bits) + (1 << (COMPOUND_ROUND1_BITS - 1)));
+
+ int y_qn = subpel_y_qn;
+
+ if (w == 4) {
+ do {
+ const int16_t *s = &src[(y_qn >> SCALE_SUBPEL_BITS) * src_stride];
+
+ const ptrdiff_t filter_offset =
+ SUBPEL_TAPS * ((y_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS);
+ const int16x8_t filter = vld1q_s16(y_filter + filter_offset);
+
+ int16x4_t s0, s1, s2, s3, s4, s5, s6, s7;
+ load_s16_4x8(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);
+
+ int16x4_t d0 = compound_convolve8_4_v(s0, s1, s2, s3, s4, s5, s6, s7,
+ filter, vert_offset);
+
+ vst1_u16(dst, vreinterpret_u16_s16(d0));
+
+ dst += dst_stride;
+ y_qn += y_step_qn;
+ } while (--h != 0);
+ } else {
+ do {
+ const int16_t *s = &src[(y_qn >> SCALE_SUBPEL_BITS) * src_stride];
+
+ const ptrdiff_t filter_offset =
+ SUBPEL_TAPS * ((y_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS);
+ const int16x8_t filter = vld1q_s16(y_filter + filter_offset);
+
+ int width = w;
+ uint16_t *d = dst;
+
+ do {
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
+ load_s16_8x8(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);
+
+ int16x8_t d0 = compound_convolve8_8_v(s0, s1, s2, s3, s4, s5, s6, s7,
+ filter, vert_offset);
+
+ vst1q_u16(d, vreinterpretq_u16_s16(d0));
+
+ s += 8;
+ d += 8;
+ width -= 8;
+ } while (width != 0);
+
+ dst += dst_stride;
+ y_qn += y_step_qn;
+ } while (--h != 0);
+ }
+}
+
+static INLINE void compound_avg_convolve_vert_scale_neon(
+ const int16_t *src, int src_stride, uint8_t *dst8, int dst8_stride,
+ uint16_t *dst16, int dst16_stride, int w, int h, const int16_t *y_filter,
+ int subpel_y_qn, int y_step_qn) {
+ const int bd = 8;
+ const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS;
+ // A shim of 1 << (COMPOUND_ROUND1_BITS - 1) enables us to use
+ // non-rounding shifts - which are generally faster than rounding shifts
+ // on modern CPUs.
+ const int32_t vert_offset_bits =
+ (1 << offset_bits) + (1 << (COMPOUND_ROUND1_BITS - 1));
+  // For the averaging code path subtract round offset and convolve round.
+ const int32_t avg_offset_bits = (1 << (offset_bits + 1)) + (1 << offset_bits);
+ const int32x4_t vert_offset = vdupq_n_s32(vert_offset_bits - avg_offset_bits);
+
+ int y_qn = subpel_y_qn;
+
+ if (w == 4) {
+ do {
+ const int16_t *s = &src[(y_qn >> SCALE_SUBPEL_BITS) * src_stride];
+
+ const ptrdiff_t filter_offset =
+ SUBPEL_TAPS * ((y_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS);
+ const int16x8_t filter = vld1q_s16(y_filter + filter_offset);
+
+ int16x4_t s0, s1, s2, s3, s4, s5, s6, s7;
+ load_s16_4x8(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);
+
+ int16x4_t d0 = compound_convolve8_4_v(s0, s1, s2, s3, s4, s5, s6, s7,
+ filter, vert_offset);
+
+ int16x4_t dd0 = vreinterpret_s16_u16(vld1_u16(dst16));
+
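+      // vhadd halves while adding, computing the compound average
+      // (dd0 + d0) >> 1 without a separate shift.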
+ int16x4_t avg = vhadd_s16(dd0, d0);
+ int16x8_t d0_s16 = vcombine_s16(avg, vdup_n_s16(0));
+
+ uint8x8_t d0_u8 = vqrshrun_n_s16(
+ d0_s16, 2 * FILTER_BITS - ROUND0_BITS - COMPOUND_ROUND1_BITS);
+
+ store_u8_4x1(dst8, d0_u8);
+
+ dst16 += dst16_stride;
+ dst8 += dst8_stride;
+ y_qn += y_step_qn;
+ } while (--h != 0);
+ } else {
+ do {
+ const int16_t *s = &src[(y_qn >> SCALE_SUBPEL_BITS) * src_stride];
+
+ const ptrdiff_t filter_offset =
+ SUBPEL_TAPS * ((y_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS);
+ const int16x8_t filter = vld1q_s16(y_filter + filter_offset);
+
+ int width = w;
+ uint8_t *dst8_ptr = dst8;
+ uint16_t *dst16_ptr = dst16;
+
+ do {
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
+ load_s16_8x8(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);
+
+ int16x8_t d0 = compound_convolve8_8_v(s0, s1, s2, s3, s4, s5, s6, s7,
+ filter, vert_offset);
+
+ int16x8_t dd0 = vreinterpretq_s16_u16(vld1q_u16(dst16_ptr));
+
+ int16x8_t avg = vhaddq_s16(dd0, d0);
+
+ uint8x8_t d0_u8 = vqrshrun_n_s16(
+ avg, 2 * FILTER_BITS - ROUND0_BITS - COMPOUND_ROUND1_BITS);
+
+ vst1_u8(dst8_ptr, d0_u8);
+
+ s += 8;
+ dst8_ptr += 8;
+ dst16_ptr += 8;
+ width -= 8;
+ } while (width != 0);
+
+ dst16 += dst16_stride;
+ dst8 += dst8_stride;
+ y_qn += y_step_qn;
+ } while (--h != 0);
+ }
+}
+
+static INLINE void compound_dist_wtd_convolve_vert_scale_neon(
+ const int16_t *src, int src_stride, uint8_t *dst8, int dst8_stride,
+ uint16_t *dst16, int dst16_stride, int w, int h, const int16_t *y_filter,
+ ConvolveParams *conv_params, int subpel_y_qn, int y_step_qn) {
+ const int bd = 8;
+ const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS;
+ int y_qn = subpel_y_qn;
+ // A shim of 1 << (COMPOUND_ROUND1_BITS - 1) enables us to use
+ // non-rounding shifts - which are generally faster than rounding shifts on
+ // modern CPUs.
+ const int32x4_t vert_offset =
+ vdupq_n_s32((1 << offset_bits) + (1 << (COMPOUND_ROUND1_BITS - 1)));
+  // For the weighted averaging code path we have to subtract round offset and
+ // convolve round. The shim of 1 << (2 * FILTER_BITS - ROUND0_BITS -
+ // COMPOUND_ROUND1_BITS - 1) enables us to use non-rounding shifts. The
+ // additional shift by DIST_PRECISION_BITS is needed in order to merge two
+ // shift calculations into one.
+ const int32x4_t dist_wtd_offset = vdupq_n_s32(
+ (1 << (2 * FILTER_BITS - ROUND0_BITS - COMPOUND_ROUND1_BITS - 1 +
+ DIST_PRECISION_BITS)) -
+ (1 << (offset_bits - COMPOUND_ROUND1_BITS + DIST_PRECISION_BITS)) -
+ (1 << (offset_bits - COMPOUND_ROUND1_BITS - 1 + DIST_PRECISION_BITS)));
+ const int16x4_t bck_offset = vdup_n_s16(conv_params->bck_offset);
+ const int16x4_t fwd_offset = vdup_n_s16(conv_params->fwd_offset);
+
+ if (w == 4) {
+ do {
+ const int16_t *s = &src[(y_qn >> SCALE_SUBPEL_BITS) * src_stride];
+
+ const ptrdiff_t filter_offset =
+ SUBPEL_TAPS * ((y_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS);
+ const int16x8_t filter = vld1q_s16(y_filter + filter_offset);
+
+ int16x4_t s0, s1, s2, s3, s4, s5, s6, s7;
+ load_s16_4x8(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);
+
+ int16x4_t d0 = compound_convolve8_4_v(s0, s1, s2, s3, s4, s5, s6, s7,
+ filter, vert_offset);
+
+ int16x4_t dd0 = vreinterpret_s16_u16(vld1_u16(dst16));
+
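+      // Weighted compound prediction: fwd_offset * dd0 + bck_offset * d0,
+      // with the DIST_PRECISION_BITS normalization merged into the final
+      // narrowing shift below.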
+ int32x4_t dst_wtd_avg = vmlal_s16(dist_wtd_offset, bck_offset, d0);
+ dst_wtd_avg = vmlal_s16(dst_wtd_avg, fwd_offset, dd0);
+
+ int16x4_t d0_s16 = vshrn_n_s32(
+ dst_wtd_avg, 2 * FILTER_BITS - ROUND0_BITS - COMPOUND_ROUND1_BITS +
+ DIST_PRECISION_BITS);
+
+ uint8x8_t d0_u8 = vqmovun_s16(vcombine_s16(d0_s16, vdup_n_s16(0)));
+
+ store_u8_4x1(dst8, d0_u8);
+
+ dst16 += dst16_stride;
+ dst8 += dst8_stride;
+ y_qn += y_step_qn;
+ } while (--h != 0);
+ } else {
+ do {
+ const int16_t *s = &src[(y_qn >> SCALE_SUBPEL_BITS) * src_stride];
+
+ const ptrdiff_t filter_offset =
+ SUBPEL_TAPS * ((y_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS);
+ const int16x8_t filter = vld1q_s16(y_filter + filter_offset);
+
+ int width = w;
+ uint8_t *dst8_ptr = dst8;
+ uint16_t *dst16_ptr = dst16;
+
+ do {
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
+ load_s16_8x8(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);
+
+ int16x8_t d0 = compound_convolve8_8_v(s0, s1, s2, s3, s4, s5, s6, s7,
+ filter, vert_offset);
+
+ int16x8_t dd0 = vreinterpretq_s16_u16(vld1q_u16(dst16_ptr));
+
+ int32x4_t dst_wtd_avg0 =
+ vmlal_s16(dist_wtd_offset, bck_offset, vget_low_s16(d0));
+ int32x4_t dst_wtd_avg1 =
+ vmlal_s16(dist_wtd_offset, bck_offset, vget_high_s16(d0));
+
+ dst_wtd_avg0 = vmlal_s16(dst_wtd_avg0, fwd_offset, vget_low_s16(dd0));
+ dst_wtd_avg1 = vmlal_s16(dst_wtd_avg1, fwd_offset, vget_high_s16(dd0));
+
+ int16x4_t d0_s16_0 = vshrn_n_s32(
+ dst_wtd_avg0, 2 * FILTER_BITS - ROUND0_BITS - COMPOUND_ROUND1_BITS +
+ DIST_PRECISION_BITS);
+ int16x4_t d0_s16_1 = vshrn_n_s32(
+ dst_wtd_avg1, 2 * FILTER_BITS - ROUND0_BITS - COMPOUND_ROUND1_BITS +
+ DIST_PRECISION_BITS);
+
+ uint8x8_t d0_u8 = vqmovun_s16(vcombine_s16(d0_s16_0, d0_s16_1));
+
+ vst1_u8(dst8_ptr, d0_u8);
+
+ s += 8;
+ dst8_ptr += 8;
+ dst16_ptr += 8;
+ width -= 8;
+ } while (width != 0);
+
+ dst16 += dst16_stride;
+ dst8 += dst8_stride;
+ y_qn += y_step_qn;
+ } while (--h != 0);
+ }
+}
+
+static INLINE uint8x8_t convolve8_4_v(const int16x4_t s0, const int16x4_t s1,
+ const int16x4_t s2, const int16x4_t s3,
+ const int16x4_t s4, const int16x4_t s5,
+ const int16x4_t s6, const int16x4_t s7,
+ const int16x8_t filter,
+ const int32x4_t offset_const) {
+ const int16x4_t filter_0_3 = vget_low_s16(filter);
+ const int16x4_t filter_4_7 = vget_high_s16(filter);
+
+ int32x4_t sum = offset_const;
+ sum = vmlal_lane_s16(sum, s0, filter_0_3, 0);
+ sum = vmlal_lane_s16(sum, s1, filter_0_3, 1);
+ sum = vmlal_lane_s16(sum, s2, filter_0_3, 2);
+ sum = vmlal_lane_s16(sum, s3, filter_0_3, 3);
+ sum = vmlal_lane_s16(sum, s4, filter_4_7, 0);
+ sum = vmlal_lane_s16(sum, s5, filter_4_7, 1);
+ sum = vmlal_lane_s16(sum, s6, filter_4_7, 2);
+ sum = vmlal_lane_s16(sum, s7, filter_4_7, 3);
+
+ int16x4_t res = vshrn_n_s32(sum, 2 * FILTER_BITS - ROUND0_BITS);
+
+ return vqmovun_s16(vcombine_s16(res, vdup_n_s16(0)));
+}
+
+static INLINE uint8x8_t convolve8_8_v(const int16x8_t s0, const int16x8_t s1,
+ const int16x8_t s2, const int16x8_t s3,
+ const int16x8_t s4, const int16x8_t s5,
+ const int16x8_t s6, const int16x8_t s7,
+ const int16x8_t filter,
+ const int32x4_t offset_const) {
+ const int16x4_t filter_0_3 = vget_low_s16(filter);
+ const int16x4_t filter_4_7 = vget_high_s16(filter);
+
+ int32x4_t sum0 = offset_const;
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s0), filter_0_3, 0);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), filter_0_3, 1);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), filter_0_3, 2);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), filter_0_3, 3);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s4), filter_4_7, 0);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s5), filter_4_7, 1);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s6), filter_4_7, 2);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s7), filter_4_7, 3);
+
+ int32x4_t sum1 = offset_const;
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s0), filter_0_3, 0);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), filter_0_3, 1);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), filter_0_3, 2);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), filter_0_3, 3);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s4), filter_4_7, 0);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s5), filter_4_7, 1);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s6), filter_4_7, 2);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s7), filter_4_7, 3);
+
+ int16x4_t res0 = vshrn_n_s32(sum0, 2 * FILTER_BITS - ROUND0_BITS);
+ int16x4_t res1 = vshrn_n_s32(sum1, 2 * FILTER_BITS - ROUND0_BITS);
+
+ return vqmovun_s16(vcombine_s16(res0, res1));
+}
+
+static INLINE void convolve_vert_scale_neon(const int16_t *src, int src_stride,
+ uint8_t *dst, int dst_stride, int w,
+ int h, const int16_t *y_filter,
+ int subpel_y_qn, int y_step_qn) {
+ const int bd = 8;
+ const int offset_bits = bd + 2 * FILTER_BITS - ROUND0_BITS;
+ const int round_1 = 2 * FILTER_BITS - ROUND0_BITS;
+ // The shim of 1 << (round_1 - 1) enables us to use non-rounding shifts.
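+  // Subtracting 1 << (offset_bits - 1) removes the horizontal-stage offset
+  // once it has been multiplied through by the vertical filter taps.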
+ int32x4_t vert_offset =
+ vdupq_n_s32((1 << (round_1 - 1)) - (1 << (offset_bits - 1)));
+
+ int y_qn = subpel_y_qn;
+ if (w == 4) {
+ do {
+ const int16_t *s = &src[(y_qn >> SCALE_SUBPEL_BITS) * src_stride];
+
+ const ptrdiff_t filter_offset =
+ SUBPEL_TAPS * ((y_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS);
+ const int16x8_t filter = vld1q_s16(y_filter + filter_offset);
+
+ int16x4_t s0, s1, s2, s3, s4, s5, s6, s7;
+ load_s16_4x8(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);
+
+ uint8x8_t d =
+ convolve8_4_v(s0, s1, s2, s3, s4, s5, s6, s7, filter, vert_offset);
+
+ store_u8_4x1(dst, d);
+
+ dst += dst_stride;
+ y_qn += y_step_qn;
+ } while (--h != 0);
+ } else if (w == 8) {
+ do {
+ const int16_t *s = &src[(y_qn >> SCALE_SUBPEL_BITS) * src_stride];
+
+ const ptrdiff_t filter_offset =
+ SUBPEL_TAPS * ((y_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS);
+ const int16x8_t filter = vld1q_s16(y_filter + filter_offset);
+
+ int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;
+ load_s16_8x8(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);
+
+ uint8x8_t d =
+ convolve8_8_v(s0, s1, s2, s3, s4, s5, s6, s7, filter, vert_offset);
+
+ vst1_u8(dst, d);
+
+ dst += dst_stride;
+ y_qn += y_step_qn;
+ } while (--h != 0);
+ } else {
+ do {
+ const int16_t *s = &src[(y_qn >> SCALE_SUBPEL_BITS) * src_stride];
+ uint8_t *d = dst;
+ int width = w;
+
+ const ptrdiff_t filter_offset =
+ SUBPEL_TAPS * ((y_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS);
+ const int16x8_t filter = vld1q_s16(y_filter + filter_offset);
+
+ do {
+ int16x8_t s0[2], s1[2], s2[2], s3[2], s4[2], s5[2], s6[2], s7[2];
+ load_s16_8x8(s, src_stride, &s0[0], &s1[0], &s2[0], &s3[0], &s4[0],
+ &s5[0], &s6[0], &s7[0]);
+ load_s16_8x8(s + 8, src_stride, &s0[1], &s1[1], &s2[1], &s3[1], &s4[1],
+ &s5[1], &s6[1], &s7[1]);
+
+ uint8x8_t d0 = convolve8_8_v(s0[0], s1[0], s2[0], s3[0], s4[0], s5[0],
+ s6[0], s7[0], filter, vert_offset);
+ uint8x8_t d1 = convolve8_8_v(s0[1], s1[1], s2[1], s3[1], s4[1], s5[1],
+ s6[1], s7[1], filter, vert_offset);
+
+ vst1q_u8(d, vcombine_u8(d0, d1));
+
+ s += 16;
+ d += 16;
+ width -= 16;
+ } while (width != 0);
+
+ dst += dst_stride;
+ y_qn += y_step_qn;
+ } while (--h != 0);
+ }
+}
+
+#endif // AOM_AV1_COMMON_ARM_CONVOLVE_SCALE_NEON_H_
diff --git a/av1/common/av1_rtcd_defs.pl b/av1/common/av1_rtcd_defs.pl
index 7fa8b4e..f70dce4 100644
--- a/av1/common/av1_rtcd_defs.pl
+++ b/av1/common/av1_rtcd_defs.pl
@@ -606,7 +606,7 @@
specialize qw/av1_convolve_x_sr_intrabc neon/;
specialize qw/av1_convolve_y_sr sse2 avx2 neon neon_dotprod neon_i8mm/;
specialize qw/av1_convolve_y_sr_intrabc neon/;
- specialize qw/av1_convolve_2d_scale sse4_1 neon/;
+ specialize qw/av1_convolve_2d_scale sse4_1 neon neon_dotprod/;
specialize qw/av1_dist_wtd_convolve_2d ssse3 avx2 neon neon_dotprod neon_i8mm/;
specialize qw/av1_dist_wtd_convolve_2d_copy sse2 avx2 neon/;
specialize qw/av1_dist_wtd_convolve_x sse2 avx2 neon neon_dotprod neon_i8mm/;
diff --git a/test/av1_convolve_scale_test.cc b/test/av1_convolve_scale_test.cc
index a428f41..2a4bae4 100644
--- a/test/av1_convolve_scale_test.cc
+++ b/test/av1_convolve_scale_test.cc
@@ -394,6 +394,13 @@
::testing::ValuesIn(kBlockDim)));
#endif // HAVE_NEON
+#if HAVE_NEON_DOTPROD
+INSTANTIATE_TEST_SUITE_P(
+ NEON_DOTPROD, LowBDConvolveScaleTest,
+ ::testing::Combine(::testing::Values(av1_convolve_2d_scale_neon_dotprod),
+ ::testing::ValuesIn(kBlockDim)));
+#endif // HAVE_NEON_DOTPROD
+
#if HAVE_SSE4_1
INSTANTIATE_TEST_SUITE_P(
SSE4_1, LowBDConvolveScaleTest,