Add 4-tap specialisation to aom_highbd_convolve8_vert_neon
Add a specialised path for 4-tap filters in
aom_highbd_convolve8_vert_neon. This gives between 30% and 50% uplift
compared to using the 8-tap path.
Delete the 4-tap SVE path and use this new implementation instead, as it
is now faster.
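
For reference, each output pixel on the new path is a four-term
multiply-accumulate that is rounded by FILTER_BITS and clamped to the
bit-depth maximum. A rough scalar equivalent is sketched below; the helper
name is illustrative only and not part of the change:

    #include <stddef.h>
    #include <stdint.h>

    // One 4-tap high-bitdepth output pixel: src points one row above the
    // output row and filter holds the four middle taps of the 8-tap array.
    static uint16_t highbd_convolve4_pixel(const uint16_t *src,
                                           ptrdiff_t stride,
                                           const int16_t *filter, int bd) {
      int32_t sum = 0;
      for (int k = 0; k < 4; ++k) sum += filter[k] * (int32_t)src[k * stride];
      // Round by FILTER_BITS (7) and clamp to [0, (1 << bd) - 1], matching
      // vqrshrun_n_s32 + vmin_u16 on the vector path.
      int32_t res = (sum + (1 << 6)) >> 7;
      if (res < 0) res = 0;
      const int32_t max = (1 << bd) - 1;
      return (uint16_t)(res > max ? max : res);
    }
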
Change-Id: Icf91eeb51fa227781872e907970974c2e9132df5
diff --git a/aom_dsp/arm/highbd_convolve8_neon.c b/aom_dsp/arm/highbd_convolve8_neon.c
index 75d1cd4..a433b95 100644
--- a/aom_dsp/arm/highbd_convolve8_neon.c
+++ b/aom_dsp/arm/highbd_convolve8_neon.c
@@ -158,38 +158,6 @@
}
}
-static INLINE uint16x4_t highbd_convolve4_4(
- const int16x4_t s0, const int16x4_t s1, const int16x4_t s2,
- const int16x4_t s3, const int16x4_t filter, const uint16x4_t max) {
- int32x4_t sum = vmull_lane_s16(s0, filter, 0);
- sum = vmlal_lane_s16(sum, s1, filter, 1);
- sum = vmlal_lane_s16(sum, s2, filter, 2);
- sum = vmlal_lane_s16(sum, s3, filter, 3);
-
- uint16x4_t res = vqrshrun_n_s32(sum, FILTER_BITS);
-
- return vmin_u16(res, max);
-}
-
-static INLINE uint16x8_t highbd_convolve4_8(
- const int16x8_t s0, const int16x8_t s1, const int16x8_t s2,
- const int16x8_t s3, const int16x4_t filter, const uint16x8_t max) {
- int32x4_t sum0 = vmull_lane_s16(vget_low_s16(s0), filter, 0);
- sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), filter, 1);
- sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), filter, 2);
- sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), filter, 3);
-
- int32x4_t sum1 = vmull_lane_s16(vget_high_s16(s0), filter, 0);
- sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), filter, 1);
- sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), filter, 2);
- sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), filter, 3);
-
- uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0, FILTER_BITS),
- vqrshrun_n_s32(sum1, FILTER_BITS));
-
- return vminq_u16(res, max);
-}
-
static void highbd_convolve_horiz_4tap_neon(
const uint16_t *src_ptr, ptrdiff_t src_stride, uint16_t *dst_ptr,
ptrdiff_t dst_stride, const int16_t *x_filter_ptr, int w, int h, int bd) {
@@ -293,11 +261,9 @@
}
}
-static void highbd_convolve_vert_neon(const uint16_t *src_ptr,
- ptrdiff_t src_stride, uint16_t *dst_ptr,
- ptrdiff_t dst_stride,
- const int16_t *y_filter_ptr, int w, int h,
- int bd) {
+static void highbd_convolve_vert_8tap_neon(
+ const uint16_t *src_ptr, ptrdiff_t src_stride, uint16_t *dst_ptr,
+ ptrdiff_t dst_stride, const int16_t *y_filter_ptr, int w, int h, int bd) {
assert(w >= 4 && h >= 4);
const int16x8_t y_filter = vld1q_s16(y_filter_ptr);
@@ -399,7 +365,13 @@
uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
src -= (SUBPEL_TAPS / 2 - 1) * src_stride;
- highbd_convolve_vert_neon(src, src_stride, dst, dst_stride, filter_y, w, h,
- bd);
+
+ if (get_filter_taps_convolve8(filter_y) <= 4) {
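+    // src has already been backed off by (SUBPEL_TAPS / 2 - 1) = 3 rows for
+    // the 8-tap case; a 4-tap filter only needs 1 row above the output row.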
+ highbd_convolve8_vert_4tap_neon(src + 2 * src_stride, src_stride, dst,
+ dst_stride, filter_y, w, h, bd);
+ } else {
+ highbd_convolve_vert_8tap_neon(src, src_stride, dst, dst_stride, filter_y,
+ w, h, bd);
+ }
}
}
diff --git a/aom_dsp/arm/highbd_convolve8_neon.h b/aom_dsp/arm/highbd_convolve8_neon.h
index 05cff79..0777378 100644
--- a/aom_dsp/arm/highbd_convolve8_neon.h
+++ b/aom_dsp/arm/highbd_convolve8_neon.h
@@ -95,4 +95,108 @@
}
}
+static INLINE uint16x4_t highbd_convolve4_4(
+ const int16x4_t s0, const int16x4_t s1, const int16x4_t s2,
+ const int16x4_t s3, const int16x4_t filter, const uint16x4_t max) {
+ int32x4_t sum = vmull_lane_s16(s0, filter, 0);
+ sum = vmlal_lane_s16(sum, s1, filter, 1);
+ sum = vmlal_lane_s16(sum, s2, filter, 2);
+ sum = vmlal_lane_s16(sum, s3, filter, 3);
+
+ uint16x4_t res = vqrshrun_n_s32(sum, FILTER_BITS);
+
+ return vmin_u16(res, max);
+}
+
+static INLINE uint16x8_t highbd_convolve4_8(
+ const int16x8_t s0, const int16x8_t s1, const int16x8_t s2,
+ const int16x8_t s3, const int16x4_t filter, const uint16x8_t max) {
+ int32x4_t sum0 = vmull_lane_s16(vget_low_s16(s0), filter, 0);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), filter, 1);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), filter, 2);
+ sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), filter, 3);
+
+ int32x4_t sum1 = vmull_lane_s16(vget_high_s16(s0), filter, 0);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), filter, 1);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), filter, 2);
+ sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), filter, 3);
+
+ uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0, FILTER_BITS),
+ vqrshrun_n_s32(sum1, FILTER_BITS));
+
+ return vminq_u16(res, max);
+}
+
+static INLINE void highbd_convolve8_vert_4tap_neon(
+ const uint16_t *src_ptr, ptrdiff_t src_stride, uint16_t *dst_ptr,
+ ptrdiff_t dst_stride, const int16_t *y_filter_ptr, int w, int h, int bd) {
+ assert(w >= 4 && h >= 4);
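+  // Load the four middle taps of the zero-padded 8-tap filter.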
+ const int16x4_t y_filter = vld1_s16(y_filter_ptr + 2);
+
+ if (w == 4) {
+ const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+ const int16_t *s = (const int16_t *)src_ptr;
+ uint16_t *d = dst_ptr;
+
+ int16x4_t s0, s1, s2;
+ load_s16_4x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x4_t s3, s4, s5, s6;
+ load_s16_4x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ uint16x4_t d0 = highbd_convolve4_4(s0, s1, s2, s3, y_filter, max);
+ uint16x4_t d1 = highbd_convolve4_4(s1, s2, s3, s4, y_filter, max);
+ uint16x4_t d2 = highbd_convolve4_4(s2, s3, s4, s5, y_filter, max);
+ uint16x4_t d3 = highbd_convolve4_4(s3, s4, s5, s6, y_filter, max);
+
+ store_u16_4x4(d, dst_stride, d0, d1, d2, d3);
+
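+      // Shuffle everything up four rows.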
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ h -= 4;
+ } while (h > 0);
+ } else {
+ const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+
+ do {
+ int height = h;
+ const int16_t *s = (const int16_t *)src_ptr;
+ uint16_t *d = dst_ptr;
+
+ int16x8_t s0, s1, s2;
+ load_s16_8x3(s, src_stride, &s0, &s1, &s2);
+ s += 3 * src_stride;
+
+ do {
+ int16x8_t s3, s4, s5, s6;
+ load_s16_8x4(s, src_stride, &s3, &s4, &s5, &s6);
+
+ uint16x8_t d0 = highbd_convolve4_8(s0, s1, s2, s3, y_filter, max);
+ uint16x8_t d1 = highbd_convolve4_8(s1, s2, s3, s4, y_filter, max);
+ uint16x8_t d2 = highbd_convolve4_8(s2, s3, s4, s5, y_filter, max);
+ uint16x8_t d3 = highbd_convolve4_8(s3, s4, s5, s6, y_filter, max);
+
+ store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
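+        // Shuffle everything up four rows.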
+ s0 = s4;
+ s1 = s5;
+ s2 = s6;
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ height -= 4;
+ } while (height > 0);
+ src_ptr += 8;
+ dst_ptr += 8;
+ w -= 8;
+ } while (w > 0);
+ }
+}
+
#endif // AOM_AOM_DSP_ARM_HIGHBD_CONVOLVE8_NEON_H_
diff --git a/aom_dsp/arm/highbd_convolve8_sve.c b/aom_dsp/arm/highbd_convolve8_sve.c
index fac65c9..789b38a 100644
--- a/aom_dsp/arm/highbd_convolve8_sve.c
+++ b/aom_dsp/arm/highbd_convolve8_sve.c
@@ -540,127 +540,6 @@
}
}
-static INLINE uint16x4_t highbd_convolve4_4_v(int16x8_t s[2], int16x8_t filter,
- uint16x4_t max) {
- int64x2_t sum01 = aom_svdot_lane_s16(vdupq_n_s64(0), s[0], filter, 0);
- int64x2_t sum23 = aom_svdot_lane_s16(vdupq_n_s64(0), s[1], filter, 0);
-
- int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
- uint16x4_t res = vqrshrun_n_s32(sum0123, FILTER_BITS);
-
- return vmin_u16(res, max);
-}
-
-static INLINE uint16x8_t highbd_convolve4_8_v(int16x8_t s[4], int16x8_t filter,
- uint16x8_t max) {
- int64x2_t sum01 = aom_svdot_lane_s16(vdupq_n_s64(0), s[0], filter, 0);
- int64x2_t sum23 = aom_svdot_lane_s16(vdupq_n_s64(0), s[1], filter, 0);
- int64x2_t sum45 = aom_svdot_lane_s16(vdupq_n_s64(0), s[2], filter, 0);
- int64x2_t sum67 = aom_svdot_lane_s16(vdupq_n_s64(0), s[3], filter, 0);
-
- int32x4_t s0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
- int32x4_t s4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
-
- uint16x8_t res = vcombine_u16(vqrshrun_n_s32(s0123, FILTER_BITS),
- vqrshrun_n_s32(s4567, FILTER_BITS));
-
- return vminq_u16(res, max);
-}
-
-static INLINE void highbd_convolve8_vert_4tap_sve(
- const uint16_t *src, ptrdiff_t src_stride, uint16_t *dst,
- ptrdiff_t dst_stride, const int16_t *filter_y, int width, int height,
- int bd) {
- const int16x8_t y_filter =
- vcombine_s16(vld1_s16(filter_y + 2), vdup_n_s16(0));
-
- uint8x16_t merge_block_tbl[3];
- merge_block_tbl[0] = vld1q_u8(kDotProdMergeBlockTbl);
- merge_block_tbl[1] = vld1q_u8(kDotProdMergeBlockTbl + 16);
- merge_block_tbl[2] = vld1q_u8(kDotProdMergeBlockTbl + 32);
-
- if (width == 4) {
- const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
- int16_t *s = (int16_t *)src;
-
- int16x4_t s0, s1, s2;
- load_s16_4x3(s, src_stride, &s0, &s1, &s2);
- s += 3 * src_stride;
-
- do {
- int16x4_t s3, s4, s5, s6;
- load_s16_4x4(s, src_stride, &s3, &s4, &s5, &s6);
-
- // This operation combines a conventional transpose and the sample permute
- // required before computing the dot product.
- int16x8_t s0123[2], s1234[2], s2345[2], s3456[2];
- transpose_concat_4x4(s0, s1, s2, s3, s0123);
- transpose_concat_4x4(s1, s2, s3, s4, s1234);
- transpose_concat_4x4(s2, s3, s4, s5, s2345);
- transpose_concat_4x4(s3, s4, s5, s6, s3456);
-
- uint16x4_t d0 = highbd_convolve4_4_v(s0123, y_filter, max);
- uint16x4_t d1 = highbd_convolve4_4_v(s1234, y_filter, max);
- uint16x4_t d2 = highbd_convolve4_4_v(s2345, y_filter, max);
- uint16x4_t d3 = highbd_convolve4_4_v(s3456, y_filter, max);
-
- store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
-
- // Shuffle everything up four rows.
- s0 = s4;
- s1 = s5;
- s2 = s6;
-
- s += 4 * src_stride;
- dst += 4 * dst_stride;
- height -= 4;
- } while (height != 0);
- } else {
- const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
- do {
- int h = height;
- int16_t *s = (int16_t *)src;
- uint16_t *d = dst;
-
- int16x8_t s0, s1, s2;
- load_s16_8x3(s, src_stride, &s0, &s1, &s2);
- s += 3 * src_stride;
-
- do {
- int16x8_t s3, s4, s5, s6;
- load_s16_8x4(s, src_stride, &s3, &s4, &s5, &s6);
-
- // This operation combines a conventional transpose and the sample
- // permute required before computing the dot product.
- int16x8_t s0123[4], s1234[4], s2345[4], s3456[4];
- transpose_concat_8x4(s0, s1, s2, s3, s0123);
- transpose_concat_8x4(s1, s2, s3, s4, s1234);
- transpose_concat_8x4(s2, s3, s4, s5, s2345);
- transpose_concat_8x4(s3, s4, s5, s6, s3456);
-
- uint16x8_t d0 = highbd_convolve4_8_v(s0123, y_filter, max);
- uint16x8_t d1 = highbd_convolve4_8_v(s1234, y_filter, max);
- uint16x8_t d2 = highbd_convolve4_8_v(s2345, y_filter, max);
- uint16x8_t d3 = highbd_convolve4_8_v(s3456, y_filter, max);
-
- store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
-
- // Shuffle everything up four rows.
- s0 = s4;
- s1 = s5;
- s2 = s6;
-
- s += 4 * src_stride;
- d += 4 * dst_stride;
- h -= 4;
- } while (h != 0);
- src += 8;
- dst += 8;
- width -= 8;
- } while (width != 0);
- }
-}
-
void aom_highbd_convolve8_vert_sve(const uint8_t *src8, ptrdiff_t src_stride,
uint8_t *dst8, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
@@ -678,8 +557,8 @@
src -= (SUBPEL_TAPS / 2 - 1) * src_stride;
if (get_filter_taps_convolve8(filter_y) <= 4) {
- highbd_convolve8_vert_4tap_sve(src + 2 * src_stride, src_stride, dst,
- dst_stride, filter_y, width, height, bd);
+ highbd_convolve8_vert_4tap_neon(src + 2 * src_stride, src_stride, dst,
+ dst_stride, filter_y, width, height, bd);
} else {
highbd_convolve8_vert_8tap_sve(src, src_stride, dst, dst_stride, filter_y,
width, height, bd);