Further optimization of aom_convolve8_neon 4-tap
Use the full vector bandwidth for 4xh blocks rather than half of it,
and load more (overlapping) vectors instead of loading a few and
deriving the shifted windows with VEXT. This gives up to a 10% uplift.
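
For illustration, here is a minimal standalone sketch of the load
strategy change, not the patch itself; the helper names
gather_with_vext and gather_with_loads are made up. (The 4xh half of
the change is separate: load_unaligned_u8 packs four bytes from each
of two rows into a single uint8x8_t, so a pair of 4-wide rows fills
the full vector width.)

  #include <arm_neon.h>

  // Old scheme: one 8-byte load, then VEXT to derive the shifted input
  // windows the 4-tap filter needs. Each VEXT depends on the loaded
  // vector, so the windows are produced serially.
  static inline void gather_with_vext(const uint8_t *src, int16x8_t s[4]) {
    int16x8_t t = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(src)));
    s[0] = t;
    // A real kernel would pass the next block as the second operand;
    // reusing t keeps the sketch self-contained.
    s[1] = vextq_s16(t, t, 1);
    s[2] = vextq_s16(t, t, 2);
    s[3] = vextq_s16(t, t, 3);
  }

  // New scheme: four overlapping loads at src + 0..3 (assumes the extra
  // 3 bytes are readable, as a horizontal convolution's input is). The
  // VEXT dependency chain disappears and the loads can issue
  // independently.
  static inline void gather_with_loads(const uint8_t *src, int16x8_t s[4]) {
    s[0] = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(src + 0)));
    s[1] = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(src + 1)));
    s[2] = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(src + 2)));
    s[3] = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(src + 3)));
  }
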
Change-Id: Icdad0952229f647d9dd6f6baafd0852d1a15ccc4
diff --git a/aom_dsp/arm/aom_convolve8_neon.c b/aom_dsp/arm/aom_convolve8_neon.c
index 9a3ff80..193844d 100644
--- a/aom_dsp/arm/aom_convolve8_neon.c
+++ b/aom_dsp/arm/aom_convolve8_neon.c
@@ -232,17 +232,6 @@
   }
 }
-static INLINE int16x4_t convolve4_4(const int16x4_t s0, const int16x4_t s1,
-                                    const int16x4_t s2, const int16x4_t s3,
-                                    const int16x4_t filter) {
-  int16x4_t sum = vmul_lane_s16(s0, filter, 0);
-  sum = vmla_lane_s16(sum, s1, filter, 1);
-  sum = vmla_lane_s16(sum, s2, filter, 2);
-  sum = vmla_lane_s16(sum, s3, filter, 3);
-
-  return sum;
-}
-
 static INLINE void convolve8_horiz_4tap_neon(const uint8_t *src,
                                              ptrdiff_t src_stride, uint8_t *dst,
                                              ptrdiff_t dst_stride,
@@ -254,26 +243,20 @@
   if (w == 4) {
     do {
-      int16x8_t t0 =
-          vreinterpretq_s16_u16(vmovl_u8(vld1_u8(src + 0 * src_stride)));
-      int16x8_t t1 =
-          vreinterpretq_s16_u16(vmovl_u8(vld1_u8(src + 1 * src_stride)));
+      uint8x8_t t01[4];
-      int16x4_t s0[4], s1[4];
-      s0[0] = vget_low_s16(t0);
-      s0[1] = vget_low_s16(vextq_s16(t0, t0, 1));
-      s0[2] = vget_low_s16(vextq_s16(t0, t0, 2));
-      s0[3] = vget_low_s16(vextq_s16(t0, t0, 3));
+      t01[0] = load_unaligned_u8(src + 0, (int)src_stride);
+      t01[1] = load_unaligned_u8(src + 1, (int)src_stride);
+      t01[2] = load_unaligned_u8(src + 2, (int)src_stride);
+      t01[3] = load_unaligned_u8(src + 3, (int)src_stride);
-      s1[0] = vget_low_s16(t1);
-      s1[1] = vget_low_s16(vextq_s16(t1, t1, 1));
-      s1[2] = vget_low_s16(vextq_s16(t1, t1, 2));
-      s1[3] = vget_low_s16(vextq_s16(t1, t1, 3));
+      int16x8_t s01[4];
+      s01[0] = vreinterpretq_s16_u16(vmovl_u8(t01[0]));
+      s01[1] = vreinterpretq_s16_u16(vmovl_u8(t01[1]));
+      s01[2] = vreinterpretq_s16_u16(vmovl_u8(t01[2]));
+      s01[3] = vreinterpretq_s16_u16(vmovl_u8(t01[3]));
-      int16x4_t d0 = convolve4_4(s0[0], s0[1], s0[2], s0[3], filter);
-      int16x4_t d1 = convolve4_4(s1[0], s1[1], s1[2], s1[3], filter);
-      // We halved the filter values so -1 from right shift.
-      uint8x8_t d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), FILTER_BITS - 1);
+      uint8x8_t d01 = convolve4_8(s01[0], s01[1], s01[2], s01[3], filter);
       store_u8x4_strided_x2(dst + 0 * dst_stride, dst_stride, d01);
@@ -287,37 +270,27 @@
       const uint8_t *s = src;
       uint8_t *d = dst;
-      int16x8_t t0 =
-          vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s + 0 * src_stride)));
-      int16x8_t t1 =
-          vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s + 1 * src_stride)));
-
-      s += 8;
       do {
-        int16x8_t t2 =
-            vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s + 0 * src_stride)));
-        int16x8_t t3 =
-            vreinterpretq_s16_u16(vmovl_u8(vld1_u8(s + 1 * src_stride)));
+        uint8x8_t t0[4], t1[4];
+        load_u8_8x4(s + 0 * src_stride, 1, &t0[0], &t0[1], &t0[2], &t0[3]);
+        load_u8_8x4(s + 1 * src_stride, 1, &t1[0], &t1[1], &t1[2], &t1[3]);
         int16x8_t s0[4], s1[4];
-        s0[0] = t0;
-        s0[1] = vextq_s16(t0, t2, 1);
-        s0[2] = vextq_s16(t0, t2, 2);
-        s0[3] = vextq_s16(t0, t2, 3);
+        s0[0] = vreinterpretq_s16_u16(vmovl_u8(t0[0]));
+        s0[1] = vreinterpretq_s16_u16(vmovl_u8(t0[1]));
+        s0[2] = vreinterpretq_s16_u16(vmovl_u8(t0[2]));
+        s0[3] = vreinterpretq_s16_u16(vmovl_u8(t0[3]));
-        s1[0] = t1;
-        s1[1] = vextq_s16(t1, t3, 1);
-        s1[2] = vextq_s16(t1, t3, 2);
-        s1[3] = vextq_s16(t1, t3, 3);
+        s1[0] = vreinterpretq_s16_u16(vmovl_u8(t1[0]));
+        s1[1] = vreinterpretq_s16_u16(vmovl_u8(t1[1]));
+        s1[2] = vreinterpretq_s16_u16(vmovl_u8(t1[2]));
+        s1[3] = vreinterpretq_s16_u16(vmovl_u8(t1[3]));
         uint8x8_t d0 = convolve4_8(s0[0], s0[1], s0[2], s0[3], filter);
         uint8x8_t d1 = convolve4_8(s1[0], s1[1], s1[2], s1[3], filter);
         store_u8_8x2(d, dst_stride, d0, d1);
-        t0 = t2;
-        t1 = t3;
-
         s += 8;
         d += 8;
         width -= 8;