Remove partial vector loads from mem_neon.h

These were only ever used to either (a) load the 0th lane of the
vector, in which case we can simply zero-initialize the remaining
lanes at zero cost, or (b) serve as one of multiple calls that
together fully initialize the vector with a stride.

The use of a macro taking an immediate lane index is problematic,
since it relies on the compiler being able to fully analyze every use
of the vector to determine whether the initialization is safe or not.

For loading the 0th lane we already had `load_u8_4x1_lane0` to handle
the zeroing automatically, and we now use it everywhere it is needed.
For fully initializing the vector with a stride, we can simply pass
the stride in and do the loading inside the mem_neon.h helper
function. This has the nice advantage that we can remove the load
macros entirely.

Additionally, since the `_lane0` suffix is no longer needed to
distinguish the function from the lane-immediate macro version, the
suffix is removed.

Bug: aomedia:3507
Change-Id: I33ea4885c96c96059f0de67a5fdc4024b588d228
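As a minimal sketch of the new calling convention (mirroring the
aom_lpf_horizontal_4_neon change below; the variable names are taken
from that function):

  /* Before: a zeroing load for lane 0, then a macro call whose safety
     depends on the compiler proving that the lane-1 insert completes
     the initialization of the vector. */
  uint8x8_t p0q0 = load_u8_4x1_lane0(src - 1 * stride);
  load_u8_4x1(src + 0 * stride, &p0q0, 1);

  /* After: one helper takes the stride and fully initializes both
     32-bit lanes, so no partially-initialized vector is ever visible. */
  uint8x8_t p0q0 = load_u8_4x2(src - 1 * stride, 1 * stride);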
diff --git a/aom_dsp/arm/intrapred_neon.c b/aom_dsp/arm/intrapred_neon.c
index 880f7f6..41f070e 100644
--- a/aom_dsp/arm/intrapred_neon.c
+++ b/aom_dsp/arm/intrapred_neon.c
@@ -24,7 +24,7 @@
 // DC 4x4
 
 static INLINE uint16x8_t dc_load_sum_4(const uint8_t *in) {
-  const uint8x8_t a = load_u8_4x1_lane0(in);
+  const uint8x8_t a = load_u8_4x1(in);
   const uint16x4_t p0 = vpaddl_u8(a);
   const uint16x4_t p1 = vpadd_u16(p0, p0);
   return vcombine_u16(p1, vdup_n_u16(0));
@@ -354,7 +354,7 @@
 
 void aom_dc_predictor_4x8_neon(uint8_t *dst, ptrdiff_t stride,
                                const uint8_t *above, const uint8_t *left) {
-  uint8x8_t a = load_u8_4x1_lane0(above);
+  uint8x8_t a = load_u8_4x1(above);
   uint8x8_t l = vld1_u8(left);
   uint32_t sum = horizontal_add_u16x8(vaddl_u8(a, l));
   uint32_t dc = calculate_dc_from_sum(4, 8, sum, 2, DC_MULTIPLIER_1X2);
@@ -364,7 +364,7 @@
 void aom_dc_predictor_8x4_neon(uint8_t *dst, ptrdiff_t stride,
                                const uint8_t *above, const uint8_t *left) {
   uint8x8_t a = vld1_u8(above);
-  uint8x8_t l = load_u8_4x1_lane0(left);
+  uint8x8_t l = load_u8_4x1(left);
   uint32_t sum = horizontal_add_u16x8(vaddl_u8(a, l));
   uint32_t dc = calculate_dc_from_sum(8, 4, sum, 2, DC_MULTIPLIER_1X2);
   dc_store_8xh(dst, stride, 4, vdup_n_u8(dc));
@@ -372,7 +372,7 @@
 
 void aom_dc_predictor_4x16_neon(uint8_t *dst, ptrdiff_t stride,
                                 const uint8_t *above, const uint8_t *left) {
-  uint8x8_t a = load_u8_4x1_lane0(above);
+  uint8x8_t a = load_u8_4x1(above);
   uint8x16_t l = vld1q_u8(left);
   uint16x8_t sum_al = vaddw_u8(vpaddlq_u8(l), a);
   uint32_t sum = horizontal_add_u16x8(sum_al);
@@ -383,7 +383,7 @@
 void aom_dc_predictor_16x4_neon(uint8_t *dst, ptrdiff_t stride,
                                 const uint8_t *above, const uint8_t *left) {
   uint8x16_t a = vld1q_u8(above);
-  uint8x8_t l = load_u8_4x1_lane0(left);
+  uint8x8_t l = load_u8_4x1(left);
   uint16x8_t sum_al = vaddw_u8(vpaddlq_u8(a), l);
   uint32_t sum = horizontal_add_u16x8(sum_al);
   uint32_t dc = calculate_dc_from_sum(16, 4, sum, 2, DC_MULTIPLIER_1X4);
@@ -620,7 +620,7 @@
 void aom_v_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
                               const uint8_t *above, const uint8_t *left) {
   (void)left;
-  v_store_4xh(dst, stride, 4, load_u8_4x1_lane0(above));
+  v_store_4xh(dst, stride, 4, load_u8_4x1(above));
 }
 
 void aom_v_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
@@ -646,13 +646,13 @@
 void aom_v_predictor_4x8_neon(uint8_t *dst, ptrdiff_t stride,
                               const uint8_t *above, const uint8_t *left) {
   (void)left;
-  v_store_4xh(dst, stride, 8, load_u8_4x1_lane0(above));
+  v_store_4xh(dst, stride, 8, load_u8_4x1(above));
 }
 
 void aom_v_predictor_4x16_neon(uint8_t *dst, ptrdiff_t stride,
                                const uint8_t *above, const uint8_t *left) {
   (void)left;
-  v_store_4xh(dst, stride, 16, load_u8_4x1_lane0(above));
+  v_store_4xh(dst, stride, 16, load_u8_4x1(above));
 }
 
 void aom_v_predictor_8x4_neon(uint8_t *dst, ptrdiff_t stride,
@@ -856,7 +856,7 @@
 
 void aom_h_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
                               const uint8_t *above, const uint8_t *left) {
-  const uint8x8_t d0 = load_u8_4x1_lane0(left);
+  const uint8x8_t d0 = load_u8_4x1(left);
   (void)above;
   store_u8_4x1(dst + 0 * stride, vdup_lane_u8(d0, 0), 0);
   store_u8_4x1(dst + 1 * stride, vdup_lane_u8(d0, 1), 0);
@@ -907,7 +907,7 @@
 
 void aom_h_predictor_8x4_neon(uint8_t *dst, ptrdiff_t stride,
                               const uint8_t *above, const uint8_t *left) {
-  const uint8x8_t d0 = load_u8_4x1_lane0(left);
+  const uint8x8_t d0 = load_u8_4x1(left);
   (void)above;
   vst1_u8(dst + 0 * stride, vdup_lane_u8(d0, 0));
   vst1_u8(dst + 1 * stride, vdup_lane_u8(d0, 1));
@@ -936,7 +936,7 @@
 
 void aom_h_predictor_16x4_neon(uint8_t *dst, ptrdiff_t stride,
                                const uint8_t *above, const uint8_t *left) {
-  const uint8x8_t d0 = load_u8_4x1_lane0(left);
+  const uint8x8_t d0 = load_u8_4x1(left);
   (void)above;
   vst1q_u8(dst + 0 * stride, vdupq_lane_u8(d0, 0));
   vst1q_u8(dst + 1 * stride, vdupq_lane_u8(d0, 1));
@@ -3174,10 +3174,10 @@
   const uint8_t bottom_left = left_column[height - 1];
   const uint8_t *const weights_y = smooth_weights + height - 4;
 
-  uint8x8_t top_v = load_u8_4x1_lane0(top_row);
+  uint8x8_t top_v = load_u8_4x1(top_row);
   const uint8x8_t top_right_v = vdup_n_u8(top_right);
   const uint8x8_t bottom_left_v = vdup_n_u8(bottom_left);
-  uint8x8_t weights_x_v = load_u8_4x1_lane0(smooth_weights);
+  uint8x8_t weights_x_v = load_u8_4x1(smooth_weights);
   const uint8x8_t scaled_weights_x = negate_s8(weights_x_v);
 
   const uint16x8_t weighted_tr = vmull_u8(scaled_weights_x, top_right_v);
@@ -3409,7 +3409,7 @@
                                                                           \
     uint8x8_t top_v;                                                      \
     if ((W) == 4) {                                                       \
-      top_v = load_u8_4x1_lane0(top_row);                                 \
+      top_v = load_u8_4x1(top_row);                                       \
    } else { /* width == 8 */                                              \
       top_v = vld1_u8(top_row);                                           \
     }                                                                     \
@@ -3723,7 +3723,7 @@
   const uint16x8_t top_left_x2 = vdupq_n_u16(top_row[-1] + top_row[-1]);
   uint8x8_t top;
   if (width == 4) {
-    top = load_u8_4x1_lane0(top_row);
+    top = load_u8_4x1(top_row);
   } else {  // width == 8
     top = vld1_u8(top_row);
   }
diff --git a/aom_dsp/arm/loopfilter_neon.c b/aom_dsp/arm/loopfilter_neon.c
index fd2a50f..0e683a7 100644
--- a/aom_dsp/arm/loopfilter_neon.c
+++ b/aom_dsp/arm/loopfilter_neon.c
@@ -886,20 +886,13 @@
 void aom_lpf_horizontal_14_neon(uint8_t *src, int stride,
                                 const uint8_t *blimit, const uint8_t *limit,
                                 const uint8_t *thresh) {
-  uint8x8_t p6q6 = load_u8_4x1_lane0(src - 7 * stride);
-  uint8x8_t p5q5 = load_u8_4x1_lane0(src - 6 * stride);
-  uint8x8_t p4q4 = load_u8_4x1_lane0(src - 5 * stride);
-  uint8x8_t p3q3 = load_u8_4x1_lane0(src - 4 * stride);
-  uint8x8_t p2q2 = load_u8_4x1_lane0(src - 3 * stride);
-  uint8x8_t p1q1 = load_u8_4x1_lane0(src - 2 * stride);
-  uint8x8_t p0q0 = load_u8_4x1_lane0(src - 1 * stride);
-  load_u8_4x1(src + 0 * stride, &p0q0, 1);
-  load_u8_4x1(src + 1 * stride, &p1q1, 1);
-  load_u8_4x1(src + 2 * stride, &p2q2, 1);
-  load_u8_4x1(src + 3 * stride, &p3q3, 1);
-  load_u8_4x1(src + 4 * stride, &p4q4, 1);
-  load_u8_4x1(src + 5 * stride, &p5q5, 1);
-  load_u8_4x1(src + 6 * stride, &p6q6, 1);
+  uint8x8_t p6q6 = load_u8_4x2(src - 7 * stride, 13 * stride);
+  uint8x8_t p5q5 = load_u8_4x2(src - 6 * stride, 11 * stride);
+  uint8x8_t p4q4 = load_u8_4x2(src - 5 * stride, 9 * stride);
+  uint8x8_t p3q3 = load_u8_4x2(src - 4 * stride, 7 * stride);
+  uint8x8_t p2q2 = load_u8_4x2(src - 3 * stride, 5 * stride);
+  uint8x8_t p1q1 = load_u8_4x2(src - 2 * stride, 3 * stride);
+  uint8x8_t p0q0 = load_u8_4x2(src - 1 * stride, 1 * stride);
 
   lpf_14_neon(&p6q6, &p5q5, &p4q4, &p3q3, &p2q2, &p1q1, &p0q0, *blimit, *limit,
               *thresh);
@@ -1031,10 +1024,8 @@
 void aom_lpf_horizontal_4_neon(uint8_t *src, int stride,
                                const uint8_t *blimit, const uint8_t *limit,
                                const uint8_t *thresh) {
-  uint8x8_t p1q1 = load_u8_4x1_lane0(src - 2 * stride);
-  uint8x8_t p0q0 = load_u8_4x1_lane0(src - 1 * stride);
-  load_u8_4x1(src + 0 * stride, &p0q0, 1);
-  load_u8_4x1(src + 1 * stride, &p1q1, 1);
+  uint8x8_t p1q1 = load_u8_4x2(src - 2 * stride, 3 * stride);
+  uint8x8_t p0q0 = load_u8_4x2(src - 1 * stride, 1 * stride);
 
   lpf_4_neon(&p1q1, &p0q0, *blimit, *limit, *thresh);
 
diff --git a/aom_dsp/arm/mem_neon.h b/aom_dsp/arm/mem_neon.h
index ca04bcc..d1ac648 100644
--- a/aom_dsp/arm/mem_neon.h
+++ b/aom_dsp/arm/mem_neon.h
@@ -90,23 +90,31 @@
   return vcombine_u8(vld1_u8(s), vld1_u8(s + p));
 }
 
-/* These intrinsics require immediate values, so we must use #defines
-   to enforce that. */
-#define load_u8_4x1(s, s0, lane)                                           \
-  do {                                                                     \
-    *(s0) = vreinterpret_u8_u32(                                           \
-        vld1_lane_u32((uint32_t *)(s), vreinterpret_u32_u8(*(s0)), lane)); \
-  } while (0)
-#define load_u16_2x1(s, s0, lane)                                           \
-  do {                                                                      \
-    *(s0) = vreinterpret_u16_u32(                                           \
-        vld1_lane_u32((uint32_t *)(s), vreinterpret_u32_u16(*(s0)), lane)); \
-  } while (0)
-
 // Load four bytes into the low half of a uint8x8_t, zero the upper half.
-static INLINE uint8x8_t load_u8_4x1_lane0(const uint8_t *p) {
+static INLINE uint8x8_t load_u8_4x1(const uint8_t *p) {
   uint8x8_t ret = vdup_n_u8(0);
-  load_u8_4x1(p, &ret, 0);
+  ret = vreinterpret_u8_u32(
+      vld1_lane_u32((const uint32_t *)p, vreinterpret_u32_u8(ret), 0));
+  return ret;
+}
+
+static INLINE uint8x8_t load_u8_4x2(const uint8_t *p, int stride) {
+  uint8x8_t ret = vdup_n_u8(0);
+  ret = vreinterpret_u8_u32(
+      vld1_lane_u32((const uint32_t *)p, vreinterpret_u32_u8(ret), 0));
+  p += stride;
+  ret = vreinterpret_u8_u32(
+      vld1_lane_u32((const uint32_t *)p, vreinterpret_u32_u8(ret), 1));
+  return ret;
+}
+
+static INLINE uint16x4_t load_u16_2x2(const uint16_t *p, int stride) {
+  uint16x4_t ret = vdup_n_u16(0);
+  ret = vreinterpret_u16_u32(
+      vld1_lane_u32((const uint32_t *)p, vreinterpret_u32_u16(ret), 0));
+  p += stride;
+  ret = vreinterpret_u16_u32(
+      vld1_lane_u32((const uint32_t *)p, vreinterpret_u32_u16(ret), 1));
   return ret;
 }
 
diff --git a/av1/encoder/arm/neon/reconinter_enc_neon.c b/av1/encoder/arm/neon/reconinter_enc_neon.c
index 8075460..03afa30 100644
--- a/av1/encoder/arm/neon/reconinter_enc_neon.c
+++ b/av1/encoder/arm/neon/reconinter_enc_neon.c
@@ -220,10 +220,8 @@
   } else {
     assert(width == 2);
     int i = height / 2;
-    uint16x4_t r = vdup_n_u16(0);
     do {
-      load_u16_2x1(ref + 0 * ref_stride, &r, 0);
-      load_u16_2x1(ref + 1 * ref_stride, &r, 1);
+      uint16x4_t r = load_u16_2x2(ref, ref_stride);
       store_u16_2x1(comp_pred + 0 * width, r, 0);
       store_u16_2x1(comp_pred + 1 * width, r, 1);
       ref += 2 * ref_stride;
diff --git a/av1/encoder/arm/neon/temporal_filter_neon.c b/av1/encoder/arm/neon/temporal_filter_neon.c
index 38a6e4b..986f143 100644
--- a/av1/encoder/arm/neon/temporal_filter_neon.c
+++ b/av1/encoder/arm/neon/temporal_filter_neon.c
@@ -456,15 +456,15 @@
     if (w <= (width - 1) - 4) {
       uint16x8_t mask = vcombine_u16(vdup_n_u16(65535), vdup_n_u16(0));
       uint8x8_t mat[3][3];
-      mat[0][0] = load_u8_4x1_lane0(src_ptr - stride - 1);
-      mat[0][1] = load_u8_4x1_lane0(src_ptr - stride);
-      mat[0][2] = load_u8_4x1_lane0(src_ptr - stride + 1);
-      mat[1][0] = load_u8_4x1_lane0(src_ptr - 1);
-      mat[1][1] = load_u8_4x1_lane0(src_ptr);
-      mat[1][2] = load_u8_4x1_lane0(src_ptr + 1);
-      mat[2][0] = load_u8_4x1_lane0(src_ptr + stride - 1);
-      mat[2][1] = load_u8_4x1_lane0(src_ptr + stride);
-      mat[2][2] = load_u8_4x1_lane0(src_ptr + stride + 1);
+      mat[0][0] = load_u8_4x1(src_ptr - stride - 1);
+      mat[0][1] = load_u8_4x1(src_ptr - stride);
+      mat[0][2] = load_u8_4x1(src_ptr - stride + 1);
+      mat[1][0] = load_u8_4x1(src_ptr - 1);
+      mat[1][1] = load_u8_4x1(src_ptr);
+      mat[1][2] = load_u8_4x1(src_ptr + 1);
+      mat[2][0] = load_u8_4x1(src_ptr + stride - 1);
+      mat[2][1] = load_u8_4x1(src_ptr + stride);
+      mat[2][2] = load_u8_4x1(src_ptr + stride + 1);
 
       // Compute Sobel gradients.
       uint16x8_t gxa = vaddl_u8(mat[0][0], mat[2][0]);