/*
 * Copyright (c) 2019, Alliance for Open Media. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>
#include <assert.h>

#include "config/aom_config.h"
#include "config/aom_dsp_rtcd.h"
#include "aom/aom_integer.h"
#include "aom_dsp/arm/mem_neon.h"
#include "aom_dsp/arm/sum_neon.h"
#include "aom_dsp/arm/transpose_neon.h"
#include "aom_ports/mem.h"

#if !AOM_ARCH_AARCH64
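// Armv7 fallback for the AArch64-only vaddlvq_u16: horizontally add the
// eight u16 lanes; the total ends up in lane 0 of the returned vector.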
static INLINE uint32x2_t horizontal_add_u16x8_v(const uint16x8_t a) {
  const uint32x4_t b = vpaddlq_u16(a);
  const uint64x2_t c = vpaddlq_u32(b);
  return vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)),
                  vreinterpret_u32_u64(vget_high_u64(c)));
}
#endif

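// Compute the rounded average of a 4x4 block of 8-bit pixels.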
unsigned int aom_avg_4x4_neon(const uint8_t *a, int a_stride) {
  const uint8x16_t b = load_unaligned_u8q(a, a_stride);
  const uint16x8_t c = vaddl_u8(vget_low_u8(b), vget_high_u8(b));
#if AOM_ARCH_AARCH64
  const uint32_t d = vaddlvq_u16(c);
  return (d + 8) >> 4;
#else
  const uint32x2_t d = horizontal_add_u16x8_v(c);
  return vget_lane_u32(vrshr_n_u32(d, 4), 0);
#endif
}

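// Compute the rounded average of an 8x8 block of 8-bit pixels.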
unsigned int aom_avg_8x8_neon(const uint8_t *a, int a_stride) {
  uint16x8_t sum;
  uint8x8_t b = vld1_u8(a);
  a += a_stride;
  uint8x8_t c = vld1_u8(a);
  a += a_stride;
  sum = vaddl_u8(b, c);

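  // Accumulate the remaining six rows.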
  for (int i = 0; i < 6; ++i) {
    const uint8x8_t e = vld1_u8(a);
    a += a_stride;
    sum = vaddw_u8(sum, e);
  }

#if AOM_ARCH_AARCH64
  const uint32_t d = vaddlvq_u16(sum);
  return (d + 32) >> 6;
#else
  const uint32x2_t d = horizontal_add_u16x8_v(sum);
  return vget_lane_u32(vrshr_n_u32(d, 6), 0);
#endif
}

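// Compute the averages of the four 8x8 sub-blocks of the 16x16 block whose
// top-left corner is at (x16_idx, y16_idx) within the plane s.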
void aom_avg_8x8_quad_neon(const uint8_t *s, int p, int x16_idx, int y16_idx,
                           int *avg) {
  for (int k = 0; k < 4; k++) {
    const int x8_idx = x16_idx + ((k & 1) << 3);
    const int y8_idx = y16_idx + ((k >> 1) << 3);
    const uint8_t *s_tmp = s + y8_idx * p + x8_idx;
    avg[k] = aom_avg_8x8_neon(s_tmp, p);
  }
}

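// Sum of absolute values of the 16-bit coefficients: vabal_s16 against a
// zero vector accumulates |coeff[i]| sixteen elements at a time. length is
// assumed to be a non-zero multiple of 16.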
int aom_satd_lp_neon(const int16_t *coeff, int length) {
  const int16x4_t zero = vdup_n_s16(0);
  int32x4_t accum = vdupq_n_s32(0);

  do {
    const int16x8_t src0 = vld1q_s16(coeff);
    const int16x8_t src8 = vld1q_s16(coeff + 8);
    accum = vabal_s16(accum, vget_low_s16(src0), zero);
    accum = vabal_s16(accum, vget_high_s16(src0), zero);
    accum = vabal_s16(accum, vget_low_s16(src8), zero);
    accum = vabal_s16(accum, vget_high_s16(src8), zero);
    length -= 16;
    coeff += 16;
  } while (length != 0);

  return horizontal_add_s32x4(accum);
}

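// Sum each column of the width x height block at ref and write the column
// sums, shifted right by norm_factor (vshlq_s16 with a negated shift count
// performs an arithmetic shift right), to hbuf.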
void aom_int_pro_row_neon(int16_t *hbuf, const uint8_t *ref,
                          const int ref_stride, const int width,
                          const int height, int norm_factor) {
  assert(width % 16 == 0);
  assert(height % 4 == 0);

  const int16x8_t neg_norm_factor = vdupq_n_s16(-norm_factor);
  uint16x8_t sum_lo[2], sum_hi[2];

  int w = 0;
  do {
    const uint8_t *r = ref + w;
    uint8x16_t r0 = vld1q_u8(r + 0 * ref_stride);
    uint8x16_t r1 = vld1q_u8(r + 1 * ref_stride);
    uint8x16_t r2 = vld1q_u8(r + 2 * ref_stride);
    uint8x16_t r3 = vld1q_u8(r + 3 * ref_stride);

    sum_lo[0] = vaddl_u8(vget_low_u8(r0), vget_low_u8(r1));
    sum_hi[0] = vaddl_u8(vget_high_u8(r0), vget_high_u8(r1));
    sum_lo[1] = vaddl_u8(vget_low_u8(r2), vget_low_u8(r3));
    sum_hi[1] = vaddl_u8(vget_high_u8(r2), vget_high_u8(r3));

    r += 4 * ref_stride;

    for (int h = height - 4; h != 0; h -= 4) {
      r0 = vld1q_u8(r + 0 * ref_stride);
      r1 = vld1q_u8(r + 1 * ref_stride);
      r2 = vld1q_u8(r + 2 * ref_stride);
      r3 = vld1q_u8(r + 3 * ref_stride);

      uint16x8_t tmp0_lo = vaddl_u8(vget_low_u8(r0), vget_low_u8(r1));
      uint16x8_t tmp0_hi = vaddl_u8(vget_high_u8(r0), vget_high_u8(r1));
      uint16x8_t tmp1_lo = vaddl_u8(vget_low_u8(r2), vget_low_u8(r3));
      uint16x8_t tmp1_hi = vaddl_u8(vget_high_u8(r2), vget_high_u8(r3));

      sum_lo[0] = vaddq_u16(sum_lo[0], tmp0_lo);
      sum_hi[0] = vaddq_u16(sum_hi[0], tmp0_hi);
      sum_lo[1] = vaddq_u16(sum_lo[1], tmp1_lo);
      sum_hi[1] = vaddq_u16(sum_hi[1], tmp1_hi);

      r += 4 * ref_stride;
    }

    sum_lo[0] = vaddq_u16(sum_lo[0], sum_lo[1]);
    sum_hi[0] = vaddq_u16(sum_hi[0], sum_hi[1]);

    const int16x8_t avg0 =
        vshlq_s16(vreinterpretq_s16_u16(sum_lo[0]), neg_norm_factor);
    const int16x8_t avg1 =
        vshlq_s16(vreinterpretq_s16_u16(sum_hi[0]), neg_norm_factor);

    vst1q_s16(hbuf + w, avg0);
    vst1q_s16(hbuf + w + 8, avg1);
    w += 16;
  } while (w < width);
}

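// Sum each row of the width x height block at ref and write the row sums,
// shifted right by norm_factor, to vbuf.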
void aom_int_pro_col_neon(int16_t *vbuf, const uint8_t *ref,
                          const int ref_stride, const int width,
                          const int height, int norm_factor) {
  assert(width % 16 == 0);
  assert(height % 4 == 0);

  const int16x4_t neg_norm_factor = vdup_n_s16(-norm_factor);
  uint16x8_t sum[4];

  int h = 0;
  do {
    sum[0] = vpaddlq_u8(vld1q_u8(ref + 0 * ref_stride));
    sum[1] = vpaddlq_u8(vld1q_u8(ref + 1 * ref_stride));
    sum[2] = vpaddlq_u8(vld1q_u8(ref + 2 * ref_stride));
    sum[3] = vpaddlq_u8(vld1q_u8(ref + 3 * ref_stride));

    for (int w = 16; w < width; w += 16) {
      sum[0] = vpadalq_u8(sum[0], vld1q_u8(ref + 0 * ref_stride + w));
      sum[1] = vpadalq_u8(sum[1], vld1q_u8(ref + 1 * ref_stride + w));
      sum[2] = vpadalq_u8(sum[2], vld1q_u8(ref + 2 * ref_stride + w));
      sum[3] = vpadalq_u8(sum[3], vld1q_u8(ref + 3 * ref_stride + w));
    }

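    // horizontal_add_4d_u16x8 reduces each of the four row sums to a single
    // lane; narrow, normalize and store four results per iteration.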
    uint16x4_t sum_4d = vmovn_u32(horizontal_add_4d_u16x8(sum));
    int16x4_t avg = vshl_s16(vreinterpret_s16_u16(sum_4d), neg_norm_factor);
    vst1_s16(vbuf + h, avg);

    ref += 4 * ref_stride;
    h += 4;
  } while (h < height);
}

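// Sum of absolute values of the 32-bit (tran_low_t) coefficients.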
// coeff: 16 bits, dynamic range [-32640, 32640].
// length: value range {16, 64, 256, 1024}.
int aom_satd_neon(const tran_low_t *coeff, int length) {
  const int32x4_t zero = vdupq_n_s32(0);
  int32x4_t accum = zero;
  do {
    const int32x4_t src0 = vld1q_s32(&coeff[0]);
    const int32x4_t src8 = vld1q_s32(&coeff[4]);
    const int32x4_t src16 = vld1q_s32(&coeff[8]);
    const int32x4_t src24 = vld1q_s32(&coeff[12]);
    accum = vabaq_s32(accum, src0, zero);
    accum = vabaq_s32(accum, src8, zero);
    accum = vabaq_s32(accum, src16, zero);
    accum = vabaq_s32(accum, src24, zero);
    length -= 16;
    coeff += 16;
  } while (length != 0);

  // satd: 26 bits, dynamic range [-32640 * 1024, 32640 * 1024]
  return horizontal_add_s32x4(accum);
}

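// Return the variance of the ref - src difference vector of length 4 << bwl,
// computed as sse - mean^2 / length.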
int aom_vector_var_neon(const int16_t *ref, const int16_t *src, int bwl) {
  int32x4_t v_mean = vdupq_n_s32(0);
  int32x4_t v_sse = v_mean;
  int16x8_t v_ref, v_src;
  int16x4_t v_low;

  int i, width = 4 << bwl;
  for (i = 0; i < width; i += 8) {
    v_ref = vld1q_s16(&ref[i]);
    v_src = vld1q_s16(&src[i]);
    const int16x8_t diff = vsubq_s16(v_ref, v_src);
    // diff: dynamic range [-510, 510], 10 bits.
    v_mean = vpadalq_s16(v_mean, diff);
    v_low = vget_low_s16(diff);
    v_sse = vmlal_s16(v_sse, v_low, v_low);
#if AOM_ARCH_AARCH64
    v_sse = vmlal_high_s16(v_sse, diff, diff);
#else
    const int16x4_t v_high = vget_high_s16(diff);
    v_sse = vmlal_s16(v_sse, v_high, v_high);
#endif
  }
  const int mean = horizontal_add_s32x4(v_mean);
  const int sse = horizontal_add_s32x4(v_sse);
  const unsigned int mean_abs = mean >= 0 ? mean : -mean;
  // (mean * mean): dynamic range 31 bits.
  const int var = sse - ((mean_abs * mean_abs) >> (bwl + 2));
  return var;
}

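// Compute the minimum and maximum pixel-wise absolute difference between two
// 8x8 blocks.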
void aom_minmax_8x8_neon(const uint8_t *a, int a_stride, const uint8_t *b,
                         int b_stride, int *min, int *max) {
  // Load and concatenate.
  const uint8x16_t a01 = load_u8_8x2(a + 0 * a_stride, a_stride);
  const uint8x16_t a23 = load_u8_8x2(a + 2 * a_stride, a_stride);
  const uint8x16_t a45 = load_u8_8x2(a + 4 * a_stride, a_stride);
  const uint8x16_t a67 = load_u8_8x2(a + 6 * a_stride, a_stride);

  const uint8x16_t b01 = load_u8_8x2(b + 0 * b_stride, b_stride);
  const uint8x16_t b23 = load_u8_8x2(b + 2 * b_stride, b_stride);
  const uint8x16_t b45 = load_u8_8x2(b + 4 * b_stride, b_stride);
  const uint8x16_t b67 = load_u8_8x2(b + 6 * b_stride, b_stride);

  // Absolute difference.
  const uint8x16_t ab01_diff = vabdq_u8(a01, b01);
  const uint8x16_t ab23_diff = vabdq_u8(a23, b23);
  const uint8x16_t ab45_diff = vabdq_u8(a45, b45);
  const uint8x16_t ab67_diff = vabdq_u8(a67, b67);

  // Max values between the Q vectors.
  const uint8x16_t ab0123_max = vmaxq_u8(ab01_diff, ab23_diff);
  const uint8x16_t ab4567_max = vmaxq_u8(ab45_diff, ab67_diff);
  const uint8x16_t ab0123_min = vminq_u8(ab01_diff, ab23_diff);
  const uint8x16_t ab4567_min = vminq_u8(ab45_diff, ab67_diff);

  const uint8x16_t ab07_max = vmaxq_u8(ab0123_max, ab4567_max);
  const uint8x16_t ab07_min = vminq_u8(ab0123_min, ab4567_min);

#if AOM_ARCH_AARCH64
  *min = *max = 0;  // Clear high bits
  *((uint8_t *)max) = vmaxvq_u8(ab07_max);
  *((uint8_t *)min) = vminvq_u8(ab07_min);
#else
  // Split into 64-bit vectors and execute pairwise min/max.
  uint8x8_t ab_max = vmax_u8(vget_high_u8(ab07_max), vget_low_u8(ab07_max));
  uint8x8_t ab_min = vmin_u8(vget_high_u8(ab07_min), vget_low_u8(ab07_min));

  // Enough runs of vpmax/min propagate the max/min values to every position.
  ab_max = vpmax_u8(ab_max, ab_max);
  ab_min = vpmin_u8(ab_min, ab_min);

  ab_max = vpmax_u8(ab_max, ab_max);
  ab_min = vpmin_u8(ab_min, ab_min);

  ab_max = vpmax_u8(ab_max, ab_max);
  ab_min = vpmin_u8(ab_min, ab_min);

  *min = *max = 0;  // Clear high bits
  // Store directly to avoid costly neon->gpr transfer.
  vst1_lane_u8((uint8_t *)max, ab_max, 0);
  vst1_lane_u8((uint8_t *)min, ab_min, 0);
#endif
}