/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <arm_neon.h>
#include <assert.h>
#include <string.h>

#include "config/aom_dsp_rtcd.h"
#include "config/aom_config.h"
#include "aom_dsp/arm/sum_neon.h"
#include "aom/aom_integer.h"
#include "aom_ports/mem.h"

// w * h must be less than 2048 or local variable v_sum may overflow.
static void variance_neon_w8(const uint8_t *a, int a_stride, const uint8_t *b,
                             int b_stride, int w, int h, uint32_t *sse,
                             int *sum) {
  int i, j;
  int16x8_t v_sum = vdupq_n_s16(0);
  int32x4_t v_sse_lo = vdupq_n_s32(0);
  int32x4_t v_sse_hi = vdupq_n_s32(0);

  for (i = 0; i < h; ++i) {
    for (j = 0; j < w; j += 8) {
      const uint8x8_t v_a = vld1_u8(&a[j]);
      const uint8x8_t v_b = vld1_u8(&b[j]);
      const uint16x8_t v_diff = vsubl_u8(v_a, v_b);
      const int16x8_t sv_diff = vreinterpretq_s16_u16(v_diff);
      v_sum = vaddq_s16(v_sum, sv_diff);
      v_sse_lo =
          vmlal_s16(v_sse_lo, vget_low_s16(sv_diff), vget_low_s16(sv_diff));
      v_sse_hi =
          vmlal_s16(v_sse_hi, vget_high_s16(sv_diff), vget_high_s16(sv_diff));
    }
    a += a_stride;
    b += b_stride;
  }

  *sum = horizontal_add_s16x8(v_sum);
  *sse = (unsigned int)horizontal_add_s32x4(vaddq_s32(v_sse_lo, v_sse_hi));
}

void aom_get8x8var_neon(const uint8_t *a, int a_stride, const uint8_t *b,
                        int b_stride, unsigned int *sse, int *sum) {
  variance_neon_w8(a, a_stride, b, b_stride, 8, 8, sse, sum);
}

void aom_get16x16var_neon(const uint8_t *a, int a_stride, const uint8_t *b,
                          int b_stride, unsigned int *sse, int *sum) {
  variance_neon_w8(a, a_stride, b, b_stride, 16, 16, sse, sum);
}

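// Each aom_variance{w}x{h}_neon function below computes
//   variance = sse - (sum * sum) / (w * h),
// using the sse/sum pair produced by variance_neon_w8. The division is
// implemented as a right shift by log2(w * h) (>> 6 for 8x8, >> 8 for 16x16,
// and so on), and sum * sum is widened to int64_t once the block is large
// enough for the product to overflow 32 bits.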
unsigned int aom_variance8x8_neon(const uint8_t *a, int a_stride,
                                  const uint8_t *b, int b_stride,
                                  unsigned int *sse) {
  int sum;
  variance_neon_w8(a, a_stride, b, b_stride, 8, 8, sse, &sum);
  return *sse - ((sum * sum) >> 6);
}

unsigned int aom_variance16x16_neon(const uint8_t *a, int a_stride,
                                    const uint8_t *b, int b_stride,
                                    unsigned int *sse) {
  int sum;
  variance_neon_w8(a, a_stride, b, b_stride, 16, 16, sse, &sum);
  return *sse - (((unsigned int)((int64_t)sum * sum)) >> 8);
}

unsigned int aom_variance32x32_neon(const uint8_t *a, int a_stride,
                                    const uint8_t *b, int b_stride,
                                    unsigned int *sse) {
  int sum;
  variance_neon_w8(a, a_stride, b, b_stride, 32, 32, sse, &sum);
  return *sse - (unsigned int)(((int64_t)sum * sum) >> 10);
}

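// Blocks too large for variance_neon_w8 to process in one call (see the
// w * h limit above) are split into sub-blocks; the partial sse/sum results
// are accumulated and the variance is computed from the totals.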
unsigned int aom_variance32x64_neon(const uint8_t *a, int a_stride,
                                    const uint8_t *b, int b_stride,
                                    unsigned int *sse) {
  int sum1, sum2;
  uint32_t sse1, sse2;
  variance_neon_w8(a, a_stride, b, b_stride, 32, 32, &sse1, &sum1);
  variance_neon_w8(a + (32 * a_stride), a_stride, b + (32 * b_stride), b_stride,
                   32, 32, &sse2, &sum2);
  *sse = sse1 + sse2;
  sum1 += sum2;
  return *sse - (unsigned int)(((int64_t)sum1 * sum1) >> 11);
}

unsigned int aom_variance64x32_neon(const uint8_t *a, int a_stride,
                                    const uint8_t *b, int b_stride,
                                    unsigned int *sse) {
  int sum1, sum2;
  uint32_t sse1, sse2;
  variance_neon_w8(a, a_stride, b, b_stride, 64, 16, &sse1, &sum1);
  variance_neon_w8(a + (16 * a_stride), a_stride, b + (16 * b_stride), b_stride,
                   64, 16, &sse2, &sum2);
  *sse = sse1 + sse2;
  sum1 += sum2;
  return *sse - (unsigned int)(((int64_t)sum1 * sum1) >> 11);
}

unsigned int aom_variance64x64_neon(const uint8_t *a, int a_stride,
                                    const uint8_t *b, int b_stride,
                                    unsigned int *sse) {
  int sum1, sum2;
  uint32_t sse1, sse2;

  variance_neon_w8(a, a_stride, b, b_stride, 64, 16, &sse1, &sum1);
  variance_neon_w8(a + (16 * a_stride), a_stride, b + (16 * b_stride), b_stride,
                   64, 16, &sse2, &sum2);
  sse1 += sse2;
  sum1 += sum2;

  variance_neon_w8(a + (16 * 2 * a_stride), a_stride, b + (16 * 2 * b_stride),
                   b_stride, 64, 16, &sse2, &sum2);
  sse1 += sse2;
  sum1 += sum2;

  variance_neon_w8(a + (16 * 3 * a_stride), a_stride, b + (16 * 3 * b_stride),
                   b_stride, 64, 16, &sse2, &sum2);
  *sse = sse1 + sse2;
  sum1 += sum2;
  return *sse - (unsigned int)(((int64_t)sum1 * sum1) >> 12);
}

unsigned int aom_variance128x128_neon(const uint8_t *a, int a_stride,
                                      const uint8_t *b, int b_stride,
                                      unsigned int *sse) {
  int sum1, sum2;
  uint32_t sse1, sse2;
  sum1 = sse1 = 0;
  for (int i = 0; i < 16; i++) {
    variance_neon_w8(a + (8 * i * a_stride), a_stride, b + (8 * i * b_stride),
                     b_stride, 128, 8, &sse2, &sum2);
    sse1 += sse2;
    sum1 += sum2;
  }

  *sse = sse1;

  return *sse - (unsigned int)(((int64_t)sum1 * sum1) >> 14);
}

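// The 16x8 and 8x16 kernels below keep a running sum of differences in q8s32
// and sums of squared differences in q9s32/q10s32, then compute
//   variance = sse - (sum * sum) / 128,
// with the division by the pixel count (16 * 8 = 128) implemented as the
// final right shift by 7.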
unsigned int aom_variance16x8_neon(const unsigned char *src_ptr,
                                   int source_stride,
                                   const unsigned char *ref_ptr,
                                   int recon_stride, unsigned int *sse) {
  int i;
  int16x4_t d22s16, d23s16, d24s16, d25s16, d26s16, d27s16, d28s16, d29s16;
  uint32x2_t d0u32, d10u32;
  int64x1_t d0s64, d1s64;
  uint8x16_t q0u8, q1u8, q2u8, q3u8;
  uint16x8_t q11u16, q12u16, q13u16, q14u16;
  int32x4_t q8s32, q9s32, q10s32;
  int64x2_t q0s64, q1s64, q5s64;

  q8s32 = vdupq_n_s32(0);
  q9s32 = vdupq_n_s32(0);
  q10s32 = vdupq_n_s32(0);

  for (i = 0; i < 4; i++) {
    q0u8 = vld1q_u8(src_ptr);
    src_ptr += source_stride;
    q1u8 = vld1q_u8(src_ptr);
    src_ptr += source_stride;
    __builtin_prefetch(src_ptr);

    q2u8 = vld1q_u8(ref_ptr);
    ref_ptr += recon_stride;
    q3u8 = vld1q_u8(ref_ptr);
    ref_ptr += recon_stride;
    __builtin_prefetch(ref_ptr);

    q11u16 = vsubl_u8(vget_low_u8(q0u8), vget_low_u8(q2u8));
    q12u16 = vsubl_u8(vget_high_u8(q0u8), vget_high_u8(q2u8));
    q13u16 = vsubl_u8(vget_low_u8(q1u8), vget_low_u8(q3u8));
    q14u16 = vsubl_u8(vget_high_u8(q1u8), vget_high_u8(q3u8));

    d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
    d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
    q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q11u16));
    q9s32 = vmlal_s16(q9s32, d22s16, d22s16);
    q10s32 = vmlal_s16(q10s32, d23s16, d23s16);

    d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
    d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
    q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q12u16));
    q9s32 = vmlal_s16(q9s32, d24s16, d24s16);
    q10s32 = vmlal_s16(q10s32, d25s16, d25s16);

    d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
    d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));
    q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q13u16));
    q9s32 = vmlal_s16(q9s32, d26s16, d26s16);
    q10s32 = vmlal_s16(q10s32, d27s16, d27s16);

    d28s16 = vreinterpret_s16_u16(vget_low_u16(q14u16));
    d29s16 = vreinterpret_s16_u16(vget_high_u16(q14u16));
    q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q14u16));
    q9s32 = vmlal_s16(q9s32, d28s16, d28s16);
    q10s32 = vmlal_s16(q10s32, d29s16, d29s16);
  }

  q10s32 = vaddq_s32(q10s32, q9s32);
  q0s64 = vpaddlq_s32(q8s32);
  q1s64 = vpaddlq_s32(q10s32);

  d0s64 = vadd_s64(vget_low_s64(q0s64), vget_high_s64(q0s64));
  d1s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64));

  q5s64 = vmull_s32(vreinterpret_s32_s64(d0s64), vreinterpret_s32_s64(d0s64));
  vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d1s64), 0);

  d10u32 = vshr_n_u32(vreinterpret_u32_s64(vget_low_s64(q5s64)), 7);
  d0u32 = vsub_u32(vreinterpret_u32_s64(d1s64), d10u32);

  return vget_lane_u32(d0u32, 0);
}

unsigned int aom_variance8x16_neon(const unsigned char *src_ptr,
                                   int source_stride,
                                   const unsigned char *ref_ptr,
                                   int recon_stride, unsigned int *sse) {
  int i;
  uint8x8_t d0u8, d2u8, d4u8, d6u8;
  int16x4_t d22s16, d23s16, d24s16, d25s16;
  uint32x2_t d0u32, d10u32;
  int64x1_t d0s64, d1s64;
  uint16x8_t q11u16, q12u16;
  int32x4_t q8s32, q9s32, q10s32;
  int64x2_t q0s64, q1s64, q5s64;

  q8s32 = vdupq_n_s32(0);
  q9s32 = vdupq_n_s32(0);
  q10s32 = vdupq_n_s32(0);

  for (i = 0; i < 8; i++) {
    d0u8 = vld1_u8(src_ptr);
    src_ptr += source_stride;
    d2u8 = vld1_u8(src_ptr);
    src_ptr += source_stride;
    __builtin_prefetch(src_ptr);

    d4u8 = vld1_u8(ref_ptr);
    ref_ptr += recon_stride;
    d6u8 = vld1_u8(ref_ptr);
    ref_ptr += recon_stride;
    __builtin_prefetch(ref_ptr);

    q11u16 = vsubl_u8(d0u8, d4u8);
    q12u16 = vsubl_u8(d2u8, d6u8);

    d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
    d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
    q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q11u16));
    q9s32 = vmlal_s16(q9s32, d22s16, d22s16);
    q10s32 = vmlal_s16(q10s32, d23s16, d23s16);

    d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
    d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
    q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q12u16));
    q9s32 = vmlal_s16(q9s32, d24s16, d24s16);
    q10s32 = vmlal_s16(q10s32, d25s16, d25s16);
  }

  q10s32 = vaddq_s32(q10s32, q9s32);
  q0s64 = vpaddlq_s32(q8s32);
  q1s64 = vpaddlq_s32(q10s32);

  d0s64 = vadd_s64(vget_low_s64(q0s64), vget_high_s64(q0s64));
  d1s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64));

  q5s64 = vmull_s32(vreinterpret_s32_s64(d0s64), vreinterpret_s32_s64(d0s64));
  vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d1s64), 0);

  d10u32 = vshr_n_u32(vreinterpret_u32_s64(vget_low_s64(q5s64)), 7);
  d0u32 = vsub_u32(vreinterpret_u32_s64(d1s64), d10u32);

  return vget_lane_u32(d0u32, 0);
}

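// Unlike the variance kernels, aom_mse16x16_neon does not remove the mean: it
// simply returns the accumulated sum of squared differences.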
unsigned int aom_mse16x16_neon(const unsigned char *src_ptr, int source_stride,
                               const unsigned char *ref_ptr, int recon_stride,
                               unsigned int *sse) {
  int i;
  int16x4_t d22s16, d23s16, d24s16, d25s16, d26s16, d27s16, d28s16, d29s16;
  int64x1_t d0s64;
  uint8x16_t q0u8, q1u8, q2u8, q3u8;
  int32x4_t q7s32, q8s32, q9s32, q10s32;
  uint16x8_t q11u16, q12u16, q13u16, q14u16;
  int64x2_t q1s64;

  q7s32 = vdupq_n_s32(0);
  q8s32 = vdupq_n_s32(0);
  q9s32 = vdupq_n_s32(0);
  q10s32 = vdupq_n_s32(0);

  for (i = 0; i < 8; i++) {  // mse16x16_neon_loop
    q0u8 = vld1q_u8(src_ptr);
    src_ptr += source_stride;
    q1u8 = vld1q_u8(src_ptr);
    src_ptr += source_stride;
    q2u8 = vld1q_u8(ref_ptr);
    ref_ptr += recon_stride;
    q3u8 = vld1q_u8(ref_ptr);
    ref_ptr += recon_stride;

    q11u16 = vsubl_u8(vget_low_u8(q0u8), vget_low_u8(q2u8));
    q12u16 = vsubl_u8(vget_high_u8(q0u8), vget_high_u8(q2u8));
    q13u16 = vsubl_u8(vget_low_u8(q1u8), vget_low_u8(q3u8));
    q14u16 = vsubl_u8(vget_high_u8(q1u8), vget_high_u8(q3u8));

    d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
    d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
    q7s32 = vmlal_s16(q7s32, d22s16, d22s16);
    q8s32 = vmlal_s16(q8s32, d23s16, d23s16);

    d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
    d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
    q9s32 = vmlal_s16(q9s32, d24s16, d24s16);
    q10s32 = vmlal_s16(q10s32, d25s16, d25s16);

    d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
    d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));
    q7s32 = vmlal_s16(q7s32, d26s16, d26s16);
    q8s32 = vmlal_s16(q8s32, d27s16, d27s16);

    d28s16 = vreinterpret_s16_u16(vget_low_u16(q14u16));
    d29s16 = vreinterpret_s16_u16(vget_high_u16(q14u16));
    q9s32 = vmlal_s16(q9s32, d28s16, d28s16);
    q10s32 = vmlal_s16(q10s32, d29s16, d29s16);
  }

  q7s32 = vaddq_s32(q7s32, q8s32);
  q9s32 = vaddq_s32(q9s32, q10s32);
  q10s32 = vaddq_s32(q7s32, q9s32);

  q1s64 = vpaddlq_s32(q10s32);
  d0s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64));

  vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d0s64), 0);
  return vget_lane_u32(vreinterpret_u32_s64(d0s64), 0);
}

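// Computes the raw SSE of a 4x4 block. Each row load fetches 8 bytes, but
// only the low four lanes of each difference vector are squared and summed.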
unsigned int aom_get4x4sse_cs_neon(const unsigned char *src_ptr,
                                   int source_stride,
                                   const unsigned char *ref_ptr,
                                   int recon_stride) {
  int16x4_t d22s16, d24s16, d26s16, d28s16;
  int64x1_t d0s64;
  uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8;
  int32x4_t q7s32, q8s32, q9s32, q10s32;
  uint16x8_t q11u16, q12u16, q13u16, q14u16;
  int64x2_t q1s64;

  d0u8 = vld1_u8(src_ptr);
  src_ptr += source_stride;
  d4u8 = vld1_u8(ref_ptr);
  ref_ptr += recon_stride;
  d1u8 = vld1_u8(src_ptr);
  src_ptr += source_stride;
  d5u8 = vld1_u8(ref_ptr);
  ref_ptr += recon_stride;
  d2u8 = vld1_u8(src_ptr);
  src_ptr += source_stride;
  d6u8 = vld1_u8(ref_ptr);
  ref_ptr += recon_stride;
  d3u8 = vld1_u8(src_ptr);
  d7u8 = vld1_u8(ref_ptr);

  q11u16 = vsubl_u8(d0u8, d4u8);
  q12u16 = vsubl_u8(d1u8, d5u8);
  q13u16 = vsubl_u8(d2u8, d6u8);
  q14u16 = vsubl_u8(d3u8, d7u8);

  d22s16 = vget_low_s16(vreinterpretq_s16_u16(q11u16));
  d24s16 = vget_low_s16(vreinterpretq_s16_u16(q12u16));
  d26s16 = vget_low_s16(vreinterpretq_s16_u16(q13u16));
  d28s16 = vget_low_s16(vreinterpretq_s16_u16(q14u16));

  q7s32 = vmull_s16(d22s16, d22s16);
  q8s32 = vmull_s16(d24s16, d24s16);
  q9s32 = vmull_s16(d26s16, d26s16);
  q10s32 = vmull_s16(d28s16, d28s16);

  q7s32 = vaddq_s32(q7s32, q8s32);
  q9s32 = vaddq_s32(q9s32, q10s32);
  q9s32 = vaddq_s32(q7s32, q9s32);

  q1s64 = vpaddlq_s32(q9s32);
  d0s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64));

  return vget_lane_u32(vreinterpret_u32_s64(d0s64), 0);
}

// Load 4 sets of 4 bytes when alignment is not guaranteed.
static INLINE uint8x16_t load_unaligned_u8q(const uint8_t *buf, int stride) {
  uint32_t a;
  uint32x4_t a_u32 = vdupq_n_u32(0);
  if (stride == 4) return vld1q_u8(buf);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vld1q_lane_u32(&a, a_u32, 0);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vld1q_lane_u32(&a, a_u32, 1);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vld1q_lane_u32(&a, a_u32, 2);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vld1q_lane_u32(&a, a_u32, 3);
  return vreinterpretq_u8_u32(a_u32);
}

// The variance helper functions below use int16_t lanes for the sum. The
// differences are accumulated across eight lanes and only widened to int32_t
// during the final horizontal add. To avoid overflowing a lane, no more than
// 32767 / 255 ~= 128 values may be accumulated in each lane. For a 32x32
// block this works out to 32 / 8 = 4 values per row * 32 rows = 128, exactly
// at the limit. Asserts have been added to each function to guard against
// exceeding it.

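// Per-lane accumulation counts for the helpers below: variance_neon_w4x4
// accumulates h / 2 values per lane (so h may be up to 256),
// variance_neon_w16 accumulates w / 8 values per lane per row (so w / 8 * h
// must not exceed 128), and variance_neon_w8x2 accumulates one value per lane
// per row (so h must not exceed 128).
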
// Process a block of width 4 four rows at a time.
static void variance_neon_w4x4(const uint8_t *a, int a_stride, const uint8_t *b,
                               int b_stride, int h, uint32_t *sse, int *sum) {
  const int32x4_t zero = vdupq_n_s32(0);
  int16x8_t sum_s16 = vreinterpretq_s16_s32(zero);
  int32x4_t sse_s32 = zero;

  // Since width is only 4, sum_s16 only loads a half row per loop.
  assert(h <= 256);

  int i;
  for (i = 0; i < h; i += 4) {
    const uint8x16_t a_u8 = load_unaligned_u8q(a, a_stride);
    const uint8x16_t b_u8 = load_unaligned_u8q(b, b_stride);
    const int16x8_t diff_lo_s16 =
        vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(a_u8), vget_low_u8(b_u8)));
    const int16x8_t diff_hi_s16 =
        vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(a_u8), vget_high_u8(b_u8)));

    sum_s16 = vaddq_s16(sum_s16, diff_lo_s16);
    sum_s16 = vaddq_s16(sum_s16, diff_hi_s16);

    sse_s32 = vmlal_s16(sse_s32, vget_low_s16(diff_lo_s16),
                        vget_low_s16(diff_lo_s16));
    sse_s32 = vmlal_s16(sse_s32, vget_high_s16(diff_lo_s16),
                        vget_high_s16(diff_lo_s16));

    sse_s32 = vmlal_s16(sse_s32, vget_low_s16(diff_hi_s16),
                        vget_low_s16(diff_hi_s16));
    sse_s32 = vmlal_s16(sse_s32, vget_high_s16(diff_hi_s16),
                        vget_high_s16(diff_hi_s16));

    a += 4 * a_stride;
    b += 4 * b_stride;
  }

#if defined(__aarch64__)
  *sum = vaddvq_s32(vpaddlq_s16(sum_s16));
  *sse = (uint32_t)vaddvq_s32(sse_s32);
#else
  *sum = horizontal_add_s16x8(sum_s16);
  *sse = (uint32_t)horizontal_add_s32x4(sse_s32);
#endif
}

// Process a block of any size where the width is divisible by 16.
static void variance_neon_w16(const uint8_t *a, int a_stride, const uint8_t *b,
                              int b_stride, int w, int h, uint32_t *sse,
                              int *sum) {
  const int32x4_t zero = vdupq_n_s32(0);
  int16x8_t sum_s16 = vreinterpretq_s16_s32(zero);
  int32x4_t sse_s32 = zero;

  // The loop loads 16 values at a time but doubles them up when accumulating
  // into sum_s16.
  assert(w / 8 * h <= 128);

  int i, j;
  for (i = 0; i < h; ++i) {
    for (j = 0; j < w; j += 16) {
      const uint8x16_t a_u8 = vld1q_u8(a + j);
      const uint8x16_t b_u8 = vld1q_u8(b + j);

      const int16x8_t diff_lo_s16 =
          vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(a_u8), vget_low_u8(b_u8)));
      const int16x8_t diff_hi_s16 = vreinterpretq_s16_u16(
          vsubl_u8(vget_high_u8(a_u8), vget_high_u8(b_u8)));

      sum_s16 = vaddq_s16(sum_s16, diff_lo_s16);
      sum_s16 = vaddq_s16(sum_s16, diff_hi_s16);

      sse_s32 = vmlal_s16(sse_s32, vget_low_s16(diff_lo_s16),
                          vget_low_s16(diff_lo_s16));
      sse_s32 = vmlal_s16(sse_s32, vget_high_s16(diff_lo_s16),
                          vget_high_s16(diff_lo_s16));

      sse_s32 = vmlal_s16(sse_s32, vget_low_s16(diff_hi_s16),
                          vget_low_s16(diff_hi_s16));
      sse_s32 = vmlal_s16(sse_s32, vget_high_s16(diff_hi_s16),
                          vget_high_s16(diff_hi_s16));
    }
    a += a_stride;
    b += b_stride;
  }

#if defined(__aarch64__)
  *sum = vaddvq_s32(vpaddlq_s16(sum_s16));
  *sse = (uint32_t)vaddvq_s32(sse_s32);
#else
  *sum = horizontal_add_s16x8(sum_s16);
  *sse = (uint32_t)horizontal_add_s32x4(sse_s32);
#endif
}

// Process a block of width 8 two rows at a time.
static void variance_neon_w8x2(const uint8_t *a, int a_stride, const uint8_t *b,
                               int b_stride, int h, uint32_t *sse, int *sum) {
  const int32x4_t zero = vdupq_n_s32(0);
  int16x8_t sum_s16 = vreinterpretq_s16_s32(zero);
  int32x4_t sse_s32 = zero;

  // Each column has its own accumulator entry in sum_s16.
  assert(h <= 128);

  int i = 0;
  do {
    const uint8x8_t a_0_u8 = vld1_u8(a);
    const uint8x8_t a_1_u8 = vld1_u8(a + a_stride);
    const uint8x8_t b_0_u8 = vld1_u8(b);
    const uint8x8_t b_1_u8 = vld1_u8(b + b_stride);
    const int16x8_t diff_0_s16 =
        vreinterpretq_s16_u16(vsubl_u8(a_0_u8, b_0_u8));
    const int16x8_t diff_1_s16 =
        vreinterpretq_s16_u16(vsubl_u8(a_1_u8, b_1_u8));
    sum_s16 = vaddq_s16(sum_s16, diff_0_s16);
    sum_s16 = vaddq_s16(sum_s16, diff_1_s16);
    sse_s32 =
        vmlal_s16(sse_s32, vget_low_s16(diff_0_s16), vget_low_s16(diff_0_s16));
    sse_s32 =
        vmlal_s16(sse_s32, vget_low_s16(diff_1_s16), vget_low_s16(diff_1_s16));
    sse_s32 = vmlal_s16(sse_s32, vget_high_s16(diff_0_s16),
                        vget_high_s16(diff_0_s16));
    sse_s32 = vmlal_s16(sse_s32, vget_high_s16(diff_1_s16),
                        vget_high_s16(diff_1_s16));
    a += a_stride + a_stride;
    b += b_stride + b_stride;
    i += 2;
  } while (i < h);

#if defined(__aarch64__)
  *sum = vaddvq_s32(vpaddlq_s16(sum_s16));
  *sse = (uint32_t)vaddvq_s32(sse_s32);
#else
  *sum = horizontal_add_s16x8(sum_s16);
  *sse = (uint32_t)horizontal_add_s32x4(sse_s32);
#endif
}

#define varianceNxM(n, m, shift)                                            \
  unsigned int aom_variance##n##x##m##_neon(const uint8_t *a, int a_stride, \
                                            const uint8_t *b, int b_stride, \
                                            unsigned int *sse) {            \
    int sum;                                                                \
    if (n == 4)                                                             \
      variance_neon_w4x4(a, a_stride, b, b_stride, m, sse, &sum);           \
    else if (n == 8)                                                        \
      variance_neon_w8x2(a, a_stride, b, b_stride, m, sse, &sum);           \
    else                                                                    \
      variance_neon_w16(a, a_stride, b, b_stride, n, m, sse, &sum);         \
    if (n * m < 16 * 16)                                                    \
      return *sse - ((sum * sum) >> shift);                                 \
    else                                                                    \
      return *sse - (uint32_t)(((int64_t)sum * sum) >> shift);              \
  }

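// For example, varianceNxM(16, 32, 9) near the end of this file expands to
// aom_variance16x32_neon, which calls variance_neon_w16 with a final shift of
// 9 = log2(16 * 32).

// variance_neon_wide_block handles the 128x64 and 64x128 sizes. The
// accumulation is split into 16 passes so that each pass stays within the
// int16_t lane limit described above; the partial sums are then widened into
// 32-bit (sum) and 64-bit (sse) accumulators.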
static void variance_neon_wide_block(const uint8_t *a, int a_stride,
                                     const uint8_t *b, int b_stride, int w,
                                     int h, uint32_t *sse, int *sum) {
  const int32x4_t zero = vdupq_n_s32(0);
  int32x4_t v_diff = zero;
  int64x2_t v_sse = vreinterpretq_s64_s32(zero);

  int s, i, j;
  for (s = 0; s < 16; s++) {
    int32x4_t sse_s32 = zero;
    int16x8_t sum_s16 = vreinterpretq_s16_s32(zero);
    for (i = (s * h) >> 4; i < (((s + 1) * h) >> 4); ++i) {
      for (j = 0; j < w; j += 16) {
        const uint8x16_t a_u8 = vld1q_u8(a + j);
        const uint8x16_t b_u8 = vld1q_u8(b + j);

        const int16x8_t diff_lo_s16 = vreinterpretq_s16_u16(
            vsubl_u8(vget_low_u8(a_u8), vget_low_u8(b_u8)));
        const int16x8_t diff_hi_s16 = vreinterpretq_s16_u16(
            vsubl_u8(vget_high_u8(a_u8), vget_high_u8(b_u8)));

        sum_s16 = vaddq_s16(sum_s16, diff_lo_s16);
        sum_s16 = vaddq_s16(sum_s16, diff_hi_s16);

        sse_s32 = vmlal_s16(sse_s32, vget_low_s16(diff_lo_s16),
                            vget_low_s16(diff_lo_s16));
        sse_s32 = vmlal_s16(sse_s32, vget_high_s16(diff_lo_s16),
                            vget_high_s16(diff_lo_s16));
        sse_s32 = vmlal_s16(sse_s32, vget_low_s16(diff_hi_s16),
                            vget_low_s16(diff_hi_s16));
        sse_s32 = vmlal_s16(sse_s32, vget_high_s16(diff_hi_s16),
                            vget_high_s16(diff_hi_s16));
      }

      a += a_stride;
      b += b_stride;
    }

    v_diff = vpadalq_s16(v_diff, sum_s16);
    v_sse = vpadalq_s32(v_sse, sse_s32);
  }
#if defined(__aarch64__)
  int diff = vaddvq_s32(v_diff);
  uint32_t sq = (uint32_t)vaddvq_u64(vreinterpretq_u64_s64(v_sse));
#else
  int diff = horizontal_add_s32x4(v_diff);
  uint32_t sq = vget_lane_u32(
      vreinterpret_u32_s64(vadd_s64(vget_low_s64(v_sse), vget_high_s64(v_sse))),
      0);
#endif

  *sum = diff;
  *sse = sq;
}

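// The wide sizes reduce with a true division by W * H rather than a shift; for
// the sizes instantiated below (128x64 and 64x128), W * H is 8192, a power of
// two, so the compiler can still emit a shift.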
#define varianceNxM_wide(W, H)                                              \
  unsigned int aom_variance##W##x##H##_neon(const uint8_t *a, int a_stride, \
                                            const uint8_t *b, int b_stride, \
                                            uint32_t *sse) {                \
    int sum;                                                                \
    variance_neon_wide_block(a, a_stride, b, b_stride, W, H, sse, &sum);    \
    return *sse - (uint32_t)(((int64_t)sum * sum) / (W * H));               \
  }

varianceNxM(4, 4, 4);
varianceNxM(4, 8, 5);
varianceNxM(8, 4, 5);
varianceNxM(16, 32, 9);
varianceNxM(32, 16, 9);
varianceNxM_wide(128, 64);
varianceNxM_wide(64, 128);