/*
 * Copyright (c) 2017, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */
#include <arm_neon.h>

#include "config/aom_config.h"
#include "config/av1_rtcd.h"

#include "av1/common/cfl.h"

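// Load 8 16-bit values from src + offset, subtract sub from each lane and
// store the signed result at dst + offset.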
static INLINE void vldsubstq_s16(int16_t *dst, const uint16_t *src, int offset,
                                 int16x8_t sub) {
  vst1q_s16(dst + offset,
            vsubq_s16(vreinterpretq_s16_u16(vld1q_u16(src + offset)), sub));
}

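// Load two vectors of 8 16-bit values, from buf and buf + offset, and return
// their sum.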
static INLINE uint16x8_t vldaddq_u16(const uint16_t *buf, size_t offset) {
  return vaddq_u16(vld1q_u16(buf), vld1q_u16(buf + offset));
}

// Load half of a vector and duplicate it in the other half.
static INLINE uint8x8_t vldh_dup_u8(const uint8_t *ptr) {
  return vreinterpret_u8_u32(vld1_dup_u32((const uint32_t *)ptr));
}

// Store half of a vector.
static INLINE void vsth_u16(uint16_t *ptr, uint16x4_t val) {
  vst1_lane_u32((uint32_t *)ptr, vreinterpret_u32_u16(val), 0);
}

// Store half of a vector.
static INLINE void vsth_u8(uint8_t *ptr, uint8x8_t val) {
  vst1_lane_u32((uint32_t *)ptr, vreinterpret_u32_u8(val), 0);
}

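// CfL 4:2:0 luma subsampling: each 2x2 block of reconstructed luma pixels is
// summed and shifted left by 1, leaving the averaged luma value in Q3 fixed
// point (sum of 4 pixels << 1 == 8 * average).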
static void cfl_luma_subsampling_420_lbd_neon(const uint8_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + (height >> 1) * CFL_BUF_LINE;
  const int luma_stride = input_stride << 1;
  do {
    if (width == 4) {
      const uint16x4_t top = vpaddl_u8(vldh_dup_u8(input));
      const uint16x4_t sum = vpadal_u8(top, vldh_dup_u8(input + input_stride));
      vsth_u16(pred_buf_q3, vshl_n_u16(sum, 1));
    } else if (width == 8) {
      const uint16x4_t top = vpaddl_u8(vld1_u8(input));
      const uint16x4_t sum = vpadal_u8(top, vld1_u8(input + input_stride));
      vst1_u16(pred_buf_q3, vshl_n_u16(sum, 1));
    } else if (width == 16) {
      const uint16x8_t top = vpaddlq_u8(vld1q_u8(input));
      const uint16x8_t sum = vpadalq_u8(top, vld1q_u8(input + input_stride));
      vst1q_u16(pred_buf_q3, vshlq_n_u16(sum, 1));
    } else {
      const uint8x8x4_t top = vld4_u8(input);
      const uint8x8x4_t bot = vld4_u8(input + input_stride);
      // equivalent to a vpaddlq_u8 (because vld4q interleaves)
      const uint16x8_t top_0 = vaddl_u8(top.val[0], top.val[1]);
      // equivalent to a vpaddlq_u8 (because vld4q interleaves)
      const uint16x8_t bot_0 = vaddl_u8(bot.val[0], bot.val[1]);
      // equivalent to a vpaddlq_u8 (because vld4q interleaves)
      const uint16x8_t top_1 = vaddl_u8(top.val[2], top.val[3]);
      // equivalent to a vpaddlq_u8 (because vld4q interleaves)
      const uint16x8_t bot_1 = vaddl_u8(bot.val[2], bot.val[3]);
      uint16x8x2_t sum;
      sum.val[0] = vshlq_n_u16(vaddq_u16(top_0, bot_0), 1);
      sum.val[1] = vshlq_n_u16(vaddq_u16(top_1, bot_1), 1);
      vst2q_u16(pred_buf_q3, sum);
    }
    input += luma_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}

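// CfL 4:2:2 luma subsampling: each horizontal pair of luma pixels is summed
// and shifted left by 2, again leaving the averaged value in Q3 fixed point.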
static void cfl_luma_subsampling_422_lbd_neon(const uint8_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE;
  do {
    if (width == 4) {
      const uint16x4_t top = vpaddl_u8(vldh_dup_u8(input));
      vsth_u16(pred_buf_q3, vshl_n_u16(top, 2));
    } else if (width == 8) {
      const uint16x4_t top = vpaddl_u8(vld1_u8(input));
      vst1_u16(pred_buf_q3, vshl_n_u16(top, 2));
    } else if (width == 16) {
      const uint16x8_t top = vpaddlq_u8(vld1q_u8(input));
      vst1q_u16(pred_buf_q3, vshlq_n_u16(top, 2));
    } else {
      const uint8x8x4_t top = vld4_u8(input);
      uint16x8x2_t sum;
      // vaddl_u8 is equivalent to a vpaddlq_u8 (because vld4q interleaves)
      sum.val[0] = vshlq_n_u16(vaddl_u8(top.val[0], top.val[1]), 2);
      sum.val[1] = vshlq_n_u16(vaddl_u8(top.val[2], top.val[3]), 2);
      vst2q_u16(pred_buf_q3, sum);
    }
    input += input_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}

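// CfL 4:4:4 luma: no subsampling, each luma pixel is simply shifted left by 3
// to convert it to Q3 fixed point.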
static void cfl_luma_subsampling_444_lbd_neon(const uint8_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE;
  do {
    if (width == 4) {
      const uint16x8_t top = vshll_n_u8(vldh_dup_u8(input), 3);
      vst1_u16(pred_buf_q3, vget_low_u16(top));
    } else if (width == 8) {
      const uint16x8_t top = vshll_n_u8(vld1_u8(input), 3);
      vst1q_u16(pred_buf_q3, top);
    } else {
      const uint8x16_t top = vld1q_u8(input);
      vst1q_u16(pred_buf_q3, vshll_n_u8(vget_low_u8(top), 3));
      vst1q_u16(pred_buf_q3 + 8, vshll_n_u8(vget_high_u8(top), 3));
      if (width == 32) {
        const uint8x16_t next_top = vld1q_u8(input + 16);
        vst1q_u16(pred_buf_q3 + 16, vshll_n_u8(vget_low_u8(next_top), 3));
        vst1q_u16(pred_buf_q3 + 24, vshll_n_u8(vget_high_u8(next_top), 3));
      }
    }
    input += input_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}

#if CONFIG_AV1_HIGHBITDEPTH
#if !AOM_ARCH_AARCH64
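// vpaddq_u16 is only available as an intrinsic on AArch64; emulate it with
// 64-bit pairwise adds on 32-bit Arm.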
uint16x8_t vpaddq_u16(uint16x8_t a, uint16x8_t b) {
  return vcombine_u16(vpadd_u16(vget_low_u16(a), vget_high_u16(a)),
                      vpadd_u16(vget_low_u16(b), vget_high_u16(b)));
}
#endif

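// High bit depth variant of the 4:2:0 subsampler above, operating on 16-bit
// luma pixels.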
static void cfl_luma_subsampling_420_hbd_neon(const uint16_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + (height >> 1) * CFL_BUF_LINE;
  const int luma_stride = input_stride << 1;
  do {
    if (width == 4) {
      const uint16x4_t top = vld1_u16(input);
      const uint16x4_t bot = vld1_u16(input + input_stride);
      const uint16x4_t sum = vadd_u16(top, bot);
      const uint16x4_t hsum = vpadd_u16(sum, sum);
      vsth_u16(pred_buf_q3, vshl_n_u16(hsum, 1));
    } else if (width < 32) {
      const uint16x8_t top = vld1q_u16(input);
      const uint16x8_t bot = vld1q_u16(input + input_stride);
      const uint16x8_t sum = vaddq_u16(top, bot);
      if (width == 8) {
        const uint16x4_t hsum = vget_low_u16(vpaddq_u16(sum, sum));
        vst1_u16(pred_buf_q3, vshl_n_u16(hsum, 1));
      } else {
        const uint16x8_t top_1 = vld1q_u16(input + 8);
        const uint16x8_t bot_1 = vld1q_u16(input + 8 + input_stride);
        const uint16x8_t sum_1 = vaddq_u16(top_1, bot_1);
        const uint16x8_t hsum = vpaddq_u16(sum, sum_1);
        vst1q_u16(pred_buf_q3, vshlq_n_u16(hsum, 1));
      }
    } else {
      const uint16x8x4_t top = vld4q_u16(input);
      const uint16x8x4_t bot = vld4q_u16(input + input_stride);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t top_0 = vaddq_u16(top.val[0], top.val[1]);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t bot_0 = vaddq_u16(bot.val[0], bot.val[1]);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t top_1 = vaddq_u16(top.val[2], top.val[3]);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t bot_1 = vaddq_u16(bot.val[2], bot.val[3]);
      uint16x8x2_t sum;
      sum.val[0] = vshlq_n_u16(vaddq_u16(top_0, bot_0), 1);
      sum.val[1] = vshlq_n_u16(vaddq_u16(top_1, bot_1), 1);
      vst2q_u16(pred_buf_q3, sum);
    }
    input += luma_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}

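// High bit depth variant of the 4:2:2 subsampler above.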
static void cfl_luma_subsampling_422_hbd_neon(const uint16_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE;
  do {
    if (width == 4) {
      const uint16x4_t top = vld1_u16(input);
      const uint16x4_t hsum = vpadd_u16(top, top);
      vsth_u16(pred_buf_q3, vshl_n_u16(hsum, 2));
    } else if (width == 8) {
      const uint16x4x2_t top = vld2_u16(input);
      // equivalent to a vpadd_u16 (because vld2 interleaves)
      const uint16x4_t hsum = vadd_u16(top.val[0], top.val[1]);
      vst1_u16(pred_buf_q3, vshl_n_u16(hsum, 2));
    } else if (width == 16) {
      const uint16x8x2_t top = vld2q_u16(input);
      // equivalent to a vpaddq_u16 (because vld2q interleaves)
      const uint16x8_t hsum = vaddq_u16(top.val[0], top.val[1]);
      vst1q_u16(pred_buf_q3, vshlq_n_u16(hsum, 2));
    } else {
      const uint16x8x4_t top = vld4q_u16(input);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t hsum_0 = vaddq_u16(top.val[0], top.val[1]);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t hsum_1 = vaddq_u16(top.val[2], top.val[3]);
      uint16x8x2_t result = { { vshlq_n_u16(hsum_0, 2),
                                vshlq_n_u16(hsum_1, 2) } };
      vst2q_u16(pred_buf_q3, result);
    }
    input += input_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}

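// High bit depth variant of the 4:4:4 path above.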
static void cfl_luma_subsampling_444_hbd_neon(const uint16_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE;
  do {
    if (width == 4) {
      const uint16x4_t top = vld1_u16(input);
      vst1_u16(pred_buf_q3, vshl_n_u16(top, 3));
    } else if (width == 8) {
      const uint16x8_t top = vld1q_u16(input);
      vst1q_u16(pred_buf_q3, vshlq_n_u16(top, 3));
    } else if (width == 16) {
      uint16x8x2_t top = vld2q_u16(input);
      top.val[0] = vshlq_n_u16(top.val[0], 3);
      top.val[1] = vshlq_n_u16(top.val[1], 3);
      vst2q_u16(pred_buf_q3, top);
    } else {
      uint16x8x4_t top = vld4q_u16(input);
      top.val[0] = vshlq_n_u16(top.val[0], 3);
      top.val[1] = vshlq_n_u16(top.val[1], 3);
      top.val[2] = vshlq_n_u16(top.val[2], 3);
      top.val[3] = vshlq_n_u16(top.val[3], 3);
      vst4q_u16(pred_buf_q3, top);
    }
    input += input_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}
#endif  // CONFIG_AV1_HIGHBITDEPTH

CFL_GET_SUBSAMPLE_FUNCTION(neon)

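// Subtract the block average from each Q3 luma value in src and store the
// signed result in dst. round_offset is unused because vqrshrn performs the
// rounding when the average is computed.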
static INLINE void subtract_average_neon(const uint16_t *src, int16_t *dst,
                                         int width, int height,
                                         int round_offset,
                                         const int num_pel_log2) {
  const uint16_t *const end = src + height * CFL_BUF_LINE;

  // Round offset is not needed, because NEON will handle the rounding.
  (void)round_offset;

  // To optimize the use of the CPU pipeline, we process 4 rows per iteration.
  const int step = 4 * CFL_BUF_LINE;

  // At this stage, the prediction buffer contains scaled reconstructed luma
  // pixels, which are positive integers and only require 15 bits. By using
  // unsigned integers for the sum, we can do one addition operation inside 16
  // bits (8 lanes) before having to convert to 32 bits (4 lanes).
  const uint16_t *sum_buf = src;
  uint32x4_t sum_32x4 = vdupq_n_u32(0);
  do {
    // For all widths, we load, add and combine the data so it fits in 4 lanes.
    if (width == 4) {
      const uint16x4_t a0 =
          vadd_u16(vld1_u16(sum_buf), vld1_u16(sum_buf + CFL_BUF_LINE));
      const uint16x4_t a1 = vadd_u16(vld1_u16(sum_buf + 2 * CFL_BUF_LINE),
                                     vld1_u16(sum_buf + 3 * CFL_BUF_LINE));
      sum_32x4 = vaddq_u32(sum_32x4, vaddl_u16(a0, a1));
    } else if (width == 8) {
      const uint16x8_t a0 = vldaddq_u16(sum_buf, CFL_BUF_LINE);
      const uint16x8_t a1 =
          vldaddq_u16(sum_buf + 2 * CFL_BUF_LINE, CFL_BUF_LINE);
      sum_32x4 = vpadalq_u16(sum_32x4, a0);
      sum_32x4 = vpadalq_u16(sum_32x4, a1);
    } else {
      const uint16x8_t row0 = vldaddq_u16(sum_buf, 8);
      const uint16x8_t row1 = vldaddq_u16(sum_buf + CFL_BUF_LINE, 8);
      const uint16x8_t row2 = vldaddq_u16(sum_buf + 2 * CFL_BUF_LINE, 8);
      const uint16x8_t row3 = vldaddq_u16(sum_buf + 3 * CFL_BUF_LINE, 8);
      sum_32x4 = vpadalq_u16(sum_32x4, row0);
      sum_32x4 = vpadalq_u16(sum_32x4, row1);
      sum_32x4 = vpadalq_u16(sum_32x4, row2);
      sum_32x4 = vpadalq_u16(sum_32x4, row3);

      if (width == 32) {
        const uint16x8_t row0_1 = vldaddq_u16(sum_buf + 16, 8);
        const uint16x8_t row1_1 = vldaddq_u16(sum_buf + CFL_BUF_LINE + 16, 8);
        const uint16x8_t row2_1 =
            vldaddq_u16(sum_buf + 2 * CFL_BUF_LINE + 16, 8);
        const uint16x8_t row3_1 =
            vldaddq_u16(sum_buf + 3 * CFL_BUF_LINE + 16, 8);

        sum_32x4 = vpadalq_u16(sum_32x4, row0_1);
        sum_32x4 = vpadalq_u16(sum_32x4, row1_1);
        sum_32x4 = vpadalq_u16(sum_32x4, row2_1);
        sum_32x4 = vpadalq_u16(sum_32x4, row3_1);
      }
    }
    sum_buf += step;
  } while (sum_buf < end);

  // Permute and add in such a way that each lane contains the block sum.
  // [A+C+B+D, B+D+A+C, C+A+D+B, D+B+C+A]
#if AOM_ARCH_AARCH64
  sum_32x4 = vpaddq_u32(sum_32x4, sum_32x4);
  sum_32x4 = vpaddq_u32(sum_32x4, sum_32x4);
#else
  uint32x4_t flip =
      vcombine_u32(vget_high_u32(sum_32x4), vget_low_u32(sum_32x4));
  sum_32x4 = vaddq_u32(sum_32x4, flip);
  sum_32x4 = vaddq_u32(sum_32x4, vrev64q_u32(sum_32x4));
#endif

  // Computing the average could be done using scalars, but getting off the
  // NEON engine introduces latency, so we use vqrshrn.
  int16x4_t avg_16x4;
  // Constant propagation makes for some ugly code.
  switch (num_pel_log2) {
    case 4: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 4)); break;
    case 5: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 5)); break;
    case 6: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 6)); break;
    case 7: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 7)); break;
    case 8: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 8)); break;
    case 9: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 9)); break;
    case 10:
      avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 10));
      break;
    default: assert(0);
  }

  if (width == 4) {
    do {
      vst1_s16(dst, vsub_s16(vreinterpret_s16_u16(vld1_u16(src)), avg_16x4));
      src += CFL_BUF_LINE;
      dst += CFL_BUF_LINE;
    } while (src < end);
  } else {
    const int16x8_t avg_16x8 = vcombine_s16(avg_16x4, avg_16x4);
    do {
      vldsubstq_s16(dst, src, 0, avg_16x8);
      vldsubstq_s16(dst, src, CFL_BUF_LINE, avg_16x8);
      vldsubstq_s16(dst, src, 2 * CFL_BUF_LINE, avg_16x8);
      vldsubstq_s16(dst, src, 3 * CFL_BUF_LINE, avg_16x8);

      if (width > 8) {
        vldsubstq_s16(dst, src, 8, avg_16x8);
        vldsubstq_s16(dst, src, 8 + CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 8 + 2 * CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 8 + 3 * CFL_BUF_LINE, avg_16x8);
      }
      if (width == 32) {
        vldsubstq_s16(dst, src, 16, avg_16x8);
        vldsubstq_s16(dst, src, 16 + CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 16 + 2 * CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 16 + 3 * CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 24, avg_16x8);
        vldsubstq_s16(dst, src, 24 + CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 24 + 2 * CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 24 + 3 * CFL_BUF_LINE, avg_16x8);
      }
      src += step;
      dst += step;
    } while (src < end);
  }
}

CFL_SUB_AVG_FN(neon)

// Negate 16-bit integers in a when the corresponding signed 16-bit integer
// in b is negative.
// Notes:
//   * Negating INT16_MIN results in INT16_MIN. However, this cannot occur in
//     practice, as scaled_luma is the multiplication of two absolute values.
//   * In the Intel equivalent, elements in a are zeroed out when the
//     corresponding elements in b are zero. Because vsign is used twice in a
//     row, with b in the first call becoming a in the second call, there's no
//     impact from not zeroing out.
static int16x4_t vsign_s16(int16x4_t a, int16x4_t b) {
  const int16x4_t mask = vshr_n_s16(b, 15);
  return veor_s16(vadd_s16(a, mask), mask);
}

// Negate 16-bit integers in a when the corresponding signed 16-bit integer
// in b is negative.
// Notes:
//   * Negating INT16_MIN results in INT16_MIN. However, this cannot occur in
//     practice, as scaled_luma is the multiplication of two absolute values.
//   * In the Intel equivalent, elements in a are zeroed out when the
//     corresponding elements in b are zero. Because vsignq is used twice in a
//     row, with b in the first call becoming a in the second call, there's no
//     impact from not zeroing out.
static int16x8_t vsignq_s16(int16x8_t a, int16x8_t b) {
  const int16x8_t mask = vshrq_n_s16(b, 15);
  return veorq_s16(vaddq_s16(a, mask), mask);
}

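// Scale the Q3 AC luma values by alpha_q3 and add the DC prediction. With
// abs_alpha_q12 == abs(alpha_q3) << 9, vqrdmulh effectively computes
// (|ac_q3| * |alpha_q3| + 32) >> 6, i.e. the rounded product of the two Q3
// values, and vsign_s16 applies the combined sign of alpha_q3 and ac_q3.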
static INLINE int16x4_t predict_w4(const int16_t *pred_buf_q3,
                                   int16x4_t alpha_sign, int abs_alpha_q12,
                                   int16x4_t dc) {
  const int16x4_t ac_q3 = vld1_s16(pred_buf_q3);
  const int16x4_t ac_sign = veor_s16(alpha_sign, ac_q3);
  int16x4_t scaled_luma = vqrdmulh_n_s16(vabs_s16(ac_q3), abs_alpha_q12);
  return vadd_s16(vsign_s16(scaled_luma, ac_sign), dc);
}

static INLINE int16x8_t predict_w8(const int16_t *pred_buf_q3,
                                   int16x8_t alpha_sign, int abs_alpha_q12,
                                   int16x8_t dc) {
  const int16x8_t ac_q3 = vld1q_s16(pred_buf_q3);
  const int16x8_t ac_sign = veorq_s16(alpha_sign, ac_q3);
  int16x8_t scaled_luma = vqrdmulhq_n_s16(vabsq_s16(ac_q3), abs_alpha_q12);
  return vaddq_s16(vsignq_s16(scaled_luma, ac_sign), dc);
}

static INLINE int16x8x2_t predict_w16(const int16_t *pred_buf_q3,
                                      int16x8_t alpha_sign, int abs_alpha_q12,
                                      int16x8_t dc) {
  // vld2q_s16 interleaves, which is not useful for prediction. vld1q_s16_x2
  // does not interleave, but is not currently available in the compiler used
  // by the AOM build system.
  const int16x8x2_t ac_q3 = vld2q_s16(pred_buf_q3);
  const int16x8_t ac_sign_0 = veorq_s16(alpha_sign, ac_q3.val[0]);
  const int16x8_t ac_sign_1 = veorq_s16(alpha_sign, ac_q3.val[1]);
  const int16x8_t scaled_luma_0 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[0]), abs_alpha_q12);
  const int16x8_t scaled_luma_1 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[1]), abs_alpha_q12);
  int16x8x2_t result;
  result.val[0] = vaddq_s16(vsignq_s16(scaled_luma_0, ac_sign_0), dc);
  result.val[1] = vaddq_s16(vsignq_s16(scaled_luma_1, ac_sign_1), dc);
  return result;
}

static INLINE int16x8x4_t predict_w32(const int16_t *pred_buf_q3,
                                      int16x8_t alpha_sign, int abs_alpha_q12,
                                      int16x8_t dc) {
  // vld4q_s16 interleaves, which is not useful for prediction. vld1q_s16_x4
  // does not interleave, but is not currently available in the compiler used
  // by the AOM build system.
  const int16x8x4_t ac_q3 = vld4q_s16(pred_buf_q3);
  const int16x8_t ac_sign_0 = veorq_s16(alpha_sign, ac_q3.val[0]);
  const int16x8_t ac_sign_1 = veorq_s16(alpha_sign, ac_q3.val[1]);
  const int16x8_t ac_sign_2 = veorq_s16(alpha_sign, ac_q3.val[2]);
  const int16x8_t ac_sign_3 = veorq_s16(alpha_sign, ac_q3.val[3]);
  const int16x8_t scaled_luma_0 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[0]), abs_alpha_q12);
  const int16x8_t scaled_luma_1 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[1]), abs_alpha_q12);
  const int16x8_t scaled_luma_2 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[2]), abs_alpha_q12);
  const int16x8_t scaled_luma_3 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[3]), abs_alpha_q12);
  int16x8x4_t result;
  result.val[0] = vaddq_s16(vsignq_s16(scaled_luma_0, ac_sign_0), dc);
  result.val[1] = vaddq_s16(vsignq_s16(scaled_luma_1, ac_sign_1), dc);
  result.val[2] = vaddq_s16(vsignq_s16(scaled_luma_2, ac_sign_2), dc);
  result.val[3] = vaddq_s16(vsignq_s16(scaled_luma_3, ac_sign_3), dc);
  return result;
}

475 uint8_t *dst, int dst_stride,
476 int alpha_q3, int width, int height) {
477 const int16_t abs_alpha_q12 = abs(alpha_q3) << 9;
478 const int16_t *const end = pred_buf_q3 + height * CFL_BUF_LINE;
479 if (width == 4) {
480 const int16x4_t alpha_sign = vdup_n_s16(alpha_q3);
481 const int16x4_t dc = vdup_n_s16(*dst);
482 do {
Luc Trudeau614029c2018-05-02 21:16:23 -0400483 const int16x4_t pred =
Luc Trudeau5905ac52018-03-08 13:22:23 -0500484 predict_w4(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
Luc Trudeau614029c2018-05-02 21:16:23 -0400485 vsth_u8(dst, vqmovun_s16(vcombine_s16(pred, pred)));
Luc Trudeau5905ac52018-03-08 13:22:23 -0500486 dst += dst_stride;
487 } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
488 } else {
489 const int16x8_t alpha_sign = vdupq_n_s16(alpha_q3);
490 const int16x8_t dc = vdupq_n_s16(*dst);
491 do {
Luc Trudeau5905ac52018-03-08 13:22:23 -0500492 if (width == 8) {
Luc Trudeau614029c2018-05-02 21:16:23 -0400493 vst1_u8(dst, vqmovun_s16(predict_w8(pred_buf_q3, alpha_sign,
494 abs_alpha_q12, dc)));
495 } else if (width == 16) {
496 const int16x8x2_t pred =
497 predict_w16(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
498 const uint8x8x2_t predun = { { vqmovun_s16(pred.val[0]),
499 vqmovun_s16(pred.val[1]) } };
500 vst2_u8(dst, predun);
Luc Trudeau5905ac52018-03-08 13:22:23 -0500501 } else {
Luc Trudeau614029c2018-05-02 21:16:23 -0400502 const int16x8x4_t pred =
503 predict_w32(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
504 const uint8x8x4_t predun = {
505 { vqmovun_s16(pred.val[0]), vqmovun_s16(pred.val[1]),
506 vqmovun_s16(pred.val[2]), vqmovun_s16(pred.val[3]) }
507 };
508 vst4_u8(dst, predun);
Luc Trudeau5905ac52018-03-08 13:22:23 -0500509 }
510 dst += dst_stride;
511 } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
512 }
513}
514
515CFL_PREDICT_FN(neon, lbd)
516
Jerome Jiang7683ed52019-09-17 15:10:25 -0700517#if CONFIG_AV1_HIGHBITDEPTH
Luc Trudeau5905ac52018-03-08 13:22:23 -0500518static INLINE uint16x4_t clamp_s16(int16x4_t a, int16x4_t max) {
519 return vreinterpret_u16_s16(vmax_s16(vmin_s16(a, max), vdup_n_s16(0)));
520}
521
522static INLINE uint16x8_t clampq_s16(int16x8_t a, int16x8_t max) {
523 return vreinterpretq_u16_s16(vmaxq_s16(vminq_s16(a, max), vdupq_n_s16(0)));
524}
525
Luc Trudeaubd55f9a2018-05-01 23:29:09 -0400526static INLINE uint16x8x2_t clamp2q_s16(int16x8x2_t a, int16x8_t max) {
527 uint16x8x2_t result;
528 result.val[0] = vreinterpretq_u16_s16(
529 vmaxq_s16(vminq_s16(a.val[0], max), vdupq_n_s16(0)));
530 result.val[1] = vreinterpretq_u16_s16(
531 vmaxq_s16(vminq_s16(a.val[1], max), vdupq_n_s16(0)));
532 return result;
533}
534
535static INLINE uint16x8x4_t clamp4q_s16(int16x8x4_t a, int16x8_t max) {
536 uint16x8x4_t result;
537 result.val[0] = vreinterpretq_u16_s16(
538 vmaxq_s16(vminq_s16(a.val[0], max), vdupq_n_s16(0)));
539 result.val[1] = vreinterpretq_u16_s16(
540 vmaxq_s16(vminq_s16(a.val[1], max), vdupq_n_s16(0)));
541 result.val[2] = vreinterpretq_u16_s16(
542 vmaxq_s16(vminq_s16(a.val[2], max), vdupq_n_s16(0)));
543 result.val[3] = vreinterpretq_u16_s16(
544 vmaxq_s16(vminq_s16(a.val[3], max), vdupq_n_s16(0)));
545 return result;
546}
547
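// CfL prediction for high bit depth output: same structure as the low bit
// depth version above, but clamping to [0, (1 << bd) - 1] instead of
// saturating to 8 bits.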
static INLINE void cfl_predict_hbd_neon(const int16_t *pred_buf_q3,
                                        uint16_t *dst, int dst_stride,
                                        int alpha_q3, int bd, int width,
                                        int height) {
  const int max = (1 << bd) - 1;
  const int16_t abs_alpha_q12 = abs(alpha_q3) << 9;
  const int16_t *const end = pred_buf_q3 + height * CFL_BUF_LINE;
  if (width == 4) {
    const int16x4_t alpha_sign = vdup_n_s16(alpha_q3);
    const int16x4_t dc = vdup_n_s16(*dst);
    const int16x4_t max_16x4 = vdup_n_s16(max);
    do {
      const int16x4_t scaled_luma =
          predict_w4(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
      vst1_u16(dst, clamp_s16(scaled_luma, max_16x4));
      dst += dst_stride;
    } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
  } else {
    const int16x8_t alpha_sign = vdupq_n_s16(alpha_q3);
    const int16x8_t dc = vdupq_n_s16(*dst);
    const int16x8_t max_16x8 = vdupq_n_s16(max);
    do {
      if (width == 8) {
        const int16x8_t pred =
            predict_w8(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
        vst1q_u16(dst, clampq_s16(pred, max_16x8));
      } else if (width == 16) {
        const int16x8x2_t pred =
            predict_w16(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
        vst2q_u16(dst, clamp2q_s16(pred, max_16x8));
      } else {
        const int16x8x4_t pred =
            predict_w32(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
        vst4q_u16(dst, clamp4q_s16(pred, max_16x8));
      }
      dst += dst_stride;
    } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
  }
}

CFL_PREDICT_FN(neon, hbd)
#endif  // CONFIG_AV1_HIGHBITDEPTH