/*
 * Copyright (c) 2017, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */
#include <arm_neon.h>
#include <assert.h>

#include "config/av1_rtcd.h"

#include "av1/common/cfl.h"

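// Load eight 16-bit values from src + offset, subtract the (broadcast)
// average in sub, and store the signed result to dst + offset.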
static INLINE void vldsubstq_s16(int16_t *dst, const uint16_t *src, int offset,
                                 int16x8_t sub) {
  vst1q_s16(dst + offset,
            vsubq_s16(vreinterpretq_s16_u16(vld1q_u16(src + offset)), sub));
}

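// Load two vectors of eight 16-bit values, offset elements apart, and return
// their sum.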
static INLINE uint16x8_t vldaddq_u16(const uint16_t *buf, size_t offset) {
  return vaddq_u16(vld1q_u16(buf), vld1q_u16(buf + offset));
}

// Load half of a vector and duplicate it in the other half.
static INLINE uint8x8_t vldh_dup_u8(const uint8_t *ptr) {
  return vreinterpret_u8_u32(vld1_dup_u32((const uint32_t *)ptr));
}

// Store half of a vector.
static INLINE void vsth_u16(uint16_t *ptr, uint16x4_t val) {
  *((uint32_t *)ptr) = vreinterpret_u32_u16(val)[0];
}

// Store half of a vector.
static INLINE void vsth_u8(uint8_t *ptr, uint8x8_t val) {
  *((uint32_t *)ptr) = vreinterpret_u32_u8(val)[0];
}

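// 4:2:0 subsampling: each chroma sample covers a 2x2 luma block, so the
// four-pixel sum shifted left by 1 is the block average in Q3
// (sum << 1 == avg << 3).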
static void cfl_luma_subsampling_420_lbd_neon(const uint8_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + (height >> 1) * CFL_BUF_LINE;
  const int luma_stride = input_stride << 1;
  do {
    if (width == 4) {
      const uint16x4_t top = vpaddl_u8(vldh_dup_u8(input));
      const uint16x4_t sum = vpadal_u8(top, vldh_dup_u8(input + input_stride));
      vsth_u16(pred_buf_q3, vshl_n_u16(sum, 1));
    } else if (width == 8) {
      const uint16x4_t top = vpaddl_u8(vld1_u8(input));
      const uint16x4_t sum = vpadal_u8(top, vld1_u8(input + input_stride));
      vst1_u16(pred_buf_q3, vshl_n_u16(sum, 1));
    } else if (width == 16) {
      const uint16x8_t top = vpaddlq_u8(vld1q_u8(input));
      const uint16x8_t sum = vpadalq_u8(top, vld1q_u8(input + input_stride));
      vst1q_u16(pred_buf_q3, vshlq_n_u16(sum, 1));
    } else {
      const uint8x8x4_t top = vld4_u8(input);
      const uint8x8x4_t bot = vld4_u8(input + input_stride);
      // equivalent to a vpaddlq_u8 (because vld4 interleaves)
      const uint16x8_t top_0 = vaddl_u8(top.val[0], top.val[1]);
      // equivalent to a vpaddlq_u8 (because vld4 interleaves)
      const uint16x8_t bot_0 = vaddl_u8(bot.val[0], bot.val[1]);
      // equivalent to a vpaddlq_u8 (because vld4 interleaves)
      const uint16x8_t top_1 = vaddl_u8(top.val[2], top.val[3]);
      // equivalent to a vpaddlq_u8 (because vld4 interleaves)
      const uint16x8_t bot_1 = vaddl_u8(bot.val[2], bot.val[3]);
      uint16x8x2_t sum;
      sum.val[0] = vshlq_n_u16(vaddq_u16(top_0, bot_0), 1);
      sum.val[1] = vshlq_n_u16(vaddq_u16(top_1, bot_1), 1);
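      // vst2q_u16 re-interleaves the two pair-sum vectors, restoring raster
      // order in the prediction buffer.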
      vst2q_u16(pred_buf_q3, sum);
    }
    input += luma_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}

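// 4:2:2 subsampling: each chroma sample covers a 2x1 luma block, so the
// two-pixel sum shifted left by 2 is the block average in Q3
// (sum << 2 == avg << 3).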
static void cfl_luma_subsampling_422_lbd_neon(const uint8_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE;
  do {
    if (width == 4) {
      const uint16x4_t top = vpaddl_u8(vldh_dup_u8(input));
      vsth_u16(pred_buf_q3, vshl_n_u16(top, 2));
    } else if (width == 8) {
      const uint16x4_t top = vpaddl_u8(vld1_u8(input));
      vst1_u16(pred_buf_q3, vshl_n_u16(top, 2));
    } else if (width == 16) {
      const uint16x8_t top = vpaddlq_u8(vld1q_u8(input));
      vst1q_u16(pred_buf_q3, vshlq_n_u16(top, 2));
    } else {
      const uint8x8x4_t top = vld4_u8(input);
      uint16x8x2_t sum;
      // vaddl_u8 is equivalent to a vpaddlq_u8 (because vld4 interleaves)
      sum.val[0] = vshlq_n_u16(vaddl_u8(top.val[0], top.val[1]), 2);
      sum.val[1] = vshlq_n_u16(vaddl_u8(top.val[2], top.val[3]), 2);
      vst2q_u16(pred_buf_q3, sum);
    }
    input += input_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}

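// 4:4:4: chroma and luma are co-located, so each pixel is simply scaled to
// Q3 (pixel << 3).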
static void cfl_luma_subsampling_444_lbd_neon(const uint8_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE;
  do {
    if (width == 4) {
      const uint16x8_t top = vshll_n_u8(vldh_dup_u8(input), 3);
      vst1_u16(pred_buf_q3, vget_low_u16(top));
    } else if (width == 8) {
      const uint16x8_t top = vshll_n_u8(vld1_u8(input), 3);
      vst1q_u16(pred_buf_q3, top);
    } else {
      const uint8x16_t top = vld1q_u8(input);
      vst1q_u16(pred_buf_q3, vshll_n_u8(vget_low_u8(top), 3));
      vst1q_u16(pred_buf_q3 + 8, vshll_n_u8(vget_high_u8(top), 3));
      if (width == 32) {
        const uint8x16_t next_top = vld1q_u8(input + 16);
        vst1q_u16(pred_buf_q3 + 16, vshll_n_u8(vget_low_u8(next_top), 3));
        vst1q_u16(pred_buf_q3 + 24, vshll_n_u8(vget_high_u8(next_top), 3));
      }
    }
    input += input_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}

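// vpaddq_u16 is only available on AArch64, so provide an ARMv7 fallback built
// from the 64-bit pairwise adds.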
#if __ARM_ARCH <= 7
static INLINE uint16x8_t vpaddq_u16(uint16x8_t a, uint16x8_t b) {
  return vcombine_u16(vpadd_u16(vget_low_u16(a), vget_high_u16(a)),
                      vpadd_u16(vget_low_u16(b), vget_high_u16(b)));
}
#endif

static void cfl_luma_subsampling_420_hbd_neon(const uint16_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + (height >> 1) * CFL_BUF_LINE;
  const int luma_stride = input_stride << 1;
  do {
    if (width == 4) {
      const uint16x4_t top = vld1_u16(input);
      const uint16x4_t bot = vld1_u16(input + input_stride);
      const uint16x4_t sum = vadd_u16(top, bot);
      const uint16x4_t hsum = vpadd_u16(sum, sum);
      vsth_u16(pred_buf_q3, vshl_n_u16(hsum, 1));
    } else if (width < 32) {
      const uint16x8_t top = vld1q_u16(input);
      const uint16x8_t bot = vld1q_u16(input + input_stride);
      const uint16x8_t sum = vaddq_u16(top, bot);
      if (width == 8) {
        const uint16x4_t hsum = vget_low_u16(vpaddq_u16(sum, sum));
        vst1_u16(pred_buf_q3, vshl_n_u16(hsum, 1));
      } else {
        const uint16x8_t top_1 = vld1q_u16(input + 8);
        const uint16x8_t bot_1 = vld1q_u16(input + 8 + input_stride);
        const uint16x8_t sum_1 = vaddq_u16(top_1, bot_1);
        const uint16x8_t hsum = vpaddq_u16(sum, sum_1);
        vst1q_u16(pred_buf_q3, vshlq_n_u16(hsum, 1));
      }
    } else {
      const uint16x8x4_t top = vld4q_u16(input);
      const uint16x8x4_t bot = vld4q_u16(input + input_stride);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t top_0 = vaddq_u16(top.val[0], top.val[1]);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t bot_0 = vaddq_u16(bot.val[0], bot.val[1]);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t top_1 = vaddq_u16(top.val[2], top.val[3]);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t bot_1 = vaddq_u16(bot.val[2], bot.val[3]);
      uint16x8x2_t sum;
      sum.val[0] = vshlq_n_u16(vaddq_u16(top_0, bot_0), 1);
      sum.val[1] = vshlq_n_u16(vaddq_u16(top_1, bot_1), 1);
      vst2q_u16(pred_buf_q3, sum);
    }
    input += luma_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}

static void cfl_luma_subsampling_422_hbd_neon(const uint16_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE;
  do {
    if (width == 4) {
      const uint16x4_t top = vld1_u16(input);
      const uint16x4_t hsum = vpadd_u16(top, top);
      vsth_u16(pred_buf_q3, vshl_n_u16(hsum, 2));
    } else if (width == 8) {
      const uint16x4x2_t top = vld2_u16(input);
      // equivalent to a vpadd_u16 (because vld2 interleaves)
      const uint16x4_t hsum = vadd_u16(top.val[0], top.val[1]);
      vst1_u16(pred_buf_q3, vshl_n_u16(hsum, 2));
    } else if (width == 16) {
      const uint16x8x2_t top = vld2q_u16(input);
      // equivalent to a vpaddq_u16 (because vld2q interleaves)
      const uint16x8_t hsum = vaddq_u16(top.val[0], top.val[1]);
      vst1q_u16(pred_buf_q3, vshlq_n_u16(hsum, 2));
    } else {
      const uint16x8x4_t top = vld4q_u16(input);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t hsum_0 = vaddq_u16(top.val[0], top.val[1]);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t hsum_1 = vaddq_u16(top.val[2], top.val[3]);
      uint16x8x2_t result = { { vshlq_n_u16(hsum_0, 2),
                                vshlq_n_u16(hsum_1, 2) } };
      vst2q_u16(pred_buf_q3, result);
    }
    input += input_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}

static void cfl_luma_subsampling_444_hbd_neon(const uint16_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE;
  do {
    if (width == 4) {
      const uint16x4_t top = vld1_u16(input);
      vst1_u16(pred_buf_q3, vshl_n_u16(top, 3));
    } else if (width == 8) {
      const uint16x8_t top = vld1q_u16(input);
      vst1q_u16(pred_buf_q3, vshlq_n_u16(top, 3));
    } else if (width == 16) {
      uint16x8x2_t top = vld2q_u16(input);
      top.val[0] = vshlq_n_u16(top.val[0], 3);
      top.val[1] = vshlq_n_u16(top.val[1], 3);
      vst2q_u16(pred_buf_q3, top);
    } else {
      uint16x8x4_t top = vld4q_u16(input);
      top.val[0] = vshlq_n_u16(top.val[0], 3);
      top.val[1] = vshlq_n_u16(top.val[1], 3);
      top.val[2] = vshlq_n_u16(top.val[2], 3);
      top.val[3] = vshlq_n_u16(top.val[3], 3);
      vst4q_u16(pred_buf_q3, top);
    }
    input += input_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}

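// CFL_GET_SUBSAMPLE_FUNCTION (defined in av1/common/cfl.h) instantiates the
// per-block-size wrappers around the subsampling kernels above.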
CFL_GET_SUBSAMPLE_FUNCTION(neon)

static INLINE void subtract_average_neon(const uint16_t *src, int16_t *dst,
                                         int width, int height,
                                         int round_offset,
                                         const int num_pel_log2) {
  const uint16_t *const end = src + height * CFL_BUF_LINE;

  // Round offset is not needed, because NEON will handle the rounding.
  (void)round_offset;
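
  // The rounding offset is instead folded into the narrowing shift below:
  // vqrshrn_n_u32(s, n) computes (s + (1 << (n - 1))) >> n.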

  // To optimize the use of the CPU pipeline, we process 4 rows per iteration.
  const int step = 4 * CFL_BUF_LINE;

  // At this stage, the prediction buffer contains scaled reconstructed luma
  // pixels, which are positive integers and only require 15 bits. By using
  // unsigned integers for the sum, we can do one addition operation inside 16
  // bits (8 lanes) before having to convert to 32 bits (4 lanes).
  const uint16_t *sum_buf = src;
  uint32x4_t sum_32x4 = { 0, 0, 0, 0 };
  do {
    // For all widths, we load, add and combine the data so it fits in 4 lanes.
    if (width == 4) {
      const uint16x4_t a0 =
          vadd_u16(vld1_u16(sum_buf), vld1_u16(sum_buf + CFL_BUF_LINE));
      const uint16x4_t a1 = vadd_u16(vld1_u16(sum_buf + 2 * CFL_BUF_LINE),
                                     vld1_u16(sum_buf + 3 * CFL_BUF_LINE));
      sum_32x4 = vaddq_u32(sum_32x4, vaddl_u16(a0, a1));
    } else if (width == 8) {
      const uint16x8_t a0 = vldaddq_u16(sum_buf, CFL_BUF_LINE);
      const uint16x8_t a1 =
          vldaddq_u16(sum_buf + 2 * CFL_BUF_LINE, CFL_BUF_LINE);
      sum_32x4 = vpadalq_u16(sum_32x4, a0);
      sum_32x4 = vpadalq_u16(sum_32x4, a1);
    } else {
      const uint16x8_t row0 = vldaddq_u16(sum_buf, 8);
      const uint16x8_t row1 = vldaddq_u16(sum_buf + CFL_BUF_LINE, 8);
      const uint16x8_t row2 = vldaddq_u16(sum_buf + 2 * CFL_BUF_LINE, 8);
      const uint16x8_t row3 = vldaddq_u16(sum_buf + 3 * CFL_BUF_LINE, 8);
      sum_32x4 = vpadalq_u16(sum_32x4, row0);
      sum_32x4 = vpadalq_u16(sum_32x4, row1);
      sum_32x4 = vpadalq_u16(sum_32x4, row2);
      sum_32x4 = vpadalq_u16(sum_32x4, row3);

      if (width == 32) {
        const uint16x8_t row0_1 = vldaddq_u16(sum_buf + 16, 8);
        const uint16x8_t row1_1 = vldaddq_u16(sum_buf + CFL_BUF_LINE + 16, 8);
        const uint16x8_t row2_1 =
            vldaddq_u16(sum_buf + 2 * CFL_BUF_LINE + 16, 8);
        const uint16x8_t row3_1 =
            vldaddq_u16(sum_buf + 3 * CFL_BUF_LINE + 16, 8);

        sum_32x4 = vpadalq_u16(sum_32x4, row0_1);
        sum_32x4 = vpadalq_u16(sum_32x4, row1_1);
        sum_32x4 = vpadalq_u16(sum_32x4, row2_1);
        sum_32x4 = vpadalq_u16(sum_32x4, row3_1);
      }
    }
    sum_buf += step;
  } while (sum_buf < end);

  // Permute and add in such a way that each lane contains the block sum.
  // [A+C+B+D, B+D+A+C, C+A+D+B, D+B+C+A]
#if __ARM_ARCH >= 8
  sum_32x4 = vpaddq_u32(sum_32x4, sum_32x4);
  sum_32x4 = vpaddq_u32(sum_32x4, sum_32x4);
#else
  uint32x4_t flip =
      vcombine_u32(vget_high_u32(sum_32x4), vget_low_u32(sum_32x4));
  sum_32x4 = vaddq_u32(sum_32x4, flip);
  sum_32x4 = vaddq_u32(sum_32x4, vrev64q_u32(sum_32x4));
#endif

  // Computing the average could be done using scalars, but getting off the
  // NEON engine introduces latency, so we use vqrshrn.
  int16x4_t avg_16x4;
  // Constant propagation makes for some ugly code.
  switch (num_pel_log2) {
    case 4: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 4)); break;
    case 5: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 5)); break;
    case 6: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 6)); break;
    case 7: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 7)); break;
    case 8: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 8)); break;
    case 9: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 9)); break;
    case 10:
      avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 10));
      break;
    default: assert(0);
  }

  if (width == 4) {
    do {
      vst1_s16(dst, vsub_s16(vreinterpret_s16_u16(vld1_u16(src)), avg_16x4));
      src += CFL_BUF_LINE;
      dst += CFL_BUF_LINE;
    } while (src < end);
  } else {
    const int16x8_t avg_16x8 = vcombine_s16(avg_16x4, avg_16x4);
    do {
      vldsubstq_s16(dst, src, 0, avg_16x8);
      vldsubstq_s16(dst, src, CFL_BUF_LINE, avg_16x8);
      vldsubstq_s16(dst, src, 2 * CFL_BUF_LINE, avg_16x8);
      vldsubstq_s16(dst, src, 3 * CFL_BUF_LINE, avg_16x8);

      if (width > 8) {
        vldsubstq_s16(dst, src, 8, avg_16x8);
        vldsubstq_s16(dst, src, 8 + CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 8 + 2 * CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 8 + 3 * CFL_BUF_LINE, avg_16x8);
      }
      if (width == 32) {
        vldsubstq_s16(dst, src, 16, avg_16x8);
        vldsubstq_s16(dst, src, 16 + CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 16 + 2 * CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 16 + 3 * CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 24, avg_16x8);
        vldsubstq_s16(dst, src, 24 + CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 24 + 2 * CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 24 + 3 * CFL_BUF_LINE, avg_16x8);
      }
      src += step;
      dst += step;
    } while (src < end);
  }
}

CFL_SUB_AVG_FN(neon)

// Negate the 16-bit integers in a when the corresponding signed 16-bit
// integer in b is negative.
// Notes:
//   * Negating INT16_MIN results in INT16_MIN (the negation does not
//     saturate). However, this cannot occur in practice, as scaled_luma is
//     the multiplication of two absolute values.
//   * In the Intel equivalent, elements in a are zeroed out when the
//     corresponding elements in b are zero. Because vsign is used twice in a
//     row, with b in the first call becoming a in the second call, there's no
//     impact from not zeroing out.
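// The mask trick below: with mask = b >> 15 (arithmetic shift),
//   b >= 0: mask == 0x0000 and (a + 0) ^ 0x0000 == a
//   b <  0: mask == 0xFFFF and (a - 1) ^ 0xFFFF == ~(a - 1) == -a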
static int16x4_t vsign_s16(int16x4_t a, int16x4_t b) {
  const int16x4_t mask = vshr_n_s16(b, 15);
  return veor_s16(vadd_s16(a, mask), mask);
}

// Negate the 16-bit integers in a when the corresponding signed 16-bit
// integer in b is negative.
// Notes:
//   * Negating INT16_MIN results in INT16_MIN (the negation does not
//     saturate). However, this cannot occur in practice, as scaled_luma is
//     the multiplication of two absolute values.
//   * In the Intel equivalent, elements in a are zeroed out when the
//     corresponding elements in b are zero. Because vsignq is used twice in a
//     row, with b in the first call becoming a in the second call, there's no
//     impact from not zeroing out.
static int16x8_t vsignq_s16(int16x8_t a, int16x8_t b) {
  const int16x8_t mask = vshrq_n_s16(b, 15);
  return veorq_s16(vaddq_s16(a, mask), mask);
}

static INLINE int16x4_t predict_w4(const int16_t *pred_buf_q3,
                                   int16x4_t alpha_sign, int abs_alpha_q12,
                                   int16x4_t dc) {
  const int16x4_t ac_q3 = vld1_s16(pred_buf_q3);
  const int16x4_t ac_sign = veor_s16(alpha_sign, ac_q3);
  int16x4_t scaled_luma = vqrdmulh_n_s16(vabs_s16(ac_q3), abs_alpha_q12);
  return vadd_s16(vsign_s16(scaled_luma, ac_sign), dc);
}

static INLINE int16x8_t predict_w8(const int16_t *pred_buf_q3,
                                   int16x8_t alpha_sign, int abs_alpha_q12,
                                   int16x8_t dc) {
  const int16x8_t ac_q3 = vld1q_s16(pred_buf_q3);
  const int16x8_t ac_sign = veorq_s16(alpha_sign, ac_q3);
  int16x8_t scaled_luma = vqrdmulhq_n_s16(vabsq_s16(ac_q3), abs_alpha_q12);
  return vaddq_s16(vsignq_s16(scaled_luma, ac_sign), dc);
}

static INLINE int16x8x2_t predict_w16(const int16_t *pred_buf_q3,
                                      int16x8_t alpha_sign, int abs_alpha_q12,
                                      int16x8_t dc) {
  // vld2q_s16 interleaves, which is not useful for prediction. vld1q_s16_x2
  // does not interleave, but is not currently available in the compiler used
  // by the AOM build system.
  const int16x8x2_t ac_q3 = vld2q_s16(pred_buf_q3);
  const int16x8_t ac_sign_0 = veorq_s16(alpha_sign, ac_q3.val[0]);
  const int16x8_t ac_sign_1 = veorq_s16(alpha_sign, ac_q3.val[1]);
  const int16x8_t scaled_luma_0 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[0]), abs_alpha_q12);
  const int16x8_t scaled_luma_1 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[1]), abs_alpha_q12);
  int16x8x2_t result;
  result.val[0] = vaddq_s16(vsignq_s16(scaled_luma_0, ac_sign_0), dc);
  result.val[1] = vaddq_s16(vsignq_s16(scaled_luma_1, ac_sign_1), dc);
  return result;
}

static INLINE int16x8x4_t predict_w32(const int16_t *pred_buf_q3,
                                      int16x8_t alpha_sign, int abs_alpha_q12,
                                      int16x8_t dc) {
  // vld4q_s16 interleaves, which is not useful for prediction. vld1q_s16_x4
  // does not interleave, but is not currently available in the compiler used
  // by the AOM build system.
  const int16x8x4_t ac_q3 = vld4q_s16(pred_buf_q3);
  const int16x8_t ac_sign_0 = veorq_s16(alpha_sign, ac_q3.val[0]);
  const int16x8_t ac_sign_1 = veorq_s16(alpha_sign, ac_q3.val[1]);
  const int16x8_t ac_sign_2 = veorq_s16(alpha_sign, ac_q3.val[2]);
  const int16x8_t ac_sign_3 = veorq_s16(alpha_sign, ac_q3.val[3]);
  const int16x8_t scaled_luma_0 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[0]), abs_alpha_q12);
  const int16x8_t scaled_luma_1 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[1]), abs_alpha_q12);
  const int16x8_t scaled_luma_2 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[2]), abs_alpha_q12);
  const int16x8_t scaled_luma_3 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[3]), abs_alpha_q12);
  int16x8x4_t result;
  result.val[0] = vaddq_s16(vsignq_s16(scaled_luma_0, ac_sign_0), dc);
  result.val[1] = vaddq_s16(vsignq_s16(scaled_luma_1, ac_sign_1), dc);
  result.val[2] = vaddq_s16(vsignq_s16(scaled_luma_2, ac_sign_2), dc);
  result.val[3] = vaddq_s16(vsignq_s16(scaled_luma_3, ac_sign_3), dc);
  return result;
}

static INLINE void cfl_predict_lbd_neon(const int16_t *pred_buf_q3,
                                        uint8_t *dst, int dst_stride,
                                        int alpha_q3, int width, int height) {
  const int16_t abs_alpha_q12 = abs(alpha_q3) << 9;
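  // vqrdmulh computes (2 * a * b + (1 << 15)) >> 16, so multiplying the Q3
  // luma values by abs(alpha_q3) << 9 (a Q12 value) yields the rounded Q0
  // scaled luma: (ac_q3 * abs(alpha_q3) + (1 << 5)) >> 6.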
  const int16_t *const end = pred_buf_q3 + height * CFL_BUF_LINE;
  if (width == 4) {
    const int16x4_t alpha_sign = vdup_n_s16(alpha_q3);
    const int16x4_t dc = vdup_n_s16(*dst);
    do {
      const int16x4_t pred =
          predict_w4(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
      vsth_u8(dst, vqmovun_s16(vcombine_s16(pred, pred)));
      dst += dst_stride;
    } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
  } else {
    const int16x8_t alpha_sign = vdupq_n_s16(alpha_q3);
    const int16x8_t dc = vdupq_n_s16(*dst);
    do {
      if (width == 8) {
        vst1_u8(dst, vqmovun_s16(predict_w8(pred_buf_q3, alpha_sign,
                                            abs_alpha_q12, dc)));
      } else if (width == 16) {
        const int16x8x2_t pred =
            predict_w16(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
        const uint8x8x2_t predun = { { vqmovun_s16(pred.val[0]),
                                       vqmovun_s16(pred.val[1]) } };
        vst2_u8(dst, predun);
      } else {
        const int16x8x4_t pred =
            predict_w32(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
        const uint8x8x4_t predun = {
          { vqmovun_s16(pred.val[0]), vqmovun_s16(pred.val[1]),
            vqmovun_s16(pred.val[2]), vqmovun_s16(pred.val[3]) }
        };
        vst4_u8(dst, predun);
      }
      dst += dst_stride;
    } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
  }
}

CFL_PREDICT_FN(neon, lbd)

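// Clamp the signed predictions to [0, (1 << bd) - 1] and reinterpret them as
// unsigned for the high-bit-depth stores.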
static INLINE uint16x4_t clamp_s16(int16x4_t a, int16x4_t max) {
  return vreinterpret_u16_s16(vmax_s16(vmin_s16(a, max), vdup_n_s16(0)));
}

static INLINE uint16x8_t clampq_s16(int16x8_t a, int16x8_t max) {
  return vreinterpretq_u16_s16(vmaxq_s16(vminq_s16(a, max), vdupq_n_s16(0)));
}

static INLINE uint16x8x2_t clamp2q_s16(int16x8x2_t a, int16x8_t max) {
  uint16x8x2_t result;
  result.val[0] = vreinterpretq_u16_s16(
      vmaxq_s16(vminq_s16(a.val[0], max), vdupq_n_s16(0)));
  result.val[1] = vreinterpretq_u16_s16(
      vmaxq_s16(vminq_s16(a.val[1], max), vdupq_n_s16(0)));
  return result;
}

static INLINE uint16x8x4_t clamp4q_s16(int16x8x4_t a, int16x8_t max) {
  uint16x8x4_t result;
  result.val[0] = vreinterpretq_u16_s16(
      vmaxq_s16(vminq_s16(a.val[0], max), vdupq_n_s16(0)));
  result.val[1] = vreinterpretq_u16_s16(
      vmaxq_s16(vminq_s16(a.val[1], max), vdupq_n_s16(0)));
  result.val[2] = vreinterpretq_u16_s16(
      vmaxq_s16(vminq_s16(a.val[2], max), vdupq_n_s16(0)));
  result.val[3] = vreinterpretq_u16_s16(
      vmaxq_s16(vminq_s16(a.val[3], max), vdupq_n_s16(0)));
  return result;
}

static INLINE void cfl_predict_hbd_neon(const int16_t *pred_buf_q3,
                                        uint16_t *dst, int dst_stride,
                                        int alpha_q3, int bd, int width,
                                        int height) {
  const int max = (1 << bd) - 1;
  const int16_t abs_alpha_q12 = abs(alpha_q3) << 9;
  const int16_t *const end = pred_buf_q3 + height * CFL_BUF_LINE;
  if (width == 4) {
    const int16x4_t alpha_sign = vdup_n_s16(alpha_q3);
    const int16x4_t dc = vdup_n_s16(*dst);
    const int16x4_t max_16x4 = vdup_n_s16(max);
    do {
      const int16x4_t scaled_luma =
          predict_w4(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
      vst1_u16(dst, clamp_s16(scaled_luma, max_16x4));
      dst += dst_stride;
    } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
  } else {
    const int16x8_t alpha_sign = vdupq_n_s16(alpha_q3);
    const int16x8_t dc = vdupq_n_s16(*dst);
    const int16x8_t max_16x8 = vdupq_n_s16(max);
    do {
      if (width == 8) {
        const int16x8_t pred =
            predict_w8(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
        vst1q_u16(dst, clampq_s16(pred, max_16x8));
      } else if (width == 16) {
        const int16x8x2_t pred =
            predict_w16(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
        vst2q_u16(dst, clamp2q_s16(pred, max_16x8));
      } else {
        const int16x8x4_t pred =
            predict_w32(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
        vst4q_u16(dst, clamp4q_s16(pred, max_16x8));
      }
      dst += dst_stride;
    } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
  }
}

CFL_PREDICT_FN(neon, hbd)