/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#ifndef _V128_INTRINSICS_H
#define _V128_INTRINSICS_H

#include <arm_neon.h>
#include "./v64_intrinsics_arm.h"

typedef int64x2_t v128;

SIMD_INLINE uint32_t v128_low_u32(v128 a) {
  return v64_low_u32(vget_low_s64(a));
}

SIMD_INLINE v64 v128_low_v64(v128 a) { return vget_low_s64(a); }

SIMD_INLINE v64 v128_high_v64(v128 a) { return vget_high_s64(a); }

SIMD_INLINE v128 v128_from_v64(v64 a, v64 b) { return vcombine_s64(b, a); }

SIMD_INLINE v128 v128_from_64(uint64_t a, uint64_t b) {
  return vcombine_s64((uint64x1_t)b, (uint64x1_t)a);
}

SIMD_INLINE v128 v128_from_32(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
  return vcombine_s64(v64_from_32(c, d), v64_from_32(a, b));
}

SIMD_INLINE v128 v128_load_aligned(const void *p) {
  return vreinterpretq_s64_u8(vld1q_u8((const uint8_t *)p));
}

SIMD_INLINE v128 v128_load_unaligned(const void *p) {
  return v128_load_aligned(p);
}

SIMD_INLINE void v128_store_aligned(void *p, v128 r) {
  vst1q_u8((uint8_t *)p, vreinterpretq_u8_s64(r));
}

SIMD_INLINE void v128_store_unaligned(void *p, v128 r) {
  vst1q_u8((uint8_t *)p, vreinterpretq_u8_s64(r));
}

SIMD_INLINE v128 v128_align(v128 a, v128 b, unsigned int c) {
// The following functions require an immediate.
// Some compilers will check this during optimisation, others won't.
#if defined(__OPTIMIZE__) && __OPTIMIZE__ && !defined(__clang__)
  return c ? vreinterpretq_s64_s8(
                 vextq_s8(vreinterpretq_s8_s64(b), vreinterpretq_s8_s64(a), c))
           : b;
#else
  return c < 8 ? v128_from_v64(v64_align(v128_low_v64(a), v128_high_v64(b), c),
                               v64_align(v128_high_v64(b), v128_low_v64(b), c))
               : v128_from_v64(
                     v64_align(v128_high_v64(a), v128_low_v64(a), c - 8),
                     v64_align(v128_low_v64(a), v128_high_v64(b), c - 8));
#endif
}
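
/* Illustrative usage sketch (not part of the API): v128_align(a, b, c)
   behaves like a byte-wise extract from the concatenation a:b, so with c == 5
   the result is bytes b[5..15] followed by a[0..4]:

     v128 r = v128_align(a, b, 5);

   The count c is assumed to be a compile-time constant here; the vextq_s8()
   path above requires an immediate when built with GCC and optimisation
   enabled. */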

SIMD_INLINE v128 v128_zero() { return vreinterpretq_s64_u8(vdupq_n_u8(0)); }

SIMD_INLINE v128 v128_ones() { return vreinterpretq_s64_u8(vdupq_n_u8(-1)); }

SIMD_INLINE v128 v128_dup_8(uint8_t x) {
  return vreinterpretq_s64_u8(vdupq_n_u8(x));
}

SIMD_INLINE v128 v128_dup_16(uint16_t x) {
  return vreinterpretq_s64_u16(vdupq_n_u16(x));
}

SIMD_INLINE v128 v128_dup_32(uint32_t x) {
  return vreinterpretq_s64_u32(vdupq_n_u32(x));
}

SIMD_INLINE int64_t v128_dotp_s16(v128 a, v128 b) {
  return v64_dotp_s16(vget_high_s64(a), vget_high_s64(b)) +
         v64_dotp_s16(vget_low_s64(a), vget_low_s64(b));
}

SIMD_INLINE uint64_t v128_hadd_u8(v128 x) {
  uint64x2_t t = vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(vreinterpretq_u8_s64(x))));
  return vget_lane_s32(
      vreinterpret_s32_u64(vadd_u64(vget_high_u64(t), vget_low_u64(t))), 0);
}

SIMD_INLINE v128 v128_padd_s16(v128 a) {
  return vreinterpretq_s64_s32(vpaddlq_s16(vreinterpretq_s16_s64(a)));
}

typedef struct { sad64_internal hi, lo; } sad128_internal;

SIMD_INLINE sad128_internal v128_sad_u8_init() {
  sad128_internal s;
  s.hi = s.lo = vdupq_n_u16(0);
  return s;
}

/* Implementation dependent return value. Result must be finalised with
   v128_sad_u8_sum().
   The result for more than 32 v128_sad_u8() calls is undefined. */
SIMD_INLINE sad128_internal v128_sad_u8(sad128_internal s, v128 a, v128 b) {
  sad128_internal r;
  r.hi = v64_sad_u8(s.hi, vget_high_s64(a), vget_high_s64(b));
  r.lo = v64_sad_u8(s.lo, vget_low_s64(a), vget_low_s64(b));
  return r;
}

SIMD_INLINE uint32_t v128_sad_u8_sum(sad128_internal s) {
  return (uint32_t)(v64_sad_u8_sum(s.hi) + v64_sad_u8_sum(s.lo));
}
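
/* Illustrative usage sketch (src, ref, stride and rows are hypothetical
   caller-provided values, with rows <= 32 per the comment above):

     sad128_internal acc = v128_sad_u8_init();
     for (int i = 0; i < rows; i++)
       acc = v128_sad_u8(acc, v128_load_unaligned(src + i * stride),
                         v128_load_unaligned(ref + i * stride));
     uint32_t sad = v128_sad_u8_sum(acc);
*/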

typedef struct { ssd64_internal hi, lo; } ssd128_internal;

SIMD_INLINE ssd128_internal v128_ssd_u8_init() {
  ssd128_internal s;
  s.hi = s.lo = (ssd64_internal)(uint64_t)0;
  return s;
}

/* Implementation dependent return value. Result must be finalised with
 * v128_ssd_u8_sum(). */
SIMD_INLINE ssd128_internal v128_ssd_u8(ssd128_internal s, v128 a, v128 b) {
  ssd128_internal r;
  r.hi = v64_ssd_u8(s.hi, vget_high_s64(a), vget_high_s64(b));
  r.lo = v64_ssd_u8(s.lo, vget_low_s64(a), vget_low_s64(b));
  return r;
}

SIMD_INLINE uint32_t v128_ssd_u8_sum(ssd128_internal s) {
  return (uint32_t)(v64_ssd_u8_sum(s.hi) + v64_ssd_u8_sum(s.lo));
}
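
/* Illustrative usage sketch: the SSD accumulator follows the same
   init/accumulate/sum pattern as the SAD helpers above (src, ref, stride and
   rows are hypothetical caller-provided values):

     ssd128_internal acc = v128_ssd_u8_init();
     for (int i = 0; i < rows; i++)
       acc = v128_ssd_u8(acc, v128_load_unaligned(src + i * stride),
                         v128_load_unaligned(ref + i * stride));
     uint32_t ssd = v128_ssd_u8_sum(acc);
*/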

SIMD_INLINE v128 v128_or(v128 x, v128 y) { return vorrq_s64(x, y); }

SIMD_INLINE v128 v128_xor(v128 x, v128 y) { return veorq_s64(x, y); }

SIMD_INLINE v128 v128_and(v128 x, v128 y) { return vandq_s64(x, y); }

SIMD_INLINE v128 v128_andn(v128 x, v128 y) { return vbicq_s64(x, y); }

SIMD_INLINE v128 v128_add_8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vaddq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
}

SIMD_INLINE v128 v128_add_16(v128 x, v128 y) {
  return vreinterpretq_s64_s16(
      vaddq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
}

SIMD_INLINE v128 v128_sadd_s16(v128 x, v128 y) {
  return vreinterpretq_s64_s16(
      vqaddq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
}

SIMD_INLINE v128 v128_add_32(v128 x, v128 y) {
  return vreinterpretq_s64_u32(
      vaddq_u32(vreinterpretq_u32_s64(x), vreinterpretq_u32_s64(y)));
}

SIMD_INLINE v128 v128_sub_8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vsubq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
}

SIMD_INLINE v128 v128_sub_16(v128 x, v128 y) {
  return vreinterpretq_s64_s16(
      vsubq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
}

SIMD_INLINE v128 v128_ssub_s16(v128 x, v128 y) {
  return vreinterpretq_s64_s16(
      vqsubq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
}

SIMD_INLINE v128 v128_ssub_u16(v128 x, v128 y) {
  return vreinterpretq_s64_u16(
      vqsubq_u16(vreinterpretq_u16_s64(x), vreinterpretq_u16_s64(y)));
}

SIMD_INLINE v128 v128_ssub_u8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vqsubq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
}

SIMD_INLINE v128 v128_ssub_s8(v128 x, v128 y) {
  return vreinterpretq_s64_s8(
      vqsubq_s8(vreinterpretq_s8_s64(x), vreinterpretq_s8_s64(y)));
}

SIMD_INLINE v128 v128_sub_32(v128 x, v128 y) {
  return vreinterpretq_s64_s32(
      vsubq_s32(vreinterpretq_s32_s64(x), vreinterpretq_s32_s64(y)));
}

SIMD_INLINE v128 v128_abs_s16(v128 x) {
  return vreinterpretq_s64_s16(vabsq_s16(vreinterpretq_s16_s64(x)));
}

SIMD_INLINE v128 v128_abs_s8(v128 x) {
  return vreinterpretq_s64_s8(vabsq_s8(vreinterpretq_s8_s64(x)));
}

SIMD_INLINE v128 v128_mul_s16(v64 a, v64 b) {
  return vreinterpretq_s64_s32(
      vmull_s16(vreinterpret_s16_s64(a), vreinterpret_s16_s64(b)));
}

SIMD_INLINE v128 v128_mullo_s16(v128 a, v128 b) {
  return vreinterpretq_s64_s16(
      vmulq_s16(vreinterpretq_s16_s64(a), vreinterpretq_s16_s64(b)));
}

SIMD_INLINE v128 v128_mulhi_s16(v128 a, v128 b) {
  return v128_from_v64(v64_mulhi_s16(vget_high_s64(a), vget_high_s64(b)),
                       v64_mulhi_s16(vget_low_s64(a), vget_low_s64(b)));
}

SIMD_INLINE v128 v128_mullo_s32(v128 a, v128 b) {
  return vreinterpretq_s64_s32(
      vmulq_s32(vreinterpretq_s32_s64(a), vreinterpretq_s32_s64(b)));
}

SIMD_INLINE v128 v128_madd_s16(v128 a, v128 b) {
  return v128_from_v64(v64_madd_s16(vget_high_s64(a), vget_high_s64(b)),
                       v64_madd_s16(vget_low_s64(a), vget_low_s64(b)));
}

SIMD_INLINE v128 v128_madd_us8(v128 a, v128 b) {
  return v128_from_v64(v64_madd_us8(vget_high_s64(a), vget_high_s64(b)),
                       v64_madd_us8(vget_low_s64(a), vget_low_s64(b)));
}

SIMD_INLINE v128 v128_avg_u8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vrhaddq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
}

SIMD_INLINE v128 v128_rdavg_u8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vhaddq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
}

SIMD_INLINE v128 v128_avg_u16(v128 x, v128 y) {
  return vreinterpretq_s64_u16(
      vrhaddq_u16(vreinterpretq_u16_s64(x), vreinterpretq_u16_s64(y)));
}

SIMD_INLINE v128 v128_min_u8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vminq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
}

SIMD_INLINE v128 v128_max_u8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vmaxq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
}

SIMD_INLINE v128 v128_min_s8(v128 x, v128 y) {
  return vreinterpretq_s64_s8(
      vminq_s8(vreinterpretq_s8_s64(x), vreinterpretq_s8_s64(y)));
}

SIMD_INLINE v128 v128_max_s8(v128 x, v128 y) {
  return vreinterpretq_s64_s8(
      vmaxq_s8(vreinterpretq_s8_s64(x), vreinterpretq_s8_s64(y)));
}

SIMD_INLINE v128 v128_min_s16(v128 x, v128 y) {
  return vreinterpretq_s64_s16(
      vminq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
}

SIMD_INLINE v128 v128_max_s16(v128 x, v128 y) {
  return vreinterpretq_s64_s16(
      vmaxq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
}

SIMD_INLINE v128 v128_ziplo_8(v128 x, v128 y) {
  uint8x16x2_t r = vzipq_u8(vreinterpretq_u8_s64(y), vreinterpretq_u8_s64(x));
  return vreinterpretq_s64_u8(r.val[0]);
}

SIMD_INLINE v128 v128_ziphi_8(v128 x, v128 y) {
  uint8x16x2_t r = vzipq_u8(vreinterpretq_u8_s64(y), vreinterpretq_u8_s64(x));
  return vreinterpretq_s64_u8(r.val[1]);
}

SIMD_INLINE v128 v128_zip_8(v64 x, v64 y) {
  uint8x8x2_t r = vzip_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x));
  return vreinterpretq_s64_u8(vcombine_u8(r.val[0], r.val[1]));
}

SIMD_INLINE v128 v128_ziplo_16(v128 x, v128 y) {
  int16x8x2_t r = vzipq_s16(vreinterpretq_s16_s64(y), vreinterpretq_s16_s64(x));
  return vreinterpretq_s64_s16(r.val[0]);
}

SIMD_INLINE v128 v128_ziphi_16(v128 x, v128 y) {
  int16x8x2_t r = vzipq_s16(vreinterpretq_s16_s64(y), vreinterpretq_s16_s64(x));
  return vreinterpretq_s64_s16(r.val[1]);
}

SIMD_INLINE v128 v128_zip_16(v64 x, v64 y) {
  uint16x4x2_t r = vzip_u16(vreinterpret_u16_s64(y), vreinterpret_u16_s64(x));
  return vreinterpretq_s64_u16(vcombine_u16(r.val[0], r.val[1]));
}

SIMD_INLINE v128 v128_ziplo_32(v128 x, v128 y) {
  int32x4x2_t r = vzipq_s32(vreinterpretq_s32_s64(y), vreinterpretq_s32_s64(x));
  return vreinterpretq_s64_s32(r.val[0]);
}

SIMD_INLINE v128 v128_ziphi_32(v128 x, v128 y) {
  int32x4x2_t r = vzipq_s32(vreinterpretq_s32_s64(y), vreinterpretq_s32_s64(x));
  return vreinterpretq_s64_s32(r.val[1]);
}

SIMD_INLINE v128 v128_zip_32(v64 x, v64 y) {
  uint32x2x2_t r = vzip_u32(vreinterpret_u32_s64(y), vreinterpret_u32_s64(x));
  return vreinterpretq_s64_u32(vcombine_u32(r.val[0], r.val[1]));
}

SIMD_INLINE v128 v128_ziplo_64(v128 a, v128 b) {
  return v128_from_v64(vget_low_u64((uint64x2_t)a),
                       vget_low_u64((uint64x2_t)b));
}

SIMD_INLINE v128 v128_ziphi_64(v128 a, v128 b) {
  return v128_from_v64(vget_high_u64((uint64x2_t)a),
                       vget_high_u64((uint64x2_t)b));
}

SIMD_INLINE v128 v128_unziplo_8(v128 x, v128 y) {
  uint8x16x2_t r = vuzpq_u8(vreinterpretq_u8_s64(y), vreinterpretq_u8_s64(x));
  return vreinterpretq_s64_u8(r.val[0]);
}

SIMD_INLINE v128 v128_unziphi_8(v128 x, v128 y) {
  uint8x16x2_t r = vuzpq_u8(vreinterpretq_u8_s64(y), vreinterpretq_u8_s64(x));
  return vreinterpretq_s64_u8(r.val[1]);
}

SIMD_INLINE v128 v128_unziplo_16(v128 x, v128 y) {
  uint16x8x2_t r =
      vuzpq_u16(vreinterpretq_u16_s64(y), vreinterpretq_u16_s64(x));
  return vreinterpretq_s64_u16(r.val[0]);
}

SIMD_INLINE v128 v128_unziphi_16(v128 x, v128 y) {
  uint16x8x2_t r =
      vuzpq_u16(vreinterpretq_u16_s64(y), vreinterpretq_u16_s64(x));
  return vreinterpretq_s64_u16(r.val[1]);
}

SIMD_INLINE v128 v128_unziplo_32(v128 x, v128 y) {
  uint32x4x2_t r =
      vuzpq_u32(vreinterpretq_u32_s64(y), vreinterpretq_u32_s64(x));
  return vreinterpretq_s64_u32(r.val[0]);
}

SIMD_INLINE v128 v128_unziphi_32(v128 x, v128 y) {
  uint32x4x2_t r =
      vuzpq_u32(vreinterpretq_u32_s64(y), vreinterpretq_u32_s64(x));
  return vreinterpretq_s64_u32(r.val[1]);
}

SIMD_INLINE v128 v128_unpack_u8_s16(v64 a) {
  return vreinterpretq_s64_u16(vmovl_u8(vreinterpret_u8_s64(a)));
}

SIMD_INLINE v128 v128_unpacklo_u8_s16(v128 a) {
  return vreinterpretq_s64_u16(vmovl_u8(vreinterpret_u8_s64(vget_low_s64(a))));
}

SIMD_INLINE v128 v128_unpackhi_u8_s16(v128 a) {
  return vreinterpretq_s64_u16(vmovl_u8(vreinterpret_u8_s64(vget_high_s64(a))));
}

SIMD_INLINE v128 v128_unpack_s8_s16(v64 a) {
  return vreinterpretq_s64_s16(vmovl_s8(vreinterpret_s8_s64(a)));
}

SIMD_INLINE v128 v128_unpacklo_s8_s16(v128 a) {
  return vreinterpretq_s64_s16(vmovl_s8(vreinterpret_s8_s64(vget_low_s64(a))));
}

SIMD_INLINE v128 v128_unpackhi_s8_s16(v128 a) {
  return vreinterpretq_s64_s16(vmovl_s8(vreinterpret_s8_s64(vget_high_s64(a))));
}

SIMD_INLINE v128 v128_pack_s32_s16(v128 a, v128 b) {
  return v128_from_v64(
      vreinterpret_s64_s16(vqmovn_s32(vreinterpretq_s32_s64(a))),
      vreinterpret_s64_s16(vqmovn_s32(vreinterpretq_s32_s64(b))));
}

SIMD_INLINE v128 v128_pack_s16_u8(v128 a, v128 b) {
  return v128_from_v64(
      vreinterpret_s64_u8(vqmovun_s16(vreinterpretq_s16_s64(a))),
      vreinterpret_s64_u8(vqmovun_s16(vreinterpretq_s16_s64(b))));
}

SIMD_INLINE v128 v128_pack_s16_s8(v128 a, v128 b) {
  return v128_from_v64(
      vreinterpret_s64_s8(vqmovn_s16(vreinterpretq_s16_s64(a))),
      vreinterpret_s64_s8(vqmovn_s16(vreinterpretq_s16_s64(b))));
}

SIMD_INLINE v128 v128_unpack_u16_s32(v64 a) {
  return vreinterpretq_s64_u32(vmovl_u16(vreinterpret_u16_s64(a)));
}

SIMD_INLINE v128 v128_unpack_s16_s32(v64 a) {
  return vreinterpretq_s64_s32(vmovl_s16(vreinterpret_s16_s64(a)));
}

SIMD_INLINE v128 v128_unpacklo_u16_s32(v128 a) {
  return vreinterpretq_s64_u32(
      vmovl_u16(vreinterpret_u16_s64(vget_low_s64(a))));
}

SIMD_INLINE v128 v128_unpacklo_s16_s32(v128 a) {
  return vreinterpretq_s64_s32(
      vmovl_s16(vreinterpret_s16_s64(vget_low_s64(a))));
}

SIMD_INLINE v128 v128_unpackhi_u16_s32(v128 a) {
  return vreinterpretq_s64_u32(
      vmovl_u16(vreinterpret_u16_s64(vget_high_s64(a))));
}

SIMD_INLINE v128 v128_unpackhi_s16_s32(v128 a) {
  return vreinterpretq_s64_s32(
      vmovl_s16(vreinterpret_s16_s64(vget_high_s64(a))));
}

SIMD_INLINE v128 v128_shuffle_8(v128 x, v128 pattern) {
  return v128_from_64(
      (uint64_t)vreinterpret_s64_u8(
          vtbl2_u8((uint8x8x2_t){ { vget_low_u8(vreinterpretq_u8_s64(x)),
                                    vget_high_u8(vreinterpretq_u8_s64(x)) } },
                   vreinterpret_u8_s64(vget_high_s64(pattern)))),
      (uint64_t)vreinterpret_s64_u8(
          vtbl2_u8((uint8x8x2_t){ { vget_low_u8(vreinterpretq_u8_s64(x)),
                                    vget_high_u8(vreinterpretq_u8_s64(x)) } },
                   vreinterpret_u8_s64(vget_low_s64(pattern)))));
}

SIMD_INLINE v128 v128_cmpgt_s8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vcgtq_s8(vreinterpretq_s8_s64(x), vreinterpretq_s8_s64(y)));
}

SIMD_INLINE v128 v128_cmplt_s8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vcltq_s8(vreinterpretq_s8_s64(x), vreinterpretq_s8_s64(y)));
}

SIMD_INLINE v128 v128_cmpeq_8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vceqq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
}

SIMD_INLINE v128 v128_cmpgt_s16(v128 x, v128 y) {
  return vreinterpretq_s64_u16(
      vcgtq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
}

SIMD_INLINE v128 v128_cmplt_s16(v128 x, v128 y) {
  return vreinterpretq_s64_u16(
      vcltq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
}

SIMD_INLINE v128 v128_cmpeq_16(v128 x, v128 y) {
  return vreinterpretq_s64_u16(
      vceqq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
}

SIMD_INLINE v128 v128_shl_8(v128 a, unsigned int c) {
  return (c > 7) ? v128_zero() : vreinterpretq_s64_u8(vshlq_u8(
                                     vreinterpretq_u8_s64(a), vdupq_n_s8(c)));
}

SIMD_INLINE v128 v128_shr_u8(v128 a, unsigned int c) {
  return (c > 7) ? v128_zero() : vreinterpretq_s64_u8(vshlq_u8(
                                     vreinterpretq_u8_s64(a), vdupq_n_s8(-c)));
}

SIMD_INLINE v128 v128_shr_s8(v128 a, unsigned int c) {
  return (c > 7) ? v128_ones() : vreinterpretq_s64_s8(vshlq_s8(
                                     vreinterpretq_s8_s64(a), vdupq_n_s8(-c)));
}

SIMD_INLINE v128 v128_shl_16(v128 a, unsigned int c) {
  return (c > 15) ? v128_zero()
                  : vreinterpretq_s64_u16(
                        vshlq_u16(vreinterpretq_u16_s64(a), vdupq_n_s16(c)));
}

SIMD_INLINE v128 v128_shr_u16(v128 a, unsigned int c) {
  return (c > 15) ? v128_zero()
                  : vreinterpretq_s64_u16(
                        vshlq_u16(vreinterpretq_u16_s64(a), vdupq_n_s16(-c)));
}

SIMD_INLINE v128 v128_shr_s16(v128 a, unsigned int c) {
  return (c > 15) ? v128_ones()
                  : vreinterpretq_s64_s16(
                        vshlq_s16(vreinterpretq_s16_s64(a), vdupq_n_s16(-c)));
}

SIMD_INLINE v128 v128_shl_32(v128 a, unsigned int c) {
  return (c > 31) ? v128_zero()
                  : vreinterpretq_s64_u32(
                        vshlq_u32(vreinterpretq_u32_s64(a), vdupq_n_s32(c)));
}

SIMD_INLINE v128 v128_shr_u32(v128 a, unsigned int c) {
  return (c > 31) ? v128_zero()
                  : vreinterpretq_s64_u32(
                        vshlq_u32(vreinterpretq_u32_s64(a), vdupq_n_s32(-c)));
}

SIMD_INLINE v128 v128_shr_s32(v128 a, unsigned int c) {
  return (c > 31) ? v128_ones()
                  : vreinterpretq_s64_s32(
                        vshlq_s32(vreinterpretq_s32_s64(a), vdupq_n_s32(-c)));
}

#if defined(__OPTIMIZE__) && __OPTIMIZE__ && !defined(__clang__)

SIMD_INLINE v128 v128_shl_n_byte(v128 a, unsigned int n) {
  return n < 8
             ? v128_from_64(
                   (uint64_t)vorr_u64(
                       vshl_n_u64(vreinterpret_u64_s64(vget_high_s64(a)),
                                  n * 8),
                       vshr_n_u64(vreinterpret_u64_s64(vget_low_s64(a)),
                                  (8 - n) * 8)),
                   (uint64_t)vshl_n_u64(vreinterpret_u64_s64(vget_low_s64(a)),
                                        n * 8))
             : (n == 8 ? v128_from_64(
                             (uint64_t)vreinterpret_u64_s64(vget_low_s64(a)), 0)
                       : v128_from_64((uint64_t)vshl_n_u64(
                                          vreinterpret_u64_s64(vget_low_s64(a)),
                                          (n - 8) * 8),
                                      0));
}

SIMD_INLINE v128 v128_shr_n_byte(v128 a, unsigned int n) {
  return n < 8
             ? v128_from_64(
                   vshr_n_u64(vreinterpret_u64_s64(vget_high_s64(a)), n * 8),
                   vorr_u64(
                       vshr_n_u64(vreinterpret_u64_s64(vget_low_s64(a)), n * 8),
                       vshl_n_u64(vreinterpret_u64_s64(vget_high_s64(a)),
                                  (8 - n) * 8)))
             : (n == 8
                    ? v128_from_64(0, vreinterpret_u64_s64(vget_high_s64(a)))
                    : v128_from_64(
                          0, vshr_n_u64(vreinterpret_u64_s64(vget_high_s64(a)),
                                        (n - 8) * 8)));
}

SIMD_INLINE v128 v128_shl_n_8(v128 a, unsigned int c) {
  return vreinterpretq_s64_u8(vshlq_n_u8(vreinterpretq_u8_s64(a), c));
}

SIMD_INLINE v128 v128_shr_n_u8(v128 a, unsigned int c) {
  return vreinterpretq_s64_u8(vshrq_n_u8(vreinterpretq_u8_s64(a), c));
}

SIMD_INLINE v128 v128_shr_n_s8(v128 a, unsigned int c) {
  return vreinterpretq_s64_s8(vshrq_n_s8(vreinterpretq_s8_s64(a), c));
}

SIMD_INLINE v128 v128_shl_n_16(v128 a, unsigned int c) {
  return vreinterpretq_s64_u16(vshlq_n_u16(vreinterpretq_u16_s64(a), c));
}

SIMD_INLINE v128 v128_shr_n_u16(v128 a, unsigned int c) {
  return vreinterpretq_s64_u16(vshrq_n_u16(vreinterpretq_u16_s64(a), c));
}

SIMD_INLINE v128 v128_shr_n_s16(v128 a, unsigned int c) {
  return vreinterpretq_s64_s16(vshrq_n_s16(vreinterpretq_s16_s64(a), c));
}

SIMD_INLINE v128 v128_shl_n_32(v128 a, unsigned int c) {
  return vreinterpretq_s64_u32(vshlq_n_u32(vreinterpretq_u32_s64(a), c));
}

SIMD_INLINE v128 v128_shr_n_u32(v128 a, unsigned int c) {
  return vreinterpretq_s64_u32(vshrq_n_u32(vreinterpretq_u32_s64(a), c));
}

SIMD_INLINE v128 v128_shr_n_s32(v128 a, unsigned int c) {
  return vreinterpretq_s64_s32(vshrq_n_s32(vreinterpretq_s32_s64(a), c));
}

#else

SIMD_INLINE v128 v128_shl_n_byte(v128 a, unsigned int n) {
  if (n < 8)
    return v128_from_v64(v64_or(v64_shl_n_byte(v128_high_v64(a), n),
                                v64_shr_n_byte(v128_low_v64(a), 8 - n)),
                         v64_shl_n_byte(v128_low_v64(a), n));
  else
    return v128_from_v64(v64_shl_n_byte(v128_low_v64(a), n - 8), v64_zero());
}

SIMD_INLINE v128 v128_shr_n_byte(v128 a, unsigned int n) {
  if (n < 8)
    return v128_from_v64(v64_shr_n_byte(v128_high_v64(a), n),
                         v64_or(v64_shr_n_byte(v128_low_v64(a), n),
                                v64_shl_n_byte(v128_high_v64(a), 8 - n)));
  else
    return v128_from_v64(v64_zero(), v64_shr_n_byte(v128_high_v64(a), n - 8));
}

SIMD_INLINE v128 v128_shl_n_8(v128 a, unsigned int c) {
  return v128_shl_8(a, c);
}

SIMD_INLINE v128 v128_shr_n_u8(v128 a, unsigned int c) {
  return v128_shr_u8(a, c);
}

SIMD_INLINE v128 v128_shr_n_s8(v128 a, unsigned int c) {
  return v128_shr_s8(a, c);
}

SIMD_INLINE v128 v128_shl_n_16(v128 a, unsigned int c) {
  return v128_shl_16(a, c);
}

SIMD_INLINE v128 v128_shr_n_u16(v128 a, unsigned int c) {
  return v128_shr_u16(a, c);
}

SIMD_INLINE v128 v128_shr_n_s16(v128 a, unsigned int c) {
  return v128_shr_s16(a, c);
}

SIMD_INLINE v128 v128_shl_n_32(v128 a, unsigned int c) {
  return v128_shl_32(a, c);
}

SIMD_INLINE v128 v128_shr_n_u32(v128 a, unsigned int c) {
  return v128_shr_u32(a, c);
}

SIMD_INLINE v128 v128_shr_n_s32(v128 a, unsigned int c) {
  return v128_shr_s32(a, c);
}

#endif

#endif /* _V128_INTRINSICS_H */