Add a generic SIMD library supporting x86 SSE2+ and ARM NEON.
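
For illustration, typical use of the 64-bit vector type looks like this
(the pointer names here are hypothetical, not part of this change):

    /* Average two 8-byte rows of pixels. */
    v64 a = v64_load_unaligned(src);
    v64 b = v64_load_unaligned(ref);
    v64_store_unaligned(dst, v64_avg_u8(a, b));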
Change-Id: I037f4c44f621a7e909b82ccb6a299d41bcbf8607
diff --git a/aom_dsp/simd/v64_intrinsics_arm.h b/aom_dsp/simd/v64_intrinsics_arm.h
new file mode 100644
index 0000000..b487303
--- /dev/null
+++ b/aom_dsp/simd/v64_intrinsics_arm.h
@@ -0,0 +1,567 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef _V64_INTRINSICS_H
+#define _V64_INTRINSICS_H
+
+#include <arm_neon.h>
+
+/* vzip in gcc is broken prior to 4.6.1. */
+#if __GNUC__ && \
+ ((__GNUC__ << 16) + (__GNUC_MINOR__ << 8) + __GNUC_PATCHLEVEL__ < \
+ (4 << 16) + (6 << 8) + 1)
+#error vzip buggy in gcc. Get at least gcc 4.6.1.
+#endif
+
+typedef int64x1_t v64;
+
+SIMD_INLINE uint32_t v64_low_u32(v64 a) {
+ return vget_lane_u32(vreinterpret_u32_s64(a), 0);
+}
+
+SIMD_INLINE uint32_t v64_high_u32(v64 a) {
+ return vget_lane_u32(vreinterpret_u32_s64(a), 1);
+}
+
+SIMD_INLINE int32_t v64_low_s32(v64 a) {
+ return vget_lane_s32(vreinterpret_s32_s64(a), 0);
+}
+
+SIMD_INLINE int32_t v64_high_s32(v64 a) {
+ return vget_lane_s32(vreinterpret_s32_s64(a), 1);
+}
+
+SIMD_INLINE v64 v64_from_16(uint16_t a, uint16_t b, uint16_t c, uint16_t d) {
+ return vcreate_s64((uint64_t)a << 48 | (uint64_t)b << 32 | (uint64_t)c << 16 |
+ d);
+}
+
+SIMD_INLINE v64 v64_from_32(uint32_t x, uint32_t y) {
+ return vcreate_s64((uint64_t)x << 32 | y);
+}
+
+SIMD_INLINE v64 v64_from_64(uint64_t x) { return vcreate_s64(x); }
+
+SIMD_INLINE uint64_t v64_u64(v64 x) { return (uint64_t)vget_lane_s64(x, 0); }
+
+SIMD_INLINE uint32_t u32_load_aligned(const void *p) {
+ return *((uint32_t *)p);
+}
+
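+/* Note: vld1_u8 reads a full 8 bytes, of which only the low 4 are used, so
+   the 4 bytes following p must be readable. */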
+SIMD_INLINE uint32_t u32_load_unaligned(const void *p) {
+ return vget_lane_u32(vreinterpret_u32_u8(vld1_u8((const uint8_t *)p)), 0);
+}
+
+SIMD_INLINE void u32_store_aligned(void *p, uint32_t a) {
+ *((uint32_t *)p) = a;
+}
+
+SIMD_INLINE void u32_store_unaligned(void *p, uint32_t a) {
+#if __CC_ARM
+  *((__packed uint32_t *)p) = a;
+#elif __GNUC__
+  *((__attribute__((packed)) uint32_t *)p) = a;
+#else
+  vst1_lane_u32((uint32_t *)p, vdup_n_u32(a), 0);
+#endif
+}
+
+SIMD_INLINE v64 v64_load_aligned(const void *p) {
+ return vreinterpret_s64_u8(vld1_u8((const uint8_t *)p));
+}
+
+SIMD_INLINE v64 v64_load_unaligned(const void *p) {
+ return v64_load_aligned(p);
+}
+
+SIMD_INLINE void v64_store_aligned(void *p, v64 r) {
+ vst1_u8((uint8_t *)p, vreinterpret_u8_s64(r));
+}
+
+SIMD_INLINE void v64_store_unaligned(void *p, v64 r) {
+ vst1_u8((uint8_t *)p, vreinterpret_u8_s64(r));
+}
+
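+/* Return the 8 bytes starting at byte c of the 16-byte value formed by
+   concatenating a (high) and b (low); c must be a compile-time constant. */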
+SIMD_INLINE v64 v64_align(v64 a, v64 b, const unsigned int c) {
+#if __OPTIMIZE__
+ return c ? vreinterpret_s64_s8(
+ vext_s8(vreinterpret_s8_s64(b), vreinterpret_s8_s64(a), c))
+ : b;
+#else
+  return c ? v64_from_64((v64_u64(b) >> c * 8) | (v64_u64(a) << (8 - c) * 8))
+           : b;
+#endif
+}
+
+SIMD_INLINE v64 v64_zero() { return vreinterpret_s64_u8(vdup_n_u8(0)); }
+
+SIMD_INLINE v64 v64_ones() { return vreinterpret_s64_u8(vdup_n_u8(-1)); }
+
+SIMD_INLINE v64 v64_dup_8(uint8_t x) {
+ return vreinterpret_s64_u8(vdup_n_u8(x));
+}
+
+SIMD_INLINE v64 v64_dup_16(uint16_t x) {
+ return vreinterpret_s64_u16(vdup_n_u16(x));
+}
+
+SIMD_INLINE v64 v64_dup_32(uint32_t x) {
+ return vreinterpret_s64_u32(vdup_n_u32(x));
+}
+
+SIMD_INLINE int64_t v64_dotp_su8(v64 x, v64 y) {
+ int64x2_t r = vpaddlq_s32(vpaddlq_s16(
+ vmulq_s16(vmovl_s8(vreinterpret_s8_s64(x)),
+ vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_s64(y))))));
+  return vget_lane_s64(vadd_s64(vget_high_s64(r), vget_low_s64(r)), 0);
+}
+
+SIMD_INLINE int64_t v64_dotp_s16(v64 x, v64 y) {
+ int64x2_t r =
+ vpaddlq_s32(vmull_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
+  return vget_lane_s64(vadd_s64(vget_high_s64(r), vget_low_s64(r)), 0);
+}
+
+SIMD_INLINE uint64_t v64_hadd_u8(v64 x) {
+  return vget_lane_u64(
+      vpaddl_u32(vpaddl_u16(vpaddl_u8(vreinterpret_u8_s64(x)))), 0);
+}
+
+SIMD_INLINE int64_t v64_hadd_s16(v64 a) {
+  return vget_lane_s64(vpaddl_s32(vpaddl_s16(vreinterpret_s16_s64(a))), 0);
+}
+
+typedef uint16x8_t sad64_internal;
+
+SIMD_INLINE sad64_internal v64_sad_u8_init() { return vdupq_n_u16(0); }
+
+/* Implementation dependent return value. Result must be finalised with
+ v64_sad_u8_sum().
+ The result for more than 32 v64_sad_u8() calls is undefined. */
+SIMD_INLINE sad64_internal v64_sad_u8(sad64_internal s, v64 a, v64 b) {
+ return vabal_u8(s, vreinterpret_u8_s64(a), vreinterpret_u8_s64(b));
+}
+
+SIMD_INLINE uint32_t v64_sad_u8_sum(sad64_internal s) {
+ uint64x2_t r = vpaddlq_u32(vpaddlq_u16(s));
+  return (uint32_t)vget_lane_u64(
+      vadd_u64(vget_high_u64(r), vget_low_u64(r)), 0);
+}
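+
+/* Example (illustrative only; sad8x8 and its stride arguments are
+   assumptions, not part of this library):
+
+   uint32_t sad8x8(const uint8_t *src, int sstride, const uint8_t *ref,
+                   int rstride) {
+     sad64_internal s = v64_sad_u8_init();
+     for (int i = 0; i < 8; i++)
+       s = v64_sad_u8(s, v64_load_unaligned(src + i * sstride),
+                      v64_load_unaligned(ref + i * rstride));
+     return v64_sad_u8_sum(s);
+   }
+*/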
+
+typedef int64x1_t ssd64_internal;
+
+SIMD_INLINE ssd64_internal v64_ssd_u8_init() { return vdup_n_s64(0); }
+
+/* Implementation dependent return value. Result must be finalised with
+ * v64_ssd_u8_sum(). */
+SIMD_INLINE ssd64_internal v64_ssd_u8(ssd64_internal s, v64 a, v64 b) {
+ uint8x8_t t = vabd_u8(vreinterpret_u8_s64(a), vreinterpret_u8_s64(b));
+ uint64x2_t r = vpaddlq_u32(vpaddlq_u16(vmull_u8(t, t)));
+  return vreinterpret_s64_u64(vadd_u64(
+      vreinterpret_u64_s64(s), vadd_u64(vget_high_u64(r), vget_low_u64(r))));
+}
+
+SIMD_INLINE uint32_t v64_ssd_u8_sum(ssd64_internal s) {
+  return (uint32_t)vget_lane_s64(s, 0);
+}
+
+SIMD_INLINE v64 v64_or(v64 x, v64 y) { return vorr_s64(x, y); }
+
+SIMD_INLINE v64 v64_xor(v64 x, v64 y) { return veor_s64(x, y); }
+
+SIMD_INLINE v64 v64_and(v64 x, v64 y) { return vand_s64(x, y); }
+
+SIMD_INLINE v64 v64_andn(v64 x, v64 y) { return vbic_s64(x, y); }
+
+SIMD_INLINE v64 v64_add_8(v64 x, v64 y) {
+ return vreinterpret_s64_u8(
+ vadd_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
+}
+
+SIMD_INLINE v64 v64_add_16(v64 x, v64 y) {
+ return vreinterpret_s64_s16(
+ vadd_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
+}
+
+SIMD_INLINE v64 v64_sadd_s16(v64 x, v64 y) {
+ return vreinterpret_s64_s16(
+ vqadd_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
+}
+
+SIMD_INLINE v64 v64_add_32(v64 x, v64 y) {
+ return vreinterpret_s64_u32(
+ vadd_u32(vreinterpret_u32_s64(x), vreinterpret_u32_s64(y)));
+}
+
+SIMD_INLINE v64 v64_sub_8(v64 x, v64 y) {
+ return vreinterpret_s64_u8(
+ vsub_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
+}
+
+SIMD_INLINE v64 v64_sub_16(v64 x, v64 y) {
+ return vreinterpret_s64_s16(
+ vsub_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
+}
+
+SIMD_INLINE v64 v64_ssub_s16(v64 x, v64 y) {
+ return vreinterpret_s64_s16(
+ vqsub_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
+}
+
+SIMD_INLINE v64 v64_ssub_u8(v64 x, v64 y) {
+ return vreinterpret_s64_u8(
+ vqsub_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
+}
+
+SIMD_INLINE v64 v64_ssub_s8(v64 x, v64 y) {
+ return vreinterpret_s64_s8(
+ vqsub_s8(vreinterpret_s8_s64(x), vreinterpret_s8_s64(y)));
+}
+
+SIMD_INLINE v64 v64_sub_32(v64 x, v64 y) {
+ return vreinterpret_s64_s32(
+ vsub_s32(vreinterpret_s32_s64(x), vreinterpret_s32_s64(y)));
+}
+
+SIMD_INLINE v64 v64_abs_s16(v64 x) {
+ return vreinterpret_s64_s16(vabs_s16(vreinterpret_s16_s64(x)));
+}
+
+SIMD_INLINE v64 v64_mullo_s16(v64 x, v64 y) {
+ return vreinterpret_s64_s16(
+ vmul_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
+}
+
+SIMD_INLINE v64 v64_mulhi_s16(v64 x, v64 y) {
+ return vreinterpret_s64_s16(vmovn_s32(vshrq_n_s32(
+ vmull_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)), 16)));
+}
+
+SIMD_INLINE v64 v64_mullo_s32(v64 x, v64 y) {
+ return vreinterpret_s64_s32(
+ vmul_s32(vreinterpret_s32_s64(x), vreinterpret_s32_s64(y)));
+}
+
+SIMD_INLINE v64 v64_madd_s16(v64 x, v64 y) {
+ int32x4_t t = vmull_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y));
+  return vreinterpret_s64_s32(vpadd_s32(vget_low_s32(t), vget_high_s32(t)));
+}
+
+SIMD_INLINE v64 v64_madd_us8(v64 x, v64 y) {
+ return vreinterpret_s64_s16(vqmovn_s32(vpaddlq_s16(
+ vaddq_s16(vmull_s8(vadd_s8(vreinterpret_s8_s64(x), vdup_n_s8(-128)),
+ vreinterpret_s8_s64(y)),
+ vshlq_n_s16(vmovl_s8(vreinterpret_s8_s64(y)), 7)))));
+}
+
+SIMD_INLINE v64 v64_avg_u8(v64 x, v64 y) {
+ return vreinterpret_s64_u8(
+ vrhadd_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
+}
+
+SIMD_INLINE v64 v64_rdavg_u8(v64 x, v64 y) {
+ return vreinterpret_s64_u8(
+ vhadd_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
+}
+
+SIMD_INLINE v64 v64_avg_u16(v64 x, v64 y) {
+ return vreinterpret_s64_u16(
+ vrhadd_u16(vreinterpret_u16_s64(x), vreinterpret_u16_s64(y)));
+}
+
+SIMD_INLINE v64 v64_max_u8(v64 x, v64 y) {
+ return vreinterpret_s64_u8(
+ vmax_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
+}
+
+SIMD_INLINE v64 v64_min_u8(v64 x, v64 y) {
+ return vreinterpret_s64_u8(
+ vmin_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
+}
+
+SIMD_INLINE v64 v64_max_s8(v64 x, v64 y) {
+ return vreinterpret_s64_s8(
+ vmax_s8(vreinterpret_s8_s64(x), vreinterpret_s8_s64(y)));
+}
+
+SIMD_INLINE v64 v64_min_s8(v64 x, v64 y) {
+ return vreinterpret_s64_s8(
+ vmin_s8(vreinterpret_s8_s64(x), vreinterpret_s8_s64(y)));
+}
+
+SIMD_INLINE v64 v64_max_s16(v64 x, v64 y) {
+ return vreinterpret_s64_s16(
+ vmax_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
+}
+
+SIMD_INLINE v64 v64_min_s16(v64 x, v64 y) {
+ return vreinterpret_s64_s16(
+ vmin_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
+}
+
+SIMD_INLINE v64 v64_ziplo_8(v64 x, v64 y) {
+ uint8x8x2_t r = vzip_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x));
+ return vreinterpret_s64_u8(r.val[0]);
+}
+
+SIMD_INLINE v64 v64_ziphi_8(v64 x, v64 y) {
+ uint8x8x2_t r = vzip_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x));
+ return vreinterpret_s64_u8(r.val[1]);
+}
+
+SIMD_INLINE v64 v64_ziplo_16(v64 x, v64 y) {
+ int16x4x2_t r = vzip_s16(vreinterpret_s16_s64(y), vreinterpret_s16_s64(x));
+ return vreinterpret_s64_s16(r.val[0]);
+}
+
+SIMD_INLINE v64 v64_ziphi_16(v64 x, v64 y) {
+ int16x4x2_t r = vzip_s16(vreinterpret_s16_s64(y), vreinterpret_s16_s64(x));
+ return vreinterpret_s64_s16(r.val[1]);
+}
+
+SIMD_INLINE v64 v64_ziplo_32(v64 x, v64 y) {
+ int32x2x2_t r = vzip_s32(vreinterpret_s32_s64(y), vreinterpret_s32_s64(x));
+ return vreinterpret_s64_s32(r.val[0]);
+}
+
+SIMD_INLINE v64 v64_ziphi_32(v64 x, v64 y) {
+ int32x2x2_t r = vzip_s32(vreinterpret_s32_s64(y), vreinterpret_s32_s64(x));
+ return vreinterpret_s64_s32(r.val[1]);
+}
+
+SIMD_INLINE v64 v64_unpacklo_u8_s16(v64 a) {
+ return vreinterpret_s64_u16(vget_low_u16(vmovl_u8(vreinterpret_u8_s64(a))));
+}
+
+SIMD_INLINE v64 v64_unpackhi_u8_s16(v64 a) {
+ return vreinterpret_s64_u16(vget_high_u16(vmovl_u8(vreinterpret_u8_s64(a))));
+}
+
+SIMD_INLINE v64 v64_pack_s32_s16(v64 x, v64 y) {
+ return vreinterpret_s64_s16(vqmovn_s32(
+ vcombine_s32(vreinterpret_s32_s64(y), vreinterpret_s32_s64(x))));
+}
+
+SIMD_INLINE v64 v64_pack_s16_u8(v64 x, v64 y) {
+  return vreinterpret_s64_u8(vqmovun_s16(
+      vcombine_s16(vreinterpret_s16_s64(y), vreinterpret_s16_s64(x))));
+}
+
+SIMD_INLINE v64 v64_pack_s16_s8(v64 x, v64 y) {
+  return vreinterpret_s64_s8(vqmovn_s16(
+      vcombine_s16(vreinterpret_s16_s64(y), vreinterpret_s16_s64(x))));
+}
+
+SIMD_INLINE v64 v64_unziplo_8(v64 x, v64 y) {
+ uint8x8x2_t r = vuzp_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x));
+ return vreinterpret_s64_u8(r.val[0]);
+}
+
+SIMD_INLINE v64 v64_unziphi_8(v64 x, v64 y) {
+ uint8x8x2_t r = vuzp_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x));
+ return vreinterpret_s64_u8(r.val[1]);
+}
+
+SIMD_INLINE v64 v64_unziplo_16(v64 x, v64 y) {
+ uint16x4x2_t r = vuzp_u16(vreinterpret_u16_s64(y), vreinterpret_u16_s64(x));
+ return vreinterpret_s64_u16(r.val[0]);
+}
+
+SIMD_INLINE v64 v64_unziphi_16(v64 x, v64 y) {
+ uint16x4x2_t r = vuzp_u16(vreinterpret_u16_s64(y), vreinterpret_u16_s64(x));
+ return vreinterpret_s64_u16(r.val[1]);
+}
+
+SIMD_INLINE v64 v64_unpacklo_s16_s32(v64 x) {
+ return vreinterpret_s64_s32(vget_low_s32(vmovl_s16(vreinterpret_s16_s64(x))));
+}
+
+SIMD_INLINE v64 v64_unpacklo_u16_s32(v64 x) {
+ return vreinterpret_s64_u32(vget_low_u32(vmovl_u16(vreinterpret_u16_s64(x))));
+}
+
+SIMD_INLINE v64 v64_unpackhi_s16_s32(v64 x) {
+ return vreinterpret_s64_s32(
+ vget_high_s32(vmovl_s16(vreinterpret_s16_s64(x))));
+}
+
+SIMD_INLINE v64 v64_unpackhi_u16_s32(v64 x) {
+ return vreinterpret_s64_u32(
+ vget_high_u32(vmovl_u16(vreinterpret_u16_s64(x))));
+}
+
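+/* Permute the bytes of x: result byte i is byte pattern[i] of x; with NEON
+   vtbl1, out-of-range indices (>= 8) produce zero. */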
+SIMD_INLINE v64 v64_shuffle_8(v64 x, v64 pattern) {
+ return vreinterpret_s64_u8(
+ vtbl1_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(pattern)));
+}
+
+SIMD_INLINE v64 v64_cmpgt_s8(v64 x, v64 y) {
+ return vreinterpret_s64_u8(
+ vcgt_s8(vreinterpret_s8_s64(x), vreinterpret_s8_s64(y)));
+}
+
+SIMD_INLINE v64 v64_cmplt_s8(v64 x, v64 y) {
+ return vreinterpret_s64_u8(
+ vclt_s8(vreinterpret_s8_s64(x), vreinterpret_s8_s64(y)));
+}
+
+SIMD_INLINE v64 v64_cmpeq_8(v64 x, v64 y) {
+ return vreinterpret_s64_u8(
+ vceq_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
+}
+
+SIMD_INLINE v64 v64_cmpgt_s16(v64 x, v64 y) {
+ return vreinterpret_s64_u16(
+ vcgt_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
+}
+
+SIMD_INLINE v64 v64_cmplt_s16(v64 x, v64 y) {
+ return vreinterpret_s64_u16(
+ vclt_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
+}
+
+SIMD_INLINE v64 v64_cmpeq_16(v64 x, v64 y) {
+ return vreinterpret_s64_u16(
+ vceq_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
+}
+
+SIMD_INLINE v64 v64_shl_8(v64 a, unsigned int c) {
+ return vreinterpret_s64_u8(vshl_u8(vreinterpret_u8_s64(a), vdup_n_s8(c)));
+}
+
+SIMD_INLINE v64 v64_shr_u8(v64 a, unsigned int c) {
+  return vreinterpret_s64_u8(
+      vshl_u8(vreinterpret_u8_s64(a), vdup_n_s8(-(int)c)));
+}
+
+SIMD_INLINE v64 v64_shr_s8(v64 a, unsigned int c) {
+  return vreinterpret_s64_s8(
+      vshl_s8(vreinterpret_s8_s64(a), vdup_n_s8(-(int)c)));
+}
+
+SIMD_INLINE v64 v64_shl_16(v64 a, unsigned int c) {
+ return vreinterpret_s64_u16(vshl_u16(vreinterpret_u16_s64(a), vdup_n_s16(c)));
+}
+
+SIMD_INLINE v64 v64_shr_u16(v64 a, unsigned int c) {
+ return vreinterpret_s64_u16(
+ vshl_u16(vreinterpret_u16_s64(a), vdup_n_s16(-(int)c)));
+}
+
+SIMD_INLINE v64 v64_shr_s16(v64 a, unsigned int c) {
+ return vreinterpret_s64_s16(
+ vshl_s16(vreinterpret_s16_s64(a), vdup_n_s16(-(int)c)));
+}
+
+SIMD_INLINE v64 v64_shl_32(v64 a, unsigned int c) {
+ return vreinterpret_s64_u32(vshl_u32(vreinterpret_u32_s64(a), vdup_n_s32(c)));
+}
+
+SIMD_INLINE v64 v64_shr_u32(v64 a, unsigned int c) {
+ return vreinterpret_s64_u32(
+ vshl_u32(vreinterpret_u32_s64(a), vdup_n_s32(-(int)c)));
+}
+
+SIMD_INLINE v64 v64_shr_s32(v64 a, unsigned int c) {
+ return vreinterpret_s64_s32(
+ vshl_s32(vreinterpret_s32_s64(a), vdup_n_s32(-(int)c)));
+}
+
+#if __OPTIMIZE__
+
+SIMD_INLINE v64 v64_shl_n_byte(v64 a, const unsigned int c) {
+ return vshl_n_s64(a, c * 8);
+}
+
+SIMD_INLINE v64 v64_shr_n_byte(v64 a, const unsigned int c) {
+  return c ? vreinterpret_s64_u64(vshr_n_u64(vreinterpret_u64_s64(a), c * 8))
+           : a;
+}
+
+SIMD_INLINE v64 v64_shl_n_8(v64 a, const unsigned int c) {
+ return vreinterpret_s64_u8(vshl_n_u8(vreinterpret_u8_s64(a), c));
+}
+
+SIMD_INLINE v64 v64_shr_n_u8(v64 a, const unsigned int c) {
+ return vreinterpret_s64_u8(vshr_n_u8(vreinterpret_u8_s64(a), c));
+}
+
+SIMD_INLINE v64 v64_shr_n_s8(v64 a, const unsigned int c) {
+ return vreinterpret_s64_s8(vshr_n_s8(vreinterpret_s8_s64(a), c));
+}
+
+SIMD_INLINE v64 v64_shl_n_16(v64 a, const unsigned int c) {
+ return vreinterpret_s64_u16(vshl_n_u16(vreinterpret_u16_s64(a), c));
+}
+
+SIMD_INLINE v64 v64_shr_n_u16(v64 a, const unsigned int c) {
+ return vreinterpret_s64_u16(vshr_n_u16(vreinterpret_u16_s64(a), c));
+}
+
+SIMD_INLINE v64 v64_shr_n_s16(v64 a, const unsigned int c) {
+ return vreinterpret_s64_s16(vshr_n_s16(vreinterpret_s16_s64(a), c));
+}
+
+SIMD_INLINE v64 v64_shl_n_32(v64 a, const unsigned int c) {
+ return vreinterpret_s64_u32(vshl_n_u32(vreinterpret_u32_s64(a), c));
+}
+
+SIMD_INLINE v64 v64_shr_n_u32(v64 a, const unsigned int c) {
+ return vreinterpret_s64_u32(vshr_n_u32(vreinterpret_u32_s64(a), c));
+}
+
+SIMD_INLINE v64 v64_shr_n_s32(v64 a, const unsigned int c) {
+ return vreinterpret_s64_s32(vshr_n_s32(vreinterpret_s32_s64(a), c));
+}
+
+#else
+
+SIMD_INLINE v64 v64_shl_n_byte(v64 a, const unsigned int c) {
+ return v64_from_64(v64_u64(a) << c * 8);
+}
+
+SIMD_INLINE v64 v64_shr_n_byte(v64 a, const unsigned int c) {
+ return v64_from_64(v64_u64(a) >> c * 8);
+}
+
+SIMD_INLINE v64 v64_shl_n_8(v64 a, const unsigned int c) {
+ return v64_shl_8(a, c);
+}
+
+SIMD_INLINE v64 v64_shr_n_u8(v64 a, const unsigned int c) {
+ return v64_shr_u8(a, c);
+}
+
+SIMD_INLINE v64 v64_shr_n_s8(v64 a, const unsigned int c) {
+ return v64_shr_s8(a, c);
+}
+
+SIMD_INLINE v64 v64_shl_n_16(v64 a, const unsigned int c) {
+ return v64_shl_16(a, c);
+}
+
+SIMD_INLINE v64 v64_shr_n_u16(v64 a, const unsigned int c) {
+ return v64_shr_u16(a, c);
+}
+
+SIMD_INLINE v64 v64_shr_n_s16(v64 a, const unsigned int c) {
+ return v64_shr_s16(a, c);
+}
+
+SIMD_INLINE v64 v64_shl_n_32(v64 a, const unsigned int c) {
+ return v64_shl_32(a, c);
+}
+
+SIMD_INLINE v64 v64_shr_n_u32(v64 a, const unsigned int c) {
+ return v64_shr_u32(a, c);
+}
+
+SIMD_INLINE v64 v64_shr_n_s32(v64 a, const unsigned int c) {
+ return v64_shr_s32(a, c);
+}
+
+#endif
+
+#endif /* _V64_INTRINSICS_H */