Added a generic SIMD library supporting x86 SSE2+ and ARM NEON.
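
The library exposes a single 128-bit vector type (v128) with load/store,
arithmetic, shift, pack/unpack, zip/unzip and shuffle operations that map
onto SSE2+ or NEON intrinsics, and onto a plain C model when neither is
available.  A minimal usage sketch (the wrapper function, block size and
pointer/stride names below are illustrative only; the v128_* calls are the
ones added by this patch):

    #include "aom_dsp/aom_simd.h"

    /* Sum of absolute differences over a 16x16 block of 8-bit pixels. */
    static uint32_t sad_16x16(const uint8_t *src, int src_stride,
                              const uint8_t *ref, int ref_stride) {
      int i;
      sad128_internal s = v128_sad_u8_init();
      for (i = 0; i < 16; i++)
        s = v128_sad_u8(s, v128_load_unaligned(src + i * src_stride),
                        v128_load_unaligned(ref + i * ref_stride));
      return v128_sad_u8_sum(s);
    }

Sixteen accumulation calls stay well within the 32-call limit documented
for v128_sad_u8().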

Change-Id: I037f4c44f621a7e909b82ccb6a299d41bcbf8607
diff --git a/aom_dsp/aom_simd.h b/aom_dsp/aom_simd.h
new file mode 100644
index 0000000..7ffca4a
--- /dev/null
+++ b/aom_dsp/aom_simd.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef _AOM_SIMD_H
+#define _AOM_SIMD_H
+
+#ifndef SIMD_INLINE
+#ifdef __GNUC__
+#define SIMD_INLINE static inline __attribute__((always_inline))
+#elif __STDC_VERSION__ >= 199901L
+#define SIMD_INLINE static inline
+#elif defined(_MSC_VER)
+#define SIMD_INLINE static __inline
+#else
+#define SIMD_INLINE static
+#endif
+#endif
+
+#include <stdint.h>
+
+#if defined(_WIN32)
+#include <intrin.h>
+#endif
+
+#include "./aom_config.h"
+
+#if HAVE_NEON
+#include "simd/v128_intrinsics_arm.h"
+#elif HAVE_SSE2
+#include "simd/v128_intrinsics_x86.h"
+#else
+#include "simd/v128_intrinsics.h"
+#endif
+
+#endif /* _AOM_SIMD_H */
diff --git a/aom_dsp/simd/v128_intrinsics.h b/aom_dsp/simd/v128_intrinsics.h
new file mode 100644
index 0000000..70fcfb8
--- /dev/null
+++ b/aom_dsp/simd/v128_intrinsics.h
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef _V128_INTRINSICS_H
+#define _V128_INTRINSICS_H
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "./v128_intrinsics_c.h"
+#include "./v64_intrinsics.h"
+
+/* Fallback to plain, unoptimised C. */
+
+typedef c_v128 v128;
+
+SIMD_INLINE uint32_t v128_low_u32(v128 a) { return c_v128_low_u32(a); }
+SIMD_INLINE v64 v128_low_v64(v128 a) { return c_v128_low_v64(a); }
+SIMD_INLINE v64 v128_high_v64(v128 a) { return c_v128_high_v64(a); }
+SIMD_INLINE v128 v128_from_64(uint64_t hi, uint64_t lo) {
+  return c_v128_from_64(hi, lo);
+}
+SIMD_INLINE v128 v128_from_v64(v64 hi, v64 lo) {
+  return c_v128_from_v64(hi, lo);
+}
+SIMD_INLINE v128 v128_from_32(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
+  return c_v128_from_32(a, b, c, d);
+}
+
+SIMD_INLINE v128 v128_load_unaligned(const void *p) {
+  return c_v128_load_unaligned(p);
+}
+SIMD_INLINE v128 v128_load_aligned(const void *p) {
+  return c_v128_load_aligned(p);
+}
+
+SIMD_INLINE void v128_store_unaligned(void *p, v128 a) {
+  c_v128_store_unaligned(p, a);
+}
+SIMD_INLINE void v128_store_aligned(void *p, v128 a) {
+  c_v128_store_aligned(p, a);
+}
+
+SIMD_INLINE v128 v128_align(v128 a, v128 b, const unsigned int c) {
+  return c_v128_align(a, b, c);
+}
+
+SIMD_INLINE v128 v128_zero() { return c_v128_zero(); }
+SIMD_INLINE v128 v128_dup_8(uint8_t x) { return c_v128_dup_8(x); }
+SIMD_INLINE v128 v128_dup_16(uint16_t x) { return c_v128_dup_16(x); }
+SIMD_INLINE v128 v128_dup_32(uint32_t x) { return c_v128_dup_32(x); }
+
+typedef uint32_t sad128_internal;
+SIMD_INLINE sad128_internal v128_sad_u8_init() { return c_v128_sad_u8_init(); }
+SIMD_INLINE sad128_internal v128_sad_u8(sad128_internal s, v128 a, v128 b) {
+  return c_v128_sad_u8(s, a, b);
+}
+SIMD_INLINE uint32_t v128_sad_u8_sum(sad128_internal s) {
+  return c_v128_sad_u8_sum(s);
+}
+typedef uint32_t ssd128_internal;
+SIMD_INLINE ssd128_internal v128_ssd_u8_init() { return c_v128_ssd_u8_init(); }
+SIMD_INLINE ssd128_internal v128_ssd_u8(ssd128_internal s, v128 a, v128 b) {
+  return c_v128_ssd_u8(s, a, b);
+}
+SIMD_INLINE uint32_t v128_ssd_u8_sum(ssd128_internal s) {
+  return c_v128_ssd_u8_sum(s);
+}
+SIMD_INLINE int64_t v128_dotp_s16(v128 a, v128 b) {
+  return c_v128_dotp_s16(a, b);
+}
+SIMD_INLINE uint64_t v128_hadd_u8(v128 a) { return c_v128_hadd_u8(a); }
+
+SIMD_INLINE v128 v128_or(v128 a, v128 b) { return c_v128_or(a, b); }
+SIMD_INLINE v128 v128_xor(v128 a, v128 b) { return c_v128_xor(a, b); }
+SIMD_INLINE v128 v128_and(v128 a, v128 b) { return c_v128_and(a, b); }
+SIMD_INLINE v128 v128_andn(v128 a, v128 b) { return c_v128_andn(a, b); }
+
+SIMD_INLINE v128 v128_add_8(v128 a, v128 b) { return c_v128_add_8(a, b); }
+SIMD_INLINE v128 v128_add_16(v128 a, v128 b) { return c_v128_add_16(a, b); }
+SIMD_INLINE v128 v128_sadd_s16(v128 a, v128 b) { return c_v128_sadd_s16(a, b); }
+SIMD_INLINE v128 v128_add_32(v128 a, v128 b) { return c_v128_add_32(a, b); }
+SIMD_INLINE v128 v128_padd_s16(v128 a) { return c_v128_padd_s16(a); }
+SIMD_INLINE v128 v128_sub_8(v128 a, v128 b) { return c_v128_sub_8(a, b); }
+SIMD_INLINE v128 v128_ssub_u8(v128 a, v128 b) { return c_v128_ssub_u8(a, b); }
+SIMD_INLINE v128 v128_ssub_s8(v128 a, v128 b) { return c_v128_ssub_s8(a, b); }
+SIMD_INLINE v128 v128_sub_16(v128 a, v128 b) { return c_v128_sub_16(a, b); }
+SIMD_INLINE v128 v128_ssub_s16(v128 a, v128 b) { return c_v128_ssub_s16(a, b); }
+SIMD_INLINE v128 v128_sub_32(v128 a, v128 b) { return c_v128_sub_32(a, b); }
+SIMD_INLINE v128 v128_abs_s16(v128 a) { return c_v128_abs_s16(a); }
+
+SIMD_INLINE v128 v128_mul_s16(v64 a, v64 b) { return c_v128_mul_s16(a, b); }
+SIMD_INLINE v128 v128_mullo_s16(v128 a, v128 b) {
+  return c_v128_mullo_s16(a, b);
+}
+SIMD_INLINE v128 v128_mulhi_s16(v128 a, v128 b) {
+  return c_v128_mulhi_s16(a, b);
+}
+SIMD_INLINE v128 v128_mullo_s32(v128 a, v128 b) {
+  return c_v128_mullo_s32(a, b);
+}
+SIMD_INLINE v128 v128_madd_s16(v128 a, v128 b) { return c_v128_madd_s16(a, b); }
+SIMD_INLINE v128 v128_madd_us8(v128 a, v128 b) { return c_v128_madd_us8(a, b); }
+
+SIMD_INLINE v128 v128_avg_u8(v128 a, v128 b) { return c_v128_avg_u8(a, b); }
+SIMD_INLINE v128 v128_rdavg_u8(v128 a, v128 b) { return c_v128_rdavg_u8(a, b); }
+SIMD_INLINE v128 v128_avg_u16(v128 a, v128 b) { return c_v128_avg_u16(a, b); }
+SIMD_INLINE v128 v128_min_u8(v128 a, v128 b) { return c_v128_min_u8(a, b); }
+SIMD_INLINE v128 v128_max_u8(v128 a, v128 b) { return c_v128_max_u8(a, b); }
+SIMD_INLINE v128 v128_min_s8(v128 a, v128 b) { return c_v128_min_s8(a, b); }
+SIMD_INLINE v128 v128_max_s8(v128 a, v128 b) { return c_v128_max_s8(a, b); }
+SIMD_INLINE v128 v128_min_s16(v128 a, v128 b) { return c_v128_min_s16(a, b); }
+SIMD_INLINE v128 v128_max_s16(v128 a, v128 b) { return c_v128_max_s16(a, b); }
+
+SIMD_INLINE v128 v128_ziplo_8(v128 a, v128 b) { return c_v128_ziplo_8(a, b); }
+SIMD_INLINE v128 v128_ziphi_8(v128 a, v128 b) { return c_v128_ziphi_8(a, b); }
+SIMD_INLINE v128 v128_ziplo_16(v128 a, v128 b) { return c_v128_ziplo_16(a, b); }
+SIMD_INLINE v128 v128_ziphi_16(v128 a, v128 b) { return c_v128_ziphi_16(a, b); }
+SIMD_INLINE v128 v128_ziplo_32(v128 a, v128 b) { return c_v128_ziplo_32(a, b); }
+SIMD_INLINE v128 v128_ziphi_32(v128 a, v128 b) { return c_v128_ziphi_32(a, b); }
+SIMD_INLINE v128 v128_ziplo_64(v128 a, v128 b) { return c_v128_ziplo_64(a, b); }
+SIMD_INLINE v128 v128_ziphi_64(v128 a, v128 b) { return c_v128_ziphi_64(a, b); }
+SIMD_INLINE v128 v128_zip_8(v64 a, v64 b) { return c_v128_zip_8(a, b); }
+SIMD_INLINE v128 v128_zip_16(v64 a, v64 b) { return c_v128_zip_16(a, b); }
+SIMD_INLINE v128 v128_zip_32(v64 a, v64 b) { return c_v128_zip_32(a, b); }
+SIMD_INLINE v128 v128_unziplo_8(v128 a, v128 b) {
+  return c_v128_unziplo_8(a, b);
+}
+SIMD_INLINE v128 v128_unziphi_8(v128 a, v128 b) {
+  return c_v128_unziphi_8(a, b);
+}
+SIMD_INLINE v128 v128_unziplo_16(v128 a, v128 b) {
+  return c_v128_unziplo_16(a, b);
+}
+SIMD_INLINE v128 v128_unziphi_16(v128 a, v128 b) {
+  return c_v128_unziphi_16(a, b);
+}
+SIMD_INLINE v128 v128_unziplo_32(v128 a, v128 b) {
+  return c_v128_unziplo_32(a, b);
+}
+SIMD_INLINE v128 v128_unziphi_32(v128 a, v128 b) {
+  return c_v128_unziphi_32(a, b);
+}
+SIMD_INLINE v128 v128_unpack_u8_s16(v64 a) { return c_v128_unpack_u8_s16(a); }
+SIMD_INLINE v128 v128_unpacklo_u8_s16(v128 a) {
+  return c_v128_unpacklo_u8_s16(a);
+}
+SIMD_INLINE v128 v128_unpackhi_u8_s16(v128 a) {
+  return c_v128_unpackhi_u8_s16(a);
+}
+SIMD_INLINE v128 v128_pack_s32_s16(v128 a, v128 b) {
+  return c_v128_pack_s32_s16(a, b);
+}
+SIMD_INLINE v128 v128_pack_s16_u8(v128 a, v128 b) {
+  return c_v128_pack_s16_u8(a, b);
+}
+SIMD_INLINE v128 v128_pack_s16_s8(v128 a, v128 b) {
+  return c_v128_pack_s16_s8(a, b);
+}
+SIMD_INLINE v128 v128_unpack_u16_s32(v64 a) { return c_v128_unpack_u16_s32(a); }
+SIMD_INLINE v128 v128_unpack_s16_s32(v64 a) { return c_v128_unpack_s16_s32(a); }
+SIMD_INLINE v128 v128_unpacklo_u16_s32(v128 a) {
+  return c_v128_unpacklo_u16_s32(a);
+}
+SIMD_INLINE v128 v128_unpacklo_s16_s32(v128 a) {
+  return c_v128_unpacklo_s16_s32(a);
+}
+SIMD_INLINE v128 v128_unpackhi_u16_s32(v128 a) {
+  return c_v128_unpackhi_u16_s32(a);
+}
+SIMD_INLINE v128 v128_unpackhi_s16_s32(v128 a) {
+  return c_v128_unpackhi_s16_s32(a);
+}
+SIMD_INLINE v128 v128_shuffle_8(v128 a, v128 pattern) {
+  return c_v128_shuffle_8(a, pattern);
+}
+
+SIMD_INLINE v128 v128_cmpgt_s8(v128 a, v128 b) { return c_v128_cmpgt_s8(a, b); }
+SIMD_INLINE v128 v128_cmplt_s8(v128 a, v128 b) { return c_v128_cmplt_s8(a, b); }
+SIMD_INLINE v128 v128_cmpeq_8(v128 a, v128 b) { return c_v128_cmpeq_8(a, b); }
+SIMD_INLINE v128 v128_cmpgt_s16(v128 a, v128 b) {
+  return c_v128_cmpgt_s16(a, b);
+}
+SIMD_INLINE v128 v128_cmplt_s16(v128 a, v128 b) {
+  return c_v128_cmplt_s16(a, b);
+}
+SIMD_INLINE v128 v128_cmpeq_16(v128 a, v128 b) { return c_v128_cmpeq_16(a, b); }
+
+SIMD_INLINE v128 v128_shl_8(v128 a, unsigned int c) {
+  return c_v128_shl_8(a, c);
+}
+SIMD_INLINE v128 v128_shr_u8(v128 a, unsigned int c) {
+  return c_v128_shr_u8(a, c);
+}
+SIMD_INLINE v128 v128_shr_s8(v128 a, unsigned int c) {
+  return c_v128_shr_s8(a, c);
+}
+SIMD_INLINE v128 v128_shl_16(v128 a, unsigned int c) {
+  return c_v128_shl_16(a, c);
+}
+SIMD_INLINE v128 v128_shr_u16(v128 a, unsigned int c) {
+  return c_v128_shr_u16(a, c);
+}
+SIMD_INLINE v128 v128_shr_s16(v128 a, unsigned int c) {
+  return c_v128_shr_s16(a, c);
+}
+SIMD_INLINE v128 v128_shl_32(v128 a, unsigned int c) {
+  return c_v128_shl_32(a, c);
+}
+SIMD_INLINE v128 v128_shr_u32(v128 a, unsigned int c) {
+  return c_v128_shr_u32(a, c);
+}
+SIMD_INLINE v128 v128_shr_s32(v128 a, unsigned int c) {
+  return c_v128_shr_s32(a, c);
+}
+
+SIMD_INLINE v128 v128_shr_n_byte(v128 a, const unsigned int n) {
+  return c_v128_shr_n_byte(a, n);
+}
+SIMD_INLINE v128 v128_shl_n_byte(v128 a, const unsigned int n) {
+  return c_v128_shl_n_byte(a, n);
+}
+SIMD_INLINE v128 v128_shl_n_8(v128 a, const unsigned int n) {
+  return c_v128_shl_n_8(a, n);
+}
+SIMD_INLINE v128 v128_shl_n_16(v128 a, const unsigned int n) {
+  return c_v128_shl_n_16(a, n);
+}
+SIMD_INLINE v128 v128_shl_n_32(v128 a, const unsigned int n) {
+  return c_v128_shl_n_32(a, n);
+}
+SIMD_INLINE v128 v128_shr_n_u8(v128 a, const unsigned int n) {
+  return c_v128_shr_n_u8(a, n);
+}
+SIMD_INLINE v128 v128_shr_n_u16(v128 a, const unsigned int n) {
+  return c_v128_shr_n_u16(a, n);
+}
+SIMD_INLINE v128 v128_shr_n_u32(v128 a, const unsigned int n) {
+  return c_v128_shr_n_u32(a, n);
+}
+SIMD_INLINE v128 v128_shr_n_s8(v128 a, const unsigned int n) {
+  return c_v128_shr_n_s8(a, n);
+}
+SIMD_INLINE v128 v128_shr_n_s16(v128 a, const unsigned int n) {
+  return c_v128_shr_n_s16(a, n);
+}
+SIMD_INLINE v128 v128_shr_n_s32(v128 a, const unsigned int n) {
+  return c_v128_shr_n_s32(a, n);
+}
+
+#endif /* _V128_INTRINSICS_H */
diff --git a/aom_dsp/simd/v128_intrinsics_arm.h b/aom_dsp/simd/v128_intrinsics_arm.h
new file mode 100644
index 0000000..13d1314
--- /dev/null
+++ b/aom_dsp/simd/v128_intrinsics_arm.h
@@ -0,0 +1,650 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef _V128_INTRINSICS_H
+#define _V128_INTRINSICS_H
+
+#include <arm_neon.h>
+#include "./v64_intrinsics_arm.h"
+
+typedef int64x2_t v128;
+
+SIMD_INLINE uint32_t v128_low_u32(v128 a) {
+  return v64_low_u32(vget_low_s64(a));
+}
+
+SIMD_INLINE v64 v128_low_v64(v128 a) { return vget_low_s64(a); }
+
+SIMD_INLINE v64 v128_high_v64(v128 a) { return vget_high_s64(a); }
+
+SIMD_INLINE v128 v128_from_v64(v64 a, v64 b) { return vcombine_s64(b, a); }
+
+SIMD_INLINE v128 v128_from_64(uint64_t a, uint64_t b) {
+  return vcombine_s64(b, a);
+}
+
+SIMD_INLINE v128 v128_from_32(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
+  return vcombine_s64(v64_from_32(c, d), v64_from_32(a, b));
+}
+
+SIMD_INLINE v128 v128_load_aligned(const void *p) {
+  return vreinterpretq_s64_u8(vld1q_u8((const uint8_t *)p));
+}
+
+SIMD_INLINE v128 v128_load_unaligned(const void *p) {
+  return v128_load_aligned(p);
+}
+
+SIMD_INLINE void v128_store_aligned(void *p, v128 r) {
+  vst1q_u8((uint8_t *)p, vreinterpretq_u8_s64(r));
+}
+
+SIMD_INLINE void v128_store_unaligned(void *p, v128 r) {
+  vst1q_u8((uint8_t *)p, vreinterpretq_u8_s64(r));
+}
+
+SIMD_INLINE v128 v128_align(v128 a, v128 b, const unsigned int c) {
+#if __OPTIMIZE__
+  return c ? vreinterpretq_s64_s8(
+                 vextq_s8(vreinterpretq_s8_s64(b), vreinterpretq_s8_s64(a), c))
+           : b;
+#else
+  return c < 8 ? v128_from_v64(v64_align(v128_low_v64(a), v128_high_v64(b), c),
+                               v64_align(v128_high_v64(b), v128_low_v64(b), c))
+               : v128_from_v64(
+                     v64_align(v128_high_v64(a), v128_low_v64(a), c - 8),
+                     v64_align(v128_low_v64(a), v128_high_v64(b), c - 8));
+#endif
+}
+
+SIMD_INLINE v128 v128_zero() { return vreinterpretq_s64_u8(vdupq_n_u8(0)); }
+
+SIMD_INLINE v128 v128_ones() { return vreinterpretq_s64_u8(vdupq_n_u8(-1)); }
+
+SIMD_INLINE v128 v128_dup_8(uint8_t x) {
+  return vreinterpretq_s64_u8(vdupq_n_u8(x));
+}
+
+SIMD_INLINE v128 v128_dup_16(uint16_t x) {
+  return vreinterpretq_s64_u16(vdupq_n_u16(x));
+}
+
+SIMD_INLINE v128 v128_dup_32(uint32_t x) {
+  return vreinterpretq_s64_u32(vdupq_n_u32(x));
+}
+
+SIMD_INLINE int64_t v128_dotp_s16(v128 a, v128 b) {
+  return v64_dotp_s16(vget_high_s64(a), vget_high_s64(b)) +
+         v64_dotp_s16(vget_low_s64(a), vget_low_s64(b));
+}
+
+SIMD_INLINE uint64_t v128_hadd_u8(v128 x) {
+  uint64x2_t t = vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(vreinterpretq_u8_s64(x))));
+  return vget_lane_s32(
+      vreinterpret_s32_u64(vadd_u64(vget_high_u64(t), vget_low_u64(t))), 0);
+}
+
+SIMD_INLINE v128 v128_padd_s16(v128 a) {
+  return vreinterpretq_s64_s32(vpaddlq_s16(vreinterpretq_s16_s64(a)));
+}
+
+typedef struct { sad64_internal hi, lo; } sad128_internal;
+
+SIMD_INLINE sad128_internal v128_sad_u8_init() {
+  sad128_internal s;
+  s.hi = s.lo = vdupq_n_u16(0);
+  return s;
+}
+
+/* Implementation dependent return value.  Result must be finalised with
+   v128_sad_u8_sum().
+   The result for more than 32 v128_sad_u8() calls is undefined. */
+SIMD_INLINE sad128_internal v128_sad_u8(sad128_internal s, v128 a, v128 b) {
+  sad128_internal r;
+  r.hi = v64_sad_u8(s.hi, vget_high_s64(a), vget_high_s64(b));
+  r.lo = v64_sad_u8(s.lo, vget_low_s64(a), vget_low_s64(b));
+  return r;
+}
+
+SIMD_INLINE uint32_t v128_sad_u8_sum(sad128_internal s) {
+  return (uint32_t)(v64_sad_u8_sum(s.hi) + v64_sad_u8_sum(s.lo));
+}
+
+typedef struct { ssd64_internal hi, lo; } ssd128_internal;
+
+SIMD_INLINE ssd128_internal v128_ssd_u8_init() {
+  ssd128_internal s;
+  s.hi = s.lo = 0;
+  return s;
+}
+
+/* Implementation dependent return value.  Result must be finalised with
+ * v128_ssd_u8_sum(). */
+SIMD_INLINE ssd128_internal v128_ssd_u8(ssd128_internal s, v128 a, v128 b) {
+  ssd128_internal r;
+  r.hi = v64_ssd_u8(s.hi, vget_high_s64(a), vget_high_s64(b));
+  r.lo = v64_ssd_u8(s.lo, vget_low_s64(a), vget_low_s64(b));
+  return r;
+}
+
+SIMD_INLINE uint32_t v128_ssd_u8_sum(ssd128_internal s) {
+  return (uint32_t)(v64_ssd_u8_sum(s.hi) + v64_ssd_u8_sum(s.lo));
+}
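+
+/* Usage sketch for the two accumulator APIs above (illustrative only;
+   the pointer, stride and row-count names are hypothetical):
+
+     int i;
+     uint32_t ssd;
+     ssd128_internal s = v128_ssd_u8_init();
+     for (i = 0; i < rows; i++)
+       s = v128_ssd_u8(s, v128_load_unaligned(p + i * stride),
+                       v128_load_unaligned(q + i * stride));
+     ssd = v128_ssd_u8_sum(s);
+
+   The SAD accumulator holds 16-bit per-lane partial sums (vdupq_n_u16
+   above); the 32-call limit documented above keeps those sums well clear
+   of overflow. */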
+
+SIMD_INLINE v128 v128_or(v128 x, v128 y) { return vorrq_s64(x, y); }
+
+SIMD_INLINE v128 v128_xor(v128 x, v128 y) { return veorq_s64(x, y); }
+
+SIMD_INLINE v128 v128_and(v128 x, v128 y) { return vandq_s64(x, y); }
+
+SIMD_INLINE v128 v128_andn(v128 x, v128 y) { return vbicq_s64(x, y); }
+
+SIMD_INLINE v128 v128_add_8(v128 x, v128 y) {
+  return vreinterpretq_s64_u8(
+      vaddq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
+}
+
+SIMD_INLINE v128 v128_add_16(v128 x, v128 y) {
+  return vreinterpretq_s64_s16(
+      vaddq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
+}
+
+SIMD_INLINE v128 v128_sadd_s16(v128 x, v128 y) {
+  return vreinterpretq_s64_s16(
+      vqaddq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
+}
+
+SIMD_INLINE v128 v128_add_32(v128 x, v128 y) {
+  return vreinterpretq_s64_u32(
+      vaddq_u32(vreinterpretq_u32_s64(x), vreinterpretq_u32_s64(y)));
+}
+
+SIMD_INLINE v128 v128_sub_8(v128 x, v128 y) {
+  return vreinterpretq_s64_u8(
+      vsubq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
+}
+
+SIMD_INLINE v128 v128_sub_u8(v128 x, v128 y) {
+  return vreinterpretq_s64_u8(
+      vqsubq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
+}
+
+SIMD_INLINE v128 v128_sub_16(v128 x, v128 y) {
+  return vreinterpretq_s64_s16(
+      vsubq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
+}
+
+SIMD_INLINE v128 v128_ssub_s16(v128 x, v128 y) {
+  return vreinterpretq_s64_s16(
+      vqsubq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
+}
+
+SIMD_INLINE v128 v128_ssub_u8(v128 x, v128 y) {
+  return vreinterpretq_s64_u8(
+      vqsubq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
+}
+
+SIMD_INLINE v128 v128_ssub_s8(v128 x, v128 y) {
+  return vreinterpretq_s64_s8(
+      vqsubq_s8(vreinterpretq_s8_s64(x), vreinterpretq_s8_s64(y)));
+}
+
+SIMD_INLINE v128 v128_sub_32(v128 x, v128 y) {
+  return vreinterpretq_s64_s32(
+      vsubq_s32(vreinterpretq_s32_s64(x), vreinterpretq_s32_s64(y)));
+}
+
+SIMD_INLINE v128 v128_abs_s16(v128 x) {
+  return vreinterpretq_s64_s16(vabsq_s16(vreinterpretq_s16_s64(x)));
+}
+
+SIMD_INLINE v128 v128_mul_s16(v64 a, v64 b) {
+  return vreinterpretq_s64_s32(
+      vmull_s16(vreinterpret_s16_s64(a), vreinterpret_s16_s64(b)));
+}
+
+SIMD_INLINE v128 v128_mullo_s16(v128 a, v128 b) {
+  return vreinterpretq_s64_s16(
+      vmulq_s16(vreinterpretq_s16_s64(a), vreinterpretq_s16_s64(b)));
+}
+
+SIMD_INLINE v128 v128_mulhi_s16(v128 a, v128 b) {
+  return v128_from_v64(v64_mulhi_s16(vget_high_s64(a), vget_high_s64(b)),
+                       v64_mulhi_s16(vget_low_s64(a), vget_low_s64(b)));
+}
+
+SIMD_INLINE v128 v128_mullo_s32(v128 a, v128 b) {
+  return vreinterpretq_s64_s32(
+      vmulq_s32(vreinterpretq_s32_s64(a), vreinterpretq_s32_s64(b)));
+}
+
+SIMD_INLINE v128 v128_madd_s16(v128 a, v128 b) {
+  return v128_from_v64(v64_madd_s16(vget_high_s64(a), vget_high_s64(b)),
+                       v64_madd_s16(vget_low_s64(a), vget_low_s64(b)));
+}
+
+SIMD_INLINE v128 v128_madd_us8(v128 a, v128 b) {
+  return v128_from_v64(v64_madd_us8(vget_high_s64(a), vget_high_s64(b)),
+                       v64_madd_us8(vget_low_s64(a), vget_low_s64(b)));
+}
+
+SIMD_INLINE v128 v128_avg_u8(v128 x, v128 y) {
+  return vreinterpretq_s64_u8(
+      vrhaddq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
+}
+
+SIMD_INLINE v128 v128_rdavg_u8(v128 x, v128 y) {
+  return vreinterpretq_s64_u8(
+      vhaddq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
+}
+
+SIMD_INLINE v128 v128_avg_u16(v128 x, v128 y) {
+  return vreinterpretq_s64_u16(
+      vrhaddq_u16(vreinterpretq_u16_s64(x), vreinterpretq_u16_s64(y)));
+}
+
+SIMD_INLINE v128 v128_min_u8(v128 x, v128 y) {
+  return vreinterpretq_s64_u8(
+      vminq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
+}
+
+SIMD_INLINE v128 v128_max_u8(v128 x, v128 y) {
+  return vreinterpretq_s64_u8(
+      vmaxq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
+}
+
+SIMD_INLINE v128 v128_min_s8(v128 x, v128 y) {
+  return vreinterpretq_s64_s8(
+      vminq_s8(vreinterpretq_s8_s64(x), vreinterpretq_s8_s64(y)));
+}
+
+SIMD_INLINE v128 v128_max_s8(v128 x, v128 y) {
+  return vreinterpretq_s64_s8(
+      vmaxq_s8(vreinterpretq_s8_s64(x), vreinterpretq_s8_s64(y)));
+}
+
+SIMD_INLINE v128 v128_min_s16(v128 x, v128 y) {
+  return vreinterpretq_s64_s16(
+      vminq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
+}
+
+SIMD_INLINE v128 v128_max_s16(v128 x, v128 y) {
+  return vreinterpretq_s64_s16(
+      vmaxq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
+}
+
+SIMD_INLINE v128 v128_ziplo_8(v128 x, v128 y) {
+  uint8x16x2_t r = vzipq_u8(vreinterpretq_u8_s64(y), vreinterpretq_u8_s64(x));
+  return vreinterpretq_s64_u8(r.val[0]);
+}
+
+SIMD_INLINE v128 v128_ziphi_8(v128 x, v128 y) {
+  uint8x16x2_t r = vzipq_u8(vreinterpretq_u8_s64(y), vreinterpretq_u8_s64(x));
+  return vreinterpretq_s64_u8(r.val[1]);
+}
+
+SIMD_INLINE v128 v128_zip_8(v64 x, v64 y) {
+  uint8x8x2_t r = vzip_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x));
+  return vreinterpretq_s64_u8(vcombine_u8(r.val[0], r.val[1]));
+}
+
+SIMD_INLINE v128 v128_ziplo_16(v128 x, v128 y) {
+  int16x8x2_t r = vzipq_s16(vreinterpretq_s16_s64(y), vreinterpretq_s16_s64(x));
+  return vreinterpretq_s64_s16(r.val[0]);
+}
+
+SIMD_INLINE v128 v128_ziphi_16(v128 x, v128 y) {
+  int16x8x2_t r = vzipq_s16(vreinterpretq_s16_s64(y), vreinterpretq_s16_s64(x));
+  return vreinterpretq_s64_s16(r.val[1]);
+}
+
+SIMD_INLINE v128 v128_zip_16(v64 x, v64 y) {
+  uint16x4x2_t r = vzip_u16(vreinterpret_u16_s64(y), vreinterpret_u16_s64(x));
+  return vreinterpretq_s64_u16(vcombine_u16(r.val[0], r.val[1]));
+}
+
+SIMD_INLINE v128 v128_ziplo_32(v128 x, v128 y) {
+  int32x4x2_t r = vzipq_s32(vreinterpretq_s32_s64(y), vreinterpretq_s32_s64(x));
+  return vreinterpretq_s64_s32(r.val[0]);
+}
+
+SIMD_INLINE v128 v128_ziphi_32(v128 x, v128 y) {
+  int32x4x2_t r = vzipq_s32(vreinterpretq_s32_s64(y), vreinterpretq_s32_s64(x));
+  return vreinterpretq_s64_s32(r.val[1]);
+}
+
+SIMD_INLINE v128 v128_zip_32(v64 x, v64 y) {
+  uint32x2x2_t r = vzip_u32(vreinterpret_u32_s64(y), vreinterpret_u32_s64(x));
+  return vreinterpretq_s64_u32(vcombine_u32(r.val[0], r.val[1]));
+}
+
+SIMD_INLINE v128 v128_ziplo_64(v128 a, v128 b) {
+  return v128_from_v64(vget_low_u64((uint64x2_t)a),
+                       vget_low_u64((uint64x2_t)b));
+}
+
+SIMD_INLINE v128 v128_ziphi_64(v128 a, v128 b) {
+  return v128_from_v64(vget_high_u64((uint64x2_t)a),
+                       vget_high_u64((uint64x2_t)b));
+}
+
+SIMD_INLINE v128 v128_unziplo_8(v128 x, v128 y) {
+  uint8x16x2_t r = vuzpq_u8(vreinterpretq_u8_s64(y), vreinterpretq_u8_s64(x));
+  return vreinterpretq_s64_u8(r.val[0]);
+}
+
+SIMD_INLINE v128 v128_unziphi_8(v128 x, v128 y) {
+  uint8x16x2_t r = vuzpq_u8(vreinterpretq_u8_s64(y), vreinterpretq_u8_s64(x));
+  return vreinterpretq_s64_u8(r.val[1]);
+}
+
+SIMD_INLINE v128 v128_unziplo_16(v128 x, v128 y) {
+  uint16x8x2_t r =
+      vuzpq_u16(vreinterpretq_u16_s64(y), vreinterpretq_u16_s64(x));
+  return vreinterpretq_s64_u16(r.val[0]);
+}
+
+SIMD_INLINE v128 v128_unziphi_16(v128 x, v128 y) {
+  uint16x8x2_t r =
+      vuzpq_u16(vreinterpretq_u16_s64(y), vreinterpretq_u16_s64(x));
+  return vreinterpretq_s64_u16(r.val[1]);
+}
+
+SIMD_INLINE v128 v128_unziplo_32(v128 x, v128 y) {
+  uint32x4x2_t r =
+      vuzpq_u32(vreinterpretq_u32_s64(y), vreinterpretq_u32_s64(x));
+  return vreinterpretq_s64_u32(r.val[0]);
+}
+
+SIMD_INLINE v128 v128_unziphi_32(v128 x, v128 y) {
+  uint32x4x2_t r =
+      vuzpq_u32(vreinterpretq_u32_s64(y), vreinterpretq_u32_s64(x));
+  return vreinterpretq_s64_u32(r.val[1]);
+}
+
+SIMD_INLINE v128 v128_unpack_u8_s16(v64 a) {
+  return vreinterpretq_s64_u16(vmovl_u8(vreinterpret_u8_s64(a)));
+}
+
+SIMD_INLINE v128 v128_unpacklo_u8_s16(v128 a) {
+  return vreinterpretq_s64_u16(vmovl_u8(vreinterpret_u8_s64(vget_low_s64(a))));
+}
+
+SIMD_INLINE v128 v128_unpackhi_u8_s16(v128 a) {
+  return vreinterpretq_s64_u16(vmovl_u8(vreinterpret_u8_s64(vget_high_s64(a))));
+}
+
+SIMD_INLINE v128 v128_pack_s32_s16(v128 a, v128 b) {
+  return v128_from_v64(
+      vreinterpret_s64_s16(vqmovn_s32(vreinterpretq_s32_s64(a))),
+      vreinterpret_s64_s16(vqmovn_s32(vreinterpretq_s32_s64(b))));
+}
+
+SIMD_INLINE v128 v128_pack_s16_u8(v128 a, v128 b) {
+  return v128_from_v64(
+      vreinterpret_s64_u8(vqmovun_s16(vreinterpretq_s16_s64(a))),
+      vreinterpret_s64_u8(vqmovun_s16(vreinterpretq_s16_s64(b))));
+}
+
+SIMD_INLINE v128 v128_pack_s16_s8(v128 a, v128 b) {
+  return v128_from_v64(
+      vreinterpret_s64_s8(vqmovn_s16(vreinterpretq_s16_s64(a))),
+      vreinterpret_s64_s8(vqmovn_s16(vreinterpretq_s16_s64(b))));
+}
+
+SIMD_INLINE v128 v128_unpack_u16_s32(v64 a) {
+  return vreinterpretq_s64_u32(vmovl_u16(vreinterpret_u16_s64(a)));
+}
+
+SIMD_INLINE v128 v128_unpack_s16_s32(v64 a) {
+  return vreinterpretq_s64_s32(vmovl_s16(vreinterpret_s16_s64(a)));
+}
+
+SIMD_INLINE v128 v128_unpacklo_u16_s32(v128 a) {
+  return vreinterpretq_s64_u32(
+      vmovl_u16(vreinterpret_u16_s64(vget_low_s64(a))));
+}
+
+SIMD_INLINE v128 v128_unpacklo_s16_s32(v128 a) {
+  return vreinterpretq_s64_s32(
+      vmovl_s16(vreinterpret_s16_s64(vget_low_s64(a))));
+}
+
+SIMD_INLINE v128 v128_unpackhi_u16_s32(v128 a) {
+  return vreinterpretq_s64_u32(
+      vmovl_u16(vreinterpret_u16_s64(vget_high_s64(a))));
+}
+
+SIMD_INLINE v128 v128_unpackhi_s16_s32(v128 a) {
+  return vreinterpretq_s64_s32(
+      vmovl_s16(vreinterpret_s16_s64(vget_high_s64(a))));
+}
+
+SIMD_INLINE v128 v128_shuffle_8(v128 x, v128 pattern) {
+  return v128_from_64(
+      vreinterpret_s64_u8(
+          vtbl2_u8((uint8x8x2_t){ { vget_low_u8(vreinterpretq_u8_s64(x)),
+                                    vget_high_u8(vreinterpretq_u8_s64(x)) } },
+                   vreinterpret_u8_s64(vget_high_s64(pattern)))),
+      vreinterpret_s64_u8(
+          vtbl2_u8((uint8x8x2_t){ { vget_low_u8(vreinterpretq_u8_s64(x)),
+                                    vget_high_u8(vreinterpretq_u8_s64(x)) } },
+                   vreinterpret_u8_s64(vget_low_s64(pattern)))));
+}
+
+SIMD_INLINE v128 v128_cmpgt_s8(v128 x, v128 y) {
+  return vreinterpretq_s64_u8(
+      vcgtq_s8(vreinterpretq_s8_s64(x), vreinterpretq_s8_s64(y)));
+}
+
+SIMD_INLINE v128 v128_cmplt_s8(v128 x, v128 y) {
+  return vreinterpretq_s64_u8(
+      vcltq_s8(vreinterpretq_s8_s64(x), vreinterpretq_s8_s64(y)));
+}
+
+SIMD_INLINE v128 v128_cmpeq_8(v128 x, v128 y) {
+  return vreinterpretq_s64_u8(
+      vceqq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
+}
+
+SIMD_INLINE v128 v128_cmpgt_s16(v128 x, v128 y) {
+  return vreinterpretq_s64_u16(
+      vcgtq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
+}
+
+SIMD_INLINE v128 v128_cmplt_s16(v128 x, v128 y) {
+  return vreinterpretq_s64_u16(
+      vcltq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
+}
+
+SIMD_INLINE v128 v128_cmpeq_16(v128 x, v128 y) {
+  return vreinterpretq_s64_u16(
+      vceqq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
+}
+
+SIMD_INLINE v128 v128_shl_8(v128 a, unsigned int c) {
+  return (c > 7) ? v128_zero() : vreinterpretq_s64_u8(vshlq_u8(
+                                     vreinterpretq_u8_s64(a), vdupq_n_s8(c)));
+}
+
+SIMD_INLINE v128 v128_shr_u8(v128 a, unsigned int c) {
+  return (c > 7) ? v128_zero() : vreinterpretq_s64_u8(vshlq_u8(
+                                     vreinterpretq_u8_s64(a), vdupq_n_s8(-c)));
+}
+
+SIMD_INLINE v128 v128_shr_s8(v128 a, unsigned int c) {
+  return (c > 7) ? v128_ones() : vreinterpretq_s64_s8(vshlq_s8(
+                                     vreinterpretq_s8_s64(a), vdupq_n_s8(-c)));
+}
+
+SIMD_INLINE v128 v128_shl_16(v128 a, unsigned int c) {
+  return (c > 15) ? v128_zero()
+                  : vreinterpretq_s64_u16(
+                        vshlq_u16(vreinterpretq_u16_s64(a), vdupq_n_s16(c)));
+}
+
+SIMD_INLINE v128 v128_shr_u16(v128 a, unsigned int c) {
+  return (c > 15) ? v128_zero()
+                  : vreinterpretq_s64_u16(
+                        vshlq_u16(vreinterpretq_u16_s64(a), vdupq_n_s16(-c)));
+}
+
+SIMD_INLINE v128 v128_shr_s16(v128 a, unsigned int c) {
+  return (c > 15) ? v128_ones()
+                  : vreinterpretq_s64_s16(
+                        vshlq_s16(vreinterpretq_s16_s64(a), vdupq_n_s16(-c)));
+}
+
+SIMD_INLINE v128 v128_shl_32(v128 a, unsigned int c) {
+  return (c > 31) ? v128_zero()
+                  : vreinterpretq_s64_u32(
+                        vshlq_u32(vreinterpretq_u32_s64(a), vdupq_n_s32(c)));
+}
+
+SIMD_INLINE v128 v128_shr_u32(v128 a, unsigned int c) {
+  return (c > 31) ? v128_zero()
+                  : vreinterpretq_s64_u32(
+                        vshlq_u32(vreinterpretq_u32_s64(a), vdupq_n_s32(-c)));
+}
+
+SIMD_INLINE v128 v128_shr_s32(v128 a, unsigned int c) {
+  return (c > 31) ? v128_ones()
+                  : vreinterpretq_s64_s32(
+                        vshlq_s32(vreinterpretq_s32_s64(a), vdupq_n_s32(-c)));
+}
+
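+/* The constant-shift forms below use the vsh[lr]_n_*/vsh[lr]q_n_*
+   intrinsics, which require an immediate shift amount, so they are only
+   used when optimising (when the always_inline argument folds to a
+   constant); otherwise the variable-shift versions above are used. */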
+#if __OPTIMIZE__
+
+SIMD_INLINE v128 v128_shl_n_byte(v128 a, const unsigned int n) {
+  return n < 8
+             ? v128_from_64(
+                   vorr_u64(vshl_n_u64(vreinterpret_u64_s64(vget_high_s64(a)),
+                                       n * 8),
+                            vshr_n_u64(vreinterpret_u64_s64(vget_low_s64(a)),
+                                       (8 - n) * 8)),
+                   vshl_n_u64(vreinterpret_u64_s64(vget_low_s64(a)), n * 8))
+             : (n == 8 ? v128_from_64(vreinterpret_u64_s64(vget_low_s64(a)), 0)
+                       : v128_from_64(
+                             vshl_n_u64(vreinterpret_u64_s64(vget_low_s64(a)),
+                                        (n - 8) * 8),
+                             0));
+}
+
+SIMD_INLINE v128 v128_shr_n_byte(v128 a, const unsigned int n) {
+  return n < 8
+             ? v128_from_64(
+                   vshr_n_u64(vreinterpret_u64_s64(vget_high_s64(a)), n * 8),
+                   vorr_u64(
+                       vshr_n_u64(vreinterpret_u64_s64(vget_low_s64(a)), n * 8),
+                       vshl_n_u64(vreinterpret_u64_s64(vget_high_s64(a)),
+                                  (8 - n) * 8)))
+             : (n == 8
+                    ? v128_from_64(0, vreinterpret_u64_s64(vget_high_s64(a)))
+                    : v128_from_64(
+                          0, vshr_n_u64(vreinterpret_u64_s64(vget_high_s64(a)),
+                                        (n - 8) * 8)));
+}
+
+SIMD_INLINE v128 v128_shl_n_8(v128 a, const unsigned int c) {
+  return vreinterpretq_s64_u8(vshlq_n_u8(vreinterpretq_u8_s64(a), c));
+}
+
+SIMD_INLINE v128 v128_shr_n_u8(v128 a, const unsigned int c) {
+  return vreinterpretq_s64_u8(vshrq_n_u8(vreinterpretq_u8_s64(a), c));
+}
+
+SIMD_INLINE v128 v128_shr_n_s8(v128 a, const unsigned int c) {
+  return vreinterpretq_s64_s8(vshrq_n_s8(vreinterpretq_s8_s64(a), c));
+}
+
+SIMD_INLINE v128 v128_shl_n_16(v128 a, const unsigned int c) {
+  return vreinterpretq_s64_u16(vshlq_n_u16(vreinterpretq_u16_s64(a), c));
+}
+
+SIMD_INLINE v128 v128_shr_n_u16(v128 a, const unsigned int c) {
+  return vreinterpretq_s64_u16(vshrq_n_u16(vreinterpretq_u16_s64(a), c));
+}
+
+SIMD_INLINE v128 v128_shr_n_s16(v128 a, const unsigned int c) {
+  return vreinterpretq_s64_s16(vshrq_n_s16(vreinterpretq_s16_s64(a), c));
+}
+
+SIMD_INLINE v128 v128_shl_n_32(v128 a, const unsigned int c) {
+  return vreinterpretq_s64_u32(vshlq_n_u32(vreinterpretq_u32_s64(a), c));
+}
+
+SIMD_INLINE v128 v128_shr_n_u32(v128 a, const unsigned int c) {
+  return vreinterpretq_s64_u32(vshrq_n_u32(vreinterpretq_u32_s64(a), c));
+}
+
+SIMD_INLINE v128 v128_shr_n_s32(v128 a, const unsigned int c) {
+  return vreinterpretq_s64_s32(vshrq_n_s32(vreinterpretq_s32_s64(a), c));
+}
+
+#else
+
+SIMD_INLINE v128 v128_shl_n_byte(v128 a, const unsigned int n) {
+  if (n < 8)
+    return v128_from_v64(v64_or(v64_shl_n_byte(v128_high_v64(a), n),
+                                v64_shr_n_byte(v128_low_v64(a), 8 - n)),
+                         v64_shl_n_byte(v128_low_v64(a), n));
+  else
+    return v128_from_v64(v64_shl_n_byte(v128_low_v64(a), n - 8), v64_zero());
+}
+
+SIMD_INLINE v128 v128_shr_n_byte(v128 a, const unsigned int n) {
+  if (n < 8)
+    return v128_from_v64(v64_shr_n_byte(v128_high_v64(a), n),
+                         v64_or(v64_shr_n_byte(v128_low_v64(a), n),
+                                v64_shl_n_byte(v128_high_v64(a), 8 - n)));
+  else
+    return v128_from_v64(v64_zero(), v64_shr_n_byte(v128_high_v64(a), n - 8));
+}
+
+SIMD_INLINE v128 v128_shl_n_8(v128 a, const unsigned int c) {
+  return v128_shl_8(a, c);
+}
+
+SIMD_INLINE v128 v128_shr_n_u8(v128 a, const unsigned int c) {
+  return v128_shr_u8(a, c);
+}
+
+SIMD_INLINE v128 v128_shr_n_s8(v128 a, const unsigned int c) {
+  return v128_shr_s8(a, c);
+}
+
+SIMD_INLINE v128 v128_shl_n_16(v128 a, const unsigned int c) {
+  return v128_shl_16(a, c);
+}
+
+SIMD_INLINE v128 v128_shr_n_u16(v128 a, const unsigned int c) {
+  return v128_shr_u16(a, c);
+}
+
+SIMD_INLINE v128 v128_shr_n_s16(v128 a, const unsigned int c) {
+  return v128_shr_s16(a, c);
+}
+
+SIMD_INLINE v128 v128_shl_n_32(v128 a, const unsigned int c) {
+  return v128_shl_32(a, c);
+}
+
+SIMD_INLINE v128 v128_shr_n_u32(v128 a, const unsigned int c) {
+  return v128_shr_u32(a, c);
+}
+
+SIMD_INLINE v128 v128_shr_n_s32(v128 a, const unsigned int c) {
+  return v128_shr_s32(a, c);
+}
+
+#endif
+
+#endif /* _V128_INTRINSICS_H */
diff --git a/aom_dsp/simd/v128_intrinsics_c.h b/aom_dsp/simd/v128_intrinsics_c.h
new file mode 100644
index 0000000..561ac86
--- /dev/null
+++ b/aom_dsp/simd/v128_intrinsics_c.h
@@ -0,0 +1,677 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef _V128_INTRINSICS_C_H
+#define _V128_INTRINSICS_C_H
+
+#include <stdio.h>
+#include <stdlib.h>
+#include "./v64_intrinsics_c.h"
+
+typedef union {
+  uint8_t u8[16];
+  uint16_t u16[8];
+  uint32_t u32[4];
+  uint64_t u64[2];
+  int8_t s8[16];
+  int16_t s16[8];
+  int32_t s32[4];
+  int64_t s64[2];
+  c_v64 v64[2];
+} c_v128;
+
+SIMD_INLINE uint32_t c_v128_low_u32(c_v128 a) { return a.u32[0]; }
+
+SIMD_INLINE c_v64 c_v128_low_v64(c_v128 a) { return a.v64[0]; }
+
+SIMD_INLINE c_v64 c_v128_high_v64(c_v128 a) { return a.v64[1]; }
+
+SIMD_INLINE c_v128 c_v128_from_64(uint64_t hi, uint64_t lo) {
+  c_v128 t;
+  t.u64[1] = hi;
+  t.u64[0] = lo;
+  return t;
+}
+
+SIMD_INLINE c_v128 c_v128_from_v64(c_v64 hi, c_v64 lo) {
+  c_v128 t;
+  t.v64[1] = hi;
+  t.v64[0] = lo;
+  return t;
+}
+
+SIMD_INLINE c_v128 c_v128_from_32(uint32_t a, uint32_t b, uint32_t c,
+                                  uint32_t d) {
+  c_v128 t;
+  t.u32[3] = a;
+  t.u32[2] = b;
+  t.u32[1] = c;
+  t.u32[0] = d;
+  return t;
+}
+
+SIMD_INLINE c_v128 c_v128_load_unaligned(const void *p) {
+  c_v128 t;
+  uint8_t *pp = (uint8_t *)p;
+  uint8_t *q = (uint8_t *)&t;
+  int c;
+  for (c = 0; c < 16; c++) q[c] = pp[c];
+  return t;
+}
+
+SIMD_INLINE c_v128 c_v128_load_aligned(const void *p) {
+  if (simd_check && (uintptr_t)p & 15) {
+    fprintf(stderr, "Error: unaligned v128 load at %p\n", p);
+    abort();
+  }
+  return c_v128_load_unaligned(p);
+}
+
+SIMD_INLINE void c_v128_store_unaligned(void *p, c_v128 a) {
+  uint8_t *pp = (uint8_t *)p;
+  uint8_t *q = (uint8_t *)&a;
+  int c;
+  for (c = 0; c < 16; c++) pp[c] = q[c];
+}
+
+SIMD_INLINE void c_v128_store_aligned(void *p, c_v128 a) {
+  if (simd_check && (uintptr_t)p & 15) {
+    fprintf(stderr, "Error: unaligned v128 store at %p\n", p);
+    abort();
+  }
+  c_v128_store_unaligned(p, a);
+}
+
+SIMD_INLINE c_v128 c_v128_zero() {
+  c_v128 t;
+  t.u64[1] = t.u64[0] = 0;
+  return t;
+}
+
+SIMD_INLINE c_v128 c_v128_dup_8(uint8_t x) {
+  c_v128 t;
+  t.v64[1] = t.v64[0] = c_v64_dup_8(x);
+  return t;
+}
+
+SIMD_INLINE c_v128 c_v128_dup_16(uint16_t x) {
+  c_v128 t;
+  t.v64[1] = t.v64[0] = c_v64_dup_16(x);
+  return t;
+}
+
+SIMD_INLINE c_v128 c_v128_dup_32(uint32_t x) {
+  c_v128 t;
+  t.v64[1] = t.v64[0] = c_v64_dup_32(x);
+  return t;
+}
+
+SIMD_INLINE int64_t c_v128_dotp_s16(c_v128 a, c_v128 b) {
+  return c_v64_dotp_s16(a.v64[1], b.v64[1]) +
+         c_v64_dotp_s16(a.v64[0], b.v64[0]);
+}
+
+SIMD_INLINE uint64_t c_v128_hadd_u8(c_v128 a) {
+  return c_v64_hadd_u8(a.v64[1]) + c_v64_hadd_u8(a.v64[0]);
+}
+
+typedef uint32_t c_sad128_internal;
+
+SIMD_INLINE c_sad128_internal c_v128_sad_u8_init() { return 0; }
+
+/* Implementation dependent return value.  Result must be finalised with
+   v128_sad_u8_sum().
+   The result for more than 32 v128_sad_u8() calls is undefined. */
+SIMD_INLINE c_sad128_internal c_v128_sad_u8(c_sad128_internal s, c_v128 a,
+                                            c_v128 b) {
+  int c;
+  for (c = 0; c < 16; c++)
+    s += a.u8[c] > b.u8[c] ? a.u8[c] - b.u8[c] : b.u8[c] - a.u8[c];
+  return s;
+}
+
+SIMD_INLINE uint32_t c_v128_sad_u8_sum(c_sad128_internal s) { return s; }
+
+typedef uint32_t c_ssd128_internal;
+
+SIMD_INLINE c_ssd128_internal c_v128_ssd_u8_init() { return 0; }
+
+/* Implementation dependent return value.  Result must be finalised with
+ * v128_ssd_u8_sum(). */
+SIMD_INLINE c_ssd128_internal c_v128_ssd_u8(c_ssd128_internal s, c_v128 a,
+                                            c_v128 b) {
+  int c;
+  for (c = 0; c < 16; c++) s += (a.u8[c] - b.u8[c]) * (a.u8[c] - b.u8[c]);
+  return s;
+}
+
+SIMD_INLINE uint32_t c_v128_ssd_u8_sum(c_ssd128_internal s) { return s; }
+
+SIMD_INLINE c_v128 c_v128_or(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_or(a.v64[1], b.v64[1]),
+                         c_v64_or(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_xor(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_xor(a.v64[1], b.v64[1]),
+                         c_v64_xor(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_and(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_and(a.v64[1], b.v64[1]),
+                         c_v64_and(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_andn(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_andn(a.v64[1], b.v64[1]),
+                         c_v64_andn(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_add_8(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_add_8(a.v64[1], b.v64[1]),
+                         c_v64_add_8(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_add_16(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_add_16(a.v64[1], b.v64[1]),
+                         c_v64_add_16(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_sadd_s16(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_sadd_s16(a.v64[1], b.v64[1]),
+                         c_v64_sadd_s16(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_add_32(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_add_32(a.v64[1], b.v64[1]),
+                         c_v64_add_32(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_padd_s16(c_v128 a) {
+  c_v128 t;
+  t.s32[0] = (int32_t)a.s16[0] + (int32_t)a.s16[1];
+  t.s32[1] = (int32_t)a.s16[2] + (int32_t)a.s16[3];
+  t.s32[2] = (int32_t)a.s16[4] + (int32_t)a.s16[5];
+  t.s32[3] = (int32_t)a.s16[6] + (int32_t)a.s16[7];
+  return t;
+}
+
+SIMD_INLINE c_v128 c_v128_sub_8(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_sub_8(a.v64[1], b.v64[1]),
+                         c_v64_sub_8(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_ssub_u8(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_ssub_u8(a.v64[1], b.v64[1]),
+                         c_v64_ssub_u8(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_ssub_s8(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_ssub_s8(a.v64[1], b.v64[1]),
+                         c_v64_ssub_s8(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_sub_16(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_sub_16(a.v64[1], b.v64[1]),
+                         c_v64_sub_16(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_ssub_s16(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_ssub_s16(a.v64[1], b.v64[1]),
+                         c_v64_ssub_s16(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_sub_32(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_sub_32(a.v64[1], b.v64[1]),
+                         c_v64_sub_32(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_abs_s16(c_v128 a) {
+  return c_v128_from_v64(c_v64_abs_s16(a.v64[1]), c_v64_abs_s16(a.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_mul_s16(c_v64 a, c_v64 b) {
+  c_v64 lo_bits = c_v64_mullo_s16(a, b);
+  c_v64 hi_bits = c_v64_mulhi_s16(a, b);
+  return c_v128_from_v64(c_v64_ziphi_16(hi_bits, lo_bits),
+                         c_v64_ziplo_16(hi_bits, lo_bits));
+}
+
+SIMD_INLINE c_v128 c_v128_mullo_s16(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_mullo_s16(a.v64[1], b.v64[1]),
+                         c_v64_mullo_s16(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_mulhi_s16(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_mulhi_s16(a.v64[1], b.v64[1]),
+                         c_v64_mulhi_s16(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_mullo_s32(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_mullo_s32(a.v64[1], b.v64[1]),
+                         c_v64_mullo_s32(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_madd_s16(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_madd_s16(a.v64[1], b.v64[1]),
+                         c_v64_madd_s16(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_madd_us8(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_madd_us8(a.v64[1], b.v64[1]),
+                         c_v64_madd_us8(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_avg_u8(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_avg_u8(a.v64[1], b.v64[1]),
+                         c_v64_avg_u8(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_rdavg_u8(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_rdavg_u8(a.v64[1], b.v64[1]),
+                         c_v64_rdavg_u8(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_avg_u16(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_avg_u16(a.v64[1], b.v64[1]),
+                         c_v64_avg_u16(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_min_u8(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_min_u8(a.v64[1], b.v64[1]),
+                         c_v64_min_u8(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_max_u8(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_max_u8(a.v64[1], b.v64[1]),
+                         c_v64_max_u8(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_min_s8(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_min_s8(a.v64[1], b.v64[1]),
+                         c_v64_min_s8(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_max_s8(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_max_s8(a.v64[1], b.v64[1]),
+                         c_v64_max_s8(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_min_s16(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_min_s16(a.v64[1], b.v64[1]),
+                         c_v64_min_s16(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_max_s16(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_max_s16(a.v64[1], b.v64[1]),
+                         c_v64_max_s16(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_ziplo_8(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_ziphi_8(a.v64[0], b.v64[0]),
+                         c_v64_ziplo_8(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_ziphi_8(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_ziphi_8(a.v64[1], b.v64[1]),
+                         c_v64_ziplo_8(a.v64[1], b.v64[1]));
+}
+
+SIMD_INLINE c_v128 c_v128_ziplo_16(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_ziphi_16(a.v64[0], b.v64[0]),
+                         c_v64_ziplo_16(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_ziphi_16(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_ziphi_16(a.v64[1], b.v64[1]),
+                         c_v64_ziplo_16(a.v64[1], b.v64[1]));
+}
+
+SIMD_INLINE c_v128 c_v128_ziplo_32(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_ziphi_32(a.v64[0], b.v64[0]),
+                         c_v64_ziplo_32(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_ziphi_32(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_ziphi_32(a.v64[1], b.v64[1]),
+                         c_v64_ziplo_32(a.v64[1], b.v64[1]));
+}
+
+SIMD_INLINE c_v128 c_v128_ziplo_64(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(a.v64[0], b.v64[0]);
+}
+
+SIMD_INLINE c_v128 c_v128_ziphi_64(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(a.v64[1], b.v64[1]);
+}
+
+SIMD_INLINE c_v128 c_v128_zip_8(c_v64 a, c_v64 b) {
+  return c_v128_from_v64(c_v64_ziphi_8(a, b), c_v64_ziplo_8(a, b));
+}
+
+SIMD_INLINE c_v128 c_v128_zip_16(c_v64 a, c_v64 b) {
+  return c_v128_from_v64(c_v64_ziphi_16(a, b), c_v64_ziplo_16(a, b));
+}
+
+SIMD_INLINE c_v128 c_v128_zip_32(c_v64 a, c_v64 b) {
+  return c_v128_from_v64(c_v64_ziphi_32(a, b), c_v64_ziplo_32(a, b));
+}
+
+SIMD_INLINE c_v128 _c_v128_unzip_8(c_v128 a, c_v128 b, int mode) {
+  c_v128 t;
+  if (mode) {
+    t.u8[15] = b.u8[15];
+    t.u8[14] = b.u8[13];
+    t.u8[13] = b.u8[11];
+    t.u8[12] = b.u8[9];
+    t.u8[11] = b.u8[7];
+    t.u8[10] = b.u8[5];
+    t.u8[9] = b.u8[3];
+    t.u8[8] = b.u8[1];
+    t.u8[7] = a.u8[15];
+    t.u8[6] = a.u8[13];
+    t.u8[5] = a.u8[11];
+    t.u8[4] = a.u8[9];
+    t.u8[3] = a.u8[7];
+    t.u8[2] = a.u8[5];
+    t.u8[1] = a.u8[3];
+    t.u8[0] = a.u8[1];
+  } else {
+    t.u8[15] = a.u8[14];
+    t.u8[14] = a.u8[12];
+    t.u8[13] = a.u8[10];
+    t.u8[12] = a.u8[8];
+    t.u8[11] = a.u8[6];
+    t.u8[10] = a.u8[4];
+    t.u8[9] = a.u8[2];
+    t.u8[8] = a.u8[0];
+    t.u8[7] = b.u8[14];
+    t.u8[6] = b.u8[12];
+    t.u8[5] = b.u8[10];
+    t.u8[4] = b.u8[8];
+    t.u8[3] = b.u8[6];
+    t.u8[2] = b.u8[4];
+    t.u8[1] = b.u8[2];
+    t.u8[0] = b.u8[0];
+  }
+  return t;
+}
+
+SIMD_INLINE c_v128 c_v128_unziplo_8(c_v128 a, c_v128 b) {
+  return big_endian() ? _c_v128_unzip_8(a, b, 1) : _c_v128_unzip_8(a, b, 0);
+}
+
+SIMD_INLINE c_v128 c_v128_unziphi_8(c_v128 a, c_v128 b) {
+  return big_endian() ? _c_v128_unzip_8(b, a, 0) : _c_v128_unzip_8(b, a, 1);
+}
+
+SIMD_INLINE c_v128 _c_v128_unzip_16(c_v128 a, c_v128 b, int mode) {
+  c_v128 t;
+  if (mode) {
+    t.u16[7] = b.u16[7];
+    t.u16[6] = b.u16[5];
+    t.u16[5] = b.u16[3];
+    t.u16[4] = b.u16[1];
+    t.u16[3] = a.u16[7];
+    t.u16[2] = a.u16[5];
+    t.u16[1] = a.u16[3];
+    t.u16[0] = a.u16[1];
+  } else {
+    t.u16[7] = a.u16[6];
+    t.u16[6] = a.u16[4];
+    t.u16[5] = a.u16[2];
+    t.u16[4] = a.u16[0];
+    t.u16[3] = b.u16[6];
+    t.u16[2] = b.u16[4];
+    t.u16[1] = b.u16[2];
+    t.u16[0] = b.u16[0];
+  }
+  return t;
+}
+
+SIMD_INLINE c_v128 c_v128_unziplo_16(c_v128 a, c_v128 b) {
+  return big_endian() ? _c_v128_unzip_16(a, b, 1) : _c_v128_unzip_16(a, b, 0);
+}
+
+SIMD_INLINE c_v128 c_v128_unziphi_16(c_v128 a, c_v128 b) {
+  return big_endian() ? _c_v128_unzip_16(b, a, 0) : _c_v128_unzip_16(b, a, 1);
+}
+
+SIMD_INLINE c_v128 _c_v128_unzip_32(c_v128 a, c_v128 b, int mode) {
+  c_v128 t;
+  if (mode) {
+    t.u32[3] = b.u32[3];
+    t.u32[2] = b.u32[1];
+    t.u32[1] = a.u32[3];
+    t.u32[0] = a.u32[1];
+  } else {
+    t.u32[3] = a.u32[2];
+    t.u32[2] = a.u32[0];
+    t.u32[1] = b.u32[2];
+    t.u32[0] = b.u32[0];
+  }
+  return t;
+}
+
+SIMD_INLINE c_v128 c_v128_unziplo_32(c_v128 a, c_v128 b) {
+  return big_endian() ? _c_v128_unzip_32(a, b, 1) : _c_v128_unzip_32(a, b, 0);
+}
+
+SIMD_INLINE c_v128 c_v128_unziphi_32(c_v128 a, c_v128 b) {
+  return big_endian() ? _c_v128_unzip_32(b, a, 0) : _c_v128_unzip_32(b, a, 1);
+}
+
+SIMD_INLINE c_v128 c_v128_unpack_u8_s16(c_v64 a) {
+  return c_v128_from_v64(c_v64_unpackhi_u8_s16(a), c_v64_unpacklo_u8_s16(a));
+}
+
+SIMD_INLINE c_v128 c_v128_unpacklo_u8_s16(c_v128 a) {
+  return c_v128_from_v64(c_v64_unpackhi_u8_s16(a.v64[0]),
+                         c_v64_unpacklo_u8_s16(a.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_unpackhi_u8_s16(c_v128 a) {
+  return c_v128_from_v64(c_v64_unpackhi_u8_s16(a.v64[1]),
+                         c_v64_unpacklo_u8_s16(a.v64[1]));
+}
+
+SIMD_INLINE c_v128 c_v128_pack_s32_s16(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_pack_s32_s16(a.v64[1], a.v64[0]),
+                         c_v64_pack_s32_s16(b.v64[1], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_pack_s16_u8(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_pack_s16_u8(a.v64[1], a.v64[0]),
+                         c_v64_pack_s16_u8(b.v64[1], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_pack_s16_s8(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_pack_s16_s8(a.v64[1], a.v64[0]),
+                         c_v64_pack_s16_s8(b.v64[1], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_unpack_u16_s32(c_v64 a) {
+  return c_v128_from_v64(c_v64_unpackhi_u16_s32(a), c_v64_unpacklo_u16_s32(a));
+}
+
+SIMD_INLINE c_v128 c_v128_unpack_s16_s32(c_v64 a) {
+  return c_v128_from_v64(c_v64_unpackhi_s16_s32(a), c_v64_unpacklo_s16_s32(a));
+}
+
+SIMD_INLINE c_v128 c_v128_unpacklo_u16_s32(c_v128 a) {
+  return c_v128_from_v64(c_v64_unpackhi_u16_s32(a.v64[0]),
+                         c_v64_unpacklo_u16_s32(a.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_unpacklo_s16_s32(c_v128 a) {
+  return c_v128_from_v64(c_v64_unpackhi_s16_s32(a.v64[0]),
+                         c_v64_unpacklo_s16_s32(a.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_unpackhi_u16_s32(c_v128 a) {
+  return c_v128_from_v64(c_v64_unpackhi_u16_s32(a.v64[1]),
+                         c_v64_unpacklo_u16_s32(a.v64[1]));
+}
+
+SIMD_INLINE c_v128 c_v128_unpackhi_s16_s32(c_v128 a) {
+  return c_v128_from_v64(c_v64_unpackhi_s16_s32(a.v64[1]),
+                         c_v64_unpacklo_s16_s32(a.v64[1]));
+}
+
+SIMD_INLINE c_v128 c_v128_shuffle_8(c_v128 a, c_v128 pattern) {
+  c_v128 t;
+  int c;
+  for (c = 0; c < 16; c++) {
+    if (pattern.u8[c] & ~15) {
+      fprintf(stderr, "Undefined v128_shuffle_8 index %d/%d\n", pattern.u8[c],
+              c);
+      abort();
+    }
+    t.u8[c] =
+        a.u8[big_endian() ? 15 - (pattern.u8[c] & 15) : pattern.u8[c] & 15];
+  }
+  return t;
+}
+
+SIMD_INLINE c_v128 c_v128_cmpgt_s8(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_cmpgt_s8(a.v64[1], b.v64[1]),
+                         c_v64_cmpgt_s8(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_cmplt_s8(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_cmplt_s8(a.v64[1], b.v64[1]),
+                         c_v64_cmplt_s8(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_cmpeq_8(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_cmpeq_8(a.v64[1], b.v64[1]),
+                         c_v64_cmpeq_8(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_cmpgt_s16(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_cmpgt_s16(a.v64[1], b.v64[1]),
+                         c_v64_cmpgt_s16(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_cmplt_s16(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_cmplt_s16(a.v64[1], b.v64[1]),
+                         c_v64_cmplt_s16(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_cmpeq_16(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_cmpeq_16(a.v64[1], b.v64[1]),
+                         c_v64_cmpeq_16(a.v64[0], b.v64[0]));
+}
+
+SIMD_INLINE c_v128 c_v128_shl_n_byte(c_v128 a, const unsigned int n) {
+  if (n < 8)
+    return c_v128_from_v64(c_v64_or(c_v64_shl_n_byte(a.v64[1], n),
+                                    c_v64_shr_n_byte(a.v64[0], 8 - n)),
+                           c_v64_shl_n_byte(a.v64[0], n));
+  else
+    return c_v128_from_v64(c_v64_shl_n_byte(a.v64[0], n - 8), c_v64_zero());
+}
+
+SIMD_INLINE c_v128 c_v128_shr_n_byte(c_v128 a, const unsigned int n) {
+  if (n < 8)
+    return c_v128_from_v64(c_v64_shr_n_byte(a.v64[1], n),
+                           c_v64_or(c_v64_shr_n_byte(a.v64[0], n),
+                                    c_v64_shl_n_byte(a.v64[1], 8 - n)));
+  else
+    return c_v128_from_v64(c_v64_zero(), c_v64_shr_n_byte(a.v64[1], n - 8));
+}
+
+SIMD_INLINE c_v128 c_v128_align(c_v128 a, c_v128 b, const unsigned int c) {
+  if (simd_check && c > 15) {
+    fprintf(stderr, "Error: undefined alignment %u\n", c);
+    abort();
+  }
+  return c ? c_v128_or(c_v128_shr_n_byte(b, c), c_v128_shl_n_byte(a, 16 - c))
+           : b;
+}
+
+SIMD_INLINE c_v128 c_v128_shl_8(c_v128 a, const unsigned int c) {
+  return c_v128_from_v64(c_v64_shl_8(a.v64[1], c), c_v64_shl_8(a.v64[0], c));
+}
+
+SIMD_INLINE c_v128 c_v128_shr_u8(c_v128 a, const unsigned int c) {
+  return c_v128_from_v64(c_v64_shr_u8(a.v64[1], c), c_v64_shr_u8(a.v64[0], c));
+}
+
+SIMD_INLINE c_v128 c_v128_shr_s8(c_v128 a, const unsigned int c) {
+  return c_v128_from_v64(c_v64_shr_s8(a.v64[1], c), c_v64_shr_s8(a.v64[0], c));
+}
+
+SIMD_INLINE c_v128 c_v128_shl_16(c_v128 a, const unsigned int c) {
+  return c_v128_from_v64(c_v64_shl_16(a.v64[1], c), c_v64_shl_16(a.v64[0], c));
+}
+
+SIMD_INLINE c_v128 c_v128_shr_u16(c_v128 a, const unsigned int c) {
+  return c_v128_from_v64(c_v64_shr_u16(a.v64[1], c),
+                         c_v64_shr_u16(a.v64[0], c));
+}
+
+SIMD_INLINE c_v128 c_v128_shr_s16(c_v128 a, const unsigned int c) {
+  return c_v128_from_v64(c_v64_shr_s16(a.v64[1], c),
+                         c_v64_shr_s16(a.v64[0], c));
+}
+
+SIMD_INLINE c_v128 c_v128_shl_32(c_v128 a, const unsigned int c) {
+  return c_v128_from_v64(c_v64_shl_32(a.v64[1], c), c_v64_shl_32(a.v64[0], c));
+}
+
+SIMD_INLINE c_v128 c_v128_shr_u32(c_v128 a, const unsigned int c) {
+  return c_v128_from_v64(c_v64_shr_u32(a.v64[1], c),
+                         c_v64_shr_u32(a.v64[0], c));
+}
+
+SIMD_INLINE c_v128 c_v128_shr_s32(c_v128 a, const unsigned int c) {
+  return c_v128_from_v64(c_v64_shr_s32(a.v64[1], c),
+                         c_v64_shr_s32(a.v64[0], c));
+}
+
+SIMD_INLINE c_v128 c_v128_shl_n_8(c_v128 a, const unsigned int n) {
+  return c_v128_shl_8(a, n);
+}
+
+SIMD_INLINE c_v128 c_v128_shl_n_16(c_v128 a, const unsigned int n) {
+  return c_v128_shl_16(a, n);
+}
+
+SIMD_INLINE c_v128 c_v128_shl_n_32(c_v128 a, const unsigned int n) {
+  return c_v128_shl_32(a, n);
+}
+
+SIMD_INLINE c_v128 c_v128_shr_n_u8(c_v128 a, const unsigned int n) {
+  return c_v128_shr_u8(a, n);
+}
+
+SIMD_INLINE c_v128 c_v128_shr_n_u16(c_v128 a, const unsigned int n) {
+  return c_v128_shr_u16(a, n);
+}
+
+SIMD_INLINE c_v128 c_v128_shr_n_u32(c_v128 a, const unsigned int n) {
+  return c_v128_shr_u32(a, n);
+}
+
+SIMD_INLINE c_v128 c_v128_shr_n_s8(c_v128 a, const unsigned int n) {
+  return c_v128_shr_s8(a, n);
+}
+
+SIMD_INLINE c_v128 c_v128_shr_n_s16(c_v128 a, const unsigned int n) {
+  return c_v128_shr_s16(a, n);
+}
+
+SIMD_INLINE c_v128 c_v128_shr_n_s32(c_v128 a, const unsigned int n) {
+  return c_v128_shr_s32(a, n);
+}
+
+#endif /* _V128_INTRINSICS_C_H */
diff --git a/aom_dsp/simd/v128_intrinsics_x86.h b/aom_dsp/simd/v128_intrinsics_x86.h
new file mode 100644
index 0000000..e09cbb9
--- /dev/null
+++ b/aom_dsp/simd/v128_intrinsics_x86.h
@@ -0,0 +1,492 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef _V128_INTRINSICS_H
+#define _V128_INTRINSICS_H
+
+#include "./v64_intrinsics_x86.h"
+
+typedef __m128i v128;
+
+SIMD_INLINE uint32_t v128_low_u32(v128 a) {
+  return (uint32_t)_mm_cvtsi128_si32(a);
+}
+
+SIMD_INLINE v64 v128_low_v64(v128 a) {
+  return _mm_unpacklo_epi64(a, v64_zero());
+}
+
+SIMD_INLINE v64 v128_high_v64(v128 a) { return _mm_srli_si128(a, 8); }
+
+SIMD_INLINE v128 v128_from_v64(v64 a, v64 b) {
+  return _mm_unpacklo_epi64(b, a);
+}
+
+SIMD_INLINE v128 v128_from_64(uint64_t a, uint64_t b) {
+  return v128_from_v64(v64_from_64(a), v64_from_64(b));
+}
+
+SIMD_INLINE v128 v128_from_32(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
+  return _mm_set_epi32(a, b, c, d);
+}
+
+SIMD_INLINE v128 v128_load_aligned(const void *p) {
+  return _mm_load_si128((__m128i *)p);
+}
+
+SIMD_INLINE v128 v128_load_unaligned(const void *p) {
+#if defined(__SSSE3__)
+  return (__m128i)_mm_lddqu_si128((__m128i *)p);
+#else
+  return _mm_loadu_si128((__m128i *)p);
+#endif
+}
+
+SIMD_INLINE void v128_store_aligned(void *p, v128 a) {
+  _mm_store_si128((__m128i *)p, a);
+}
+
+SIMD_INLINE void v128_store_unaligned(void *p, v128 a) {
+  _mm_storeu_si128((__m128i *)p, a);
+}
+
+#if defined(__OPTIMIZE__)
+#if defined(__SSSE3__)
+SIMD_INLINE v128 v128_align(v128 a, v128 b, const unsigned int c) {
+  return c ? _mm_alignr_epi8(a, b, c) : b;
+}
+#else
+#define v128_align(a, b, c) \
+  ((c) ? _mm_or_si128(_mm_srli_si128(b, c), _mm_slli_si128(a, 16 - (c))) : (b))
+#endif
+#else
+#if defined(__SSSE3__)
+#define v128_align(a, b, c) ((c) ? _mm_alignr_epi8(a, b, c) : (b))
+#else
+#define v128_align(a, b, c) \
+  ((c) ? _mm_or_si128(_mm_srli_si128(b, c), _mm_slli_si128(a, 16 - (c))) : (b))
+#endif
+#endif
+
+SIMD_INLINE v128 v128_zero() { return _mm_setzero_si128(); }
+
+SIMD_INLINE v128 v128_dup_8(uint8_t x) { return _mm_set1_epi8(x); }
+
+SIMD_INLINE v128 v128_dup_16(uint16_t x) { return _mm_set1_epi16(x); }
+
+SIMD_INLINE v128 v128_dup_32(uint32_t x) { return _mm_set1_epi32(x); }
+
+SIMD_INLINE v128 v128_add_8(v128 a, v128 b) { return _mm_add_epi8(a, b); }
+
+SIMD_INLINE v128 v128_add_16(v128 a, v128 b) { return _mm_add_epi16(a, b); }
+
+SIMD_INLINE v128 v128_sadd_s16(v128 a, v128 b) { return _mm_adds_epi16(a, b); }
+
+SIMD_INLINE v128 v128_add_32(v128 a, v128 b) { return _mm_add_epi32(a, b); }
+
+SIMD_INLINE v128 v128_padd_s16(v128 a) {
+  return _mm_madd_epi16(a, _mm_set1_epi16(1));
+}
+
+SIMD_INLINE v128 v128_sub_8(v128 a, v128 b) { return _mm_sub_epi8(a, b); }
+
+SIMD_INLINE v128 v128_ssub_u8(v128 a, v128 b) { return _mm_subs_epu8(a, b); }
+
+SIMD_INLINE v128 v128_ssub_s8(v128 a, v128 b) { return _mm_subs_epi8(a, b); }
+
+SIMD_INLINE v128 v128_sub_16(v128 a, v128 b) { return _mm_sub_epi16(a, b); }
+
+SIMD_INLINE v128 v128_ssub_s16(v128 a, v128 b) { return _mm_subs_epi16(a, b); }
+
+SIMD_INLINE v128 v128_sub_32(v128 a, v128 b) { return _mm_sub_epi32(a, b); }
+
+SIMD_INLINE v128 v128_abs_s16(v128 a) {
+#if defined(__SSSE3__)
+  return _mm_abs_epi16(a);
+#else
+  return _mm_max_epi16(a, _mm_sub_epi16(_mm_setzero_si128(), a));
+#endif
+}
+
+SIMD_INLINE v128 v128_ziplo_8(v128 a, v128 b) {
+  return _mm_unpacklo_epi8(b, a);
+}
+
+SIMD_INLINE v128 v128_ziphi_8(v128 a, v128 b) {
+  return _mm_unpackhi_epi8(b, a);
+}
+
+SIMD_INLINE v128 v128_ziplo_16(v128 a, v128 b) {
+  return _mm_unpacklo_epi16(b, a);
+}
+
+SIMD_INLINE v128 v128_ziphi_16(v128 a, v128 b) {
+  return _mm_unpackhi_epi16(b, a);
+}
+
+SIMD_INLINE v128 v128_ziplo_32(v128 a, v128 b) {
+  return _mm_unpacklo_epi32(b, a);
+}
+
+SIMD_INLINE v128 v128_ziphi_32(v128 a, v128 b) {
+  return _mm_unpackhi_epi32(b, a);
+}
+
+SIMD_INLINE v128 v128_ziplo_64(v128 a, v128 b) {
+  return _mm_unpacklo_epi64(b, a);
+}
+
+SIMD_INLINE v128 v128_ziphi_64(v128 a, v128 b) {
+  return _mm_unpackhi_epi64(b, a);
+}
+
+SIMD_INLINE v128 v128_zip_8(v64 a, v64 b) { return _mm_unpacklo_epi8(b, a); }
+
+SIMD_INLINE v128 v128_zip_16(v64 a, v64 b) { return _mm_unpacklo_epi16(b, a); }
+
+SIMD_INLINE v128 v128_zip_32(v64 a, v64 b) { return _mm_unpacklo_epi32(b, a); }
+
+SIMD_INLINE v128 v128_unziphi_8(v128 a, v128 b) {
+  return _mm_packs_epi16(_mm_srai_epi16(b, 8), _mm_srai_epi16(a, 8));
+}
+
+SIMD_INLINE v128 v128_unziplo_8(v128 a, v128 b) {
+#if defined(__SSSE3__)
+  v128 order = _mm_cvtsi64_si128(0x0e0c0a0806040200LL);
+  return _mm_unpacklo_epi64(_mm_shuffle_epi8(b, order),
+                            _mm_shuffle_epi8(a, order));
+#else
+  return v128_unziphi_8(_mm_slli_si128(a, 1), _mm_slli_si128(b, 1));
+#endif
+}
+
+SIMD_INLINE v128 v128_unziphi_16(v128 a, v128 b) {
+  return _mm_packs_epi32(_mm_srai_epi32(b, 16), _mm_srai_epi32(a, 16));
+}
+
+SIMD_INLINE v128 v128_unziplo_16(v128 a, v128 b) {
+#if defined(__SSSE3__)
+  v128 order = _mm_cvtsi64_si128(0x0d0c090805040100LL);
+  return _mm_unpacklo_epi64(_mm_shuffle_epi8(b, order),
+                            _mm_shuffle_epi8(a, order));
+#else
+  return v128_unziphi_16(_mm_slli_si128(a, 2), _mm_slli_si128(b, 2));
+#endif
+}
+
+SIMD_INLINE v128 v128_unziphi_32(v128 a, v128 b) {
+  return _mm_castps_si128(_mm_shuffle_ps(
+      _mm_castsi128_ps(b), _mm_castsi128_ps(a), _MM_SHUFFLE(3, 1, 3, 1)));
+}
+
+SIMD_INLINE v128 v128_unziplo_32(v128 a, v128 b) {
+  return _mm_castps_si128(_mm_shuffle_ps(
+      _mm_castsi128_ps(b), _mm_castsi128_ps(a), _MM_SHUFFLE(2, 0, 2, 0)));
+}
+
+SIMD_INLINE v128 v128_unpack_u8_s16(v64 a) {
+  return _mm_unpacklo_epi8(a, _mm_setzero_si128());
+}
+
+SIMD_INLINE v128 v128_unpacklo_u8_s16(v128 a) {
+  return _mm_unpacklo_epi8(a, _mm_setzero_si128());
+}
+
+SIMD_INLINE v128 v128_unpackhi_u8_s16(v128 a) {
+  return _mm_unpackhi_epi8(a, _mm_setzero_si128());
+}
+
+SIMD_INLINE v128 v128_pack_s32_s16(v128 a, v128 b) {
+  return _mm_packs_epi32(b, a);
+}
+
+SIMD_INLINE v128 v128_pack_s16_u8(v128 a, v128 b) {
+  return _mm_packus_epi16(b, a);
+}
+
+SIMD_INLINE v128 v128_pack_s16_s8(v128 a, v128 b) {
+  return _mm_packs_epi16(b, a);
+}
+
+SIMD_INLINE v128 v128_unpack_u16_s32(v64 a) {
+  return _mm_unpacklo_epi16(a, _mm_setzero_si128());
+}
+
+SIMD_INLINE v128 v128_unpack_s16_s32(v64 a) {
+  return _mm_srai_epi32(_mm_unpacklo_epi16(a, a), 16);
+}
+
+SIMD_INLINE v128 v128_unpacklo_u16_s32(v128 a) {
+  return _mm_unpacklo_epi16(a, _mm_setzero_si128());
+}
+
+SIMD_INLINE v128 v128_unpacklo_s16_s32(v128 a) {
+  return _mm_srai_epi32(_mm_unpacklo_epi16(a, a), 16);
+}
+
+SIMD_INLINE v128 v128_unpackhi_u16_s32(v128 a) {
+  return _mm_unpackhi_epi16(a, _mm_setzero_si128());
+}
+
+SIMD_INLINE v128 v128_unpackhi_s16_s32(v128 a) {
+  return _mm_srai_epi32(_mm_unpackhi_epi16(a, a), 16);
+}
+
+SIMD_INLINE v128 v128_shuffle_8(v128 x, v128 pattern) {
+#if defined(__SSSE3__)
+  return _mm_shuffle_epi8(x, pattern);
+#else
+  v128 output;
+  unsigned char *input = (unsigned char *)&x;
+  unsigned char *index = (unsigned char *)&pattern;
+  char *selected = (char *)&output;
+  int counter;
+
+  for (counter = 0; counter < 16; counter++) {
+    selected[counter] = input[index[counter] & 15];
+  }
+
+  return output;
+#endif
+}
+
+SIMD_INLINE int64_t v128_dotp_s16(v128 a, v128 b) {
+  v128 r = _mm_madd_epi16(a, b);
+#if defined(__SSE4_1__)
+  v128 c = _mm_add_epi64(_mm_cvtepi32_epi64(r),
+                         _mm_cvtepi32_epi64(_mm_srli_si128(r, 8)));
+  return _mm_cvtsi128_si64(_mm_add_epi64(c, _mm_srli_si128(c, 8)));
+#else
+  return (int64_t)_mm_cvtsi128_si32(r) +
+         (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(r, 4)) +
+         (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(r, 8)) +
+         (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(r, 12));
+#endif
+}
+
+SIMD_INLINE uint64_t v128_hadd_u8(v128 a) {
+  v128 t = _mm_sad_epu8(a, _mm_setzero_si128());
+  return v64_low_u32(v128_low_v64(t)) + v64_low_u32(v128_high_v64(t));
+}
+
+typedef v128 sad128_internal;
+
+SIMD_INLINE sad128_internal v128_sad_u8_init() { return _mm_setzero_si128(); }
+
+/* Implementation dependent return value.  Result must be finalised with
+   v128_sad_u8_sum().
+   The result for more than 32 v128_sad_u8() calls is undefined. */
+SIMD_INLINE sad128_internal v128_sad_u8(sad128_internal s, v128 a, v128 b) {
+  return _mm_add_epi64(s, _mm_sad_epu8(a, b));
+}
+
+SIMD_INLINE uint32_t v128_sad_u8_sum(sad128_internal s) {
+  return v128_low_u32(_mm_add_epi32(s, _mm_unpackhi_epi64(s, s)));
+}
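
The comment above describes a three-step protocol: initialise an accumulator, feed it pairs of vectors, then finalise. A minimal usage sketch follows, assuming an SSE2 build where the library is pulled in through its aggregate header aom_dsp/aom_simd.h; block_sad_16x16 and its stride parameter are illustrative names, not part of the patch.

#include "aom_dsp/aom_simd.h"

/* Sum of absolute differences over a 16x16 block of bytes.  Sixteen
   accumulation calls stay well inside the documented 32-call limit. */
static uint32_t block_sad_16x16(const uint8_t *src, const uint8_t *ref,
                                int stride) {
  sad128_internal s = v128_sad_u8_init();
  int row;
  for (row = 0; row < 16; row++)
    s = v128_sad_u8(s, v128_load_unaligned(src + row * stride),
                    v128_load_unaligned(ref + row * stride));
  return v128_sad_u8_sum(s);
}
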
+
+typedef v128 ssd128_internal;
+
+SIMD_INLINE ssd128_internal v128_ssd_u8_init() { return _mm_setzero_si128(); }
+
+/* Implementation dependent return value.  Result must be finalised with
+ * v128_ssd_u8_sum(). */
+SIMD_INLINE ssd128_internal v128_ssd_u8(ssd128_internal s, v128 a, v128 b) {
+  v128 l = _mm_sub_epi16(_mm_unpacklo_epi8(a, _mm_setzero_si128()),
+                         _mm_unpacklo_epi8(b, _mm_setzero_si128()));
+  v128 h = _mm_sub_epi16(_mm_unpackhi_epi8(a, _mm_setzero_si128()),
+                         _mm_unpackhi_epi8(b, _mm_setzero_si128()));
+  v128 rl = _mm_madd_epi16(l, l);
+  v128 rh = _mm_madd_epi16(h, h);
+  v128 c = _mm_cvtsi32_si128(32);
+  rl = _mm_add_epi32(rl, _mm_srli_si128(rl, 8));
+  rl = _mm_add_epi32(rl, _mm_srli_si128(rl, 4));
+  rh = _mm_add_epi32(rh, _mm_srli_si128(rh, 8));
+  rh = _mm_add_epi32(rh, _mm_srli_si128(rh, 4));
+  return _mm_add_epi64(
+      s, _mm_srl_epi64(_mm_sll_epi64(_mm_unpacklo_epi64(rl, rh), c), c));
+}
+
+SIMD_INLINE uint32_t v128_ssd_u8_sum(ssd128_internal s) {
+  return v128_low_u32(_mm_add_epi32(s, _mm_unpackhi_epi64(s, s)));
+}
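
v128_ssd_u8() squares the sixteen byte differences with _mm_madd_epi16 and then widens the two 32-bit partial sums to 64-bit lanes (the shift-left/shift-right by 32 pair clears the upper halves) before adding them to the accumulator. A plain-C model of one accumulation step, with the hypothetical name ssd_u8_step, shows the arithmetic being matched:

#include <stdint.h>

/* Reference for one v128_ssd_u8() step: add the squared difference of each
   of the 16 byte lanes to the running sum. */
static uint64_t ssd_u8_step(uint64_t sum, const uint8_t a[16],
                            const uint8_t b[16]) {
  int i;
  for (i = 0; i < 16; i++) {
    int d = (int)a[i] - (int)b[i];
    sum += (uint64_t)(d * d);
  }
  return sum;
}
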
+
+SIMD_INLINE v128 v128_or(v128 a, v128 b) { return _mm_or_si128(a, b); }
+
+SIMD_INLINE v128 v128_xor(v128 a, v128 b) { return _mm_xor_si128(a, b); }
+
+SIMD_INLINE v128 v128_and(v128 a, v128 b) { return _mm_and_si128(a, b); }
+
+SIMD_INLINE v128 v128_andn(v128 a, v128 b) { return _mm_andnot_si128(b, a); }
+
+SIMD_INLINE v128 v128_mul_s16(v64 a, v64 b) {
+  v64 lo_bits = v64_mullo_s16(a, b);
+  v64 hi_bits = v64_mulhi_s16(a, b);
+  return v128_from_v64(v64_ziphi_16(hi_bits, lo_bits),
+                       v64_ziplo_16(hi_bits, lo_bits));
+}
+
+SIMD_INLINE v128 v128_mullo_s16(v128 a, v128 b) {
+  return _mm_mullo_epi16(a, b);
+}
+
+SIMD_INLINE v128 v128_mulhi_s16(v128 a, v128 b) {
+  return _mm_mulhi_epi16(a, b);
+}
+
+SIMD_INLINE v128 v128_mullo_s32(v128 a, v128 b) {
+#if defined(__SSE4_1__)
+  return _mm_mullo_epi32(a, b);
+#else
+  return _mm_unpacklo_epi32(
+      _mm_shuffle_epi32(_mm_mul_epu32(a, b), 8),
+      _mm_shuffle_epi32(
+          _mm_mul_epu32(_mm_srli_si128(a, 4), _mm_srli_si128(b, 4)), 8));
+#endif
+}
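
Without SSE4.1, the 32-bit low multiply is pieced together from _mm_mul_epu32, which only multiplies the even lanes into 64-bit products; the odd lanes are handled by shifting both inputs right by four bytes, and _mm_shuffle_epi32 with selector 8 gathers 32-bit lanes 0 and 2 into the low half. A scalar reference of the intended result, under the assumption that only the low 32 bits of each lane-wise product are kept (mullo_s32_ref is an illustrative name):

#include <stdint.h>

/* Lane-wise low-32-bit products, i.e. what _mm_mullo_epi32 returns directly
   on SSE4.1. */
static void mullo_s32_ref(int32_t r[4], const int32_t a[4],
                          const int32_t b[4]) {
  int i;
  for (i = 0; i < 4; i++) r[i] = (int32_t)((int64_t)a[i] * b[i]);
}
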
+
+SIMD_INLINE v128 v128_madd_s16(v128 a, v128 b) { return _mm_madd_epi16(a, b); }
+
+SIMD_INLINE v128 v128_madd_us8(v128 a, v128 b) {
+#if defined(__SSSE3__)
+  return _mm_maddubs_epi16(a, b);
+#else
+  return _mm_packs_epi32(
+      _mm_madd_epi16(_mm_unpacklo_epi8(a, _mm_setzero_si128()),
+                     _mm_srai_epi16(_mm_unpacklo_epi8(b, b), 8)),
+      _mm_madd_epi16(_mm_unpackhi_epi8(a, _mm_setzero_si128()),
+                     _mm_srai_epi16(_mm_unpackhi_epi8(b, b), 8)));
+#endif
+}
+
+SIMD_INLINE v128 v128_avg_u8(v128 a, v128 b) { return _mm_avg_epu8(a, b); }
+
+SIMD_INLINE v128 v128_rdavg_u8(v128 a, v128 b) {
+  return _mm_sub_epi8(_mm_avg_epu8(a, b),
+                      _mm_and_si128(_mm_xor_si128(a, b), v128_dup_8(1)));
+}
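
v128_rdavg_u8() derives the round-down average from the round-up one the hardware provides: _mm_avg_epu8 computes (a + b + 1) >> 1, and subtracting (a ^ b) & 1 removes the extra rounding bit exactly when a and b have different parity. A scalar self-check of that identity (check_rdavg_identity is an illustrative name):

#include <assert.h>

/* (a + b) >> 1 == ((a + b + 1) >> 1) - ((a ^ b) & 1) for all byte values. */
static void check_rdavg_identity(void) {
  unsigned int a, b;
  for (a = 0; a < 256; a++)
    for (b = 0; b < 256; b++)
      assert(((a + b) >> 1) == ((a + b + 1) >> 1) - ((a ^ b) & 1));
}
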
+
+SIMD_INLINE v128 v128_avg_u16(v128 a, v128 b) { return _mm_avg_epu16(a, b); }
+
+SIMD_INLINE v128 v128_min_u8(v128 a, v128 b) { return _mm_min_epu8(a, b); }
+
+SIMD_INLINE v128 v128_max_u8(v128 a, v128 b) { return _mm_max_epu8(a, b); }
+
+SIMD_INLINE v128 v128_min_s8(v128 a, v128 b) {
+#if defined(__SSE4_1__)
+  return _mm_min_epi8(a, b);
+#else
+  v128 mask = _mm_cmplt_epi8(a, b);
+  return _mm_or_si128(_mm_andnot_si128(mask, b), _mm_and_si128(mask, a));
+#endif
+}
+
+SIMD_INLINE v128 v128_max_s8(v128 a, v128 b) {
+#if defined(__SSE4_1__)
+  return _mm_max_epi8(a, b);
+#else
+  v128 mask = _mm_cmplt_epi8(b, a);
+  return _mm_or_si128(_mm_andnot_si128(mask, b), _mm_and_si128(mask, a));
+#endif
+}
+
+SIMD_INLINE v128 v128_min_s16(v128 a, v128 b) { return _mm_min_epi16(a, b); }
+
+SIMD_INLINE v128 v128_max_s16(v128 a, v128 b) { return _mm_max_epi16(a, b); }
+
+SIMD_INLINE v128 v128_cmpgt_s8(v128 a, v128 b) { return _mm_cmpgt_epi8(a, b); }
+
+SIMD_INLINE v128 v128_cmplt_s8(v128 a, v128 b) { return _mm_cmplt_epi8(a, b); }
+
+SIMD_INLINE v128 v128_cmpeq_8(v128 a, v128 b) { return _mm_cmpeq_epi8(a, b); }
+
+SIMD_INLINE v128 v128_cmpgt_s16(v128 a, v128 b) {
+  return _mm_cmpgt_epi16(a, b);
+}
+
+SIMD_INLINE v128 v128_cmplt_s16(v128 a, v128 b) {
+  return _mm_cmplt_epi16(a, b);
+}
+
+SIMD_INLINE v128 v128_cmpeq_16(v128 a, v128 b) { return _mm_cmpeq_epi16(a, b); }
+
+SIMD_INLINE v128 v128_shl_8(v128 a, unsigned int c) {
+  __m128i x = _mm_cvtsi32_si128(c);
+  return _mm_packus_epi16(
+      _mm_srli_epi16(
+          _mm_sll_epi16(_mm_unpacklo_epi8(_mm_setzero_si128(), a), x), 8),
+      _mm_srli_epi16(
+          _mm_sll_epi16(_mm_unpackhi_epi8(_mm_setzero_si128(), a), x), 8));
+}
+
+SIMD_INLINE v128 v128_shr_u8(v128 a, unsigned int c) {
+  __m128i x = _mm_cvtsi32_si128(c + 8);
+  return _mm_packus_epi16(
+      _mm_srl_epi16(_mm_unpacklo_epi8(_mm_setzero_si128(), a), x),
+      _mm_srl_epi16(_mm_unpackhi_epi8(_mm_setzero_si128(), a), x));
+}
+
+SIMD_INLINE v128 v128_shr_s8(v128 a, unsigned int c) {
+  __m128i x = _mm_cvtsi32_si128(c + 8);
+  return _mm_packs_epi16(
+      _mm_sra_epi16(_mm_unpacklo_epi8(_mm_setzero_si128(), a), x),
+      _mm_sra_epi16(_mm_unpackhi_epi8(_mm_setzero_si128(), a), x));
+}
+
+SIMD_INLINE v128 v128_shl_16(v128 a, unsigned int c) {
+  return _mm_sll_epi16(a, _mm_cvtsi32_si128(c));
+}
+
+SIMD_INLINE v128 v128_shr_u16(v128 a, unsigned int c) {
+  return _mm_srl_epi16(a, _mm_cvtsi32_si128(c));
+}
+
+SIMD_INLINE v128 v128_shr_s16(v128 a, unsigned int c) {
+  return _mm_sra_epi16(a, _mm_cvtsi32_si128(c));
+}
+
+SIMD_INLINE v128 v128_shl_32(v128 a, unsigned int c) {
+  return _mm_sll_epi32(a, _mm_cvtsi32_si128(c));
+}
+
+SIMD_INLINE v128 v128_shr_u32(v128 a, unsigned int c) {
+  return _mm_srl_epi32(a, _mm_cvtsi32_si128(c));
+}
+
+SIMD_INLINE v128 v128_shr_s32(v128 a, unsigned int c) {
+  return _mm_sra_epi32(a, _mm_cvtsi32_si128(c));
+}
+
+/* These intrinsics require immediate values, so we must use #defines
+   to enforce that. */
+#define v128_shl_n_byte(a, c) _mm_slli_si128(a, c)
+#define v128_shr_n_byte(a, c) _mm_srli_si128(a, c)
+#define v128_shl_n_8(a, c)                                                  \
+  _mm_packus_epi16(                                                         \
+      _mm_srli_epi16(                                                       \
+          _mm_slli_epi16(_mm_unpacklo_epi8(_mm_setzero_si128(), a), c), 8), \
+      _mm_srli_epi16(                                                       \
+          _mm_slli_epi16(_mm_unpackhi_epi8(_mm_setzero_si128(), a), c), 8))
+#define v128_shr_n_u8(a, c)                                             \
+  _mm_packus_epi16(                                                     \
+      _mm_srli_epi16(_mm_unpacklo_epi8(_mm_setzero_si128(), a), c + 8), \
+      _mm_srli_epi16(_mm_unpackhi_epi8(_mm_setzero_si128(), a), c + 8))
+#define v128_shr_n_s8(a, c)                                             \
+  _mm_packs_epi16(                                                      \
+      _mm_srai_epi16(_mm_unpacklo_epi8(_mm_setzero_si128(), a), c + 8), \
+      _mm_srai_epi16(_mm_unpackhi_epi8(_mm_setzero_si128(), a), c + 8))
+#define v128_shl_n_16(a, c) _mm_slli_epi16(a, c)
+#define v128_shr_n_u16(a, c) _mm_srli_epi16(a, c)
+#define v128_shr_n_s16(a, c) _mm_srai_epi16(a, c)
+#define v128_shl_n_32(a, c) _mm_slli_epi32(a, c)
+#define v128_shr_n_u32(a, c) _mm_srli_epi32(a, c)
+#define v128_shr_n_s32(a, c) _mm_srai_epi32(a, c)
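
The distinction matters at call sites: the _n_ forms expand to intrinsics whose count must be a compile-time constant, while the function forms above take a runtime count. A small contrast, assuming aom_dsp/aom_simd.h resolves to this header; both helper names are illustrative:

#include "aom_dsp/aom_simd.h"

static v128 scale_by_8(v128 a) {
  return v128_shl_n_16(a, 3); /* literal count: immediate form is fine */
}

static v128 scale_var(v128 a, unsigned int bits) {
  return v128_shl_16(a, bits); /* runtime count: must use the function form */
}
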
+
+#endif /* _V128_INTRINSICS_H */
diff --git a/aom_dsp/simd/v64_intrinsics.h b/aom_dsp/simd/v64_intrinsics.h
new file mode 100644
index 0000000..8a1f8c9
--- /dev/null
+++ b/aom_dsp/simd/v64_intrinsics.h
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef _V64_INTRINSICS_H
+#define _V64_INTRINSICS_H
+
+#include <stdio.h>
+#include <stdlib.h>
+#include "./v64_intrinsics_c.h"
+
+/* Fallback to plain, unoptimised C. */
+
+typedef c_v64 v64;
+
+SIMD_INLINE uint32_t v64_low_u32(v64 a) { return c_v64_low_u32(a); }
+SIMD_INLINE uint32_t v64_high_u32(v64 a) { return c_v64_high_u32(a); }
+SIMD_INLINE int32_t v64_low_s32(v64 a) { return c_v64_low_s32(a); }
+SIMD_INLINE int32_t v64_high_s32(v64 a) { return c_v64_high_s32(a); }
+SIMD_INLINE v64 v64_from_32(uint32_t x, uint32_t y) {
+  return c_v64_from_32(x, y);
+}
+SIMD_INLINE v64 v64_from_64(uint64_t x) { return c_v64_from_64(x); }
+SIMD_INLINE uint64_t v64_u64(v64 x) { return c_v64_u64(x); }
+SIMD_INLINE v64 v64_from_16(uint16_t a, uint16_t b, uint16_t c, uint16_t d) {
+  return c_v64_from_16(a, b, c, d);
+}
+
+SIMD_INLINE uint32_t u32_load_unaligned(const void *p) {
+  return c_u32_load_unaligned(p);
+}
+SIMD_INLINE uint32_t u32_load_aligned(const void *p) {
+  return c_u32_load_aligned(p);
+}
+SIMD_INLINE void u32_store_unaligned(void *p, uint32_t a) {
+  c_u32_store_unaligned(p, a);
+}
+SIMD_INLINE void u32_store_aligned(void *p, uint32_t a) {
+  c_u32_store_aligned(p, a);
+}
+
+SIMD_INLINE v64 v64_load_unaligned(const void *p) {
+  return c_v64_load_unaligned(p);
+}
+SIMD_INLINE v64 v64_load_aligned(const void *p) {
+  return c_v64_load_aligned(p);
+}
+
+SIMD_INLINE void v64_store_unaligned(void *p, v64 a) {
+  c_v64_store_unaligned(p, a);
+}
+SIMD_INLINE void v64_store_aligned(void *p, v64 a) {
+  c_v64_store_aligned(p, a);
+}
+
+SIMD_INLINE v64 v64_align(v64 a, v64 b, const unsigned int c) {
+  return c_v64_align(a, b, c);
+}
+
+SIMD_INLINE v64 v64_zero() { return c_v64_zero(); }
+SIMD_INLINE v64 v64_dup_8(uint8_t x) { return c_v64_dup_8(x); }
+SIMD_INLINE v64 v64_dup_16(uint16_t x) { return c_v64_dup_16(x); }
+SIMD_INLINE v64 v64_dup_32(uint32_t x) { return c_v64_dup_32(x); }
+
+SIMD_INLINE v64 v64_add_8(v64 a, v64 b) { return c_v64_add_8(a, b); }
+SIMD_INLINE v64 v64_add_16(v64 a, v64 b) { return c_v64_add_16(a, b); }
+SIMD_INLINE v64 v64_sadd_s16(v64 a, v64 b) { return c_v64_sadd_s16(a, b); }
+SIMD_INLINE v64 v64_add_32(v64 a, v64 b) { return c_v64_add_32(a, b); }
+SIMD_INLINE v64 v64_sub_8(v64 a, v64 b) { return c_v64_sub_8(a, b); }
+SIMD_INLINE v64 v64_ssub_u8(v64 a, v64 b) { return c_v64_ssub_u8(a, b); }
+SIMD_INLINE v64 v64_ssub_s8(v64 a, v64 b) { return c_v64_ssub_s8(a, b); }
+SIMD_INLINE v64 v64_sub_16(v64 a, v64 b) { return c_v64_sub_16(a, b); }
+SIMD_INLINE v64 v64_ssub_s16(v64 a, v64 b) { return c_v64_ssub_s16(a, b); }
+SIMD_INLINE v64 v64_sub_32(v64 a, v64 b) { return c_v64_sub_32(a, b); }
+SIMD_INLINE v64 v64_abs_s16(v64 a) { return c_v64_abs_s16(a); }
+
+SIMD_INLINE v64 v64_ziplo_8(v64 a, v64 b) { return c_v64_ziplo_8(a, b); }
+SIMD_INLINE v64 v64_ziphi_8(v64 a, v64 b) { return c_v64_ziphi_8(a, b); }
+SIMD_INLINE v64 v64_ziplo_16(v64 a, v64 b) { return c_v64_ziplo_16(a, b); }
+SIMD_INLINE v64 v64_ziphi_16(v64 a, v64 b) { return c_v64_ziphi_16(a, b); }
+SIMD_INLINE v64 v64_ziplo_32(v64 a, v64 b) { return c_v64_ziplo_32(a, b); }
+SIMD_INLINE v64 v64_ziphi_32(v64 a, v64 b) { return c_v64_ziphi_32(a, b); }
+SIMD_INLINE v64 v64_unziplo_8(v64 a, v64 b) { return c_v64_unziplo_8(a, b); }
+SIMD_INLINE v64 v64_unziphi_8(v64 a, v64 b) { return c_v64_unziphi_8(a, b); }
+SIMD_INLINE v64 v64_unziplo_16(v64 a, v64 b) { return c_v64_unziplo_16(a, b); }
+SIMD_INLINE v64 v64_unziphi_16(v64 a, v64 b) { return c_v64_unziphi_16(a, b); }
+SIMD_INLINE v64 v64_unpacklo_u8_s16(v64 a) { return c_v64_unpacklo_u8_s16(a); }
+SIMD_INLINE v64 v64_unpackhi_u8_s16(v64 a) { return c_v64_unpackhi_u8_s16(a); }
+SIMD_INLINE v64 v64_pack_s32_s16(v64 a, v64 b) {
+  return c_v64_pack_s32_s16(a, b);
+}
+SIMD_INLINE v64 v64_pack_s16_u8(v64 a, v64 b) {
+  return c_v64_pack_s16_u8(a, b);
+}
+SIMD_INLINE v64 v64_pack_s16_s8(v64 a, v64 b) {
+  return c_v64_pack_s16_s8(a, b);
+}
+SIMD_INLINE v64 v64_unpacklo_u16_s32(v64 a) {
+  return c_v64_unpacklo_u16_s32(a);
+}
+SIMD_INLINE v64 v64_unpacklo_s16_s32(v64 a) {
+  return c_v64_unpacklo_s16_s32(a);
+}
+SIMD_INLINE v64 v64_unpackhi_u16_s32(v64 a) {
+  return c_v64_unpackhi_u16_s32(a);
+}
+SIMD_INLINE v64 v64_unpackhi_s16_s32(v64 a) {
+  return c_v64_unpackhi_s16_s32(a);
+}
+SIMD_INLINE v64 v64_shuffle_8(v64 a, v64 pattern) {
+  return c_v64_shuffle_8(a, pattern);
+}
+
+typedef uint32_t sad64_internal;
+SIMD_INLINE sad64_internal v64_sad_u8_init() { return c_v64_sad_u8_init(); }
+SIMD_INLINE sad64_internal v64_sad_u8(sad64_internal s, v64 a, v64 b) {
+  return c_v64_sad_u8(s, a, b);
+}
+SIMD_INLINE uint32_t v64_sad_u8_sum(sad64_internal s) {
+  return c_v64_sad_u8_sum(s);
+}
+typedef uint32_t ssd64_internal;
+SIMD_INLINE ssd64_internal v64_ssd_u8_init() { return c_v64_ssd_u8_init(); }
+SIMD_INLINE ssd64_internal v64_ssd_u8(ssd64_internal s, v64 a, v64 b) {
+  return c_v64_ssd_u8(s, a, b);
+}
+SIMD_INLINE uint32_t v64_ssd_u8_sum(ssd64_internal s) {
+  return c_v64_ssd_u8_sum(s);
+}
+SIMD_INLINE int64_t v64_dotp_su8(v64 a, v64 b) { return c_v64_dotp_su8(a, b); }
+SIMD_INLINE int64_t v64_dotp_s16(v64 a, v64 b) { return c_v64_dotp_s16(a, b); }
+SIMD_INLINE uint64_t v64_hadd_u8(v64 a) { return c_v64_hadd_u8(a); }
+SIMD_INLINE int64_t v64_hadd_s16(v64 a) { return c_v64_hadd_s16(a); }
+
+SIMD_INLINE v64 v64_or(v64 a, v64 b) { return c_v64_or(a, b); }
+SIMD_INLINE v64 v64_xor(v64 a, v64 b) { return c_v64_xor(a, b); }
+SIMD_INLINE v64 v64_and(v64 a, v64 b) { return c_v64_and(a, b); }
+SIMD_INLINE v64 v64_andn(v64 a, v64 b) { return c_v64_andn(a, b); }
+
+SIMD_INLINE v64 v64_mullo_s16(v64 a, v64 b) { return c_v64_mullo_s16(a, b); }
+SIMD_INLINE v64 v64_mulhi_s16(v64 a, v64 b) { return c_v64_mulhi_s16(a, b); }
+SIMD_INLINE v64 v64_mullo_s32(v64 a, v64 b) { return c_v64_mullo_s32(a, b); }
+SIMD_INLINE v64 v64_madd_s16(v64 a, v64 b) { return c_v64_madd_s16(a, b); }
+SIMD_INLINE v64 v64_madd_us8(v64 a, v64 b) { return c_v64_madd_us8(a, b); }
+
+SIMD_INLINE v64 v64_avg_u8(v64 a, v64 b) { return c_v64_avg_u8(a, b); }
+SIMD_INLINE v64 v64_rdavg_u8(v64 a, v64 b) { return c_v64_rdavg_u8(a, b); }
+SIMD_INLINE v64 v64_avg_u16(v64 a, v64 b) { return c_v64_avg_u16(a, b); }
+SIMD_INLINE v64 v64_min_u8(v64 a, v64 b) { return c_v64_min_u8(a, b); }
+SIMD_INLINE v64 v64_max_u8(v64 a, v64 b) { return c_v64_max_u8(a, b); }
+SIMD_INLINE v64 v64_min_s8(v64 a, v64 b) { return c_v64_min_s8(a, b); }
+SIMD_INLINE v64 v64_max_s8(v64 a, v64 b) { return c_v64_max_s8(a, b); }
+SIMD_INLINE v64 v64_min_s16(v64 a, v64 b) { return c_v64_min_s16(a, b); }
+SIMD_INLINE v64 v64_max_s16(v64 a, v64 b) { return c_v64_max_s16(a, b); }
+
+SIMD_INLINE v64 v64_cmpgt_s8(v64 a, v64 b) { return c_v64_cmpgt_s8(a, b); }
+SIMD_INLINE v64 v64_cmplt_s8(v64 a, v64 b) { return c_v64_cmplt_s8(a, b); }
+SIMD_INLINE v64 v64_cmpeq_8(v64 a, v64 b) { return c_v64_cmpeq_8(a, b); }
+SIMD_INLINE v64 v64_cmpgt_s16(v64 a, v64 b) { return c_v64_cmpgt_s16(a, b); }
+SIMD_INLINE v64 v64_cmplt_s16(v64 a, v64 b) { return c_v64_cmplt_s16(a, b); }
+SIMD_INLINE v64 v64_cmpeq_16(v64 a, v64 b) { return c_v64_cmpeq_16(a, b); }
+
+SIMD_INLINE v64 v64_shl_8(v64 a, unsigned int n) { return c_v64_shl_8(a, n); }
+SIMD_INLINE v64 v64_shr_u8(v64 a, unsigned int n) { return c_v64_shr_u8(a, n); }
+SIMD_INLINE v64 v64_shr_s8(v64 a, unsigned int n) { return c_v64_shr_s8(a, n); }
+SIMD_INLINE v64 v64_shl_16(v64 a, unsigned int n) { return c_v64_shl_16(a, n); }
+SIMD_INLINE v64 v64_shr_u16(v64 a, unsigned int n) {
+  return c_v64_shr_u16(a, n);
+}
+SIMD_INLINE v64 v64_shr_s16(v64 a, unsigned int n) {
+  return c_v64_shr_s16(a, n);
+}
+SIMD_INLINE v64 v64_shl_32(v64 a, unsigned int n) { return c_v64_shl_32(a, n); }
+SIMD_INLINE v64 v64_shr_u32(v64 a, unsigned int n) {
+  return c_v64_shr_u32(a, n);
+}
+SIMD_INLINE v64 v64_shr_s32(v64 a, unsigned int n) {
+  return c_v64_shr_s32(a, n);
+}
+SIMD_INLINE v64 v64_shr_n_byte(v64 a, const unsigned int n) {
+  return c_v64_shr_n_byte(a, n);
+}
+SIMD_INLINE v64 v64_shl_n_byte(v64 a, const unsigned int n) {
+  return c_v64_shl_n_byte(a, n);
+}
+SIMD_INLINE v64 v64_shl_n_8(v64 a, const unsigned int c) {
+  return c_v64_shl_n_8(a, c);
+}
+SIMD_INLINE v64 v64_shr_n_u8(v64 a, const unsigned int c) {
+  return c_v64_shr_n_u8(a, c);
+}
+SIMD_INLINE v64 v64_shr_n_s8(v64 a, const unsigned int c) {
+  return c_v64_shr_n_s8(a, c);
+}
+SIMD_INLINE v64 v64_shl_n_16(v64 a, const unsigned int c) {
+  return c_v64_shl_n_16(a, c);
+}
+SIMD_INLINE v64 v64_shr_n_u16(v64 a, const unsigned int c) {
+  return c_v64_shr_n_u16(a, c);
+}
+SIMD_INLINE v64 v64_shr_n_s16(v64 a, const unsigned int c) {
+  return c_v64_shr_n_s16(a, c);
+}
+SIMD_INLINE v64 v64_shl_n_32(v64 a, const unsigned int c) {
+  return c_v64_shl_n_32(a, c);
+}
+SIMD_INLINE v64 v64_shr_n_u32(v64 a, const unsigned int c) {
+  return c_v64_shr_n_u32(a, c);
+}
+SIMD_INLINE v64 v64_shr_n_s32(v64 a, const unsigned int c) {
+  return c_v64_shr_n_s32(a, c);
+}
+
+#endif /* _V64_INTRINSICS_H */
diff --git a/aom_dsp/simd/v64_intrinsics_arm.h b/aom_dsp/simd/v64_intrinsics_arm.h
new file mode 100644
index 0000000..b487303
--- /dev/null
+++ b/aom_dsp/simd/v64_intrinsics_arm.h
@@ -0,0 +1,567 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef _V64_INTRINSICS_H
+#define _V64_INTRINSICS_H
+
+#include <arm_neon.h>
+#include "./v64_intrinsics_arm.h"
+
+/* vzip in gcc is broken prior to 4.6.1; require at least that version. */
+#if __GNUC__ &&                                                       \
+    ((__GNUC__ << 16) + (__GNUC_MINOR__ << 8) + __GNUC_PATCHLEVEL__ < \
+     (4 << 16) + (6 << 8) + 1)
+#error vzip buggy in gcc.  Get at least gcc 4.6.1.
+#endif
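
The check packs major, minor and patchlevel into one integer so that an ordinary comparison orders versions correctly. A worked example of the encoding (check_gcc_version_packing is an illustrative name):

#include <assert.h>

static void check_gcc_version_packing(void) {
  /* gcc 4.6.1 packs to 0x040601; an older 4.5.4 packs to 0x040504. */
  assert((4 << 16) + (6 << 8) + 1 == 0x040601);
  assert((4 << 16) + (5 << 8) + 4 < (4 << 16) + (6 << 8) + 1);
}
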
+
+typedef int64x1_t v64;
+
+SIMD_INLINE uint32_t v64_low_u32(v64 a) {
+  return vget_lane_u32(vreinterpret_u32_s64(a), 0);
+}
+
+SIMD_INLINE uint32_t v64_high_u32(v64 a) {
+  return vget_lane_u32(vreinterpret_u32_s64(a), 1);
+}
+
+SIMD_INLINE int32_t v64_low_s32(v64 a) {
+  return vget_lane_s32(vreinterpret_s32_s64(a), 0);
+}
+
+SIMD_INLINE int32_t v64_high_s32(v64 a) {
+  return vget_lane_s32(vreinterpret_s32_s64(a), 1);
+}
+
+SIMD_INLINE v64 v64_from_16(uint16_t a, uint16_t b, uint16_t c, uint16_t d) {
+  return vcreate_s64((uint64_t)a << 48 | (uint64_t)b << 32 | (uint64_t)c << 16 |
+                     d);
+}
+
+SIMD_INLINE v64 v64_from_32(uint32_t x, uint32_t y) {
+  return vcreate_s64((uint64_t)x << 32 | y);
+}
+
+SIMD_INLINE v64 v64_from_64(uint64_t x) { return vcreate_s64(x); }
+
+SIMD_INLINE uint64_t v64_u64(v64 x) { return x; }
+
+SIMD_INLINE uint32_t u32_load_aligned(const void *p) {
+  return *((uint32_t *)p);
+}
+
+SIMD_INLINE uint32_t u32_load_unaligned(const void *p) {
+  return vget_lane_u32(vreinterpret_u32_u8(vld1_u8((const uint8_t *)p)), 0);
+}
+
+SIMD_INLINE void u32_store_aligned(void *p, uint32_t a) {
+  *((uint32_t *)p) = a;
+}
+
+SIMD_INLINE void u32_store_unaligned(void *p, uint32_t a) {
+#if __CC_ARM
+  *((__packed uint32_t *)p) = a;
+#elif __GNUC__
+  *((__attribute((packed)) uint32_t *)p) = a;
+#else
+  vst1_lane_u32((uint32_t *)p, vdup_n_u32(a), 0);
+#endif
+}
+
+SIMD_INLINE v64 v64_load_aligned(const void *p) {
+  return vreinterpret_s64_u8(vld1_u8((const uint8_t *)p));
+}
+
+SIMD_INLINE v64 v64_load_unaligned(const void *p) {
+  return v64_load_aligned(p);
+}
+
+SIMD_INLINE void v64_store_aligned(void *p, v64 r) {
+  vst1_u8((uint8_t *)p, vreinterpret_u8_s64(r));
+}
+
+SIMD_INLINE void v64_store_unaligned(void *p, v64 r) {
+  vst1_u8((uint8_t *)p, vreinterpret_u8_s64(r));
+}
+
+SIMD_INLINE v64 v64_align(v64 a, v64 b, const unsigned int c) {
+#if __OPTIMIZE__
+  return c ? vreinterpret_s64_s8(
+                 vext_s8(vreinterpret_s8_s64(b), vreinterpret_s8_s64(a), c))
+           : b;
+#else
+  return c ? v64_from_64((v64_u64(b) >> c * 8) | (v64_u64(a) << (8 - c) * 8))
+           : b;
+#endif
+}
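
Both branches implement the same operation: treat a:b as one 16-byte value with b in the low half and return the 8 bytes starting at byte offset c. A plain-C model with a worked value (align_ref is an illustrative name):

#include <stdint.h>

/* Bytes c..c+7 of the 16-byte concatenation a:b (b in the low 8 bytes). */
static uint64_t align_ref(uint64_t a, uint64_t b, unsigned int c) {
  return c ? (b >> c * 8) | (a << (8 - c) * 8) : b;
}

/* align_ref(0x1111111111111111, 0x2222222222222222, 3)
   == 0x1111112222222222 */
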
+
+SIMD_INLINE v64 v64_zero() { return vreinterpret_s64_u8(vdup_n_u8(0)); }
+
+SIMD_INLINE v64 v64_ones() { return vreinterpret_s64_u8(vdup_n_u8(-1)); }
+
+SIMD_INLINE v64 v64_dup_8(uint8_t x) {
+  return vreinterpret_s64_u8(vdup_n_u8(x));
+}
+
+SIMD_INLINE v64 v64_dup_16(uint16_t x) {
+  return vreinterpret_s64_u16(vdup_n_u16(x));
+}
+
+SIMD_INLINE v64 v64_dup_32(uint32_t x) {
+  return vreinterpret_s64_u32(vdup_n_u32(x));
+}
+
+SIMD_INLINE int64_t v64_dotp_su8(v64 x, v64 y) {
+  int64x2_t r = vpaddlq_s32(vpaddlq_s16(
+      vmulq_s16(vmovl_s8(vreinterpret_s8_s64(x)),
+                vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_s64(y))))));
+  return vadd_s64(vget_high_s64(r), vget_low_s64(r));
+}
+
+SIMD_INLINE int64_t v64_dotp_s16(v64 x, v64 y) {
+  int64x2_t r =
+      vpaddlq_s32(vmull_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
+  return vget_high_s64(r) + vget_low_s64(r);
+}
+
+SIMD_INLINE uint64_t v64_hadd_u8(v64 x) {
+  return vpaddl_u32(vpaddl_u16(vpaddl_u8(vreinterpret_u8_s64(x))));
+}
+
+SIMD_INLINE int64_t v64_hadd_s16(v64 a) {
+  return vpaddl_s32(vpaddl_s16(vreinterpret_s16_s64(a)));
+}
+
+typedef uint16x8_t sad64_internal;
+
+SIMD_INLINE sad64_internal v64_sad_u8_init() { return vdupq_n_u16(0); }
+
+/* Implementation dependent return value.  Result must be finalised with
+   v64_sad_u8_sum().
+   The result for more than 32 v64_sad_u8() calls is undefined. */
+SIMD_INLINE sad64_internal v64_sad_u8(sad64_internal s, v64 a, v64 b) {
+  return vabal_u8(s, vreinterpret_u8_s64(a), vreinterpret_u8_s64(b));
+}
+
+SIMD_INLINE uint32_t v64_sad_u8_sum(sad64_internal s) {
+  uint64x2_t r = vpaddlq_u32(vpaddlq_u16(s));
+  return (uint32_t)(vget_high_u64(r) + vget_low_u64(r));
+}
+
+typedef int64x1_t ssd64_internal;
+
+SIMD_INLINE ssd64_internal v64_ssd_u8_init() { return 0; }
+
+/* Implementation dependent return value.  Result must be finalised with
+ * v64_ssd_u8_sum(). */
+SIMD_INLINE ssd64_internal v64_ssd_u8(ssd64_internal s, v64 a, v64 b) {
+  uint8x8_t t = vabd_u8(vreinterpret_u8_s64(a), vreinterpret_u8_s64(b));
+  uint64x2_t r = vpaddlq_u32(vpaddlq_u16(vmull_u8(t, t)));
+  return vadd_u64(s, vadd_u64(vget_high_u64(r), vget_low_u64(r)));
+}
+
+SIMD_INLINE uint32_t v64_ssd_u8_sum(ssd64_internal s) { return (uint32_t)s; }
+
+SIMD_INLINE v64 v64_or(v64 x, v64 y) { return vorr_s64(x, y); }
+
+SIMD_INLINE v64 v64_xor(v64 x, v64 y) { return veor_s64(x, y); }
+
+SIMD_INLINE v64 v64_and(v64 x, v64 y) { return vand_s64(x, y); }
+
+SIMD_INLINE v64 v64_andn(v64 x, v64 y) { return vbic_s64(x, y); }
+
+SIMD_INLINE v64 v64_add_8(v64 x, v64 y) {
+  return vreinterpret_s64_u8(
+      vadd_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
+}
+
+SIMD_INLINE v64 v64_add_16(v64 x, v64 y) {
+  return vreinterpret_s64_s16(
+      vadd_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
+}
+
+SIMD_INLINE v64 v64_sadd_s16(v64 x, v64 y) {
+  return vreinterpret_s64_s16(
+      vqadd_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
+}
+
+SIMD_INLINE v64 v64_add_32(v64 x, v64 y) {
+  return vreinterpret_s64_u32(
+      vadd_u32(vreinterpret_u32_s64(x), vreinterpret_u32_s64(y)));
+}
+
+SIMD_INLINE v64 v64_sub_8(v64 x, v64 y) {
+  return vreinterpret_s64_u8(
+      vsub_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
+}
+
+SIMD_INLINE v64 v64_sub_16(v64 x, v64 y) {
+  return vreinterpret_s64_s16(
+      vsub_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
+}
+
+SIMD_INLINE v64 v64_ssub_s16(v64 x, v64 y) {
+  return vreinterpret_s64_s16(
+      vqsub_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
+}
+
+SIMD_INLINE v64 v64_ssub_u8(v64 x, v64 y) {
+  return vreinterpret_s64_u8(
+      vqsub_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
+}
+
+SIMD_INLINE v64 v64_ssub_s8(v64 x, v64 y) {
+  return vreinterpret_s64_s8(
+      vqsub_s8(vreinterpret_s8_s64(x), vreinterpret_s8_s64(y)));
+}
+
+SIMD_INLINE v64 v64_sub_32(v64 x, v64 y) {
+  return vreinterpret_s64_s32(
+      vsub_s32(vreinterpret_s32_s64(x), vreinterpret_s32_s64(y)));
+}
+
+SIMD_INLINE v64 v64_abs_s16(v64 x) {
+  return vreinterpret_s64_s16(vabs_s16(vreinterpret_s16_s64(x)));
+}
+
+SIMD_INLINE v64 v64_mullo_s16(v64 x, v64 y) {
+  return vreinterpret_s64_s16(
+      vmul_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
+}
+
+SIMD_INLINE v64 v64_mulhi_s16(v64 x, v64 y) {
+  return vreinterpret_s64_s16(vmovn_s32(vshrq_n_s32(
+      vmull_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)), 16)));
+}
+
+SIMD_INLINE v64 v64_mullo_s32(v64 x, v64 y) {
+  return vreinterpret_s64_s32(
+      vmul_s32(vreinterpret_s32_s64(x), vreinterpret_s32_s64(y)));
+}
+
+SIMD_INLINE v64 v64_madd_s16(v64 x, v64 y) {
+  int32x4_t t = vmull_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y));
+  return vreinterpret_s64_s32(
+      vpadd_s32(vreinterpret_s32_s64(vget_low_s64(vreinterpretq_s64_s32(t))),
+                vreinterpret_s32_s64(vget_high_s64(vreinterpretq_s64_s32(t)))));
+}
+
+SIMD_INLINE v64 v64_madd_us8(v64 x, v64 y) {
+  return vreinterpret_s64_s16(vqmovn_s32(vpaddlq_s16(
+      vaddq_s16(vmull_s8(vadd_s8(vreinterpret_s8_s64(x), vdup_n_s8(-128)),
+                         vreinterpret_s8_s64(y)),
+                vshlq_n_s16(vmovl_s8(vreinterpret_s8_s64(y)), 7)))));
+}
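
NEON has no unsigned-by-signed byte multiply, so v64_madd_us8() biases x by -128 to bring it into signed range and adds the missing 128 * y term back as vshlq_n_s16(vmovl_s8(y), 7). A scalar check of that identity, assuming the usual two's-complement wrap on the int8_t cast (check_madd_us8_bias is an illustrative name):

#include <assert.h>
#include <stdint.h>

/* u * s == (u - 128) * s + 128 * s, with (u - 128) representable in int8_t. */
static void check_madd_us8_bias(void) {
  int u, s;
  for (u = 0; u < 256; u++)
    for (s = -128; s < 128; s++)
      assert(u * s == (int8_t)(uint8_t)(u - 128) * s + 128 * s);
}
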
+
+SIMD_INLINE v64 v64_avg_u8(v64 x, v64 y) {
+  return vreinterpret_s64_u8(
+      vrhadd_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
+}
+
+SIMD_INLINE v64 v64_rdavg_u8(v64 x, v64 y) {
+  return vreinterpret_s64_u8(
+      vhadd_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
+}
+
+SIMD_INLINE v64 v64_avg_u16(v64 x, v64 y) {
+  return vreinterpret_s64_u16(
+      vrhadd_u16(vreinterpret_u16_s64(x), vreinterpret_u16_s64(y)));
+}
+
+SIMD_INLINE v64 v64_max_u8(v64 x, v64 y) {
+  return vreinterpret_s64_u8(
+      vmax_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
+}
+
+SIMD_INLINE v64 v64_min_u8(v64 x, v64 y) {
+  return vreinterpret_s64_u8(
+      vmin_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
+}
+
+SIMD_INLINE v64 v64_max_s8(v64 x, v64 y) {
+  return vreinterpret_s64_s8(
+      vmax_s8(vreinterpret_s8_s64(x), vreinterpret_s8_s64(y)));
+}
+
+SIMD_INLINE v64 v64_min_s8(v64 x, v64 y) {
+  return vreinterpret_s64_s8(
+      vmin_s8(vreinterpret_s8_s64(x), vreinterpret_s8_s64(y)));
+}
+
+SIMD_INLINE v64 v64_max_s16(v64 x, v64 y) {
+  return vreinterpret_s64_s16(
+      vmax_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
+}
+
+SIMD_INLINE v64 v64_min_s16(v64 x, v64 y) {
+  return vreinterpret_s64_s16(
+      vmin_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
+}
+
+SIMD_INLINE v64 v64_ziplo_8(v64 x, v64 y) {
+  uint8x8x2_t r = vzip_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x));
+  return vreinterpret_s64_u8(r.val[0]);
+}
+
+SIMD_INLINE v64 v64_ziphi_8(v64 x, v64 y) {
+  uint8x8x2_t r = vzip_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x));
+  return vreinterpret_s64_u8(r.val[1]);
+}
+
+SIMD_INLINE v64 v64_ziplo_16(v64 x, v64 y) {
+  int16x4x2_t r = vzip_s16(vreinterpret_s16_s64(y), vreinterpret_s16_s64(x));
+  return vreinterpret_s64_s16(r.val[0]);
+}
+
+SIMD_INLINE v64 v64_ziphi_16(v64 x, v64 y) {
+  int16x4x2_t r = vzip_s16(vreinterpret_s16_s64(y), vreinterpret_s16_s64(x));
+  return vreinterpret_s64_s16(r.val[1]);
+}
+
+SIMD_INLINE v64 v64_ziplo_32(v64 x, v64 y) {
+  int32x2x2_t r = vzip_s32(vreinterpret_s32_s64(y), vreinterpret_s32_s64(x));
+  return vreinterpret_s64_s32(r.val[0]);
+}
+
+SIMD_INLINE v64 v64_ziphi_32(v64 x, v64 y) {
+  int32x2x2_t r = vzip_s32(vreinterpret_s32_s64(y), vreinterpret_s32_s64(x));
+  return vreinterpret_s64_s32(r.val[1]);
+}
+
+SIMD_INLINE v64 v64_unpacklo_u8_s16(v64 a) {
+  return vreinterpret_s64_u16(vget_low_u16(vmovl_u8(vreinterpret_u8_s64(a))));
+}
+
+SIMD_INLINE v64 v64_unpackhi_u8_s16(v64 a) {
+  return vreinterpret_s64_u16(vget_high_u16(vmovl_u8(vreinterpret_u8_s64(a))));
+}
+
+SIMD_INLINE v64 v64_pack_s32_s16(v64 x, v64 y) {
+  return vreinterpret_s64_s16(vqmovn_s32(
+      vcombine_s32(vreinterpret_s32_s64(y), vreinterpret_s32_s64(x))));
+}
+
+SIMD_INLINE v64 v64_pack_s16_u8(v64 x, v64 y) {
+  return vreinterpret_s64_u8(vqmovun_s16(vreinterpretq_s16_s32(
+      vcombine_s32(vreinterpret_s32_s64(y), vreinterpret_s32_s64(x)))));
+}
+
+SIMD_INLINE v64 v64_pack_s16_s8(v64 x, v64 y) {
+  return vreinterpret_s64_s8(vqmovn_s16(vreinterpretq_s16_s32(
+      vcombine_s32(vreinterpret_s32_s64(y), vreinterpret_s32_s64(x)))));
+}
+
+SIMD_INLINE v64 v64_unziplo_8(v64 x, v64 y) {
+  uint8x8x2_t r = vuzp_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x));
+  return vreinterpret_s64_u8(r.val[0]);
+}
+
+SIMD_INLINE v64 v64_unziphi_8(v64 x, v64 y) {
+  uint8x8x2_t r = vuzp_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x));
+  return vreinterpret_s64_u8(r.val[1]);
+}
+
+SIMD_INLINE v64 v64_unziplo_16(v64 x, v64 y) {
+  uint16x4x2_t r = vuzp_u16(vreinterpret_u16_s64(y), vreinterpret_u16_s64(x));
+  return vreinterpret_s64_u16(r.val[0]);
+}
+
+SIMD_INLINE v64 v64_unziphi_16(v64 x, v64 y) {
+  uint16x4x2_t r = vuzp_u16(vreinterpret_u16_s64(y), vreinterpret_u16_s64(x));
+  return vreinterpret_s64_u16(r.val[1]);
+}
+
+SIMD_INLINE v64 v64_unpacklo_s16_s32(v64 x) {
+  return vreinterpret_s64_s32(vget_low_s32(vmovl_s16(vreinterpret_s16_s64(x))));
+}
+
+SIMD_INLINE v64 v64_unpacklo_u16_s32(v64 x) {
+  return vreinterpret_s64_u32(vget_low_u32(vmovl_u16(vreinterpret_u16_s64(x))));
+}
+
+SIMD_INLINE v64 v64_unpackhi_s16_s32(v64 x) {
+  return vreinterpret_s64_s32(
+      vget_high_s32(vmovl_s16(vreinterpret_s16_s64(x))));
+}
+
+SIMD_INLINE v64 v64_unpackhi_u16_s32(v64 x) {
+  return vreinterpret_s64_u32(
+      vget_high_u32(vmovl_u16(vreinterpret_u16_s64(x))));
+}
+
+SIMD_INLINE v64 v64_shuffle_8(v64 x, v64 pattern) {
+  return vreinterpret_s64_u8(
+      vtbl1_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(pattern)));
+}
+
+SIMD_INLINE v64 v64_cmpgt_s8(v64 x, v64 y) {
+  return vreinterpret_s64_u8(
+      vcgt_s8(vreinterpret_s8_s64(x), vreinterpret_s8_s64(y)));
+}
+
+SIMD_INLINE v64 v64_cmplt_s8(v64 x, v64 y) {
+  return vreinterpret_s64_u8(
+      vclt_s8(vreinterpret_s8_s64(x), vreinterpret_s8_s64(y)));
+}
+
+SIMD_INLINE v64 v64_cmpeq_8(v64 x, v64 y) {
+  return vreinterpret_s64_u8(
+      vceq_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
+}
+
+SIMD_INLINE v64 v64_cmpgt_s16(v64 x, v64 y) {
+  return vreinterpret_s64_u16(
+      vcgt_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
+}
+
+SIMD_INLINE v64 v64_cmplt_s16(v64 x, v64 y) {
+  return vreinterpret_s64_u16(
+      vclt_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
+}
+
+SIMD_INLINE v64 v64_cmpeq_16(v64 x, v64 y) {
+  return vreinterpret_s64_u16(
+      vceq_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
+}
+
+SIMD_INLINE v64 v64_shl_8(v64 a, unsigned int c) {
+  return vreinterpret_s64_u8(vshl_u8(vreinterpret_u8_s64(a), vdup_n_s8(c)));
+}
+
+SIMD_INLINE v64 v64_shr_u8(v64 a, unsigned int c) {
+  return vreinterpret_s64_u8(vshl_u8(vreinterpret_u8_s64(a), vdup_n_s8(-c)));
+}
+
+SIMD_INLINE v64 v64_shr_s8(v64 a, unsigned int c) {
+  return vreinterpret_s64_s8(vshl_s8(vreinterpret_s8_s64(a), vdup_n_s8(-c)));
+}
+
+SIMD_INLINE v64 v64_shl_16(v64 a, unsigned int c) {
+  return vreinterpret_s64_u16(vshl_u16(vreinterpret_u16_s64(a), vdup_n_s16(c)));
+}
+
+SIMD_INLINE v64 v64_shr_u16(v64 a, unsigned int c) {
+  return vreinterpret_s64_u16(
+      vshl_u16(vreinterpret_u16_s64(a), vdup_n_s16(-(int)c)));
+}
+
+SIMD_INLINE v64 v64_shr_s16(v64 a, unsigned int c) {
+  return vreinterpret_s64_s16(
+      vshl_s16(vreinterpret_s16_s64(a), vdup_n_s16(-(int)c)));
+}
+
+SIMD_INLINE v64 v64_shl_32(v64 a, unsigned int c) {
+  return vreinterpret_s64_u32(vshl_u32(vreinterpret_u32_s64(a), vdup_n_s32(c)));
+}
+
+SIMD_INLINE v64 v64_shr_u32(v64 a, unsigned int c) {
+  return vreinterpret_s64_u32(
+      vshl_u32(vreinterpret_u32_s64(a), vdup_n_s32(-(int)c)));
+}
+
+SIMD_INLINE v64 v64_shr_s32(v64 a, unsigned int c) {
+  return vreinterpret_s64_s32(
+      vshl_s32(vreinterpret_s32_s64(a), vdup_n_s32(-(int)c)));
+}
+
+#if __OPTIMIZE__
+
+SIMD_INLINE v64 v64_shl_n_byte(v64 a, const unsigned int c) {
+  return vshl_n_s64(a, c * 8);
+}
+
+SIMD_INLINE v64 v64_shr_n_byte(v64 a, const unsigned int c) {
+  return c ? vreinterpret_s64_u64(vshr_n_u64(vreinterpret_u64_s64(a), c * 8))
+           : a;
+}
+
+SIMD_INLINE v64 v64_shl_n_8(v64 a, const unsigned int c) {
+  return vreinterpret_s64_u8(vshl_n_u8(vreinterpret_u8_s64(a), c));
+}
+
+SIMD_INLINE v64 v64_shr_n_u8(v64 a, const unsigned int c) {
+  return vreinterpret_s64_u8(vshr_n_u8(vreinterpret_u8_s64(a), c));
+}
+
+SIMD_INLINE v64 v64_shr_n_s8(v64 a, const unsigned int c) {
+  return vreinterpret_s64_s8(vshr_n_s8(vreinterpret_s8_s64(a), c));
+}
+
+SIMD_INLINE v64 v64_shl_n_16(v64 a, const unsigned int c) {
+  return vreinterpret_s64_u16(vshl_n_u16(vreinterpret_u16_s64(a), c));
+}
+
+SIMD_INLINE v64 v64_shr_n_u16(v64 a, const unsigned int c) {
+  return vreinterpret_s64_u16(vshr_n_u16(vreinterpret_u16_s64(a), c));
+}
+
+SIMD_INLINE v64 v64_shr_n_s16(v64 a, const unsigned int c) {
+  return vreinterpret_s64_s16(vshr_n_s16(vreinterpret_s16_s64(a), c));
+}
+
+SIMD_INLINE v64 v64_shl_n_32(v64 a, const unsigned int c) {
+  return vreinterpret_s64_u32(vshl_n_u32(vreinterpret_u32_s64(a), c));
+}
+
+SIMD_INLINE v64 v64_shr_n_u32(v64 a, const unsigned int c) {
+  return vreinterpret_s64_u32(vshr_n_u32(vreinterpret_u32_s64(a), c));
+}
+
+SIMD_INLINE v64 v64_shr_n_s32(v64 a, const unsigned int c) {
+  return vreinterpret_s64_s32(vshr_n_s32(vreinterpret_s32_s64(a), c));
+}
+
+#else
+
+SIMD_INLINE v64 v64_shl_n_byte(v64 a, const unsigned int c) {
+  return v64_from_64(v64_u64(a) << c * 8);
+}
+
+SIMD_INLINE v64 v64_shr_n_byte(v64 a, const unsigned int c) {
+  return v64_from_64(v64_u64(a) >> c * 8);
+}
+
+SIMD_INLINE v64 v64_shl_n_8(v64 a, const unsigned int c) {
+  return v64_shl_8(a, c);
+}
+
+SIMD_INLINE v64 v64_shr_n_u8(v64 a, const unsigned int c) {
+  return v64_shr_u8(a, c);
+}
+
+SIMD_INLINE v64 v64_shr_n_s8(v64 a, const unsigned int c) {
+  return v64_shr_s8(a, c);
+}
+
+SIMD_INLINE v64 v64_shl_n_16(v64 a, const unsigned int c) {
+  return v64_shl_16(a, c);
+}
+
+SIMD_INLINE v64 v64_shr_n_u16(v64 a, const unsigned int c) {
+  return v64_shr_u16(a, c);
+}
+
+SIMD_INLINE v64 v64_shr_n_s16(v64 a, const unsigned int c) {
+  return v64_shr_s16(a, c);
+}
+
+SIMD_INLINE v64 v64_shl_n_32(v64 a, const unsigned int c) {
+  return v64_shl_32(a, c);
+}
+
+SIMD_INLINE v64 v64_shr_n_u32(v64 a, const unsigned int c) {
+  return v64_shr_u32(a, c);
+}
+
+SIMD_INLINE v64 v64_shr_n_s32(v64 a, const unsigned int c) {
+  return v64_shr_s32(a, c);
+}
+
+#endif
+
+#endif /* _V64_INTRINSICS_H */
diff --git a/aom_dsp/simd/v64_intrinsics_c.h b/aom_dsp/simd/v64_intrinsics_c.h
new file mode 100644
index 0000000..3f2f352
--- /dev/null
+++ b/aom_dsp/simd/v64_intrinsics_c.h
@@ -0,0 +1,887 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef _V64_INTRINSICS_C_H
+#define _V64_INTRINSICS_C_H
+
+/* Note: This implements the intrinsics in plain, unoptimised C.
+   Intended for reference, porting or debugging. */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include "./aom_config.h"
+
+extern const int simd_check;
+
+typedef union {
+  uint8_t u8[8];
+  uint16_t u16[4];
+  uint32_t u32[2];
+  uint64_t u64;
+  int8_t s8[8];
+  int16_t s16[4];
+  int32_t s32[2];
+  int64_t s64;
+} c_v64;
+
+SIMD_INLINE uint32_t c_v64_low_u32(c_v64 a) { return a.u32[CONFIG_BIG_ENDIAN]; }
+
+SIMD_INLINE uint32_t c_v64_high_u32(c_v64 a) {
+  return a.u32[!CONFIG_BIG_ENDIAN];
+}
+
+SIMD_INLINE int32_t c_v64_low_s32(c_v64 a) { return a.s32[CONFIG_BIG_ENDIAN]; }
+
+SIMD_INLINE int32_t c_v64_high_s32(c_v64 a) {
+  return a.s32[!CONFIG_BIG_ENDIAN];
+}
+
+SIMD_INLINE c_v64 c_v64_from_32(uint32_t x, uint32_t y) {
+  c_v64 t;
+  t.u32[!CONFIG_BIG_ENDIAN] = x;
+  t.u32[CONFIG_BIG_ENDIAN] = y;
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_from_64(uint64_t x) {
+  c_v64 t;
+  t.u64 = x;
+  return t;
+}
+
+SIMD_INLINE uint64_t c_v64_u64(c_v64 x) { return x.u64; }
+
+SIMD_INLINE c_v64 c_v64_from_16(uint16_t a, uint16_t b, uint16_t c,
+                                uint16_t d) {
+  c_v64 t;
+  if (CONFIG_BIG_ENDIAN) {
+    t.u16[0] = a;
+    t.u16[1] = b;
+    t.u16[2] = c;
+    t.u16[3] = d;
+  } else {
+    t.u16[3] = a;
+    t.u16[2] = b;
+    t.u16[1] = c;
+    t.u16[0] = d;
+  }
+  return t;
+}
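
The union exposes lane views of one 64-bit value, and the CONFIG_BIG_ENDIAN indexing keeps lane numbering the same on either byte order. A quick layout check on a little-endian host (CONFIG_BIG_ENDIAN == 0), assuming this header is included; check_lane_layout is an illustrative name:

#include <assert.h>
#include "./v64_intrinsics_c.h"

static void check_lane_layout(void) {
  c_v64 t = c_v64_from_16(1, 2, 3, 4);
  assert(c_v64_u64(t) == 0x0001000200030004ULL);
  assert(t.u16[0] == 4 && t.u16[3] == 1);
  assert(c_v64_low_u32(t) == 0x00030004 && c_v64_high_u32(t) == 0x00010002);
}
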
+
+SIMD_INLINE uint32_t c_u32_load_unaligned(const void *p) {
+  uint32_t t;
+  uint8_t *pp = (uint8_t *)p;
+  uint8_t *q = (uint8_t *)&t;
+  int c;
+  for (c = 0; c < 4; c++) q[c] = pp[c];
+  return t;
+}
+
+SIMD_INLINE void c_u32_store_unaligned(void *p, uint32_t a) {
+  uint8_t *pp = (uint8_t *)p;
+  uint8_t *q = (uint8_t *)&a;
+  int c;
+  for (c = 0; c < 4; c++) pp[c] = q[c];
+}
+
+SIMD_INLINE uint32_t c_u32_load_aligned(const void *p) {
+  if (simd_check && (uintptr_t)p & 3) {
+    fprintf(stderr, "Error: Unaligned u32 load at %p\n", p);
+    abort();
+  }
+  return c_u32_load_unaligned(p);
+}
+
+SIMD_INLINE void c_u32_store_aligned(void *p, uint32_t a) {
+  if (simd_check && (uintptr_t)p & 3) {
+    fprintf(stderr, "Error: Unaligned u32 store at %p\n", p);
+    abort();
+  }
+  c_u32_store_unaligned(p, a);
+}
+
+SIMD_INLINE c_v64 c_v64_load_unaligned(const void *p) {
+  c_v64 t;
+  uint8_t *pp = (uint8_t *)p;
+  uint8_t *q = (uint8_t *)&t;
+  int c;
+  for (c = 0; c < 8; c++) q[c] = pp[c];
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_load_aligned(const void *p) {
+  if (simd_check && (uintptr_t)p & 7) {
+    fprintf(stderr, "Error: Unaligned c_v64 load at %p\n", p);
+    abort();
+  }
+  return c_v64_load_unaligned(p);
+}
+
+SIMD_INLINE void c_v64_store_unaligned(void *p, c_v64 a) {
+  uint8_t *q = (uint8_t *)p;
+  uint8_t *r = (uint8_t *)&a;
+  int c;
+  for (c = 0; c < 8; c++) q[c] = r[c];
+}
+
+SIMD_INLINE void c_v64_store_aligned(void *p, c_v64 a) {
+  if (simd_check && (uintptr_t)p & 7) {
+    fprintf(stderr, "Error: Unaligned c_v64 store at %p\n", p);
+    abort();
+  }
+  c_v64_store_unaligned(p, a);
+}
+
+SIMD_INLINE c_v64 c_v64_zero() {
+  c_v64 t;
+  t.u64 = 0;
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_dup_8(uint8_t x) {
+  c_v64 t;
+  t.u8[0] = t.u8[1] = t.u8[2] = t.u8[3] = t.u8[4] = t.u8[5] = t.u8[6] =
+      t.u8[7] = x;
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_dup_16(uint16_t x) {
+  c_v64 t;
+  t.u16[0] = t.u16[1] = t.u16[2] = t.u16[3] = x;
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_dup_32(uint32_t x) {
+  c_v64 t;
+  t.u32[0] = t.u32[1] = x;
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_add_8(c_v64 a, c_v64 b) {
+  c_v64 t;
+  int c;
+  for (c = 0; c < 8; c++) t.u8[c] = a.u8[c] + b.u8[c];
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_add_16(c_v64 a, c_v64 b) {
+  c_v64 t;
+  int c;
+  for (c = 0; c < 4; c++) t.u16[c] = a.u16[c] + b.u16[c];
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_sadd_s16(c_v64 a, c_v64 b) {
+  c_v64 t;
+  int c;
+  for (c = 0; c < 4; c++)
+    t.s16[c] = (int32_t)a.s16[c] + (int32_t)b.s16[c] > 32767
+                   ? 32767
+                   : (int32_t)a.s16[c] + (int32_t)b.s16[c] < -32768
+                         ? -32768
+                         : (int32_t)a.s16[c] + (int32_t)b.s16[c];
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_add_32(c_v64 a, c_v64 b) {
+  c_v64 t;
+  t.u32[0] = a.u32[0] + b.u32[0];
+  t.u32[1] = a.u32[1] + b.u32[1];
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_sub_8(c_v64 a, c_v64 b) {
+  c_v64 t;
+  int c;
+  for (c = 0; c < 8; c++) t.u8[c] = a.u8[c] - b.u8[c];
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_ssub_u8(c_v64 a, c_v64 b) {
+  c_v64 t;
+  int c;
+  for (c = 0; c < 8; c++)
+    t.u8[c] = (int32_t)((uint32_t)a.u8[c] - (uint32_t)b.u8[c]) < 0
+                  ? 0
+                  : a.u8[c] - b.u8[c];
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_ssub_s8(c_v64 a, c_v64 b) {
+  c_v64 t;
+  int c;
+  for (c = 0; c < 8; c++) {
+    int16_t d = (int16_t)a.s8[c] - (int16_t)b.s8[c];
+    t.s8[c] = d > 127 ? 127 : (d < -128 ? -128 : d);
+  }
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_sub_16(c_v64 a, c_v64 b) {
+  c_v64 t;
+  int c;
+  for (c = 0; c < 4; c++) t.u16[c] = a.u16[c] - b.u16[c];
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_ssub_s16(c_v64 a, c_v64 b) {
+  c_v64 t;
+  int c;
+  for (c = 0; c < 4; c++)
+    t.s16[c] = (int32_t)a.s16[c] - (int32_t)b.s16[c] < -32768
+                   ? -32768
+                   : (int32_t)a.s16[c] - (int32_t)b.s16[c] > 32767
+                         ? 32767
+                         : (int32_t)a.s16[c] - (int32_t)b.s16[c];
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_sub_32(c_v64 a, c_v64 b) {
+  c_v64 t;
+  t.u32[0] = a.u32[0] - b.u32[0];
+  t.u32[1] = a.u32[1] - b.u32[1];
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_abs_s16(c_v64 a) {
+  c_v64 t;
+  int c;
+  for (c = 0; c < 4; c++)
+    t.u16[c] = (int16_t)a.u16[c] > 0 ? a.u16[c] : -a.u16[c];
+  return t;
+}
+
+SIMD_INLINE c_v64 _c_v64_zip_8(c_v64 a, c_v64 b, int mode) {
+  c_v64 t;
+  if (mode) {
+    t.u8[7] = a.u8[7];
+    t.u8[6] = b.u8[7];
+    t.u8[5] = a.u8[6];
+    t.u8[4] = b.u8[6];
+    t.u8[3] = a.u8[5];
+    t.u8[2] = b.u8[5];
+    t.u8[1] = a.u8[4];
+    t.u8[0] = b.u8[4];
+  } else {
+    t.u8[7] = a.u8[3];
+    t.u8[6] = b.u8[3];
+    t.u8[5] = a.u8[2];
+    t.u8[4] = b.u8[2];
+    t.u8[3] = a.u8[1];
+    t.u8[2] = b.u8[1];
+    t.u8[1] = a.u8[0];
+    t.u8[0] = b.u8[0];
+  }
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_ziplo_8(c_v64 a, c_v64 b) {
+  return CONFIG_BIG_ENDIAN ? _c_v64_zip_8(b, a, 1) : _c_v64_zip_8(a, b, 0);
+}
+
+SIMD_INLINE c_v64 c_v64_ziphi_8(c_v64 a, c_v64 b) {
+  return CONFIG_BIG_ENDIAN ? _c_v64_zip_8(b, a, 0) : _c_v64_zip_8(a, b, 1);
+}
+
+SIMD_INLINE c_v64 _c_v64_zip_16(c_v64 a, c_v64 b, int mode) {
+  c_v64 t;
+  if (mode) {
+    t.u16[3] = a.u16[3];
+    t.u16[2] = b.u16[3];
+    t.u16[1] = a.u16[2];
+    t.u16[0] = b.u16[2];
+  } else {
+    t.u16[3] = a.u16[1];
+    t.u16[2] = b.u16[1];
+    t.u16[1] = a.u16[0];
+    t.u16[0] = b.u16[0];
+  }
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_ziplo_16(c_v64 a, c_v64 b) {
+  return CONFIG_BIG_ENDIAN ? _c_v64_zip_16(b, a, 1) : _c_v64_zip_16(a, b, 0);
+}
+
+SIMD_INLINE c_v64 c_v64_ziphi_16(c_v64 a, c_v64 b) {
+  return CONFIG_BIG_ENDIAN ? _c_v64_zip_16(b, a, 0) : _c_v64_zip_16(a, b, 1);
+}
+
+SIMD_INLINE c_v64 _c_v64_zip_32(c_v64 a, c_v64 b, int mode) {
+  c_v64 t;
+  if (mode) {
+    t.u32[1] = a.u32[1];
+    t.u32[0] = b.u32[1];
+  } else {
+    t.u32[1] = a.u32[0];
+    t.u32[0] = b.u32[0];
+  }
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_ziplo_32(c_v64 a, c_v64 b) {
+  return CONFIG_BIG_ENDIAN ? _c_v64_zip_32(b, a, 1) : _c_v64_zip_32(a, b, 0);
+}
+
+SIMD_INLINE c_v64 c_v64_ziphi_32(c_v64 a, c_v64 b) {
+  return CONFIG_BIG_ENDIAN ? _c_v64_zip_32(b, a, 0) : _c_v64_zip_32(a, b, 1);
+}
+
+SIMD_INLINE c_v64 _c_v64_unzip_8(c_v64 a, c_v64 b, int mode) {
+  c_v64 t;
+  if (mode) {
+    t.u8[7] = b.u8[7];
+    t.u8[6] = b.u8[5];
+    t.u8[5] = b.u8[3];
+    t.u8[4] = b.u8[1];
+    t.u8[3] = a.u8[7];
+    t.u8[2] = a.u8[5];
+    t.u8[1] = a.u8[3];
+    t.u8[0] = a.u8[1];
+  } else {
+    t.u8[7] = a.u8[6];
+    t.u8[6] = a.u8[4];
+    t.u8[5] = a.u8[2];
+    t.u8[4] = a.u8[0];
+    t.u8[3] = b.u8[6];
+    t.u8[2] = b.u8[4];
+    t.u8[1] = b.u8[2];
+    t.u8[0] = b.u8[0];
+  }
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_unziplo_8(c_v64 a, c_v64 b) {
+  return CONFIG_BIG_ENDIAN ? _c_v64_unzip_8(a, b, 1) : _c_v64_unzip_8(a, b, 0);
+}
+
+SIMD_INLINE c_v64 c_v64_unziphi_8(c_v64 a, c_v64 b) {
+  return CONFIG_BIG_ENDIAN ? _c_v64_unzip_8(b, a, 0) : _c_v64_unzip_8(b, a, 1);
+}
+
+SIMD_INLINE c_v64 _c_v64_unzip_16(c_v64 a, c_v64 b, int mode) {
+  c_v64 t;
+  if (mode) {
+    t.u16[3] = b.u16[3];
+    t.u16[2] = b.u16[1];
+    t.u16[1] = a.u16[3];
+    t.u16[0] = a.u16[1];
+  } else {
+    t.u16[3] = a.u16[2];
+    t.u16[2] = a.u16[0];
+    t.u16[1] = b.u16[2];
+    t.u16[0] = b.u16[0];
+  }
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_unziplo_16(c_v64 a, c_v64 b) {
+  return CONFIG_BIG_ENDIAN ? _c_v64_unzip_16(a, b, 1)
+                           : _c_v64_unzip_16(a, b, 0);
+}
+
+SIMD_INLINE c_v64 c_v64_unziphi_16(c_v64 a, c_v64 b) {
+  return CONFIG_BIG_ENDIAN ? _c_v64_unzip_16(b, a, 0)
+                           : _c_v64_unzip_16(b, a, 1);
+}
+
+SIMD_INLINE c_v64 c_v64_unpacklo_u8_s16(c_v64 a) {
+  c_v64 t;
+  int endian = !!CONFIG_BIG_ENDIAN * 4;
+  t.s16[3] = (int16_t)a.u8[3 + endian];
+  t.s16[2] = (int16_t)a.u8[2 + endian];
+  t.s16[1] = (int16_t)a.u8[1 + endian];
+  t.s16[0] = (int16_t)a.u8[0 + endian];
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_unpackhi_u8_s16(c_v64 a) {
+  c_v64 t;
+  int endian = !!CONFIG_BIG_ENDIAN * 4;
+  t.s16[3] = (int16_t)a.u8[7 - endian];
+  t.s16[2] = (int16_t)a.u8[6 - endian];
+  t.s16[1] = (int16_t)a.u8[5 - endian];
+  t.s16[0] = (int16_t)a.u8[4 - endian];
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_pack_s32_s16(c_v64 a, c_v64 b) {
+  c_v64 t;
+  if (CONFIG_BIG_ENDIAN) {
+    c_v64 u = a;
+    a = b;
+    b = u;
+  }
+  t.s16[3] = a.s32[1] > 32767 ? 32767 : a.s32[1] < -32768 ? -32768 : a.s32[1];
+  t.s16[2] = a.s32[0] > 32767 ? 32767 : a.s32[0] < -32768 ? -32768 : a.s32[0];
+  t.s16[1] = b.s32[1] > 32767 ? 32767 : b.s32[1] < -32768 ? -32768 : b.s32[1];
+  t.s16[0] = b.s32[0] > 32767 ? 32767 : b.s32[0] < -32768 ? -32768 : b.s32[0];
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_pack_s16_u8(c_v64 a, c_v64 b) {
+  c_v64 t;
+  if (CONFIG_BIG_ENDIAN) {
+    c_v64 u = a;
+    a = b;
+    b = u;
+  }
+  t.u8[7] = a.s16[3] > 255 ? 255 : a.s16[3] < 0 ? 0 : a.s16[3];
+  t.u8[6] = a.s16[2] > 255 ? 255 : a.s16[2] < 0 ? 0 : a.s16[2];
+  t.u8[5] = a.s16[1] > 255 ? 255 : a.s16[1] < 0 ? 0 : a.s16[1];
+  t.u8[4] = a.s16[0] > 255 ? 255 : a.s16[0] < 0 ? 0 : a.s16[0];
+  t.u8[3] = b.s16[3] > 255 ? 255 : b.s16[3] < 0 ? 0 : b.s16[3];
+  t.u8[2] = b.s16[2] > 255 ? 255 : b.s16[2] < 0 ? 0 : b.s16[2];
+  t.u8[1] = b.s16[1] > 255 ? 255 : b.s16[1] < 0 ? 0 : b.s16[1];
+  t.u8[0] = b.s16[0] > 255 ? 255 : b.s16[0] < 0 ? 0 : b.s16[0];
+  return t;
+}
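
Each 16-bit lane is clamped to [0, 255] before narrowing, with the first argument landing in the upper four output bytes. A concrete check on a little-endian host (check_pack_s16_u8_clamp is an illustrative name):

#include <assert.h>
#include "./v64_intrinsics_c.h"

static void check_pack_s16_u8_clamp(void) {
  c_v64 hi = c_v64_from_16(300, (uint16_t)-5, 40, 256); /* lanes 3..0 */
  c_v64 lo = c_v64_from_16(0, 1, 2, 3);
  c_v64 r = c_v64_pack_s16_u8(hi, lo);
  assert(r.u8[7] == 255 && r.u8[6] == 0 && r.u8[5] == 40 && r.u8[4] == 255);
  assert(r.u8[3] == 0 && r.u8[2] == 1 && r.u8[1] == 2 && r.u8[0] == 3);
}
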
+
+SIMD_INLINE c_v64 c_v64_pack_s16_s8(c_v64 a, c_v64 b) {
+  c_v64 t;
+  if (CONFIG_BIG_ENDIAN) {
+    c_v64 u = a;
+    a = b;
+    b = u;
+  }
+  t.u8[7] = a.s16[3] > 127 ? 127 : a.s16[3] < -128 ? 128 : a.s16[3];
+  t.u8[6] = a.s16[2] > 127 ? 127 : a.s16[2] < -128 ? 128 : a.s16[2];
+  t.u8[5] = a.s16[1] > 127 ? 127 : a.s16[1] < -128 ? 128 : a.s16[1];
+  t.u8[4] = a.s16[0] > 127 ? 127 : a.s16[0] < -128 ? 128 : a.s16[0];
+  t.u8[3] = b.s16[3] > 127 ? 127 : b.s16[3] < -128 ? 128 : b.s16[3];
+  t.u8[2] = b.s16[2] > 127 ? 127 : b.s16[2] < -128 ? 128 : b.s16[2];
+  t.u8[1] = b.s16[1] > 127 ? 127 : b.s16[1] < -128 ? 128 : b.s16[1];
+  t.u8[0] = b.s16[0] > 127 ? 127 : b.s16[0] < -128 ? 128 : b.s16[0];
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_unpacklo_u16_s32(c_v64 a) {
+  c_v64 t;
+  t.s32[1] = a.u16[1 + !!CONFIG_BIG_ENDIAN * 2];
+  t.s32[0] = a.u16[0 + !!CONFIG_BIG_ENDIAN * 2];
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_unpacklo_s16_s32(c_v64 a) {
+  c_v64 t;
+  t.s32[1] = a.s16[1 + !!CONFIG_BIG_ENDIAN * 2];
+  t.s32[0] = a.s16[0 + !!CONFIG_BIG_ENDIAN * 2];
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_unpackhi_u16_s32(c_v64 a) {
+  c_v64 t;
+  t.s32[1] = a.u16[3 - !!CONFIG_BIG_ENDIAN * 2];
+  t.s32[0] = a.u16[2 - !!CONFIG_BIG_ENDIAN * 2];
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_unpackhi_s16_s32(c_v64 a) {
+  c_v64 t;
+  t.s32[1] = a.s16[3 - !!CONFIG_BIG_ENDIAN * 2];
+  t.s32[0] = a.s16[2 - !!CONFIG_BIG_ENDIAN * 2];
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_shuffle_8(c_v64 a, c_v64 pattern) {
+  c_v64 t;
+  int c;
+  for (c = 0; c < 8; c++) {
+    if (simd_check && (pattern.u8[c] & ~7)) {
+      fprintf(stderr, "Error: Undefined v64_shuffle_8 index %d/%d\n",
+              pattern.u8[c], c);
+      abort();
+    }
+    t.u8[c] =
+        a.u8[CONFIG_BIG_ENDIAN ? 7 - (pattern.u8[c] & 7) : pattern.u8[c] & 7];
+  }
+  return t;
+}
+
+SIMD_INLINE int64_t c_v64_dotp_su8(c_v64 a, c_v64 b) {
+  return a.s8[7] * b.u8[7] + a.s8[6] * b.u8[6] + a.s8[5] * b.u8[5] +
+         a.s8[4] * b.u8[4] + a.s8[3] * b.u8[3] + a.s8[2] * b.u8[2] +
+         a.s8[1] * b.u8[1] + a.s8[0] * b.u8[0];
+}
+
+SIMD_INLINE int64_t c_v64_dotp_s16(c_v64 a, c_v64 b) {
+  return (int64_t)(a.s16[3] * b.s16[3] + a.s16[2] * b.s16[2]) +
+         (int64_t)(a.s16[1] * b.s16[1] + a.s16[0] * b.s16[0]);
+}
+
+SIMD_INLINE uint64_t c_v64_hadd_u8(c_v64 a) {
+  return a.u8[7] + a.u8[6] + a.u8[5] + a.u8[4] + a.u8[3] + a.u8[2] + a.u8[1] +
+         a.u8[0];
+}
+
+SIMD_INLINE int64_t c_v64_hadd_s16(c_v64 a) {
+  return a.s16[3] + a.s16[2] + a.s16[1] + a.s16[0];
+}
+
+typedef uint32_t c_sad64_internal;
+
+/* Implementation dependent return value.  Result must be finalised with
+   v64_sad_u8_sum().
+   The result for more than 32 v64_sad_u8() calls is undefined. */
+SIMD_INLINE c_sad64_internal c_v64_sad_u8_init() { return 0; }
+
+SIMD_INLINE c_sad64_internal c_v64_sad_u8(c_sad64_internal s, c_v64 a,
+                                          c_v64 b) {
+  int c;
+  for (c = 0; c < 8; c++)
+    s += a.u8[c] > b.u8[c] ? a.u8[c] - b.u8[c] : b.u8[c] - a.u8[c];
+  return s;
+}
+
+SIMD_INLINE uint32_t c_v64_sad_u8_sum(c_sad64_internal s) { return s; }
+
+typedef uint32_t c_ssd64_internal;
+
+/* Implementation dependent return value.  Result must be finalised with
+ * v64_ssd_u8_sum(). */
+SIMD_INLINE c_ssd64_internal c_v64_ssd_u8_init() { return 0; }
+
+SIMD_INLINE c_ssd64_internal c_v64_ssd_u8(c_ssd64_internal s, c_v64 a,
+                                          c_v64 b) {
+  int c;
+  for (c = 0; c < 8; c++) s += (a.u8[c] - b.u8[c]) * (a.u8[c] - b.u8[c]);
+  return s;
+}
+
+SIMD_INLINE uint32_t c_v64_ssd_u8_sum(c_ssd64_internal s) { return s; }
+
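+/* A minimal usage sketch, illustrative only and not part of the interface
+   above: both the SAD and SSD helpers follow the same init/accumulate/sum
+   protocol.  The helper name and the two fixed accumulations are assumptions
+   made purely for this example. */
+SIMD_INLINE uint32_t c_v64_sad_u8_example(c_v64 a0, c_v64 b0, c_v64 a1,
+                                          c_v64 b1) {
+  c_sad64_internal s = c_v64_sad_u8_init();
+  s = c_v64_sad_u8(s, a0, b0); /* accumulate |a0 - b0| per byte */
+  s = c_v64_sad_u8(s, a1, b1); /* at most 32 accumulations are defined */
+  return c_v64_sad_u8_sum(s);  /* finalise into a plain uint32_t */
+}
+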
+SIMD_INLINE c_v64 c_v64_or(c_v64 a, c_v64 b) {
+  c_v64 t;
+  t.u64 = a.u64 | b.u64;
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_xor(c_v64 a, c_v64 b) {
+  c_v64 t;
+  t.u64 = a.u64 ^ b.u64;
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_and(c_v64 a, c_v64 b) {
+  c_v64 t;
+  t.u64 = a.u64 & b.u64;
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_andn(c_v64 a, c_v64 b) {
+  c_v64 t;
+  t.u64 = a.u64 & ~b.u64;
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_mullo_s16(c_v64 a, c_v64 b) {
+  c_v64 t;
+  int c;
+  for (c = 0; c < 4; c++) t.s16[c] = (int16_t)(a.s16[c] * b.s16[c]);
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_mulhi_s16(c_v64 a, c_v64 b) {
+  c_v64 t;
+  int c;
+  for (c = 0; c < 4; c++) t.s16[c] = (a.s16[c] * b.s16[c]) >> 16;
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_mullo_s32(c_v64 a, c_v64 b) {
+  c_v64 t;
+  t.s32[0] = a.s32[0] * b.s32[0];
+  t.s32[1] = a.s32[1] * b.s32[1];
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_madd_s16(c_v64 a, c_v64 b) {
+  c_v64 t;
+  t.s32[0] = a.s16[0] * b.s16[0] + a.s16[1] * b.s16[1];
+  t.s32[1] = a.s16[2] * b.s16[2] + a.s16[3] * b.s16[3];
+  return t;
+}
+
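+/* Multiply the unsigned bytes of a by the signed bytes of b, add adjacent
+   pairs and saturate each sum to signed 16 bits (the same semantics as the
+   SSSE3 maddubs operation used by the x86 implementation). */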
+SIMD_INLINE c_v64 c_v64_madd_us8(c_v64 a, c_v64 b) {
+  c_v64 t;
+  int32_t u;
+  u = a.u8[0] * b.s8[0] + a.u8[1] * b.s8[1];
+  t.s16[0] = u > 32767 ? 32767 : u < -32768 ? -32768 : u;
+  u = a.u8[2] * b.s8[2] + a.u8[3] * b.s8[3];
+  t.s16[1] = u > 32767 ? 32767 : u < -32768 ? -32768 : u;
+  u = a.u8[4] * b.s8[4] + a.u8[5] * b.s8[5];
+  t.s16[2] = u > 32767 ? 32767 : u < -32768 ? -32768 : u;
+  u = a.u8[6] * b.s8[6] + a.u8[7] * b.s8[7];
+  t.s16[3] = u > 32767 ? 32767 : u < -32768 ? -32768 : u;
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_avg_u8(c_v64 a, c_v64 b) {
+  c_v64 t;
+  int c;
+  for (c = 0; c < 8; c++) t.u8[c] = (a.u8[c] + b.u8[c] + 1) >> 1;
+  return t;
+}
+
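+/* Unlike c_v64_avg_u8() above, which rounds up ((3 + 4 + 1) >> 1 == 4), the
+   rounded-down average below truncates ((3 + 4) >> 1 == 3). */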
+SIMD_INLINE c_v64 c_v64_rdavg_u8(c_v64 a, c_v64 b) {
+  c_v64 t;
+  int c;
+  for (c = 0; c < 8; c++) t.u8[c] = (a.u8[c] + b.u8[c]) >> 1;
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_avg_u16(c_v64 a, c_v64 b) {
+  c_v64 t;
+  int c;
+  for (c = 0; c < 4; c++) t.u16[c] = (a.u16[c] + b.u16[c] + 1) >> 1;
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_min_u8(c_v64 a, c_v64 b) {
+  c_v64 t;
+  int c;
+  for (c = 0; c < 8; c++) t.u8[c] = a.u8[c] > b.u8[c] ? b.u8[c] : a.u8[c];
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_max_u8(c_v64 a, c_v64 b) {
+  c_v64 t;
+  int c;
+  for (c = 0; c < 8; c++) t.u8[c] = a.u8[c] > b.u8[c] ? a.u8[c] : b.u8[c];
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_min_s8(c_v64 a, c_v64 b) {
+  c_v64 t;
+  int c;
+  for (c = 0; c < 8; c++) t.s8[c] = a.s8[c] > b.s8[c] ? b.s8[c] : a.s8[c];
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_max_s8(c_v64 a, c_v64 b) {
+  c_v64 t;
+  int c;
+  for (c = 0; c < 8; c++) t.s8[c] = a.s8[c] > b.s8[c] ? a.s8[c] : b.s8[c];
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_min_s16(c_v64 a, c_v64 b) {
+  c_v64 t;
+  int c;
+  for (c = 0; c < 4; c++) t.s16[c] = a.s16[c] > b.s16[c] ? b.s16[c] : a.s16[c];
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_max_s16(c_v64 a, c_v64 b) {
+  c_v64 t;
+  int c;
+  for (c = 0; c < 4; c++) t.s16[c] = a.s16[c] > b.s16[c] ? a.s16[c] : b.s16[c];
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_cmpgt_s8(c_v64 a, c_v64 b) {
+  c_v64 t;
+  int c;
+  for (c = 0; c < 8; c++) t.s8[c] = -(a.s8[c] > b.s8[c]);
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_cmplt_s8(c_v64 a, c_v64 b) {
+  c_v64 t;
+  int c;
+  for (c = 0; c < 8; c++) t.s8[c] = -(a.s8[c] < b.s8[c]);
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_cmpeq_8(c_v64 a, c_v64 b) {
+  c_v64 t;
+  int c;
+  for (c = 0; c < 8; c++) t.s8[c] = -(a.u8[c] == b.u8[c]);
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_cmpgt_s16(c_v64 a, c_v64 b) {
+  c_v64 t;
+  int c;
+  for (c = 0; c < 4; c++) t.s16[c] = -(a.s16[c] > b.s16[c]);
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_cmplt_s16(c_v64 a, c_v64 b) {
+  c_v64 t;
+  int c;
+  for (c = 0; c < 4; c++) t.s16[c] = -(a.s16[c] < b.s16[c]);
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_cmpeq_16(c_v64 a, c_v64 b) {
+  c_v64 t;
+  int c;
+  for (c = 0; c < 4; c++) t.s16[c] = -(a.u16[c] == b.u16[c]);
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_shl_8(c_v64 a, unsigned int n) {
+  c_v64 t;
+  int c;
+  if (simd_check && n > 7) {
+    fprintf(stderr, "Error: Undefined u8 shift left %d\n", n);
+    abort();
+  }
+  for (c = 0; c < 8; c++) t.s8[c] = a.u8[c] << n;
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_shr_u8(c_v64 a, unsigned int n) {
+  c_v64 t;
+  int c;
+  if (simd_check && n > 7) {
+    fprintf(stderr, "Error: Undefined u8 shift right %d\n", n);
+    abort();
+  }
+  for (c = 0; c < 8; c++) t.u8[c] = a.u8[c] >> n;
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_shr_s8(c_v64 a, unsigned int n) {
+  c_v64 t;
+  int c;
+  if (simd_check && n > 7) {
+    fprintf(stderr, "Error: Undefined s8 shift right %d\n", n);
+    abort();
+  }
+  for (c = 0; c < 8; c++) t.s8[c] = a.s8[c] >> n;
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_shl_16(c_v64 a, unsigned int n) {
+  c_v64 t;
+  int c;
+  if (simd_check && n > 15) {
+    fprintf(stderr, "Error: Undefined u16 shift left %d\n", n);
+    abort();
+  }
+  for (c = 0; c < 4; c++) t.u16[c] = a.u16[c] << n;
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_shr_u16(c_v64 a, unsigned int n) {
+  c_v64 t;
+  int c;
+  if (simd_check && n > 15) {
+    fprintf(stderr, "Error: Undefined u16 shift right %d\n", n);
+    abort();
+  }
+  for (c = 0; c < 4; c++) t.u16[c] = a.u16[c] >> n;
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_shr_s16(c_v64 a, unsigned int n) {
+  c_v64 t;
+  int c;
+  if (simd_check && n > 15) {
+    fprintf(stderr, "Error: undefined s16 shift right %d\n", n);
+    abort();
+  }
+  for (c = 0; c < 4; c++) t.s16[c] = a.s16[c] >> n;
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_shl_32(c_v64 a, unsigned int n) {
+  c_v64 t;
+  if (simd_check && n > 31) {
+    fprintf(stderr, "Error: undefined u32 shift left %d\n", n);
+    abort();
+  }
+  t.u32[1] = a.u32[1] << n;
+  t.u32[0] = a.u32[0] << n;
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_shr_u32(c_v64 a, unsigned int n) {
+  c_v64 t;
+  if (simd_check && n > 31) {
+    fprintf(stderr, "Error: undefined u32 shift right %d\n", n);
+    abort();
+  }
+  t.u32[1] = a.u32[1] >> n;
+  t.u32[0] = a.u32[0] >> n;
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_shr_s32(c_v64 a, unsigned int n) {
+  c_v64 t;
+  if (simd_check && n > 31) {
+    fprintf(stderr, "Error: undefined s32 shift right %d\n", n);
+    abort();
+  }
+  t.s32[1] = a.s32[1] >> n;
+  t.s32[0] = a.s32[0] >> n;
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_shr_n_byte(c_v64 x, const unsigned int i) {
+  c_v64 t;
+  t.u64 = x.u64 >> i * 8;
+  return t;
+}
+
+SIMD_INLINE c_v64 c_v64_shl_n_byte(c_v64 x, const unsigned int i) {
+  c_v64 t;
+  t.u64 = x.u64 << i * 8;
+  return t;
+}
+
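+/* Extract 64 bits from the concatenation of a (high) and b (low), starting
+   c bytes up: (b >> c*8) | (a << (8 - c)*8), with c == 0 returning b. */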
+SIMD_INLINE c_v64 c_v64_align(c_v64 a, c_v64 b, const unsigned int c) {
+  if (simd_check && c > 7) {
+    fprintf(stderr, "Error: undefined alignment %d\n", c);
+    abort();
+  }
+  return c ? c_v64_or(c_v64_shr_n_byte(b, c), c_v64_shl_n_byte(a, 8 - c)) : b;
+}
+
+SIMD_INLINE c_v64 c_v64_shl_n_8(c_v64 a, const unsigned int c) {
+  return c_v64_shl_8(a, c);
+}
+
+SIMD_INLINE c_v64 c_v64_shr_n_u8(c_v64 a, const unsigned int c) {
+  return c_v64_shr_u8(a, c);
+}
+
+SIMD_INLINE c_v64 c_v64_shr_n_s8(c_v64 a, const unsigned int c) {
+  return c_v64_shr_s8(a, c);
+}
+
+SIMD_INLINE c_v64 c_v64_shl_n_16(c_v64 a, const unsigned int c) {
+  return c_v64_shl_16(a, c);
+}
+
+SIMD_INLINE c_v64 c_v64_shr_n_u16(c_v64 a, const unsigned int c) {
+  return c_v64_shr_u16(a, c);
+}
+
+SIMD_INLINE c_v64 c_v64_shr_n_s16(c_v64 a, const unsigned int c) {
+  return c_v64_shr_s16(a, c);
+}
+
+SIMD_INLINE c_v64 c_v64_shl_n_32(c_v64 a, const unsigned int c) {
+  return c_v64_shl_32(a, c);
+}
+
+SIMD_INLINE c_v64 c_v64_shr_n_u32(c_v64 a, const unsigned int c) {
+  return c_v64_shr_u32(a, c);
+}
+
+SIMD_INLINE c_v64 c_v64_shr_n_s32(c_v64 a, const unsigned int c) {
+  return c_v64_shr_s32(a, c);
+}
+
+#endif /* _V64_INTRINSICS_C_H */
diff --git a/aom_dsp/simd/v64_intrinsics_x86.h b/aom_dsp/simd/v64_intrinsics_x86.h
new file mode 100644
index 0000000..502df23
--- /dev/null
+++ b/aom_dsp/simd/v64_intrinsics_x86.h
@@ -0,0 +1,460 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef _V64_INTRINSICS_H
+#define _V64_INTRINSICS_H
+
+#include <emmintrin.h>
+#if defined(__SSSE3__)
+#include <tmmintrin.h>
+#endif
+#if defined(__SSE4_1__)
+#include <smmintrin.h>
+#endif
+
+typedef __m128i v64;
+
+SIMD_INLINE uint32_t v64_low_u32(v64 a) {
+  return (uint32_t)_mm_cvtsi128_si32(a);
+}
+
+SIMD_INLINE uint32_t v64_high_u32(v64 a) {
+  return (uint32_t)_mm_cvtsi128_si32(_mm_srli_si128(a, 4));
+}
+
+SIMD_INLINE int32_t v64_low_s32(v64 a) { return (int32_t)_mm_cvtsi128_si32(a); }
+
+SIMD_INLINE int32_t v64_high_s32(v64 a) {
+  return (int32_t)_mm_cvtsi128_si32(_mm_srli_si128(a, 4));
+}
+
+SIMD_INLINE v64 v64_from_16(uint16_t a, uint16_t b, uint16_t c, uint16_t d) {
+  return _mm_packs_epi32(
+      _mm_set_epi32((int16_t)a, (int16_t)b, (int16_t)c, (int16_t)d),
+      _mm_setzero_si128());
+}
+
+SIMD_INLINE v64 v64_from_32(uint32_t x, uint32_t y) {
+  return _mm_set_epi32(0, 0, x, y);
+}
+
+SIMD_INLINE v64 v64_from_64(uint64_t x) {
+  return _mm_set_epi32(0, 0, x >> 32, (uint32_t)x);
+}
+
+SIMD_INLINE uint64_t v64_u64(v64 x) {
+  return (uint64_t)v64_low_u32(x) | ((uint64_t)v64_high_u32(x) << 32);
+}
+
+SIMD_INLINE uint32_t u32_load_aligned(const void *p) {
+  return *((uint32_t *)p);
+}
+
+SIMD_INLINE uint32_t u32_load_unaligned(const void *p) {
+  return *((uint32_t *)p);
+}
+
+SIMD_INLINE void u32_store_aligned(void *p, uint32_t a) {
+  *((uint32_t *)p) = a;
+}
+
+SIMD_INLINE void u32_store_unaligned(void *p, uint32_t a) {
+  *((uint32_t *)p) = a;
+}
+
+SIMD_INLINE v64 v64_load_aligned(const void *p) {
+  return _mm_loadl_epi64((__m128i *)p);
+}
+
+SIMD_INLINE v64 v64_load_unaligned(const void *p) {
+  return _mm_loadl_epi64((__m128i *)p);
+}
+
+SIMD_INLINE void v64_store_aligned(void *p, v64 a) {
+  _mm_storel_epi64((__m128i *)p, a);
+}
+
+SIMD_INLINE void v64_store_unaligned(void *p, v64 a) {
+  _mm_storel_epi64((__m128i *)p, a);
+}
+
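+/* _mm_srli_si128() requires a compile-time-constant byte count, which is only
+   guaranteed here once the call has been inlined, hence the plain 64-bit
+   fallback for unoptimised builds. */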
+#if __OPTIMIZE__
+#define v64_align(a, b, c) \
+  ((c) ? _mm_srli_si128(_mm_unpacklo_epi64(b, a), (c)) : (b))
+#else
+#define v64_align(a, b, c)                                                  \
+  ((c) ? v64_from_64((v64_u64(b) >> (c)*8) | (v64_u64(a) << (8 - (c)) * 8)) \
+       : (b))
+#endif
+
+SIMD_INLINE v64 v64_zero() { return _mm_setzero_si128(); }
+
+SIMD_INLINE v64 v64_dup_8(uint8_t x) { return _mm_set1_epi8(x); }
+
+SIMD_INLINE v64 v64_dup_16(uint16_t x) { return _mm_set1_epi16(x); }
+
+SIMD_INLINE v64 v64_dup_32(uint32_t x) { return _mm_set1_epi32(x); }
+
+SIMD_INLINE v64 v64_add_8(v64 a, v64 b) { return _mm_add_epi8(a, b); }
+
+SIMD_INLINE v64 v64_add_16(v64 a, v64 b) { return _mm_add_epi16(a, b); }
+
+SIMD_INLINE v64 v64_sadd_s16(v64 a, v64 b) { return _mm_adds_epi16(a, b); }
+
+SIMD_INLINE v64 v64_add_32(v64 a, v64 b) { return _mm_add_epi32(a, b); }
+
+SIMD_INLINE v64 v64_sub_8(v64 a, v64 b) { return _mm_sub_epi8(a, b); }
+
+SIMD_INLINE v64 v64_ssub_u8(v64 a, v64 b) { return _mm_subs_epu8(a, b); }
+
+SIMD_INLINE v64 v64_ssub_s8(v64 a, v64 b) { return _mm_subs_epi8(a, b); }
+
+SIMD_INLINE v64 v64_sub_16(v64 a, v64 b) { return _mm_sub_epi16(a, b); }
+
+SIMD_INLINE v64 v64_ssub_s16(v64 a, v64 b) { return _mm_subs_epi16(a, b); }
+
+SIMD_INLINE v64 v64_sub_32(v64 a, v64 b) { return _mm_sub_epi32(a, b); }
+
+SIMD_INLINE v64 v64_abs_s16(v64 a) {
+#if defined(__SSSE3__)
+  return _mm_abs_epi16(a);
+#else
+  return _mm_max_epi16(a, _mm_sub_epi16(_mm_setzero_si128(), a));
+#endif
+}
+
+SIMD_INLINE v64 v64_ziplo_8(v64 a, v64 b) { return _mm_unpacklo_epi8(b, a); }
+
+SIMD_INLINE v64 v64_ziphi_8(v64 a, v64 b) {
+  return _mm_srli_si128(_mm_unpacklo_epi8(b, a), 8);
+}
+
+SIMD_INLINE v64 v64_ziplo_16(v64 a, v64 b) { return _mm_unpacklo_epi16(b, a); }
+
+SIMD_INLINE v64 v64_ziphi_16(v64 a, v64 b) {
+  return _mm_srli_si128(_mm_unpacklo_epi16(b, a), 8);
+}
+
+SIMD_INLINE v64 v64_ziplo_32(v64 a, v64 b) { return _mm_unpacklo_epi32(b, a); }
+
+SIMD_INLINE v64 v64_ziphi_32(v64 a, v64 b) {
+  return _mm_srli_si128(_mm_unpacklo_epi32(b, a), 8);
+}
+
+SIMD_INLINE v64 v64_pack_s32_s16(v64 a, v64 b) {
+  __m128i t = _mm_unpacklo_epi64(b, a);
+  return _mm_packs_epi32(t, t);
+}
+
+SIMD_INLINE v64 v64_pack_s16_u8(v64 a, v64 b) {
+  __m128i t = _mm_unpacklo_epi64(b, a);
+  return _mm_packus_epi16(t, t);
+}
+
+SIMD_INLINE v64 v64_pack_s16_s8(v64 a, v64 b) {
+  __m128i t = _mm_unpacklo_epi64(b, a);
+  return _mm_packs_epi16(t, t);
+}
+
+SIMD_INLINE v64 v64_unziphi_8(v64 a, v64 b) {
+#if defined(__SSSE3__)
+  return _mm_shuffle_epi8(_mm_unpacklo_epi64(b, a),
+                          v64_from_64(0x0f0d0b0907050301LL));
+#else
+  return _mm_packus_epi16(
+      _mm_unpacklo_epi64(_mm_srli_epi16(b, 8), _mm_srli_epi16(a, 8)),
+      _mm_setzero_si128());
+#endif
+}
+
+SIMD_INLINE v64 v64_unziplo_8(v64 a, v64 b) {
+#if defined(__SSSE3__)
+  return _mm_shuffle_epi8(_mm_unpacklo_epi64(b, a),
+                          v64_from_64(0x0e0c0a0806040200LL));
+#else
+  return v64_unziphi_8(_mm_slli_si128(a, 1), _mm_slli_si128(b, 1));
+#endif
+}
+
+SIMD_INLINE v64 v64_unziphi_16(v64 a, v64 b) {
+#if defined(__SSSE3__)
+  return _mm_shuffle_epi8(_mm_unpacklo_epi64(b, a),
+                          v64_from_64(0x0f0e0b0a07060302LL));
+#else
+  return _mm_packs_epi32(
+      _mm_unpacklo_epi64(_mm_srai_epi32(b, 16), _mm_srai_epi32(a, 16)),
+      _mm_setzero_si128());
+#endif
+}
+
+SIMD_INLINE v64 v64_unziplo_16(v64 a, v64 b) {
+#if defined(__SSSE3__)
+  return _mm_shuffle_epi8(_mm_unpacklo_epi64(b, a),
+                          v64_from_64(0x0d0c090805040100LL));
+#else
+  return v64_unziphi_16(_mm_slli_si128(a, 2), _mm_slli_si128(b, 2));
+#endif
+}
+
+SIMD_INLINE v64 v64_unpacklo_u8_s16(v64 a) {
+  return _mm_unpacklo_epi8(a, _mm_setzero_si128());
+}
+
+SIMD_INLINE v64 v64_unpackhi_u8_s16(v64 a) {
+  return _mm_srli_si128(_mm_unpacklo_epi8(a, _mm_setzero_si128()), 8);
+}
+
+SIMD_INLINE v64 v64_unpacklo_u16_s32(v64 a) {
+  return _mm_unpacklo_epi16(a, _mm_setzero_si128());
+}
+
+SIMD_INLINE v64 v64_unpacklo_s16_s32(v64 a) {
+  return _mm_srai_epi32(_mm_unpacklo_epi16(_mm_setzero_si128(), a), 16);
+}
+
+SIMD_INLINE v64 v64_unpackhi_u16_s32(v64 a) {
+  return _mm_srli_si128(_mm_unpacklo_epi16(a, _mm_setzero_si128()), 8);
+}
+
+SIMD_INLINE v64 v64_unpackhi_s16_s32(v64 a) {
+  return _mm_srli_si128(
+      _mm_srai_epi32(_mm_unpacklo_epi16(_mm_setzero_si128(), a), 16), 8);
+}
+
+SIMD_INLINE v64 v64_shuffle_8(v64 x, v64 pattern) {
+#if defined(__SSSE3__)
+  return _mm_shuffle_epi8(x, pattern);
+#else
+  v64 output;
+  unsigned char *input = (unsigned char *)&x;
+  unsigned char *index = (unsigned char *)&pattern;
+  char *selected = (char *)&output;
+  int counter;
+
+  for (counter = 0; counter < 8; counter++) {
+    selected[counter] = input[index[counter] & 7]; /* indices defined for 0..7 */
+  }
+
+  return output;
+#endif
+}
+
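+/* Widen both operands to 16 bits, with a's signed bytes placed in the high
+   half of each lane so that _mm_madd_epi16() produces 256 times the sum of
+   each pair of a.s8 * b.u8 products; the final arithmetic shift right by 8
+   removes that factor again. */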
+SIMD_INLINE int64_t v64_dotp_su8(v64 a, v64 b) {
+  __m128i r, r1, r2, z;
+  z = _mm_setzero_si128();
+  r1 = _mm_madd_epi16(_mm_slli_epi16(_mm_unpacklo_epi8(a, z), 8),
+                      _mm_unpacklo_epi8(b, z));
+  r2 = _mm_srli_si128(r1, 8);
+  r = _mm_add_epi32(r1, r2);
+  r = _mm_add_epi32(r, _mm_srli_si128(r, 4));
+  return ((int32_t)v64_low_u32(r)) >> 8;
+}
+
+SIMD_INLINE int64_t v64_dotp_s16(v64 a, v64 b) {
+  __m128i r = _mm_madd_epi16(a, b);
+#if defined(__SSE4_1__)
+  __m128i x = _mm_cvtepi32_epi64(r);
+  return _mm_cvtsi128_si64(_mm_add_epi64(x, _mm_srli_si128(x, 8)));
+#else
+  return (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(r, 4)) +
+         (int64_t)_mm_cvtsi128_si32(r);
+#endif
+}
+
+SIMD_INLINE uint64_t v64_hadd_u8(v64 a) {
+  return v64_low_u32(_mm_sad_epu8(a, _mm_setzero_si128()));
+}
+
+SIMD_INLINE int64_t v64_hadd_s16(v64 a) {
+  return v64_dotp_s16(a, v64_dup_16(1));
+}
+
+typedef v64 sad64_internal;
+
+SIMD_INLINE sad64_internal v64_sad_u8_init() { return _mm_setzero_si128(); }
+
+/* Implementation dependent return value.  Result must be finalised with
+   v64_sad_u8_sum().
+   The result for more than 32 v64_sad_u8() calls is undefined. */
+SIMD_INLINE sad64_internal v64_sad_u8(sad64_internal s, v64 a, v64 b) {
+  return _mm_add_epi64(s, _mm_sad_epu8(a, b));
+}
+
+SIMD_INLINE uint32_t v64_sad_u8_sum(sad64_internal s) { return v64_low_u32(s); }
+
+typedef v64 ssd64_internal;
+
+SIMD_INLINE ssd64_internal v64_ssd_u8_init() { return _mm_setzero_si128(); }
+
+/* Implementation dependent return value.  Result must be finalised with
+ * v64_ssd_u8_sum(). */
+SIMD_INLINE ssd64_internal v64_ssd_u8(ssd64_internal s, v64 a, v64 b) {
+  v64 l = v64_sub_16(v64_ziplo_8(v64_zero(), a), v64_ziplo_8(v64_zero(), b));
+  v64 h = v64_sub_16(v64_ziphi_8(v64_zero(), a), v64_ziphi_8(v64_zero(), b));
+  v64 r = v64_add_32(_mm_madd_epi16(l, l), _mm_madd_epi16(h, h));
+  return _mm_add_epi64(
+      s, v64_ziplo_32(v64_zero(), _mm_add_epi32(r, _mm_srli_si128(r, 4))));
+}
+
+SIMD_INLINE uint32_t v64_ssd_u8_sum(ssd64_internal s) { return v64_low_u32(s); }
+
+SIMD_INLINE v64 v64_or(v64 a, v64 b) { return _mm_or_si128(a, b); }
+
+SIMD_INLINE v64 v64_xor(v64 a, v64 b) { return _mm_xor_si128(a, b); }
+
+SIMD_INLINE v64 v64_and(v64 a, v64 b) { return _mm_and_si128(a, b); }
+
+SIMD_INLINE v64 v64_andn(v64 a, v64 b) { return _mm_andnot_si128(b, a); }
+
+SIMD_INLINE v64 v64_mullo_s16(v64 a, v64 b) { return _mm_mullo_epi16(a, b); }
+
+SIMD_INLINE v64 v64_mulhi_s16(v64 a, v64 b) { return _mm_mulhi_epi16(a, b); }
+
+SIMD_INLINE v64 v64_mullo_s32(v64 a, v64 b) {
+#if defined(__SSE4_1__)
+  return _mm_mullo_epi32(a, b);
+#else
+  return _mm_unpacklo_epi32(
+      _mm_mul_epu32(a, b),
+      _mm_mul_epu32(_mm_srli_si128(a, 4), _mm_srli_si128(b, 4)));
+#endif
+}
+
+SIMD_INLINE v64 v64_madd_s16(v64 a, v64 b) { return _mm_madd_epi16(a, b); }
+
+SIMD_INLINE v64 v64_madd_us8(v64 a, v64 b) {
+#if defined(__SSSE3__)
+  return _mm_maddubs_epi16(a, b);
+#else
+  __m128i t = _mm_madd_epi16(_mm_unpacklo_epi8(a, _mm_setzero_si128()),
+                             _mm_srai_epi16(_mm_unpacklo_epi8(b, b), 8));
+  return _mm_packs_epi32(t, t);
+#endif
+}
+
+SIMD_INLINE v64 v64_avg_u8(v64 a, v64 b) { return _mm_avg_epu8(a, b); }
+
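+/* _mm_avg_epu8() rounds up; subtracting the bit that is set where a and b
+   differ in their lowest bit turns it into the rounded-down (a + b) >> 1. */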
+SIMD_INLINE v64 v64_rdavg_u8(v64 a, v64 b) {
+  return _mm_sub_epi8(_mm_avg_epu8(a, b),
+                      _mm_and_si128(_mm_xor_si128(a, b), v64_dup_8(1)));
+}
+
+SIMD_INLINE v64 v64_avg_u16(v64 a, v64 b) { return _mm_avg_epu16(a, b); }
+
+SIMD_INLINE v64 v64_min_u8(v64 a, v64 b) { return _mm_min_epu8(a, b); }
+
+SIMD_INLINE v64 v64_max_u8(v64 a, v64 b) { return _mm_max_epu8(a, b); }
+
+SIMD_INLINE v64 v64_min_s8(v64 a, v64 b) {
+#if defined(__SSE4_1__)
+  return _mm_min_epi8(a, b);
+#else
+  v64 mask = _mm_cmplt_epi8(a, b);
+  return _mm_or_si128(_mm_andnot_si128(mask, b), _mm_and_si128(mask, a));
+#endif
+}
+
+SIMD_INLINE v64 v64_max_s8(v64 a, v64 b) {
+#if defined(__SSE4_1__)
+  return _mm_max_epi8(a, b);
+#else
+  v64 mask = _mm_cmplt_epi8(b, a);
+  return _mm_or_si128(_mm_andnot_si128(mask, b), _mm_and_si128(mask, a));
+#endif
+}
+
+SIMD_INLINE v64 v64_min_s16(v64 a, v64 b) { return _mm_min_epi16(a, b); }
+
+SIMD_INLINE v64 v64_max_s16(v64 a, v64 b) { return _mm_max_epi16(a, b); }
+
+SIMD_INLINE v64 v64_cmpgt_s8(v64 a, v64 b) { return _mm_cmpgt_epi8(a, b); }
+
+SIMD_INLINE v64 v64_cmplt_s8(v64 a, v64 b) { return _mm_cmplt_epi8(a, b); }
+
+SIMD_INLINE v64 v64_cmpeq_8(v64 a, v64 b) { return _mm_cmpeq_epi8(a, b); }
+
+SIMD_INLINE v64 v64_cmpgt_s16(v64 a, v64 b) { return _mm_cmpgt_epi16(a, b); }
+
+SIMD_INLINE v64 v64_cmplt_s16(v64 a, v64 b) { return _mm_cmplt_epi16(a, b); }
+
+SIMD_INLINE v64 v64_cmpeq_16(v64 a, v64 b) { return _mm_cmpeq_epi16(a, b); }
+
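+/* The 8-bit shifts below widen each byte into the upper half of a 16-bit lane
+   (hence the c + 8 counts in the right shifts), shift at 16-bit granularity,
+   then pack the result back down to bytes. */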
+SIMD_INLINE v64 v64_shl_8(v64 a, unsigned int c) {
+  return _mm_packus_epi16(
+      _mm_srli_epi16(_mm_sll_epi16(_mm_unpacklo_epi8(_mm_setzero_si128(), a),
+                                   _mm_cvtsi32_si128(c)),
+                     8),
+      _mm_setzero_si128());
+}
+
+SIMD_INLINE v64 v64_shr_u8(v64 a, unsigned int c) {
+  __m128i cp8 = _mm_cvtsi32_si128(c + 8);
+  return _mm_packus_epi16(
+      _mm_srl_epi16(_mm_unpacklo_epi8(_mm_setzero_si128(), a), cp8),
+      _mm_setzero_si128());
+}
+
+SIMD_INLINE v64 v64_shr_s8(v64 a, unsigned int c) {
+  __m128i cp8 = _mm_cvtsi32_si128(c + 8);
+  return _mm_packs_epi16(
+      _mm_sra_epi16(_mm_unpacklo_epi8(_mm_setzero_si128(), a), cp8),
+      _mm_setzero_si128());
+}
+
+SIMD_INLINE v64 v64_shl_16(v64 a, unsigned int c) {
+  return _mm_sll_epi16(a, _mm_cvtsi32_si128(c));
+}
+
+SIMD_INLINE v64 v64_shr_u16(v64 a, unsigned int c) {
+  return _mm_srl_epi16(a, _mm_cvtsi32_si128(c));
+}
+
+SIMD_INLINE v64 v64_shr_s16(v64 a, unsigned int c) {
+  return _mm_sra_epi16(a, _mm_cvtsi32_si128(c));
+}
+
+SIMD_INLINE v64 v64_shl_32(v64 a, unsigned int c) {
+  return _mm_sll_epi32(a, _mm_cvtsi32_si128(c));
+}
+
+SIMD_INLINE v64 v64_shr_u32(v64 a, unsigned int c) {
+  return _mm_srl_epi32(a, _mm_cvtsi32_si128(c));
+}
+
+SIMD_INLINE v64 v64_shr_s32(v64 a, unsigned int c) {
+  return _mm_sra_epi32(a, _mm_cvtsi32_si128(c));
+}
+
+/* These intrinsics require immediate values, so we must use #defines
+   to enforce that. */
+#define v64_shl_n_byte(a, c) _mm_slli_si128(a, c)
+#define v64_shr_n_byte(a, c) _mm_srli_si128(_mm_unpacklo_epi64(a, a), (c) + 8)
+#define v64_shl_n_8(a, c)                                                    \
+  _mm_packus_epi16(                                                          \
+      _mm_srli_epi16(                                                        \
+          _mm_slli_epi16(_mm_unpacklo_epi8(_mm_setzero_si128(), a), c), 8),  \
+      _mm_setzero_si128())
+#define v64_shr_n_u8(a, c)                                                \
+  _mm_packus_epi16(                                                       \
+      _mm_srli_epi16(_mm_unpacklo_epi8(_mm_setzero_si128(), a), (c) + 8), \
+      _mm_setzero_si128())
+#define v64_shr_n_s8(a, c)                                                \
+  _mm_packs_epi16(                                                        \
+      _mm_srai_epi16(_mm_unpacklo_epi8(_mm_setzero_si128(), a), (c) + 8), \
+      _mm_setzero_si128())
+#define v64_shl_n_16(a, c) _mm_slli_epi16(a, c)
+#define v64_shr_n_u16(a, c) _mm_srli_epi16(a, c)
+#define v64_shr_n_s16(a, c) _mm_srai_epi16(a, c)
+#define v64_shl_n_32(a, c) _mm_slli_epi32(a, c)
+#define v64_shr_n_u32(a, c) _mm_srli_epi32(a, c)
+#define v64_shr_n_s32(a, c) _mm_srai_epi32(a, c)
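+
+/* For example, v64_shr_n_u16(a, 2) expands to _mm_srli_epi16(a, 2); the count
+   must be a compile-time constant, so variable counts should go through
+   v64_shr_u16() and friends above instead. */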
+
+#endif /* _V64_INTRINSICS_H */