Add unit tests for v128 intrinsics
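
Extend the SIMD comparison harness in test/simd_cmp_impl.h and the test
definitions in test/simd_impl.h to cover the v128 intrinsics alongside
the existing v64 coverage: template wrappers for the immediate shift and
align variants, single-call SAD/SSD wrappers, MAP table entries pairing
each intrinsic with its C reference implementation, CompareSimd1Arg/
CompareSimd2Args dispatch for the new v128 signatures, and the matching
test case typedefs and instantiations.

Further intrinsics follow the same pattern; as an illustrative sketch
(v128_example_op is a hypothetical name, not part of this change):

    // simd_cmp_impl.h: pair the SIMD and C reference versions by name
    MAP(v128_example_op),
    // simd_impl.h: add a tuple to the INSTANTIATE block matching its
    // signature (here V128_V128)
    SIMD_TUPLE(v128_example_op, 0U, 0U),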

Change-Id: I20a6ad88a6465b06700b6d692569d7e69c43f489
diff --git a/test/simd_cmp_impl.h b/test/simd_cmp_impl.h
index 391230d..5b36a43 100644
--- a/test/simd_cmp_impl.h
+++ b/test/simd_cmp_impl.h
@@ -17,7 +17,7 @@
 #include "aom_dsp/aom_simd.h"
 #undef SIMD_INLINE
 #define SIMD_INLINE static  // Don't enforce inlining
-#include "aom_dsp/simd/v64_intrinsics_c.h"
+#include "aom_dsp/simd/v128_intrinsics_c.h"
 
 // Machine tuned code goes into this file. This file is included from
 // simd_cmp_sse2.cc, simd_cmp_ssse3.cc etc which define the macros
@@ -127,6 +127,104 @@
   return c_v64_align(a, b, shift);
 }
 
+template <int shift>
+v128 imm_v128_shl_n_byte(v128 a) {
+  return v128_shl_n_byte(a, shift);
+}
+template <int shift>
+v128 imm_v128_shr_n_byte(v128 a) {
+  return v128_shr_n_byte(a, shift);
+}
+template <int shift>
+v128 imm_v128_shl_n_8(v128 a) {
+  return v128_shl_n_8(a, shift);
+}
+template <int shift>
+v128 imm_v128_shr_n_u8(v128 a) {
+  return v128_shr_n_u8(a, shift);
+}
+template <int shift>
+v128 imm_v128_shr_n_s8(v128 a) {
+  return v128_shr_n_s8(a, shift);
+}
+template <int shift>
+v128 imm_v128_shl_n_16(v128 a) {
+  return v128_shl_n_16(a, shift);
+}
+template <int shift>
+v128 imm_v128_shr_n_u16(v128 a) {
+  return v128_shr_n_u16(a, shift);
+}
+template <int shift>
+v128 imm_v128_shr_n_s16(v128 a) {
+  return v128_shr_n_s16(a, shift);
+}
+template <int shift>
+v128 imm_v128_shl_n_32(v128 a) {
+  return v128_shl_n_32(a, shift);
+}
+template <int shift>
+v128 imm_v128_shr_n_u32(v128 a) {
+  return v128_shr_n_u32(a, shift);
+}
+template <int shift>
+v128 imm_v128_shr_n_s32(v128 a) {
+  return v128_shr_n_s32(a, shift);
+}
+template <int shift>
+v128 imm_v128_align(v128 a, v128 b) {
+  return v128_align(a, b, shift);
+}
+
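+// Matching wrappers around the c_v128 reference implementations, which are
+// the baseline the machine-tuned results are compared against.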
+template <int shift>
+c_v128 c_imm_v128_shl_n_byte(c_v128 a) {
+  return c_v128_shl_n_byte(a, shift);
+}
+template <int shift>
+c_v128 c_imm_v128_shr_n_byte(c_v128 a) {
+  return c_v128_shr_n_byte(a, shift);
+}
+template <int shift>
+c_v128 c_imm_v128_shl_n_8(c_v128 a) {
+  return c_v128_shl_n_8(a, shift);
+}
+template <int shift>
+c_v128 c_imm_v128_shr_n_u8(c_v128 a) {
+  return c_v128_shr_n_u8(a, shift);
+}
+template <int shift>
+c_v128 c_imm_v128_shr_n_s8(c_v128 a) {
+  return c_v128_shr_n_s8(a, shift);
+}
+template <int shift>
+c_v128 c_imm_v128_shl_n_16(c_v128 a) {
+  return c_v128_shl_n_16(a, shift);
+}
+template <int shift>
+c_v128 c_imm_v128_shr_n_u16(c_v128 a) {
+  return c_v128_shr_n_u16(a, shift);
+}
+template <int shift>
+c_v128 c_imm_v128_shr_n_s16(c_v128 a) {
+  return c_v128_shr_n_s16(a, shift);
+}
+template <int shift>
+c_v128 c_imm_v128_shl_n_32(c_v128 a) {
+  return c_v128_shl_n_32(a, shift);
+}
+template <int shift>
+c_v128 c_imm_v128_shr_n_u32(c_v128 a) {
+  return c_v128_shr_n_u32(a, shift);
+}
+template <int shift>
+c_v128 c_imm_v128_shr_n_s32(c_v128 a) {
+  return c_v128_shr_n_s32(a, shift);
+}
+template <int shift>
+c_v128 c_imm_v128_align(c_v128 a, c_v128 b) {
+  return c_v128_align(a, b, shift);
+}
+
 // Wrappers around the SAD and SSD functions
 uint32_t v64_sad_u8(v64 a, v64 b) {
   return v64_sad_u8_sum(::v64_sad_u8(v64_sad_u8_init(), a, b));
@@ -141,6 +239,18 @@
 uint32_t c_v64_ssd_u8(c_v64 a, c_v64 b) {
   return c_v64_ssd_u8_sum(::c_v64_ssd_u8(c_v64_ssd_u8_init(), a, b));
 }
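+// As for v64 above, fold the init/accumulate/sum SAD and SSD interface into
+// plain two-argument functions so the v128 versions can be tested like any
+// other binary intrinsic.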
+uint32_t v128_sad_u8(v128 a, v128 b) {
+  return v128_sad_u8_sum(::v128_sad_u8(v128_sad_u8_init(), a, b));
+}
+uint32_t v128_ssd_u8(v128 a, v128 b) {
+  return v128_ssd_u8_sum(::v128_ssd_u8(v128_ssd_u8_init(), a, b));
+}
+uint32_t c_v128_sad_u8(c_v128 a, c_v128 b) {
+  return c_v128_sad_u8_sum(::c_v128_sad_u8(c_v128_sad_u8_init(), a, b));
+}
+uint32_t c_v128_ssd_u8(c_v128 a, c_v128 b) {
+  return c_v128_ssd_u8_sum(::c_v128_ssd_u8(c_v128_ssd_u8_init(), a, b));
+}
 
 namespace {
 
@@ -329,6 +439,210 @@
                       MAP(v64_from_32),
                       MAP(v64_zero),
                       MAP(v64_from_16),
+                      MAP(v128_sad_u8),
+                      MAP(v128_ssd_u8),
+                      MAP(v128_add_8),
+                      MAP(v128_add_16),
+                      MAP(v128_sadd_s16),
+                      MAP(v128_add_32),
+                      MAP(v128_sub_8),
+                      MAP(v128_ssub_u8),
+                      MAP(v128_ssub_s8),
+                      MAP(v128_sub_16),
+                      MAP(v128_ssub_s16),
+                      MAP(v128_sub_32),
+                      MAP(v128_ziplo_8),
+                      MAP(v128_ziphi_8),
+                      MAP(v128_ziplo_16),
+                      MAP(v128_ziphi_16),
+                      MAP(v128_ziplo_32),
+                      MAP(v128_ziphi_32),
+                      MAP(v128_ziplo_64),
+                      MAP(v128_ziphi_64),
+                      MAP(v128_unziphi_8),
+                      MAP(v128_unziplo_8),
+                      MAP(v128_unziphi_16),
+                      MAP(v128_unziplo_16),
+                      MAP(v128_unziphi_32),
+                      MAP(v128_unziplo_32),
+                      MAP(v128_pack_s32_s16),
+                      MAP(v128_pack_s16_u8),
+                      MAP(v128_pack_s16_s8),
+                      MAP(v128_or),
+                      MAP(v128_xor),
+                      MAP(v128_and),
+                      MAP(v128_andn),
+                      MAP(v128_mullo_s16),
+                      MAP(v128_mulhi_s16),
+                      MAP(v128_mullo_s32),
+                      MAP(v128_madd_s16),
+                      MAP(v128_madd_us8),
+                      MAP(v128_avg_u8),
+                      MAP(v128_rdavg_u8),
+                      MAP(v128_avg_u16),
+                      MAP(v128_min_u8),
+                      MAP(v128_max_u8),
+                      MAP(v128_min_s8),
+                      MAP(v128_max_s8),
+                      MAP(v128_min_s16),
+                      MAP(v128_max_s16),
+                      MAP(v128_cmpgt_s8),
+                      MAP(v128_cmplt_s8),
+                      MAP(v128_cmpeq_8),
+                      MAP(v128_cmpgt_s16),
+                      MAP(v128_cmpeq_16),
+                      MAP(v128_cmplt_s16),
+                      MAP(v128_shuffle_8),
+                      MAP(imm_v128_align<1>),
+                      MAP(imm_v128_align<2>),
+                      MAP(imm_v128_align<3>),
+                      MAP(imm_v128_align<4>),
+                      MAP(imm_v128_align<5>),
+                      MAP(imm_v128_align<6>),
+                      MAP(imm_v128_align<7>),
+                      MAP(imm_v128_align<8>),
+                      MAP(imm_v128_align<9>),
+                      MAP(imm_v128_align<10>),
+                      MAP(imm_v128_align<11>),
+                      MAP(imm_v128_align<12>),
+                      MAP(imm_v128_align<13>),
+                      MAP(imm_v128_align<14>),
+                      MAP(imm_v128_align<15>),
+                      MAP(v128_abs_s16),
+                      MAP(v128_padd_s16),
+                      MAP(v128_unpacklo_u16_s32),
+                      MAP(v128_unpacklo_s16_s32),
+                      MAP(v128_unpackhi_u16_s32),
+                      MAP(v128_unpackhi_s16_s32),
+                      MAP(imm_v128_shr_n_byte<1>),
+                      MAP(imm_v128_shr_n_byte<2>),
+                      MAP(imm_v128_shr_n_byte<3>),
+                      MAP(imm_v128_shr_n_byte<4>),
+                      MAP(imm_v128_shr_n_byte<5>),
+                      MAP(imm_v128_shr_n_byte<6>),
+                      MAP(imm_v128_shr_n_byte<7>),
+                      MAP(imm_v128_shr_n_byte<8>),
+                      MAP(imm_v128_shr_n_byte<9>),
+                      MAP(imm_v128_shr_n_byte<10>),
+                      MAP(imm_v128_shr_n_byte<11>),
+                      MAP(imm_v128_shr_n_byte<12>),
+                      MAP(imm_v128_shr_n_byte<13>),
+                      MAP(imm_v128_shr_n_byte<14>),
+                      MAP(imm_v128_shr_n_byte<15>),
+                      MAP(imm_v128_shl_n_byte<1>),
+                      MAP(imm_v128_shl_n_byte<2>),
+                      MAP(imm_v128_shl_n_byte<3>),
+                      MAP(imm_v128_shl_n_byte<4>),
+                      MAP(imm_v128_shl_n_byte<5>),
+                      MAP(imm_v128_shl_n_byte<6>),
+                      MAP(imm_v128_shl_n_byte<7>),
+                      MAP(imm_v128_shl_n_byte<8>),
+                      MAP(imm_v128_shl_n_byte<9>),
+                      MAP(imm_v128_shl_n_byte<10>),
+                      MAP(imm_v128_shl_n_byte<11>),
+                      MAP(imm_v128_shl_n_byte<12>),
+                      MAP(imm_v128_shl_n_byte<13>),
+                      MAP(imm_v128_shl_n_byte<14>),
+                      MAP(imm_v128_shl_n_byte<15>),
+                      MAP(imm_v128_shl_n_8<1>),
+                      MAP(imm_v128_shl_n_8<2>),
+                      MAP(imm_v128_shl_n_8<3>),
+                      MAP(imm_v128_shl_n_8<4>),
+                      MAP(imm_v128_shl_n_8<5>),
+                      MAP(imm_v128_shl_n_8<6>),
+                      MAP(imm_v128_shl_n_8<7>),
+                      MAP(imm_v128_shr_n_u8<1>),
+                      MAP(imm_v128_shr_n_u8<2>),
+                      MAP(imm_v128_shr_n_u8<3>),
+                      MAP(imm_v128_shr_n_u8<4>),
+                      MAP(imm_v128_shr_n_u8<5>),
+                      MAP(imm_v128_shr_n_u8<6>),
+                      MAP(imm_v128_shr_n_u8<7>),
+                      MAP(imm_v128_shr_n_s8<1>),
+                      MAP(imm_v128_shr_n_s8<2>),
+                      MAP(imm_v128_shr_n_s8<3>),
+                      MAP(imm_v128_shr_n_s8<4>),
+                      MAP(imm_v128_shr_n_s8<5>),
+                      MAP(imm_v128_shr_n_s8<6>),
+                      MAP(imm_v128_shr_n_s8<7>),
+                      MAP(imm_v128_shl_n_16<1>),
+                      MAP(imm_v128_shl_n_16<2>),
+                      MAP(imm_v128_shl_n_16<4>),
+                      MAP(imm_v128_shl_n_16<6>),
+                      MAP(imm_v128_shl_n_16<8>),
+                      MAP(imm_v128_shl_n_16<10>),
+                      MAP(imm_v128_shl_n_16<12>),
+                      MAP(imm_v128_shl_n_16<14>),
+                      MAP(imm_v128_shr_n_u16<1>),
+                      MAP(imm_v128_shr_n_u16<2>),
+                      MAP(imm_v128_shr_n_u16<4>),
+                      MAP(imm_v128_shr_n_u16<6>),
+                      MAP(imm_v128_shr_n_u16<8>),
+                      MAP(imm_v128_shr_n_u16<10>),
+                      MAP(imm_v128_shr_n_u16<12>),
+                      MAP(imm_v128_shr_n_u16<14>),
+                      MAP(imm_v128_shr_n_s16<1>),
+                      MAP(imm_v128_shr_n_s16<2>),
+                      MAP(imm_v128_shr_n_s16<4>),
+                      MAP(imm_v128_shr_n_s16<6>),
+                      MAP(imm_v128_shr_n_s16<8>),
+                      MAP(imm_v128_shr_n_s16<10>),
+                      MAP(imm_v128_shr_n_s16<12>),
+                      MAP(imm_v128_shr_n_s16<14>),
+                      MAP(imm_v128_shl_n_32<1>),
+                      MAP(imm_v128_shl_n_32<4>),
+                      MAP(imm_v128_shl_n_32<8>),
+                      MAP(imm_v128_shl_n_32<12>),
+                      MAP(imm_v128_shl_n_32<16>),
+                      MAP(imm_v128_shl_n_32<20>),
+                      MAP(imm_v128_shl_n_32<24>),
+                      MAP(imm_v128_shl_n_32<28>),
+                      MAP(imm_v128_shr_n_u32<1>),
+                      MAP(imm_v128_shr_n_u32<4>),
+                      MAP(imm_v128_shr_n_u32<8>),
+                      MAP(imm_v128_shr_n_u32<12>),
+                      MAP(imm_v128_shr_n_u32<16>),
+                      MAP(imm_v128_shr_n_u32<20>),
+                      MAP(imm_v128_shr_n_u32<24>),
+                      MAP(imm_v128_shr_n_u32<28>),
+                      MAP(imm_v128_shr_n_s32<1>),
+                      MAP(imm_v128_shr_n_s32<4>),
+                      MAP(imm_v128_shr_n_s32<8>),
+                      MAP(imm_v128_shr_n_s32<12>),
+                      MAP(imm_v128_shr_n_s32<16>),
+                      MAP(imm_v128_shr_n_s32<20>),
+                      MAP(imm_v128_shr_n_s32<24>),
+                      MAP(imm_v128_shr_n_s32<28>),
+                      MAP(v128_from_v64),
+                      MAP(v128_zip_8),
+                      MAP(v128_zip_16),
+                      MAP(v128_zip_32),
+                      MAP(v128_mul_s16),
+                      MAP(v128_unpack_u8_s16),
+                      MAP(v128_unpack_u16_s32),
+                      MAP(v128_unpack_s16_s32),
+                      MAP(v128_shl_8),
+                      MAP(v128_shr_u8),
+                      MAP(v128_shr_s8),
+                      MAP(v128_shl_16),
+                      MAP(v128_shr_u16),
+                      MAP(v128_shr_s16),
+                      MAP(v128_shl_32),
+                      MAP(v128_shr_u32),
+                      MAP(v128_shr_s32),
+                      MAP(v128_hadd_u8),
+                      MAP(v128_dotp_s16),
+                      MAP(v128_low_u32),
+                      MAP(v128_low_v64),
+                      MAP(v128_high_v64),
+                      MAP(v128_from_64),
+                      MAP(v128_from_32),
+                      MAP(v128_zero),
+                      MAP(v128_dup_8),
+                      MAP(v128_dup_16),
+                      MAP(v128_dup_32),
+                      MAP(v128_unpacklo_u8_s16),
+                      MAP(v128_unpackhi_u8_s16),
                       { NULL, NULL, NULL } };
 #undef MAP
 
@@ -393,7 +707,7 @@
   }
 }
 
-// We need a store function for uint64_t
+// We need load/store functions for uint64_t
 void u64_store_aligned(void *p, uint64_t a) {
   v64_store_aligned(p, v64_from_64(a));
 }
@@ -402,6 +716,12 @@
   c_v64_store_aligned(p, c_v64_from_64(a));
 }
 
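+// The v128 tests also pass uint64_t arguments (e.g. v128_from_64), so we
+// need matching load functions in addition to the stores above.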
+uint64_t u64_load_aligned(void *p) { return v64_u64(v64_load_aligned(p)); }
+
+uint64_t c_u64_load_aligned(void *p) {
+  return c_v64_u64(c_v64_load_aligned(p));
+}
+
 // CompareSimd1Arg and CompareSimd2Args compare intrinsics taking 1 or
 // 2 arguments respectively with their corresponding C reference.
 // Ideally, the loads and stores should have gone into the template
@@ -514,6 +834,54 @@
           reinterpret_cast<fptr>(v64_load_aligned), simd, d,
           reinterpret_cast<fptr>(c_u32_store_aligned),
           reinterpret_cast<fptr>(c_v64_load_aligned), ref_simd, ref_d, s);
+    } else if (typeid(CRet) == typeid(uint32_t) &&
+               typeid(CArg) == typeid(c_v128)) {
+      // U32_V128
+      error = CompareSimd1Arg<uint32_t, v128, CRet, CArg>(
+          reinterpret_cast<fptr>(u32_store_aligned),
+          reinterpret_cast<fptr>(v128_load_aligned), simd, d,
+          reinterpret_cast<fptr>(c_u32_store_aligned),
+          reinterpret_cast<fptr>(c_v128_load_aligned), ref_simd, ref_d, s);
+    } else if (typeid(CRet) == typeid(uint64_t) &&
+               typeid(CArg) == typeid(c_v128)) {
+      // U64_V128
+      error = CompareSimd1Arg<uint64_t, v128, CRet, CArg>(
+          reinterpret_cast<fptr>(u64_store_aligned),
+          reinterpret_cast<fptr>(v128_load_aligned), simd, d,
+          reinterpret_cast<fptr>(c_u64_store_aligned),
+          reinterpret_cast<fptr>(c_v128_load_aligned), ref_simd, ref_d, s);
+    } else if (typeid(CRet) == typeid(c_v64) &&
+               typeid(CArg) == typeid(c_v128)) {
+      // V64_V128
+      error = CompareSimd1Arg<v64, v128, CRet, CArg>(
+          reinterpret_cast<fptr>(v64_store_aligned),
+          reinterpret_cast<fptr>(v128_load_aligned), simd, d,
+          reinterpret_cast<fptr>(c_v64_store_aligned),
+          reinterpret_cast<fptr>(c_v128_load_aligned), ref_simd, ref_d, s);
+    } else if (typeid(CRet) == typeid(c_v128) &&
+               typeid(CArg) == typeid(c_v128)) {
+      // V128_V128
+      error = CompareSimd1Arg<v128, v128, CRet, CArg>(
+          reinterpret_cast<fptr>(v128_store_aligned),
+          reinterpret_cast<fptr>(v128_load_aligned), simd, d,
+          reinterpret_cast<fptr>(c_v128_store_aligned),
+          reinterpret_cast<fptr>(c_v128_load_aligned), ref_simd, ref_d, s);
+    } else if (typeid(CRet) == typeid(c_v128) &&
+               typeid(CArg) == typeid(c_v64)) {
+      // V128_V64
+      error = CompareSimd1Arg<v128, v64, CRet, CArg>(
+          reinterpret_cast<fptr>(v128_store_aligned),
+          reinterpret_cast<fptr>(v64_load_aligned), simd, d,
+          reinterpret_cast<fptr>(c_v128_store_aligned),
+          reinterpret_cast<fptr>(c_v64_load_aligned), ref_simd, ref_d, s);
+    } else if (typeid(CRet) == typeid(c_v128) &&
+               typeid(CArg) == typeid(uint32_t)) {
+      // V128_U32
+      error = CompareSimd1Arg<v128, uint32_t, CRet, CArg>(
+          reinterpret_cast<fptr>(v128_store_aligned),
+          reinterpret_cast<fptr>(u32_load_aligned), simd, d,
+          reinterpret_cast<fptr>(c_v128_store_aligned),
+          reinterpret_cast<fptr>(c_u32_load_aligned), ref_simd, ref_d, s);
     } else {
       FAIL() << "Internal error: Unknown intrinsic function "
              << typeid(CRet).name() << " " << name << "(" << typeid(CArg).name()
@@ -615,6 +983,78 @@
           reinterpret_cast<fptr>(c_v64_load_aligned),
           reinterpret_cast<fptr>(c_u32_load_aligned),
           reinterpret_cast<fptr>(ref_simd), ref_d, s1, s2);
+    } else if (typeid(CRet) == typeid(c_v128) &&
+               typeid(CArg1) == typeid(c_v128) &&
+               typeid(CArg2) == typeid(c_v128)) {
+      // V128_V128V128
+      error = CompareSimd2Args<v128, v128, v128, CRet, CArg1, CArg2>(
+          reinterpret_cast<fptr>(v128_store_aligned),
+          reinterpret_cast<fptr>(v128_load_aligned),
+          reinterpret_cast<fptr>(v128_load_aligned), simd, d,
+          reinterpret_cast<fptr>(c_v128_store_aligned),
+          reinterpret_cast<fptr>(c_v128_load_aligned),
+          reinterpret_cast<fptr>(c_v128_load_aligned),
+          reinterpret_cast<fptr>(ref_simd), ref_d, s1, s2);
+    } else if (typeid(CRet) == typeid(uint32_t) &&
+               typeid(CArg1) == typeid(c_v128) &&
+               typeid(CArg2) == typeid(c_v128)) {
+      // U32_V128V128
+      error = CompareSimd2Args<uint32_t, v128, v128, CRet, CArg1, CArg2>(
+          reinterpret_cast<fptr>(u32_store_aligned),
+          reinterpret_cast<fptr>(v128_load_aligned),
+          reinterpret_cast<fptr>(v128_load_aligned), simd, d,
+          reinterpret_cast<fptr>(c_u32_store_aligned),
+          reinterpret_cast<fptr>(c_v128_load_aligned),
+          reinterpret_cast<fptr>(c_v128_load_aligned),
+          reinterpret_cast<fptr>(ref_simd), ref_d, s1, s2);
+    } else if (typeid(CRet) == typeid(int64_t) &&
+               typeid(CArg1) == typeid(c_v128) &&
+               typeid(CArg2) == typeid(c_v128)) {
+      // S64_V128V128
+      error = CompareSimd2Args<int64_t, v128, v128, CRet, CArg1, CArg2>(
+          reinterpret_cast<fptr>(u64_store_aligned),
+          reinterpret_cast<fptr>(v128_load_aligned),
+          reinterpret_cast<fptr>(v128_load_aligned), simd, d,
+          reinterpret_cast<fptr>(c_u64_store_aligned),
+          reinterpret_cast<fptr>(c_v128_load_aligned),
+          reinterpret_cast<fptr>(c_v128_load_aligned),
+          reinterpret_cast<fptr>(ref_simd), ref_d, s1, s2);
+    } else if (typeid(CRet) == typeid(c_v128) &&
+               typeid(CArg1) == typeid(uint64_t) &&
+               typeid(CArg2) == typeid(uint64_t)) {
+      // V128_U64U64
+      error = CompareSimd2Args<v128, uint64_t, uint64_t, CRet, CArg1, CArg2>(
+          reinterpret_cast<fptr>(v128_store_aligned),
+          reinterpret_cast<fptr>(u64_load_aligned),
+          reinterpret_cast<fptr>(u64_load_aligned), simd, d,
+          reinterpret_cast<fptr>(c_v128_store_aligned),
+          reinterpret_cast<fptr>(c_u64_load_aligned),
+          reinterpret_cast<fptr>(c_u64_load_aligned),
+          reinterpret_cast<fptr>(ref_simd), ref_d, s1, s2);
+    } else if (typeid(CRet) == typeid(c_v128) &&
+               typeid(CArg1) == typeid(c_v64) &&
+               typeid(CArg2) == typeid(c_v64)) {
+      // V128_V64V64
+      error = CompareSimd2Args<v128, v64, v64, CRet, CArg1, CArg2>(
+          reinterpret_cast<fptr>(v128_store_aligned),
+          reinterpret_cast<fptr>(v64_load_aligned),
+          reinterpret_cast<fptr>(v64_load_aligned), simd, d,
+          reinterpret_cast<fptr>(c_v128_store_aligned),
+          reinterpret_cast<fptr>(c_v64_load_aligned),
+          reinterpret_cast<fptr>(c_v64_load_aligned),
+          reinterpret_cast<fptr>(ref_simd), ref_d, s1, s2);
+    } else if (typeid(CRet) == typeid(c_v128) &&
+               typeid(CArg1) == typeid(c_v128) &&
+               typeid(CArg2) == typeid(uint32_t)) {
+      // V128_V128U32
+      error = CompareSimd2Args<v128, v128, uint32_t, CRet, CArg1, CArg2>(
+          reinterpret_cast<fptr>(v128_store_aligned),
+          reinterpret_cast<fptr>(v128_load_aligned),
+          reinterpret_cast<fptr>(u32_load_aligned), simd, d,
+          reinterpret_cast<fptr>(c_v128_store_aligned),
+          reinterpret_cast<fptr>(c_v128_load_aligned),
+          reinterpret_cast<fptr>(c_u32_load_aligned),
+          reinterpret_cast<fptr>(ref_simd), ref_d, s1, s2);
     } else {
       FAIL() << "Internal error: Unknown intrinsic function "
              << typeid(CRet).name() << " " << name << "("
@@ -649,5 +1089,29 @@
                                                    const char *);
 template void TestSimd2Args<uint32_t, c_v64, c_v64>(uint32_t, uint32_t,
                                                     uint32_t, const char *);
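+// Explicit instantiations covering the v128 signatures exercised from
+// simd_impl.h.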
+template void TestSimd1Arg<c_v128, c_v128>(uint32_t, uint32_t, uint32_t,
+                                           const char *);
+template void TestSimd1Arg<c_v128, uint32_t>(uint32_t, uint32_t, uint32_t,
+                                             const char *);
+template void TestSimd1Arg<c_v128, c_v64>(uint32_t, uint32_t, uint32_t,
+                                          const char *);
+template void TestSimd1Arg<uint32_t, c_v128>(uint32_t, uint32_t, uint32_t,
+                                             const char *);
+template void TestSimd1Arg<uint64_t, c_v128>(uint32_t, uint32_t, uint32_t,
+                                             const char *);
+template void TestSimd1Arg<c_v64, c_v128>(uint32_t, uint32_t, uint32_t,
+                                          const char *);
+template void TestSimd2Args<c_v128, c_v128, c_v128>(uint32_t, uint32_t,
+                                                    uint32_t, const char *);
+template void TestSimd2Args<c_v128, c_v128, uint32_t>(uint32_t, uint32_t,
+                                                      uint32_t, const char *);
+template void TestSimd2Args<c_v128, uint64_t, uint64_t>(uint32_t, uint32_t,
+                                                        uint32_t, const char *);
+template void TestSimd2Args<c_v128, c_v64, c_v64>(uint32_t, uint32_t, uint32_t,
+                                                  const char *);
+template void TestSimd2Args<int64_t, c_v128, c_v128>(uint32_t, uint32_t,
+                                                     uint32_t, const char *);
+template void TestSimd2Args<uint32_t, c_v128, c_v128>(uint32_t, uint32_t,
+                                                      uint32_t, const char *);
 
 }  // namespace SIMD_NAMESPACE
diff --git a/test/simd_impl.h b/test/simd_impl.h
index 1b9198f..ce41fb6 100644
--- a/test/simd_impl.h
+++ b/test/simd_impl.h
@@ -14,7 +14,7 @@
 #include "test/clear_system_state.h"
 #include "test/register_state_check.h"
 #include "aom_dsp/aom_simd_inline.h"
-#include "aom_dsp/simd/v64_intrinsics_c.h"
+#include "aom_dsp/simd/v128_intrinsics_c.h"
 
 namespace SIMD_NAMESPACE {
 
@@ -49,10 +49,25 @@
 TYPEDEF_SIMD(S64_V64V64);
 TYPEDEF_SIMD(V64_V64U32);
 TYPEDEF_SIMD(U32_V64V64);
+TYPEDEF_SIMD(V128_V64);
+TYPEDEF_SIMD(V128_V128);
+TYPEDEF_SIMD(U32_V128);
+TYPEDEF_SIMD(U64_V128);
+TYPEDEF_SIMD(V64_V128);
+TYPEDEF_SIMD(V128_U32);
+TYPEDEF_SIMD(V128_U64U64);
+TYPEDEF_SIMD(V128_V64V64);
+TYPEDEF_SIMD(V128_V128V128);
+TYPEDEF_SIMD(S64_V128V128);
+TYPEDEF_SIMD(V128_V128U32);
+TYPEDEF_SIMD(U32_V128V128);
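+// Each typedef names a test case after its signature: return type followed
+// by argument types, e.g. V128_V128U32 covers v128 (*)(v128, uint32_t).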
 
 // Google Test allows up to 50 tests per case, so split the largest
 typedef ARCH_POSTFIX(V64_V64) ARCH_POSTFIX(V64_V64_Part2);
 typedef ARCH_POSTFIX(V64_V64V64) ARCH_POSTFIX(V64_V64V64_Part2);
+typedef ARCH_POSTFIX(V128_V128) ARCH_POSTFIX(V128_V128_Part2);
+typedef ARCH_POSTFIX(V128_V128) ARCH_POSTFIX(V128_V128_Part3);
+typedef ARCH_POSTFIX(V128_V128V128) ARCH_POSTFIX(V128_V128V128_Part2);
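+// The v128 single- and two-argument cases are split the same way; the
+// per-immediate shift and align instantiations push them over the limit.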
 
 // These functions are machine tuned and located elsewhere
 template <typename c_ret, typename c_arg>
@@ -114,6 +129,66 @@
   TestSimd2Args<c_v64, c_v64, c_v64>(iterations, mask, maskwidth, name);
 }
 
+MY_TEST_P(ARCH_POSTFIX(U32_V128), TestIntrinsics) {
+  TestSimd1Arg<uint32_t, c_v128>(iterations, mask, maskwidth, name);
+}
+
+MY_TEST_P(ARCH_POSTFIX(U64_V128), TestIntrinsics) {
+  TestSimd1Arg<uint64_t, c_v128>(iterations, mask, maskwidth, name);
+}
+
+MY_TEST_P(ARCH_POSTFIX(V64_V128), TestIntrinsics) {
+  TestSimd1Arg<c_v64, c_v128>(iterations, mask, maskwidth, name);
+}
+
+MY_TEST_P(ARCH_POSTFIX(V128_V128), TestIntrinsics) {
+  TestSimd1Arg<c_v128, c_v128>(iterations, mask, maskwidth, name);
+}
+
+MY_TEST_P(ARCH_POSTFIX(V128_U32), TestIntrinsics) {
+  TestSimd1Arg<c_v128, uint32_t>(iterations, mask, maskwidth, name);
+}
+
+MY_TEST_P(ARCH_POSTFIX(V128_V64), TestIntrinsics) {
+  TestSimd1Arg<c_v128, c_v64>(iterations, mask, maskwidth, name);
+}
+
+MY_TEST_P(ARCH_POSTFIX(V128_V128V128), TestIntrinsics) {
+  TestSimd2Args<c_v128, c_v128, c_v128>(iterations, mask, maskwidth, name);
+}
+
+MY_TEST_P(ARCH_POSTFIX(U32_V128V128), TestIntrinsics) {
+  TestSimd2Args<uint32_t, c_v128, c_v128>(iterations, mask, maskwidth, name);
+}
+
+MY_TEST_P(ARCH_POSTFIX(S64_V128V128), TestIntrinsics) {
+  TestSimd2Args<int64_t, c_v128, c_v128>(iterations, mask, maskwidth, name);
+}
+
+MY_TEST_P(ARCH_POSTFIX(V128_U64U64), TestIntrinsics) {
+  TestSimd2Args<c_v128, uint64_t, uint64_t>(iterations, mask, maskwidth, name);
+}
+
+MY_TEST_P(ARCH_POSTFIX(V128_V64V64), TestIntrinsics) {
+  TestSimd2Args<c_v128, c_v64, c_v64>(iterations, mask, maskwidth, name);
+}
+
+MY_TEST_P(ARCH_POSTFIX(V128_V128U32), TestIntrinsics) {
+  TestSimd2Args<c_v128, c_v128, uint32_t>(iterations, mask, maskwidth, name);
+}
+
+MY_TEST_P(ARCH_POSTFIX(V128_V128V128_Part2), TestIntrinsics) {
+  TestSimd2Args<c_v128, c_v128, c_v128>(iterations, mask, maskwidth, name);
+}
+
+MY_TEST_P(ARCH_POSTFIX(V128_V128_Part2), TestIntrinsics) {
+  TestSimd1Arg<c_v128, c_v128>(iterations, mask, maskwidth, name);
+}
+
+MY_TEST_P(ARCH_POSTFIX(V128_V128_Part3), TestIntrinsics) {
+  TestSimd1Arg<c_v128, c_v128>(iterations, mask, maskwidth, name);
+}
+
 // Add a macro layer since INSTANTIATE_TEST_CASE_P will quote the name
 // so we need to expand it first with the prefix
 #define INSTANTIATE(name, type, ...) \
@@ -274,4 +349,200 @@
 
 INSTANTIATE(ARCH, ARCH_POSTFIX(V64_U32U32), SIMD_TUPLE(v64_from_32, 0U, 0U));
 
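+// In each SIMD_TUPLE the second and third arguments give a mask and mask
+// width used to constrain the randomly generated input (e.g. shift counts
+// and shuffle indices) to its valid range; 0U, 0U leaves it unconstrained.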
+INSTANTIATE(ARCH, ARCH_POSTFIX(U32_V128V128), SIMD_TUPLE(v128_sad_u8, 0U, 0U),
+            SIMD_TUPLE(v128_ssd_u8, 0U, 0U));
+
+INSTANTIATE(
+    ARCH, ARCH_POSTFIX(V128_V128V128), SIMD_TUPLE(v128_add_8, 0U, 0U),
+    SIMD_TUPLE(v128_add_16, 0U, 0U), SIMD_TUPLE(v128_sadd_s16, 0U, 0U),
+    SIMD_TUPLE(v128_add_32, 0U, 0U), SIMD_TUPLE(v128_sub_8, 0U, 0U),
+    SIMD_TUPLE(v128_ssub_u8, 0U, 0U), SIMD_TUPLE(v128_ssub_s8, 0U, 0U),
+    SIMD_TUPLE(v128_sub_16, 0U, 0U), SIMD_TUPLE(v128_ssub_s16, 0U, 0U),
+    SIMD_TUPLE(v128_sub_32, 0U, 0U), SIMD_TUPLE(v128_ziplo_8, 0U, 0U),
+    SIMD_TUPLE(v128_ziphi_8, 0U, 0U), SIMD_TUPLE(v128_ziplo_16, 0U, 0U),
+    SIMD_TUPLE(v128_ziphi_16, 0U, 0U), SIMD_TUPLE(v128_ziplo_32, 0U, 0U),
+    SIMD_TUPLE(v128_ziphi_32, 0U, 0U), SIMD_TUPLE(v128_ziplo_64, 0U, 0U),
+    SIMD_TUPLE(v128_ziphi_64, 0U, 0U), SIMD_TUPLE(v128_unziphi_8, 0U, 0U),
+    SIMD_TUPLE(v128_unziplo_8, 0U, 0U), SIMD_TUPLE(v128_unziphi_16, 0U, 0U),
+    SIMD_TUPLE(v128_unziplo_16, 0U, 0U), SIMD_TUPLE(v128_unziphi_32, 0U, 0U),
+    SIMD_TUPLE(v128_unziplo_32, 0U, 0U), SIMD_TUPLE(v128_pack_s32_s16, 0U, 0U),
+    SIMD_TUPLE(v128_pack_s16_u8, 0U, 0U), SIMD_TUPLE(v128_pack_s16_s8, 0U, 0U),
+    SIMD_TUPLE(v128_or, 0U, 0U), SIMD_TUPLE(v128_xor, 0U, 0U),
+    SIMD_TUPLE(v128_and, 0U, 0U), SIMD_TUPLE(v128_andn, 0U, 0U),
+    SIMD_TUPLE(v128_mullo_s16, 0U, 0U), SIMD_TUPLE(v128_mulhi_s16, 0U, 0U),
+    SIMD_TUPLE(v128_mullo_s32, 0U, 0U), SIMD_TUPLE(v128_madd_s16, 0U, 0U),
+    SIMD_TUPLE(v128_madd_us8, 0U, 0U), SIMD_TUPLE(v128_avg_u8, 0U, 0U),
+    SIMD_TUPLE(v128_rdavg_u8, 0U, 0U), SIMD_TUPLE(v128_avg_u16, 0U, 0U),
+    SIMD_TUPLE(v128_min_u8, 0U, 0U), SIMD_TUPLE(v128_max_u8, 0U, 0U),
+    SIMD_TUPLE(v128_min_s8, 0U, 0U), SIMD_TUPLE(v128_max_s8, 0U, 0U),
+    SIMD_TUPLE(v128_min_s16, 0U, 0U), SIMD_TUPLE(v128_max_s16, 0U, 0U),
+    SIMD_TUPLE(v128_cmpgt_s8, 0U, 0U), SIMD_TUPLE(v128_cmplt_s8, 0U, 0U),
+    SIMD_TUPLE(v128_cmpeq_8, 0U, 0U), SIMD_TUPLE(v128_cmpgt_s16, 0U, 0U),
+    SIMD_TUPLE(v128_cmpeq_16, 0U, 0U));
+
+INSTANTIATE(ARCH, ARCH_POSTFIX(V128_V128V128_Part2),
+            SIMD_TUPLE(v128_cmplt_s16, 0U, 0U),
+            SIMD_TUPLE(v128_shuffle_8, 15U, 8U),
+            SIMD_TUPLE(imm_v128_align<1>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_align<2>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_align<3>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_align<4>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_align<5>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_align<6>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_align<7>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_align<8>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_align<9>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_align<10>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_align<11>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_align<12>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_align<13>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_align<14>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_align<15>, 0U, 0U));
+
+INSTANTIATE(ARCH, ARCH_POSTFIX(V128_V128), SIMD_TUPLE(v128_abs_s16, 0U, 0U),
+            SIMD_TUPLE(v128_padd_s16, 0U, 0U),
+            SIMD_TUPLE(v128_unpacklo_u8_s16, 0U, 0U),
+            SIMD_TUPLE(v128_unpacklo_u16_s32, 0U, 0U),
+            SIMD_TUPLE(v128_unpacklo_s16_s32, 0U, 0U),
+            SIMD_TUPLE(v128_unpackhi_u8_s16, 0U, 0U),
+            SIMD_TUPLE(v128_unpackhi_u16_s32, 0U, 0U),
+            SIMD_TUPLE(v128_unpackhi_s16_s32, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_byte<1>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_byte<2>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_byte<3>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_byte<4>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_byte<5>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_byte<6>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_byte<7>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_byte<8>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_byte<9>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_byte<10>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_byte<11>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_byte<12>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_byte<13>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_byte<14>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_byte<15>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_byte<1>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_byte<2>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_byte<3>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_byte<4>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_byte<5>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_byte<6>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_byte<7>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_byte<8>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_byte<9>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_byte<10>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_byte<11>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_byte<12>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_byte<13>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_byte<14>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_byte<15>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_8<1>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_8<2>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_8<3>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_8<4>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_8<5>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_8<6>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_8<7>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_u8<1>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_u8<2>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_u8<3>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_u8<4>, 0U, 0U));
+
+INSTANTIATE(ARCH, ARCH_POSTFIX(V128_V128_Part2),
+            SIMD_TUPLE(imm_v128_shr_n_u8<5>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_u8<6>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_u8<7>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_s8<1>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_s8<2>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_s8<3>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_s8<4>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_s8<5>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_s8<6>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_s8<7>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_16<1>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_16<2>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_16<4>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_16<6>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_16<8>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_16<10>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_16<12>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_16<14>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_u16<1>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_u16<2>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_u16<4>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_u16<6>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_u16<8>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_u16<10>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_u16<12>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_u16<14>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_s16<1>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_s16<2>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_s16<4>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_s16<6>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_s16<8>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_s16<10>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_s16<12>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_s16<14>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_32<1>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_32<4>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_32<8>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_32<12>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_32<16>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_32<20>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_32<24>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shl_n_32<28>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_u32<1>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_u32<4>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_u32<8>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_u32<12>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_u32<16>, 0U, 0U));
+
+INSTANTIATE(ARCH, ARCH_POSTFIX(V128_V128_Part3),
+            SIMD_TUPLE(imm_v128_shr_n_u32<20>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_u32<24>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_u32<28>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_s32<1>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_s32<4>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_s32<8>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_s32<12>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_s32<16>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_s32<20>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_s32<24>, 0U, 0U),
+            SIMD_TUPLE(imm_v128_shr_n_s32<28>, 0U, 0U));
+
+INSTANTIATE(ARCH, ARCH_POSTFIX(V128_V64V64), SIMD_TUPLE(v128_from_v64, 0U, 0U),
+            SIMD_TUPLE(v128_zip_8, 0U, 0U), SIMD_TUPLE(v128_zip_16, 0U, 0U),
+            SIMD_TUPLE(v128_zip_32, 0U, 0U), SIMD_TUPLE(v128_mul_s16, 0U, 0U));
+
+INSTANTIATE(ARCH, ARCH_POSTFIX(V128_U64U64), SIMD_TUPLE(v128_from_64, 0U, 0U));
+
+INSTANTIATE(ARCH, ARCH_POSTFIX(V128_V64),
+            SIMD_TUPLE(v128_unpack_u8_s16, 0U, 0U),
+            SIMD_TUPLE(v128_unpack_u16_s32, 0U, 0U),
+            SIMD_TUPLE(v128_unpack_s16_s32, 0U, 0U));
+
+INSTANTIATE(ARCH, ARCH_POSTFIX(V128_V128U32), SIMD_TUPLE(v128_shl_8, 7U, 32U),
+            SIMD_TUPLE(v128_shr_u8, 7U, 32U), SIMD_TUPLE(v128_shr_s8, 7U, 32U),
+            SIMD_TUPLE(v128_shl_16, 15U, 32U),
+            SIMD_TUPLE(v128_shr_u16, 15U, 32U),
+            SIMD_TUPLE(v128_shr_s16, 15U, 32U),
+            SIMD_TUPLE(v128_shl_32, 31U, 32U),
+            SIMD_TUPLE(v128_shr_u32, 31U, 32U),
+            SIMD_TUPLE(v128_shr_s32, 31U, 32U));
+
+INSTANTIATE(ARCH, ARCH_POSTFIX(U32_V128), SIMD_TUPLE(v128_low_u32, 0U, 0U));
+
+INSTANTIATE(ARCH, ARCH_POSTFIX(U64_V128), SIMD_TUPLE(v128_hadd_u8, 0U, 0U));
+
+INSTANTIATE(ARCH, ARCH_POSTFIX(V64_V128), SIMD_TUPLE(v128_low_v64, 0U, 0U),
+            SIMD_TUPLE(v128_high_v64, 0U, 0U));
+
+INSTANTIATE(ARCH, ARCH_POSTFIX(V128_U32), SIMD_TUPLE(v128_dup_8, 0xffU, 32U),
+            SIMD_TUPLE(v128_dup_16, 0xffffU, 32U),
+            SIMD_TUPLE(v128_dup_32, 0U, 0U));
+
+INSTANTIATE(ARCH, ARCH_POSTFIX(S64_V128V128),
+            SIMD_TUPLE(v128_dotp_s16, 0U, 0U));
+
 }  // namespace SIMD_NAMESPACE