Merge "Use derived variable size for memcpy" into nextgenv2
diff --git a/aom_dsp/aom_dsp_rtcd_defs.pl b/aom_dsp/aom_dsp_rtcd_defs.pl
index 5455b7b..a2b9a75 100644
--- a/aom_dsp/aom_dsp_rtcd_defs.pl
+++ b/aom_dsp/aom_dsp_rtcd_defs.pl
@@ -586,6 +586,15 @@
 add_proto qw/void aom_lpf_horizontal_4_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1";
 specialize qw/aom_lpf_horizontal_4_dual sse2 neon dspr2 msa/;
 
+if (aom_config("CONFIG_CLPF") eq "yes") {
+  add_proto qw/void aom_clpf_block/, "const uint8_t *src, uint8_t *dst, int stride, int x0, int y0, int sizex, int sizey, int width, int height, unsigned int strength";
+  specialize qw/aom_clpf_block sse2 ssse3 sse4_1 neon/;
+  add_proto qw/void aom_clpf_detect/, "const uint8_t *rec, const uint8_t *org, int rstride, int ostride, int x0, int y0, int width, int height, int *sum0, int *sum1, unsigned int strength";
+  specialize qw/aom_clpf_detect sse2 ssse3 sse4_1 neon/;
+  add_proto qw/void aom_clpf_detect_multi/, "const uint8_t *rec, const uint8_t *org, int rstride, int ostride, int x0, int y0, int width, int height, int *sum";
+  specialize qw/aom_clpf_detect_multi sse2 ssse3 sse4_1 neon/;
+}
+
 if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
   add_proto qw/void aom_highbd_lpf_vertical_16/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
   specialize qw/aom_highbd_lpf_vertical_16 sse2/;
diff --git a/aom_dsp/x86/inv_txfm_sse2.c b/aom_dsp/x86/inv_txfm_sse2.c
index 5162b6f..78fd1ef 100644
--- a/aom_dsp/x86/inv_txfm_sse2.c
+++ b/aom_dsp/x86/inv_txfm_sse2.c
@@ -171,14 +171,6 @@
   RECON_AND_STORE4X4(dest + 3 * stride, dc_value);
 }
 
-static INLINE void transpose_4x4(__m128i *res) {
-  const __m128i tr0_0 = _mm_unpacklo_epi16(res[0], res[1]);
-  const __m128i tr0_1 = _mm_unpackhi_epi16(res[0], res[1]);
-
-  res[0] = _mm_unpacklo_epi16(tr0_0, tr0_1);
-  res[1] = _mm_unpackhi_epi16(tr0_0, tr0_1);
-}
-
 void idct4_sse2(__m128i *in) {
   const __m128i k__cospi_p16_p16 = pair_set_epi16(cospi_16_64, cospi_16_64);
   const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
@@ -187,7 +179,7 @@
   const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
   __m128i u[8], v[8];
 
-  transpose_4x4(in);
+  array_transpose_4x4(in);
   // stage 1
   u[0] = _mm_unpacklo_epi16(in[0], in[1]);
   u[1] = _mm_unpackhi_epi16(in[0], in[1]);
@@ -225,7 +217,7 @@
   const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
   __m128i u[8], v[8], in7;
 
-  transpose_4x4(in);
+  array_transpose_4x4(in);
   in7 = _mm_srli_si128(in[1], 8);
   in7 = _mm_add_epi16(in7, in[0]);
   in7 = _mm_sub_epi16(in7, in[1]);
@@ -3518,7 +3510,7 @@
     test = _mm_movemask_epi8(temp_mm);
 
     if (test) {
-      transpose_4x4(inptr);
+      array_transpose_4x4(inptr);
       sign_bits[0] = _mm_cmplt_epi16(inptr[0], zero);
       sign_bits[1] = _mm_cmplt_epi16(inptr[1], zero);
       inptr[3] = _mm_unpackhi_epi16(inptr[1], sign_bits[1]);
diff --git a/aom_dsp/x86/inv_txfm_sse2.h b/aom_dsp/x86/inv_txfm_sse2.h
index 6a62508..f39b4d6 100644
--- a/aom_dsp/x86/inv_txfm_sse2.h
+++ b/aom_dsp/x86/inv_txfm_sse2.h
@@ -19,6 +19,14 @@
 #include "aom_dsp/x86/txfm_common_sse2.h"
 
 // perform 8x8 transpose
+static INLINE void array_transpose_4x4(__m128i *res) {
+  const __m128i tr0_0 = _mm_unpacklo_epi16(res[0], res[1]);
+  const __m128i tr0_1 = _mm_unpackhi_epi16(res[0], res[1]);
+
+  res[0] = _mm_unpacklo_epi16(tr0_0, tr0_1);
+  res[1] = _mm_unpackhi_epi16(tr0_0, tr0_1);
+}
+
 static INLINE void array_transpose_8x8(__m128i *in, __m128i *res) {
   const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]);
   const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]);
diff --git a/av1/av1_common.mk b/av1/av1_common.mk
index 5a8b420..5a283a9 100644
--- a/av1/av1_common.mk
+++ b/av1/av1_common.mk
@@ -89,6 +89,11 @@
 ifeq ($(CONFIG_CLPF),yes)
 AV1_COMMON_SRCS-yes += common/clpf.c
 AV1_COMMON_SRCS-yes += common/clpf.h
+AV1_COMMON_SRCS-yes += common/clpf_simd.h
+AV1_COMMON_SRCS-$(HAVE_SSE2) += common/clpf_sse2.c
+AV1_COMMON_SRCS-$(HAVE_SSSE3) += common/clpf_ssse3.c
+AV1_COMMON_SRCS-$(HAVE_SSE4_1) += common/clpf_sse4_1.c
+AV1_COMMON_SRCS-$(HAVE_NEON) += common/clpf_neon.c
 endif
 ifeq ($(CONFIG_DERING),yes)
 AV1_COMMON_SRCS-yes += common/od_dering.c
diff --git a/av1/av1_cx.mk b/av1/av1_cx.mk
index 80d5485..6461e4c 100644
--- a/av1/av1_cx.mk
+++ b/av1/av1_cx.mk
@@ -104,6 +104,11 @@
 ifeq ($(CONFIG_CLPF),yes)
 AV1_CX_SRCS-yes += encoder/clpf_rdo.c
 AV1_CX_SRCS-yes += encoder/clpf_rdo.h
+AV1_CX_SRCS-yes += encoder/clpf_rdo_simd.h
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/clpf_rdo_sse2.c
+AV1_CX_SRCS-$(HAVE_SSSE3) += encoder/clpf_rdo_ssse3.c
+AV1_CX_SRCS-$(HAVE_SSE4_1) += encoder/clpf_rdo_sse4_1.c
+AV1_CX_SRCS-$(HAVE_NEON) += encoder/clpf_rdo_neon.c
 endif
 AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/temporal_filter_apply_sse2.asm
 AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/quantize_sse2.c
diff --git a/av1/common/av1_rtcd_defs.pl b/av1/common/av1_rtcd_defs.pl
index 6f1d462..be23948 100644
--- a/av1/common/av1_rtcd_defs.pl
+++ b/av1/common/av1_rtcd_defs.pl
@@ -91,10 +91,10 @@
 
     if (aom_config("CONFIG_EXT_TX") eq "yes") {
       add_proto qw/void av1_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-      specialize qw/av1_iht4x8_32_add/;
+      specialize qw/av1_iht4x8_32_add sse2/;
 
       add_proto qw/void av1_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-      specialize qw/av1_iht8x4_32_add/;
+      specialize qw/av1_iht8x4_32_add sse2/;
 
       add_proto qw/void av1_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
       specialize qw/av1_iht8x16_128_add sse2/;
@@ -152,10 +152,10 @@
 
     if (aom_config("CONFIG_EXT_TX") eq "yes") {
       add_proto qw/void av1_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-      specialize qw/av1_iht4x8_32_add/;
+      specialize qw/av1_iht4x8_32_add sse2/;
 
       add_proto qw/void av1_iht8x4_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-      specialize qw/av1_iht8x4_32_add/;
+      specialize qw/av1_iht8x4_32_add sse2/;
 
       add_proto qw/void av1_iht8x16_128_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
       specialize qw/av1_iht8x16_128_add sse2/;
@@ -395,10 +395,10 @@
 
 if (aom_config("CONFIG_EXT_TX") eq "yes") {
   add_proto qw/void av1_fht4x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/av1_fht4x8/;
+  specialize qw/av1_fht4x8 sse2/;
 
   add_proto qw/void av1_fht8x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/av1_fht8x4/;
+  specialize qw/av1_fht8x4 sse2/;
 
   add_proto qw/void av1_fht8x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
   specialize qw/av1_fht8x16 sse2/;
diff --git a/av1/common/clpf.c b/av1/common/clpf.c
index 861dde6..388a7c9 100644
--- a/av1/common/clpf.c
+++ b/av1/common/clpf.c
@@ -9,6 +9,7 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 #include "av1/common/clpf.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_dsp/aom_dsp_common.h"
 
 int av1_clpf_maxbits(const AV1_COMMON *cm) {
@@ -27,9 +28,9 @@
   return (8 + delta - (delta < 0)) >> 4;
 }
 
-static void clpf_block(const uint8_t *src, uint8_t *dst, int stride, int x0,
-                       int y0, int sizex, int sizey, int width, int height,
-                       unsigned int strength) {
+void aom_clpf_block_c(const uint8_t *src, uint8_t *dst, int stride, int x0,
+                      int y0, int sizex, int sizey, int width, int height,
+                      unsigned int strength) {
   int x, y;
   for (y = y0; y < y0 + sizey; y++) {
     for (x = x0; x < x0 + sizex; x++) {
@@ -102,8 +103,8 @@
             if (!cm->mi_grid_visible[ypos / bs * cm->mi_stride + xpos / bs]
                      ->mbmi.skip) {
               // Not skip block, apply the filter
-              clpf_block(rec->y_buffer, dst->y_buffer, stride_y, xpos, ypos, bs,
-                         bs, width, height, strength);
+              aom_clpf_block(rec->y_buffer, dst->y_buffer, stride_y, xpos, ypos,
+                             bs, bs, width, height, strength);
             } else {  // Skip block, copy instead
               for (c = 0; c < bs; c++)
                 *(uint64_t *)(dst->y_buffer + (ypos + c) * stride_y + xpos) =
diff --git a/av1/common/clpf_neon.c b/av1/common/clpf_neon.c
new file mode 100644
index 0000000..f1a004c
--- /dev/null
+++ b/av1/common/clpf_neon.c
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include "aom_dsp/aom_simd.h"
+#define SIMD_FUNC(name) name##_neon
+#include "./clpf_simd.h"
diff --git a/av1/common/clpf_simd.h b/av1/common/clpf_simd.h
new file mode 100644
index 0000000..faaf8ea
--- /dev/null
+++ b/av1/common/clpf_simd.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include "./aom_dsp_rtcd.h"
+
+static void clpf_block(const uint8_t *src, uint8_t *dst, int stride, int x0,
+                       int y0, int sizey, int width, int height,
+                       unsigned int strength) {
+  dst += x0 + y0 * stride;
+  src += x0 + y0 * stride;
+  {
+    int bottom = height - 2 - y0;
+    const v128 sp = v128_dup_8(strength);
+    const v128 sm = v128_dup_8(-(int)strength);
+    const v128 c8 = v128_dup_8(8);
+    const v128 c128 = v128_dup_8(128);
+
+    if (!x0) {  // Clip left
+      const v128 b_shuff = v128_from_v64(v64_from_64(0x0d0c0b0a09080808LL),
+                                         v64_from_64(0x0504030201000000LL));
+      const v128 c_shuff = v128_from_v64(v64_from_64(0x0e0d0c0b0a090808LL),
+                                         v64_from_64(0x0605040302010000LL));
+      int y;
+
+      for (y = 0; y < sizey; y += 2) {
+        const v64 l1 = v64_load_aligned(src);
+        const v64 l2 = v64_load_aligned(src + stride);
+        v128 o = v128_from_v64(l1, l2);
+        const v128 x = v128_add_8(c128, o);
+        const v128 a = v128_add_8(
+            c128,
+            v128_from_v64(v64_load_aligned(src - (y != -y0) * stride), l1));
+        const v128 b = v128_shuffle_8(x, b_shuff);
+        const v128 c = v128_shuffle_8(x, c_shuff);
+        const v128 d = v128_add_8(
+            c128, v128_from_v64(v64_load_unaligned(src + 1),
+                                v64_load_unaligned(src + 1 + stride)));
+        const v128 e = v128_add_8(
+            c128, v128_from_v64(v64_load_unaligned(src + 2),
+                                v64_load_unaligned(src + 2 + stride)));
+        const v128 f = v128_add_8(
+            c128, v128_from_v64(l2, v64_load_aligned(
+                                        src + ((y != bottom) + 1) * stride)));
+
+        const v128 tmp =
+            v128_add_8(v128_max_s8(v128_min_s8(v128_ssub_s8(c, x), sp), sm),
+                       v128_max_s8(v128_min_s8(v128_ssub_s8(d, x), sp), sm));
+        const v128 delta = v128_add_8(
+            v128_add_8(
+                v128_shl_8(
+                    v128_add_8(
+                        v128_max_s8(v128_min_s8(v128_ssub_s8(a, x), sp), sm),
+                        v128_max_s8(v128_min_s8(v128_ssub_s8(f, x), sp), sm)),
+                    2),
+                v128_add_8(
+                    v128_max_s8(v128_min_s8(v128_ssub_s8(b, x), sp), sm),
+                    v128_max_s8(v128_min_s8(v128_ssub_s8(e, x), sp), sm))),
+            v128_add_8(v128_add_8(tmp, tmp), tmp));
+        o = v128_add_8(
+            o, v128_shr_s8(
+                   v128_add_8(c8, v128_add_8(delta, v128_cmplt_s8(
+                                                        delta, v128_zero()))),
+                   4));
+        v64_store_aligned(dst, v128_high_v64(o));
+        v64_store_aligned(dst + stride, v128_low_v64(o));
+        src += stride * 2;
+        dst += stride * 2;
+      }
+    } else if (!(width - x0 - 8)) {  // Clip right
+      const v128 d_shuff = v128_from_v64(v64_from_64(0x0f0f0e0d0c0b0a09LL),
+                                         v64_from_64(0x0707060504030201LL));
+      const v128 e_shuff = v128_from_v64(v64_from_64(0x0f0f0f0e0d0c0b0aLL),
+                                         v64_from_64(0x0707070605040302LL));
+      int y;
+
+      for (y = 0; y < sizey; y += 2) {
+        const v64 l1 = v64_load_aligned(src);
+        const v64 l2 = v64_load_aligned(src + stride);
+        v128 o = v128_from_v64(l1, l2);
+        const v128 x = v128_add_8(c128, o);
+        const v128 a = v128_add_8(
+            c128,
+            v128_from_v64(v64_load_aligned(src - (y != -y0) * stride), l1));
+        const v128 b = v128_add_8(
+            c128, v128_from_v64(v64_load_unaligned(src - 2),
+                                v64_load_unaligned(src - 2 + stride)));
+        const v128 c = v128_add_8(
+            c128, v128_from_v64(v64_load_unaligned(src - 1),
+                                v64_load_unaligned(src - 1 + stride)));
+        const v128 d = v128_shuffle_8(x, d_shuff);
+        const v128 e = v128_shuffle_8(x, e_shuff);
+        const v128 f = v128_add_8(
+            c128, v128_from_v64(l2, v64_load_aligned(
+                                        src + ((y != bottom) + 1) * stride)));
+
+        const v128 tmp =
+            v128_add_8(v128_max_s8(v128_min_s8(v128_ssub_s8(c, x), sp), sm),
+                       v128_max_s8(v128_min_s8(v128_ssub_s8(d, x), sp), sm));
+        const v128 delta = v128_add_8(
+            v128_add_8(
+                v128_shl_8(
+                    v128_add_8(
+                        v128_max_s8(v128_min_s8(v128_ssub_s8(a, x), sp), sm),
+                        v128_max_s8(v128_min_s8(v128_ssub_s8(f, x), sp), sm)),
+                    2),
+                v128_add_8(
+                    v128_max_s8(v128_min_s8(v128_ssub_s8(b, x), sp), sm),
+                    v128_max_s8(v128_min_s8(v128_ssub_s8(e, x), sp), sm))),
+            v128_add_8(v128_add_8(tmp, tmp), tmp));
+        o = v128_add_8(
+            o, v128_shr_s8(
+                   v128_add_8(c8, v128_add_8(delta, v128_cmplt_s8(
+                                                        delta, v128_zero()))),
+                   4));
+        v64_store_aligned(dst, v128_high_v64(o));
+        v64_store_aligned(dst + stride, v128_low_v64(o));
+        src += stride * 2;
+        dst += stride * 2;
+      }
+    } else {  // No left/right clipping
+      int y;
+      for (y = 0; y < sizey; y += 2) {
+        const v64 l1 = v64_load_aligned(src);
+        const v64 l2 = v64_load_aligned(src + stride);
+        v128 o = v128_from_v64(l1, l2);
+        const v128 x = v128_add_8(c128, o);
+        const v128 a = v128_add_8(
+            c128,
+            v128_from_v64(v64_load_aligned(src - (y != -y0) * stride), l1));
+        const v128 b = v128_add_8(
+            c128, v128_from_v64(v64_load_unaligned(src - 2),
+                                v64_load_unaligned(src - 2 + stride)));
+        const v128 c = v128_add_8(
+            c128, v128_from_v64(v64_load_unaligned(src - 1),
+                                v64_load_unaligned(src - 1 + stride)));
+        const v128 d = v128_add_8(
+            c128, v128_from_v64(v64_load_unaligned(src + 1),
+                                v64_load_unaligned(src + 1 + stride)));
+        const v128 e = v128_add_8(
+            c128, v128_from_v64(v64_load_unaligned(src + 2),
+                                v64_load_unaligned(src + 2 + stride)));
+        const v128 f = v128_add_8(
+            c128, v128_from_v64(l2, v64_load_aligned(
+                                        src + ((y != bottom) + 1) * stride)));
+
+        const v128 tmp =
+            v128_add_8(v128_max_s8(v128_min_s8(v128_ssub_s8(c, x), sp), sm),
+                       v128_max_s8(v128_min_s8(v128_ssub_s8(d, x), sp), sm));
+        const v128 delta = v128_add_8(
+            v128_add_8(
+                v128_shl_8(
+                    v128_add_8(
+                        v128_max_s8(v128_min_s8(v128_ssub_s8(a, x), sp), sm),
+                        v128_max_s8(v128_min_s8(v128_ssub_s8(f, x), sp), sm)),
+                    2),
+                v128_add_8(
+                    v128_max_s8(v128_min_s8(v128_ssub_s8(b, x), sp), sm),
+                    v128_max_s8(v128_min_s8(v128_ssub_s8(e, x), sp), sm))),
+            v128_add_8(v128_add_8(tmp, tmp), tmp));
+        o = v128_add_8(
+            o, v128_shr_s8(
+                   v128_add_8(c8, v128_add_8(delta, v128_cmplt_s8(
+                                                        delta, v128_zero()))),
+                   4));
+        v64_store_aligned(dst, v128_high_v64(o));
+        v64_store_aligned(dst + stride, v128_low_v64(o));
+        src += stride * 2;
+        dst += stride * 2;
+      }
+    }
+  }
+}
+
+void SIMD_FUNC(aom_clpf_block)(const uint8_t *src, uint8_t *dst, int stride,
+                               int x0, int y0, int sizex, int sizey, int width,
+                               int height, unsigned int strength) {
+  // TODO(stemidts):
+  // A sizex different from 8 will only be needed if CLPF is extended to chroma.
+  // This will only be used if 4:2:0 and width not a multiple of 16 and along
+  // the right edge only, so we can fall back to the plain C implementation in
+  // this case.  If not extended to chroma, this test will be redundant.
+  if (sizex != 8 || width < 16) {  // Fallback to C if frame width < 16
+    aom_clpf_block_c(src, dst, stride, x0, y0, sizex, sizey, width, height,
+                     strength);
+  } else {
+    clpf_block(src, dst, stride, x0, y0, sizey, width, height, strength);
+  }
+}
diff --git a/av1/common/clpf_sse2.c b/av1/common/clpf_sse2.c
new file mode 100644
index 0000000..e29c2ab
--- /dev/null
+++ b/av1/common/clpf_sse2.c
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include "aom_dsp/aom_simd.h"
+#define SIMD_FUNC(name) name##_sse2
+#include "./clpf_simd.h"
diff --git a/av1/common/clpf_sse4_1.c b/av1/common/clpf_sse4_1.c
new file mode 100644
index 0000000..537139f
--- /dev/null
+++ b/av1/common/clpf_sse4_1.c
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include "aom_dsp/aom_simd.h"
+#define SIMD_FUNC(name) name##_sse4_1
+#include "./clpf_simd.h"
diff --git a/av1/common/clpf_ssse3.c b/av1/common/clpf_ssse3.c
new file mode 100644
index 0000000..d7ed8de
--- /dev/null
+++ b/av1/common/clpf_ssse3.c
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include "aom_dsp/aom_simd.h"
+#define SIMD_FUNC(name) name##_ssse3
+#include "./clpf_simd.h"
diff --git a/av1/common/x86/idct_intrin_sse2.c b/av1/common/x86/idct_intrin_sse2.c
index 35d3b3b..e90be26 100644
--- a/av1/common/x86/idct_intrin_sse2.c
+++ b/av1/common/x86/idct_intrin_sse2.c
@@ -507,6 +507,25 @@
   in[7] = _mm_slli_epi16(in[7], 1);
 }
 
+static INLINE void iidtx4_sse2(__m128i *in) {
+  const __m128i v_scale_w = _mm_set1_epi16(Sqrt2);
+
+  const __m128i v_p0l_w = _mm_mullo_epi16(in[0], v_scale_w);
+  const __m128i v_p0h_w = _mm_mulhi_epi16(in[0], v_scale_w);
+  const __m128i v_p1l_w = _mm_mullo_epi16(in[1], v_scale_w);
+  const __m128i v_p1h_w = _mm_mulhi_epi16(in[1], v_scale_w);
+
+  const __m128i v_p0a_d = _mm_unpacklo_epi16(v_p0l_w, v_p0h_w);
+  const __m128i v_p0b_d = _mm_unpackhi_epi16(v_p0l_w, v_p0h_w);
+  const __m128i v_p1a_d = _mm_unpacklo_epi16(v_p1l_w, v_p1h_w);
+  const __m128i v_p1b_d = _mm_unpackhi_epi16(v_p1l_w, v_p1h_w);
+
+  in[0] = _mm_packs_epi32(xx_roundn_epi32_unsigned(v_p0a_d, DCT_CONST_BITS),
+                          xx_roundn_epi32_unsigned(v_p0b_d, DCT_CONST_BITS));
+  in[1] = _mm_packs_epi32(xx_roundn_epi32_unsigned(v_p1a_d, DCT_CONST_BITS),
+                          xx_roundn_epi32_unsigned(v_p1b_d, DCT_CONST_BITS));
+}
+
 // load 8x8 array
 static INLINE void flip_buffer_lr_8x8(__m128i *in) {
   in[0] = mm_reverse_epi16(in[0]);
@@ -519,6 +538,39 @@
   in[7] = mm_reverse_epi16(in[7]);
 }
 
+static INLINE void scale_sqrt2_8x4(__m128i *in) {
+  // Implements 'ROUND_POWER_OF_TWO(input * Sqrt2, DCT_CONST_BITS)'
+  // for each element
+  const __m128i v_scale_w = _mm_set1_epi16(Sqrt2);
+
+  const __m128i v_p0l_w = _mm_mullo_epi16(in[0], v_scale_w);
+  const __m128i v_p0h_w = _mm_mulhi_epi16(in[0], v_scale_w);
+  const __m128i v_p1l_w = _mm_mullo_epi16(in[1], v_scale_w);
+  const __m128i v_p1h_w = _mm_mulhi_epi16(in[1], v_scale_w);
+  const __m128i v_p2l_w = _mm_mullo_epi16(in[2], v_scale_w);
+  const __m128i v_p2h_w = _mm_mulhi_epi16(in[2], v_scale_w);
+  const __m128i v_p3l_w = _mm_mullo_epi16(in[3], v_scale_w);
+  const __m128i v_p3h_w = _mm_mulhi_epi16(in[3], v_scale_w);
+
+  const __m128i v_p0a_d = _mm_unpacklo_epi16(v_p0l_w, v_p0h_w);
+  const __m128i v_p0b_d = _mm_unpackhi_epi16(v_p0l_w, v_p0h_w);
+  const __m128i v_p1a_d = _mm_unpacklo_epi16(v_p1l_w, v_p1h_w);
+  const __m128i v_p1b_d = _mm_unpackhi_epi16(v_p1l_w, v_p1h_w);
+  const __m128i v_p2a_d = _mm_unpacklo_epi16(v_p2l_w, v_p2h_w);
+  const __m128i v_p2b_d = _mm_unpackhi_epi16(v_p2l_w, v_p2h_w);
+  const __m128i v_p3a_d = _mm_unpacklo_epi16(v_p3l_w, v_p3h_w);
+  const __m128i v_p3b_d = _mm_unpackhi_epi16(v_p3l_w, v_p3h_w);
+
+  in[0] = _mm_packs_epi32(xx_roundn_epi32_unsigned(v_p0a_d, DCT_CONST_BITS),
+                          xx_roundn_epi32_unsigned(v_p0b_d, DCT_CONST_BITS));
+  in[1] = _mm_packs_epi32(xx_roundn_epi32_unsigned(v_p1a_d, DCT_CONST_BITS),
+                          xx_roundn_epi32_unsigned(v_p1b_d, DCT_CONST_BITS));
+  in[2] = _mm_packs_epi32(xx_roundn_epi32_unsigned(v_p2a_d, DCT_CONST_BITS),
+                          xx_roundn_epi32_unsigned(v_p2b_d, DCT_CONST_BITS));
+  in[3] = _mm_packs_epi32(xx_roundn_epi32_unsigned(v_p3a_d, DCT_CONST_BITS),
+                          xx_roundn_epi32_unsigned(v_p3b_d, DCT_CONST_BITS));
+}
+
 static INLINE void scale_sqrt2_8x8(__m128i *in) {
   // Implements 'ROUND_POWER_OF_TWO_SIGNED(input * Sqrt2, DCT_CONST_BITS)'
   // for each element
@@ -835,4 +887,319 @@
     default: assert(0); break;
   }
 }
+
+static INLINE void write_buffer_8x4_round5(uint8_t *dest, __m128i *in,
+                                           int stride) {
+  const __m128i final_rounding = _mm_set1_epi16(1 << 4);
+  const __m128i zero = _mm_setzero_si128();
+  // Final rounding and shift
+  in[0] = _mm_adds_epi16(in[0], final_rounding);
+  in[1] = _mm_adds_epi16(in[1], final_rounding);
+  in[2] = _mm_adds_epi16(in[2], final_rounding);
+  in[3] = _mm_adds_epi16(in[3], final_rounding);
+
+  in[0] = _mm_srai_epi16(in[0], 5);
+  in[1] = _mm_srai_epi16(in[1], 5);
+  in[2] = _mm_srai_epi16(in[2], 5);
+  in[3] = _mm_srai_epi16(in[3], 5);
+
+  RECON_AND_STORE(dest + 0 * stride, in[0]);
+  RECON_AND_STORE(dest + 1 * stride, in[1]);
+  RECON_AND_STORE(dest + 2 * stride, in[2]);
+  RECON_AND_STORE(dest + 3 * stride, in[3]);
+}
+
+void av1_iht8x4_32_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
+                            int tx_type) {
+  __m128i in[8];
+
+  in[0] = load_input_data(input + 0 * 8);
+  in[1] = load_input_data(input + 1 * 8);
+  in[2] = load_input_data(input + 2 * 8);
+  in[3] = load_input_data(input + 3 * 8);
+
+  // Row transform
+  switch (tx_type) {
+    case DCT_DCT:
+    case ADST_DCT:
+    case FLIPADST_DCT:
+    case H_DCT: idct8_sse2(in); break;
+    case DCT_ADST:
+    case ADST_ADST:
+    case DCT_FLIPADST:
+    case FLIPADST_FLIPADST:
+    case ADST_FLIPADST:
+    case FLIPADST_ADST:
+    case H_ADST:
+    case H_FLIPADST: iadst8_sse2(in); break;
+    case V_FLIPADST:
+    case V_ADST:
+    case V_DCT:
+    case IDTX:
+      iidtx8_sse2(in);
+      array_transpose_8x8(in, in);
+      break;
+    default: assert(0); break;
+  }
+
+  scale_sqrt2_8x8(in);
+
+  // Repack data. We pack into the bottom half of 'in'
+  // so that the next repacking stage can pack into the
+  // top half without overwriting anything
+  in[7] = _mm_unpacklo_epi64(in[6], in[7]);
+  in[6] = _mm_unpacklo_epi64(in[4], in[5]);
+  in[5] = _mm_unpacklo_epi64(in[2], in[3]);
+  in[4] = _mm_unpacklo_epi64(in[0], in[1]);
+
+  // Column transform
+  switch (tx_type) {
+    case DCT_DCT:
+    case DCT_ADST:
+    case DCT_FLIPADST:
+    case V_DCT:
+      idct4_sse2(in + 4);
+      idct4_sse2(in + 6);
+      break;
+    case ADST_DCT:
+    case ADST_ADST:
+    case FLIPADST_ADST:
+    case ADST_FLIPADST:
+    case FLIPADST_FLIPADST:
+    case FLIPADST_DCT:
+    case V_ADST:
+    case V_FLIPADST:
+      iadst4_sse2(in + 4);
+      iadst4_sse2(in + 6);
+      break;
+    case H_DCT:
+    case H_ADST:
+    case H_FLIPADST:
+    case IDTX:
+      iidtx4_sse2(in + 4);
+      array_transpose_4x4(in + 4);
+      iidtx4_sse2(in + 6);
+      array_transpose_4x4(in + 6);
+      break;
+    default: assert(0); break;
+  }
+
+  // Repack data
+  in[0] = _mm_unpacklo_epi64(in[4], in[6]);
+  in[1] = _mm_unpackhi_epi64(in[4], in[6]);
+  in[2] = _mm_unpacklo_epi64(in[5], in[7]);
+  in[3] = _mm_unpackhi_epi64(in[5], in[7]);
+
+  switch (tx_type) {
+    case DCT_DCT:
+    case ADST_DCT:
+    case H_DCT:
+    case DCT_ADST:
+    case ADST_ADST:
+    case H_ADST:
+    case V_ADST:
+    case V_DCT:
+    case IDTX: break;
+    case FLIPADST_DCT:
+    case FLIPADST_ADST:
+    case V_FLIPADST: FLIPUD_PTR(dest, stride, 4); break;
+    case DCT_FLIPADST:
+    case ADST_FLIPADST:
+    case H_FLIPADST:
+      in[0] = mm_reverse_epi16(in[0]);
+      in[1] = mm_reverse_epi16(in[1]);
+      in[2] = mm_reverse_epi16(in[2]);
+      in[3] = mm_reverse_epi16(in[3]);
+      break;
+    case FLIPADST_FLIPADST:
+      in[0] = mm_reverse_epi16(in[0]);
+      in[1] = mm_reverse_epi16(in[1]);
+      in[2] = mm_reverse_epi16(in[2]);
+      in[3] = mm_reverse_epi16(in[3]);
+      FLIPUD_PTR(dest, stride, 4);
+      break;
+    default: assert(0); break;
+  }
+  write_buffer_8x4_round5(dest, in, stride);
+}
+
+static INLINE void write_buffer_4x8_round5(uint8_t *dest, __m128i *in,
+                                           int stride) {
+  const __m128i final_rounding = _mm_set1_epi16(1 << 4);
+  const __m128i zero = _mm_setzero_si128();
+  // Final rounding and shift
+  in[0] = _mm_adds_epi16(in[0], final_rounding);
+  in[1] = _mm_adds_epi16(in[1], final_rounding);
+  in[2] = _mm_adds_epi16(in[2], final_rounding);
+  in[3] = _mm_adds_epi16(in[3], final_rounding);
+
+  in[0] = _mm_srai_epi16(in[0], 5);
+  in[1] = _mm_srai_epi16(in[1], 5);
+  in[2] = _mm_srai_epi16(in[2], 5);
+  in[3] = _mm_srai_epi16(in[3], 5);
+
+  // Reconstruction and Store
+  {
+    __m128i d0 = _mm_cvtsi32_si128(*(const int *)(dest + stride * 0));
+    __m128i d1 = _mm_cvtsi32_si128(*(const int *)(dest + stride * 1));
+    __m128i d2 = _mm_cvtsi32_si128(*(const int *)(dest + stride * 2));
+    __m128i d3 = _mm_cvtsi32_si128(*(const int *)(dest + stride * 3));
+    __m128i d4 = _mm_cvtsi32_si128(*(const int *)(dest + stride * 4));
+    __m128i d5 = _mm_cvtsi32_si128(*(const int *)(dest + stride * 5));
+    __m128i d6 = _mm_cvtsi32_si128(*(const int *)(dest + stride * 6));
+    __m128i d7 = _mm_cvtsi32_si128(*(const int *)(dest + stride * 7));
+
+    d0 = _mm_unpacklo_epi32(d0, d1);
+    d2 = _mm_unpacklo_epi32(d2, d3);
+    d4 = _mm_unpacklo_epi32(d4, d5);
+    d6 = _mm_unpacklo_epi32(d6, d7);
+    d0 = _mm_unpacklo_epi8(d0, zero);
+    d2 = _mm_unpacklo_epi8(d2, zero);
+    d4 = _mm_unpacklo_epi8(d4, zero);
+    d6 = _mm_unpacklo_epi8(d6, zero);
+    d0 = _mm_add_epi16(d0, in[0]);
+    d2 = _mm_add_epi16(d2, in[1]);
+    d4 = _mm_add_epi16(d4, in[2]);
+    d6 = _mm_add_epi16(d6, in[3]);
+
+    d0 = _mm_packus_epi16(d0, d2);
+    *(int *)dest = _mm_cvtsi128_si32(d0);
+    d0 = _mm_srli_si128(d0, 4);
+    *(int *)(dest + stride) = _mm_cvtsi128_si32(d0);
+    d0 = _mm_srli_si128(d0, 4);
+    *(int *)(dest + stride * 2) = _mm_cvtsi128_si32(d0);
+    d0 = _mm_srli_si128(d0, 4);
+    *(int *)(dest + stride * 3) = _mm_cvtsi128_si32(d0);
+    d0 = _mm_packus_epi16(d4, d6);
+    *(int *)(dest + stride * 4) = _mm_cvtsi128_si32(d0);
+    d0 = _mm_srli_si128(d0, 4);
+    *(int *)(dest + stride * 5) = _mm_cvtsi128_si32(d0);
+    d0 = _mm_srli_si128(d0, 4);
+    *(int *)(dest + stride * 6) = _mm_cvtsi128_si32(d0);
+    d0 = _mm_srli_si128(d0, 4);
+    *(int *)(dest + stride * 7) = _mm_cvtsi128_si32(d0);
+  }
+}
+
+void av1_iht4x8_32_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
+                            int tx_type) {
+  __m128i in[8];
+
+  // Load rows, packed two per element of 'in'.
+  // We pack into the bottom half of 'in' so that the
+  // later repacking stage can pack into the
+  // top half without overwriting anything
+  in[4] = load_input_data(input + 0 * 8);
+  in[5] = load_input_data(input + 1 * 8);
+  in[6] = load_input_data(input + 2 * 8);
+  in[7] = load_input_data(input + 3 * 8);
+
+  scale_sqrt2_8x4(in + 4);
+
+  // Row transform
+  switch (tx_type) {
+    case DCT_DCT:
+    case ADST_DCT:
+    case FLIPADST_DCT:
+    case H_DCT:
+      idct4_sse2(in + 4);
+      idct4_sse2(in + 6);
+      break;
+    case DCT_ADST:
+    case ADST_ADST:
+    case DCT_FLIPADST:
+    case FLIPADST_FLIPADST:
+    case ADST_FLIPADST:
+    case FLIPADST_ADST:
+    case H_ADST:
+    case H_FLIPADST:
+      iadst4_sse2(in + 4);
+      iadst4_sse2(in + 6);
+      break;
+    case V_FLIPADST:
+    case V_ADST:
+    case V_DCT:
+    case IDTX:
+      iidtx4_sse2(in + 4);
+      array_transpose_4x4(in + 4);
+      iidtx4_sse2(in + 6);
+      array_transpose_4x4(in + 6);
+      break;
+    default: assert(0); break;
+  }
+
+  // Repack data
+  in[0] = _mm_unpacklo_epi64(in[4], in[6]);
+  in[1] = _mm_unpackhi_epi64(in[4], in[6]);
+  in[2] = _mm_unpacklo_epi64(in[5], in[7]);
+  in[3] = _mm_unpackhi_epi64(in[5], in[7]);
+
+  // Column transform
+  switch (tx_type) {
+    case DCT_DCT:
+    case DCT_ADST:
+    case DCT_FLIPADST:
+    case V_DCT: idct8_sse2(in); break;
+    case ADST_DCT:
+    case ADST_ADST:
+    case FLIPADST_ADST:
+    case ADST_FLIPADST:
+    case FLIPADST_FLIPADST:
+    case FLIPADST_DCT:
+    case V_ADST:
+    case V_FLIPADST: iadst8_sse2(in); break;
+    case H_DCT:
+    case H_ADST:
+    case H_FLIPADST:
+    case IDTX:
+      iidtx8_sse2(in);
+      array_transpose_8x8(in, in);
+      break;
+    default: assert(0); break;
+  }
+
+  switch (tx_type) {
+    case DCT_DCT:
+    case ADST_DCT:
+    case H_DCT:
+    case DCT_ADST:
+    case ADST_ADST:
+    case H_ADST:
+    case V_ADST:
+    case V_DCT:
+    case IDTX: break;
+    case FLIPADST_DCT:
+    case FLIPADST_ADST:
+    case V_FLIPADST: FLIPUD_PTR(dest, stride, 8); break;
+    case DCT_FLIPADST:
+    case ADST_FLIPADST:
+    case H_FLIPADST:
+      in[0] = _mm_shufflelo_epi16(in[0], 0x1b);
+      in[1] = _mm_shufflelo_epi16(in[1], 0x1b);
+      in[2] = _mm_shufflelo_epi16(in[2], 0x1b);
+      in[3] = _mm_shufflelo_epi16(in[3], 0x1b);
+      in[4] = _mm_shufflelo_epi16(in[4], 0x1b);
+      in[5] = _mm_shufflelo_epi16(in[5], 0x1b);
+      in[6] = _mm_shufflelo_epi16(in[6], 0x1b);
+      in[7] = _mm_shufflelo_epi16(in[7], 0x1b);
+      break;
+    case FLIPADST_FLIPADST:
+      in[0] = _mm_shufflelo_epi16(in[0], 0x1b);
+      in[1] = _mm_shufflelo_epi16(in[1], 0x1b);
+      in[2] = _mm_shufflelo_epi16(in[2], 0x1b);
+      in[3] = _mm_shufflelo_epi16(in[3], 0x1b);
+      in[4] = _mm_shufflelo_epi16(in[4], 0x1b);
+      in[5] = _mm_shufflelo_epi16(in[5], 0x1b);
+      in[6] = _mm_shufflelo_epi16(in[6], 0x1b);
+      in[7] = _mm_shufflelo_epi16(in[7], 0x1b);
+      FLIPUD_PTR(dest, stride, 8);
+      break;
+    default: assert(0); break;
+  }
+  in[0] = _mm_unpacklo_epi64(in[0], in[1]);
+  in[1] = _mm_unpacklo_epi64(in[2], in[3]);
+  in[2] = _mm_unpacklo_epi64(in[4], in[5]);
+  in[3] = _mm_unpacklo_epi64(in[6], in[7]);
+  write_buffer_4x8_round5(dest, in, stride);
+}
 #endif  // CONFIG_EXT_TX
diff --git a/av1/encoder/clpf_rdo.c b/av1/encoder/clpf_rdo.c
index 7710de4..8639add 100644
--- a/av1/encoder/clpf_rdo.c
+++ b/av1/encoder/clpf_rdo.c
@@ -10,24 +10,25 @@
  */
 
 #include "av1/common/clpf.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom/aom_integer.h"
 #include "av1/common/quant_common.h"
 
 // Calculate the error of a filtered and unfiltered block
-static void detect_clpf(const uint8_t *rec, const uint8_t *org, int x0, int y0,
-                        int width, int height, int so, int stride, int *sum0,
-                        int *sum1, unsigned int strength) {
+void aom_clpf_detect_c(const uint8_t *rec, const uint8_t *org, int rstride,
+                       int ostride, int x0, int y0, int width, int height,
+                       int *sum0, int *sum1, unsigned int strength) {
   int x, y;
   for (y = y0; y < y0 + 8; y++) {
     for (x = x0; x < x0 + 8; x++) {
-      int O = org[y * so + x];
-      int X = rec[y * stride + x];
-      int A = rec[AOMMAX(0, y - 1) * stride + x];
-      int B = rec[y * stride + AOMMAX(0, x - 2)];
-      int C = rec[y * stride + AOMMAX(0, x - 1)];
-      int D = rec[y * stride + AOMMIN(width - 1, x + 1)];
-      int E = rec[y * stride + AOMMIN(width - 1, x + 2)];
-      int F = rec[AOMMIN(height - 1, y + 1) * stride + x];
+      int O = org[y * ostride + x];
+      int X = rec[y * rstride + x];
+      int A = rec[AOMMAX(0, y - 1) * rstride + x];
+      int B = rec[y * rstride + AOMMAX(0, x - 2)];
+      int C = rec[y * rstride + AOMMAX(0, x - 1)];
+      int D = rec[y * rstride + AOMMIN(width - 1, x + 1)];
+      int E = rec[y * rstride + AOMMIN(width - 1, x + 2)];
+      int F = rec[AOMMIN(height - 1, y + 1) * rstride + x];
       int delta = av1_clpf_sample(X, A, B, C, D, E, F, strength);
       int Y = X + delta;
       *sum0 += (O - X) * (O - X);
@@ -36,21 +37,21 @@
   }
 }
 
-static void detect_multi_clpf(const uint8_t *rec, const uint8_t *org, int x0,
-                              int y0, int width, int height, int so, int stride,
-                              int *sum) {
+void aom_clpf_detect_multi_c(const uint8_t *rec, const uint8_t *org,
+                             int rstride, int ostride, int x0, int y0,
+                             int width, int height, int *sum) {
   int x, y;
 
   for (y = y0; y < y0 + 8; y++) {
     for (x = x0; x < x0 + 8; x++) {
-      int O = org[y * so + x];
-      int X = rec[y * stride + x];
-      int A = rec[AOMMAX(0, y - 1) * stride + x];
-      int B = rec[y * stride + AOMMAX(0, x - 2)];
-      int C = rec[y * stride + AOMMAX(0, x - 1)];
-      int D = rec[y * stride + AOMMIN(width - 1, x + 1)];
-      int E = rec[y * stride + AOMMIN(width - 1, x + 2)];
-      int F = rec[AOMMIN(height - 1, y + 1) * stride + x];
+      int O = org[y * ostride + x];
+      int X = rec[y * rstride + x];
+      int A = rec[AOMMAX(0, y - 1) * rstride + x];
+      int B = rec[y * rstride + AOMMAX(0, x - 2)];
+      int C = rec[y * rstride + AOMMAX(0, x - 1)];
+      int D = rec[y * rstride + AOMMIN(width - 1, x + 1)];
+      int E = rec[y * rstride + AOMMIN(width - 1, x + 2)];
+      int F = rec[AOMMIN(height - 1, y + 1) * rstride + x];
       int delta1 = av1_clpf_sample(X, A, B, C, D, E, F, 1);
       int delta2 = av1_clpf_sample(X, A, B, C, D, E, F, 2);
       int delta3 = av1_clpf_sample(X, A, B, C, D, E, F, 4);
@@ -77,9 +78,9 @@
       const int bs = MAX_MIB_SIZE;
       if (!cm->mi_grid_visible[ypos / bs * cm->mi_stride + xpos / bs]
                ->mbmi.skip)
-        detect_clpf(rec->y_buffer, org->y_buffer, xpos, ypos, rec->y_crop_width,
-                    rec->y_crop_height, org->y_stride, rec->y_stride, &sum0,
-                    &sum1, strength);
+        aom_clpf_detect(rec->y_buffer, org->y_buffer, rec->y_stride,
+                        org->y_stride, xpos, ypos, rec->y_crop_width,
+                        rec->y_crop_height, &sum0, &sum1, strength);
     }
   }
   *res = sum1 < sum0;
@@ -144,9 +145,9 @@
       if (!cm->mi_grid_visible[ypos / MAX_MIB_SIZE * cm->mi_stride +
                                xpos / MAX_MIB_SIZE]
                ->mbmi.skip) {
-        detect_multi_clpf(rec->y_buffer, org->y_buffer, xpos, ypos,
-                          rec->y_crop_width, rec->y_crop_height, org->y_stride,
-                          rec->y_stride, sum);
+        aom_clpf_detect_multi(rec->y_buffer, org->y_buffer, rec->y_stride,
+                              org->y_stride, xpos, ypos, rec->y_crop_width,
+                              rec->y_crop_height, sum);
         filtered = 1;
       }
     }
diff --git a/av1/encoder/clpf_rdo_neon.c b/av1/encoder/clpf_rdo_neon.c
new file mode 100644
index 0000000..02053c5
--- /dev/null
+++ b/av1/encoder/clpf_rdo_neon.c
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include "aom_dsp/aom_simd.h"
+#define SIMD_FUNC(name) name##_neon
+#include "./clpf_rdo_simd.h"
diff --git a/av1/encoder/clpf_rdo_simd.h b/av1/encoder/clpf_rdo_simd.h
new file mode 100644
index 0000000..abbbe7c
--- /dev/null
+++ b/av1/encoder/clpf_rdo_simd.h
@@ -0,0 +1,504 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include "aom_dsp/aom_simd.h"
+
+void SIMD_FUNC(aom_clpf_detect)(const uint8_t *rec, const uint8_t *org,
+                                int rstride, int ostride, int x0, int y0,
+                                int width, int height, int *sum0, int *sum1,
+                                unsigned int strength) {
+  ssd128_internal ssd0 = v128_ssd_u8_init();
+  ssd128_internal ssd1 = v128_ssd_u8_init();
+  const v128 c128 = v128_dup_8(128);
+  const v128 sp = v128_dup_8(strength);
+  const v128 sm = v128_dup_8(-(int)strength);
+  const int bottom = height - 2 - y0;
+
+  rec += x0 + y0 * rstride;
+  org += x0 + y0 * ostride;
+
+  if (!x0) {  // Clip left
+    const v128 b_shuff = v128_from_v64(v64_from_64(0x0d0c0b0a09080808LL),
+                                       v64_from_64(0x0504030201000000LL));
+    const v128 c_shuff = v128_from_v64(v64_from_64(0x0e0d0c0b0a090808LL),
+                                       v64_from_64(0x0605040302010000LL));
+    int y;
+
+    for (y = 0; y < 8; y += 2) {
+      const v64 k1 = v64_load_aligned(org);
+      const v64 k2 = v64_load_aligned(org + ostride);
+      const v64 l1 = v64_load_aligned(rec);
+      const v64 l2 = v64_load_aligned(rec + rstride);
+      v128 o = v128_from_v64(k1, k2);
+      const v128 q = v128_from_v64(l1, l2);
+      const v128 x = v128_add_8(c128, q);
+      const v128 a = v128_add_8(
+          c128,
+          v128_from_v64(v64_load_aligned(rec - (y != -y0) * rstride), l1));
+      const v128 b = v128_shuffle_8(x, b_shuff);
+      const v128 c = v128_shuffle_8(x, c_shuff);
+      const v128 d = v128_add_8(
+          c128, v128_from_v64(v64_load_unaligned(rec + 1),
+                              v64_load_unaligned(rec + 1 + rstride)));
+      const v128 e = v128_add_8(
+          c128, v128_from_v64(v64_load_unaligned(rec + 2),
+                              v64_load_unaligned(rec + 2 + rstride)));
+      const v128 f = v128_add_8(
+          c128, v128_from_v64(
+                    l2, v64_load_aligned(rec + ((y != bottom) + 1) * rstride)));
+
+      const v128 tmp =
+          v128_add_8(v128_max_s8(v128_min_s8(v128_ssub_s8(c, x), sp), sm),
+                     v128_max_s8(v128_min_s8(v128_ssub_s8(d, x), sp), sm));
+      v128 delta = v128_add_8(
+          v128_add_8(
+              v128_shl_8(
+                  v128_add_8(
+                      v128_max_s8(v128_min_s8(v128_ssub_s8(a, x), sp), sm),
+                      v128_max_s8(v128_min_s8(v128_ssub_s8(f, x), sp), sm)),
+                  2),
+              v128_add_8(v128_max_s8(v128_min_s8(v128_ssub_s8(b, x), sp), sm),
+                         v128_max_s8(v128_min_s8(v128_ssub_s8(e, x), sp), sm))),
+          v128_add_8(v128_add_8(tmp, tmp), tmp));
+
+      delta = v128_shr_s8(
+          v128_add_8(v128_dup_8(8),
+                     v128_add_8(delta, v128_cmplt_s8(delta, v128_zero()))),
+          4);
+      ssd0 = v128_ssd_u8(ssd0, o, q);
+      ssd1 = v128_ssd_u8(ssd1, o, v128_add_8(q, delta));
+      rec += rstride * 2;
+      org += ostride * 2;
+    }
+  } else if (!(width - x0 - 8)) {  // Clip right
+    const v128 d_shuff = v128_from_v64(v64_from_64(0x0f0f0e0d0c0b0a09LL),
+                                       v64_from_64(0x0707060504030201LL));
+    const v128 e_shuff = v128_from_v64(v64_from_64(0x0f0f0f0e0d0c0b0aLL),
+                                       v64_from_64(0x0707070605040302LL));
+    int y;
+
+    for (y = 0; y < 8; y += 2) {
+      const v64 k1 = v64_load_aligned(org);
+      const v64 k2 = v64_load_aligned(org + ostride);
+      const v64 l1 = v64_load_aligned(rec);
+      const v64 l2 = v64_load_aligned(rec + rstride);
+      v128 o = v128_from_v64(k1, k2);
+      const v128 q = v128_from_v64(l1, l2);
+      const v128 x = v128_add_8(c128, q);
+      const v128 a = v128_add_8(
+          c128,
+          v128_from_v64(v64_load_aligned(rec - (y != -y0) * rstride), l1));
+      const v128 b = v128_add_8(
+          c128, v128_from_v64(v64_load_unaligned(rec - 2),
+                              v64_load_unaligned(rec - 2 + rstride)));
+      const v128 c = v128_add_8(
+          c128, v128_from_v64(v64_load_unaligned(rec - 1),
+                              v64_load_unaligned(rec - 1 + rstride)));
+      const v128 d = v128_shuffle_8(x, d_shuff);
+      const v128 e = v128_shuffle_8(x, e_shuff);
+      const v128 f = v128_add_8(
+          c128, v128_from_v64(
+                    l2, v64_load_aligned(rec + ((y != bottom) + 1) * rstride)));
+
+      const v128 tmp =
+          v128_add_8(v128_max_s8(v128_min_s8(v128_ssub_s8(c, x), sp), sm),
+                     v128_max_s8(v128_min_s8(v128_ssub_s8(d, x), sp), sm));
+      v128 delta = v128_add_8(
+          v128_add_8(
+              v128_shl_8(
+                  v128_add_8(
+                      v128_max_s8(v128_min_s8(v128_ssub_s8(a, x), sp), sm),
+                      v128_max_s8(v128_min_s8(v128_ssub_s8(f, x), sp), sm)),
+                  2),
+              v128_add_8(v128_max_s8(v128_min_s8(v128_ssub_s8(b, x), sp), sm),
+                         v128_max_s8(v128_min_s8(v128_ssub_s8(e, x), sp), sm))),
+          v128_add_8(v128_add_8(tmp, tmp), tmp));
+      delta = v128_shr_s8(
+          v128_add_8(v128_dup_8(8),
+                     v128_add_8(delta, v128_cmplt_s8(delta, v128_zero()))),
+          4);
+      ssd0 = v128_ssd_u8(ssd0, o, q);
+      ssd1 = v128_ssd_u8(ssd1, o, v128_add_8(q, delta));
+      rec += rstride * 2;
+      org += ostride * 2;
+    }
+  } else {  // No left/right clipping
+    int y;
+    for (y = 0; y < 8; y += 2) {
+      const v64 k1 = v64_load_aligned(org);
+      const v64 k2 = v64_load_aligned(org + ostride);
+      const v64 l1 = v64_load_aligned(rec);
+      const v64 l2 = v64_load_aligned(rec + rstride);
+      v128 o = v128_from_v64(k1, k2);
+      const v128 q = v128_from_v64(l1, l2);
+      const v128 x = v128_add_8(c128, q);
+      const v128 a = v128_add_8(
+          c128,
+          v128_from_v64(v64_load_aligned(rec - (y != -y0) * rstride), l1));
+      const v128 b = v128_add_8(
+          c128, v128_from_v64(v64_load_unaligned(rec - 2),
+                              v64_load_unaligned(rec - 2 + rstride)));
+      const v128 c = v128_add_8(
+          c128, v128_from_v64(v64_load_unaligned(rec - 1),
+                              v64_load_unaligned(rec - 1 + rstride)));
+      const v128 d = v128_add_8(
+          c128, v128_from_v64(v64_load_unaligned(rec + 1),
+                              v64_load_unaligned(rec + 1 + rstride)));
+      const v128 e = v128_add_8(
+          c128, v128_from_v64(v64_load_unaligned(rec + 2),
+                              v64_load_unaligned(rec + 2 + rstride)));
+      const v128 f = v128_add_8(
+          c128, v128_from_v64(
+                    l2, v64_load_aligned(rec + ((y != bottom) + 1) * rstride)));
+
+      const v128 tmp =
+          v128_add_8(v128_max_s8(v128_min_s8(v128_ssub_s8(c, x), sp), sm),
+                     v128_max_s8(v128_min_s8(v128_ssub_s8(d, x), sp), sm));
+      v128 delta = v128_add_8(
+          v128_add_8(
+              v128_shl_8(
+                  v128_add_8(
+                      v128_max_s8(v128_min_s8(v128_ssub_s8(a, x), sp), sm),
+                      v128_max_s8(v128_min_s8(v128_ssub_s8(f, x), sp), sm)),
+                  2),
+              v128_add_8(v128_max_s8(v128_min_s8(v128_ssub_s8(b, x), sp), sm),
+                         v128_max_s8(v128_min_s8(v128_ssub_s8(e, x), sp), sm))),
+          v128_add_8(v128_add_8(tmp, tmp), tmp));
+      delta = v128_shr_s8(
+          v128_add_8(v128_dup_8(8),
+                     v128_add_8(delta, v128_cmplt_s8(delta, v128_zero()))),
+          4);
+
+      ssd0 = v128_ssd_u8(ssd0, o, q);
+      ssd1 = v128_ssd_u8(ssd1, o, v128_add_8(q, delta));
+      rec += rstride * 2;
+      org += ostride * 2;
+    }
+  }
+  *sum0 += v128_ssd_u8_sum(ssd0);
+  *sum1 += v128_ssd_u8_sum(ssd1);
+}
+
+// Test multiple filter strengths at once.  Use a simpler filter (4 tap, every
+// second line).
+void SIMD_FUNC(aom_clpf_detect_multi)(const uint8_t *rec, const uint8_t *org,
+                                      int rstride, int ostride, int x0, int y0,
+                                      int width, int height, int *sum) {
+  const v128 c128 = v128_dup_8(128);
+  const v128 cp1 = v128_dup_8(1);
+  const v128 cm1 = v128_dup_8(-1);
+  const v128 cp2 = v128_dup_8(2);
+  const v128 cm2 = v128_dup_8(-2);
+  const v128 cp4 = v128_dup_8(4);
+  const v128 cm4 = v128_dup_8(-4);
+  const v128 c8 = v128_dup_8(8);
+  const int bottom = height - 2 - y0;
+  ssd128_internal ssd0 = v128_ssd_u8_init();
+  ssd128_internal ssd1 = v128_ssd_u8_init();
+  ssd128_internal ssd2 = v128_ssd_u8_init();
+  ssd128_internal ssd3 = v128_ssd_u8_init();
+
+  rec += x0 + y0 * rstride;
+  org += x0 + y0 * ostride;
+
+  if (!x0) {  // Clip left
+    const v128 b_shuff = v128_from_v64(v64_from_64(0x0d0c0b0a09080808LL),
+                                       v64_from_64(0x0504030201000000LL));
+    const v128 c_shuff = v128_from_v64(v64_from_64(0x0e0d0c0b0a090808LL),
+                                       v64_from_64(0x0605040302010000LL));
+    int y;
+
+    for (y = 0; y < 8; y += 2) {
+      const v64 k1 = v64_load_aligned(org);
+      const v64 k2 = v64_load_aligned(org + ostride);
+      const v64 l1 = v64_load_aligned(rec);
+      const v64 l2 = v64_load_aligned(rec + rstride);
+      v128 o = v128_from_v64(k1, k2);
+      const v128 q = v128_from_v64(l1, l2);
+      const v128 x = v128_add_8(c128, q);
+      v128 a = v128_add_8(
+          c128,
+          v128_from_v64(v64_load_aligned(rec - (y != -y0) * rstride), l1));
+      v128 b = v128_shuffle_8(x, b_shuff);
+      v128 c = v128_shuffle_8(x, c_shuff);
+      v128 d = v128_add_8(c128,
+                          v128_from_v64(v64_load_unaligned(rec + 1),
+                                        v64_load_unaligned(rec + 1 + rstride)));
+      v128 e = v128_add_8(c128,
+                          v128_from_v64(v64_load_unaligned(rec + 2),
+                                        v64_load_unaligned(rec + 2 + rstride)));
+      v128 f = v128_add_8(
+          c128, v128_from_v64(
+                    l2, v64_load_aligned(rec + ((y != bottom) + 1) * rstride)));
+      v128 tmp, delta1, delta2, delta3;
+
+      a = v128_ssub_s8(a, x);
+      b = v128_ssub_s8(b, x);
+      c = v128_ssub_s8(c, x);
+      d = v128_ssub_s8(d, x);
+      e = v128_ssub_s8(e, x);
+      f = v128_ssub_s8(f, x);
+      tmp = v128_add_8(v128_max_s8(v128_min_s8(c, cp1), cm1),
+                       v128_max_s8(v128_min_s8(d, cp1), cm1));
+      delta1 = v128_add_8(
+          v128_add_8(
+              v128_shl_8(v128_add_8(v128_max_s8(v128_min_s8(a, cp1), cm1),
+                                    v128_max_s8(v128_min_s8(f, cp1), cm1)),
+                         2),
+              v128_add_8(v128_max_s8(v128_min_s8(b, cp1), cm1),
+                         v128_max_s8(v128_min_s8(e, cp1), cm1))),
+          v128_add_8(v128_add_8(tmp, tmp), tmp));
+      tmp = v128_add_8(v128_max_s8(v128_min_s8(c, cp2), cm2),
+                       v128_max_s8(v128_min_s8(d, cp2), cm2));
+      delta2 = v128_add_8(
+          v128_add_8(
+              v128_shl_8(v128_add_8(v128_max_s8(v128_min_s8(a, cp2), cm2),
+                                    v128_max_s8(v128_min_s8(f, cp2), cm2)),
+                         2),
+              v128_add_8(v128_max_s8(v128_min_s8(b, cp2), cm2),
+                         v128_max_s8(v128_min_s8(e, cp2), cm2))),
+          v128_add_8(v128_add_8(tmp, tmp), tmp));
+      tmp = v128_add_8(v128_max_s8(v128_min_s8(c, cp4), cm4),
+                       v128_max_s8(v128_min_s8(d, cp4), cm4));
+      delta3 = v128_add_8(
+          v128_add_8(
+              v128_shl_8(v128_add_8(v128_max_s8(v128_min_s8(a, cp4), cm4),
+                                    v128_max_s8(v128_min_s8(f, cp4), cm4)),
+                         2),
+              v128_add_8(v128_max_s8(v128_min_s8(b, cp4), cm4),
+                         v128_max_s8(v128_min_s8(e, cp4), cm4))),
+          v128_add_8(v128_add_8(tmp, tmp), tmp));
+
+      ssd0 = v128_ssd_u8(ssd0, o, q);
+      ssd1 = v128_ssd_u8(
+          ssd1, o,
+          v128_add_8(
+              q,
+              v128_shr_s8(
+                  v128_add_8(c8, v128_add_8(delta1, v128_cmplt_s8(
+                                                        delta1, v128_zero()))),
+                  4)));
+      ssd2 = v128_ssd_u8(
+          ssd2, o,
+          v128_add_8(
+              q,
+              v128_shr_s8(
+                  v128_add_8(c8, v128_add_8(delta2, v128_cmplt_s8(
+                                                        delta2, v128_zero()))),
+                  4)));
+      ssd3 = v128_ssd_u8(
+          ssd3, o,
+          v128_add_8(
+              q,
+              v128_shr_s8(
+                  v128_add_8(c8, v128_add_8(delta3, v128_cmplt_s8(
+                                                        delta3, v128_zero()))),
+                  4)));
+      rec += 2 * rstride;
+      org += 2 * ostride;
+    }
+  } else if (!(width - x0 - 8)) {  // Clip right
+    const v128 d_shuff = v128_from_v64(v64_from_64(0x0f0f0e0d0c0b0a09LL),
+                                       v64_from_64(0x0707060504030201LL));
+    const v128 e_shuff = v128_from_v64(v64_from_64(0x0f0f0f0e0d0c0b0aLL),
+                                       v64_from_64(0x0707070605040302LL));
+    int y;
+
+    for (y = 0; y < 8; y += 2) {
+      const v64 k1 = v64_load_aligned(org);
+      const v64 k2 = v64_load_aligned(org + ostride);
+      const v64 l1 = v64_load_aligned(rec);
+      const v64 l2 = v64_load_aligned(rec + rstride);
+      v128 o = v128_from_v64(k1, k2);
+      const v128 q = v128_from_v64(l1, l2);
+      const v128 x = v128_add_8(c128, q);
+      v128 a = v128_add_8(
+          c128,
+          v128_from_v64(v64_load_aligned(rec - (y != -y0) * rstride), l1));
+      v128 b = v128_add_8(c128,
+                          v128_from_v64(v64_load_unaligned(rec - 2),
+                                        v64_load_unaligned(rec - 2 + rstride)));
+      v128 c = v128_add_8(c128,
+                          v128_from_v64(v64_load_unaligned(rec - 1),
+                                        v64_load_unaligned(rec - 1 + rstride)));
+      v128 d = v128_shuffle_8(x, d_shuff);
+      v128 e = v128_shuffle_8(x, e_shuff);
+      v128 f = v128_add_8(
+          c128, v128_from_v64(
+                    l2, v64_load_aligned(rec + ((y != bottom) + 1) * rstride)));
+      v128 tmp, delta1, delta2, delta3;
+
+      a = v128_ssub_s8(a, x);
+      b = v128_ssub_s8(b, x);
+      c = v128_ssub_s8(c, x);
+      d = v128_ssub_s8(d, x);
+      e = v128_ssub_s8(e, x);
+      f = v128_ssub_s8(f, x);
+      tmp = v128_add_8(v128_max_s8(v128_min_s8(c, cp1), cm1),
+                       v128_max_s8(v128_min_s8(d, cp1), cm1));
+      delta1 = v128_add_8(
+          v128_add_8(
+              v128_shl_8(v128_add_8(v128_max_s8(v128_min_s8(a, cp1), cm1),
+                                    v128_max_s8(v128_min_s8(f, cp1), cm1)),
+                         2),
+              v128_add_8(v128_max_s8(v128_min_s8(b, cp1), cm1),
+                         v128_max_s8(v128_min_s8(e, cp1), cm1))),
+          v128_add_8(v128_add_8(tmp, tmp), tmp));
+      tmp = v128_add_8(v128_max_s8(v128_min_s8(c, cp2), cm2),
+                       v128_max_s8(v128_min_s8(d, cp2), cm2));
+      delta2 = v128_add_8(
+          v128_add_8(
+              v128_shl_8(v128_add_8(v128_max_s8(v128_min_s8(a, cp2), cm2),
+                                    v128_max_s8(v128_min_s8(f, cp2), cm2)),
+                         2),
+              v128_add_8(v128_max_s8(v128_min_s8(b, cp2), cm2),
+                         v128_max_s8(v128_min_s8(e, cp2), cm2))),
+          v128_add_8(v128_add_8(tmp, tmp), tmp));
+      tmp = v128_add_8(v128_max_s8(v128_min_s8(c, cp4), cm4),
+                       v128_max_s8(v128_min_s8(d, cp4), cm4));
+      delta3 = v128_add_8(
+          v128_add_8(
+              v128_shl_8(v128_add_8(v128_max_s8(v128_min_s8(a, cp4), cm4),
+                                    v128_max_s8(v128_min_s8(f, cp4), cm4)),
+                         2),
+              v128_add_8(v128_max_s8(v128_min_s8(b, cp4), cm4),
+                         v128_max_s8(v128_min_s8(e, cp4), cm4))),
+          v128_add_8(v128_add_8(tmp, tmp), tmp));
+
+      ssd0 = v128_ssd_u8(ssd0, o, q);
+      ssd1 = v128_ssd_u8(
+          ssd1, o,
+          v128_add_8(
+              q,
+              v128_shr_s8(
+                  v128_add_8(c8, v128_add_8(delta1, v128_cmplt_s8(
+                                                        delta1, v128_zero()))),
+                  4)));
+      ssd2 = v128_ssd_u8(
+          ssd2, o,
+          v128_add_8(
+              q,
+              v128_shr_s8(
+                  v128_add_8(c8, v128_add_8(delta2, v128_cmplt_s8(
+                                                        delta2, v128_zero()))),
+                  4)));
+      ssd3 = v128_ssd_u8(
+          ssd3, o,
+          v128_add_8(
+              q,
+              v128_shr_s8(
+                  v128_add_8(c8, v128_add_8(delta3, v128_cmplt_s8(
+                                                        delta3, v128_zero()))),
+                  4)));
+      rec += 2 * rstride;
+      org += 2 * ostride;
+    }
+  } else {  // No left/right clipping
+    int y;
+    for (y = 0; y < 8; y += 2) {
+      const v64 k1 = v64_load_aligned(org);
+      const v64 k2 = v64_load_aligned(org + ostride);
+      const v64 l1 = v64_load_aligned(rec);
+      const v64 l2 = v64_load_aligned(rec + rstride);
+      v128 o = v128_from_v64(k1, k2);
+      const v128 q = v128_from_v64(l1, l2);
+      const v128 x = v128_add_8(c128, q);
+      v128 a = v128_add_8(
+          c128,
+          v128_from_v64(v64_load_aligned(rec - (y != -y0) * rstride), l1));
+      v128 b = v128_add_8(c128,
+                          v128_from_v64(v64_load_unaligned(rec - 2),
+                                        v64_load_unaligned(rec - 2 + rstride)));
+      v128 c = v128_add_8(c128,
+                          v128_from_v64(v64_load_unaligned(rec - 1),
+                                        v64_load_unaligned(rec - 1 + rstride)));
+      v128 d = v128_add_8(c128,
+                          v128_from_v64(v64_load_unaligned(rec + 1),
+                                        v64_load_unaligned(rec + 1 + rstride)));
+      v128 e = v128_add_8(c128,
+                          v128_from_v64(v64_load_unaligned(rec + 2),
+                                        v64_load_unaligned(rec + 2 + rstride)));
+      v128 f = v128_add_8(
+          c128, v128_from_v64(
+                    l2, v64_load_aligned(rec + ((y != bottom) + 1) * rstride)));
+      v128 tmp, delta1, delta2, delta3;
+
+      a = v128_ssub_s8(a, x);
+      b = v128_ssub_s8(b, x);
+      c = v128_ssub_s8(c, x);
+      d = v128_ssub_s8(d, x);
+      e = v128_ssub_s8(e, x);
+      f = v128_ssub_s8(f, x);
+      tmp = v128_add_8(v128_max_s8(v128_min_s8(c, cp1), cm1),
+                       v128_max_s8(v128_min_s8(d, cp1), cm1));
+      delta1 = v128_add_8(
+          v128_add_8(
+              v128_shl_8(v128_add_8(v128_max_s8(v128_min_s8(a, cp1), cm1),
+                                    v128_max_s8(v128_min_s8(f, cp1), cm1)),
+                         2),
+              v128_add_8(v128_max_s8(v128_min_s8(b, cp1), cm1),
+                         v128_max_s8(v128_min_s8(e, cp1), cm1))),
+          v128_add_8(v128_add_8(tmp, tmp), tmp));
+      tmp = v128_add_8(v128_max_s8(v128_min_s8(c, cp2), cm2),
+                       v128_max_s8(v128_min_s8(d, cp2), cm2));
+      delta2 = v128_add_8(
+          v128_add_8(
+              v128_shl_8(v128_add_8(v128_max_s8(v128_min_s8(a, cp2), cm2),
+                                    v128_max_s8(v128_min_s8(f, cp2), cm2)),
+                         2),
+              v128_add_8(v128_max_s8(v128_min_s8(b, cp2), cm2),
+                         v128_max_s8(v128_min_s8(e, cp2), cm2))),
+          v128_add_8(v128_add_8(tmp, tmp), tmp));
+      tmp = v128_add_8(v128_max_s8(v128_min_s8(c, cp4), cm4),
+                       v128_max_s8(v128_min_s8(d, cp4), cm4));
+      delta3 = v128_add_8(
+          v128_add_8(
+              v128_shl_8(v128_add_8(v128_max_s8(v128_min_s8(a, cp4), cm4),
+                                    v128_max_s8(v128_min_s8(f, cp4), cm4)),
+                         2),
+              v128_add_8(v128_max_s8(v128_min_s8(b, cp4), cm4),
+                         v128_max_s8(v128_min_s8(e, cp4), cm4))),
+          v128_add_8(v128_add_8(tmp, tmp), tmp));
+
+      ssd0 = v128_ssd_u8(ssd0, o, q);
+      ssd1 = v128_ssd_u8(
+          ssd1, o,
+          v128_add_8(
+              q,
+              v128_shr_s8(
+                  v128_add_8(c8, v128_add_8(delta1, v128_cmplt_s8(
+                                                        delta1, v128_zero()))),
+                  4)));
+      ssd2 = v128_ssd_u8(
+          ssd2, o,
+          v128_add_8(
+              q,
+              v128_shr_s8(
+                  v128_add_8(c8, v128_add_8(delta2, v128_cmplt_s8(
+                                                        delta2, v128_zero()))),
+                  4)));
+      ssd3 = v128_ssd_u8(
+          ssd3, o,
+          v128_add_8(
+              q,
+              v128_shr_s8(
+                  v128_add_8(c8, v128_add_8(delta3, v128_cmplt_s8(
+                                                        delta3, v128_zero()))),
+                  4)));
+      rec += 2 * rstride;
+      org += 2 * ostride;
+    }
+  }
+  sum[0] += v128_ssd_u8_sum(ssd0);
+  sum[1] += v128_ssd_u8_sum(ssd1);
+  sum[2] += v128_ssd_u8_sum(ssd2);
+  sum[3] += v128_ssd_u8_sum(ssd3);
+}
diff --git a/av1/encoder/clpf_rdo_sse2.c b/av1/encoder/clpf_rdo_sse2.c
new file mode 100644
index 0000000..99847c0
--- /dev/null
+++ b/av1/encoder/clpf_rdo_sse2.c
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include "aom_dsp/aom_simd.h"
+#define SIMD_FUNC(name) name##_sse2
+#include "./clpf_rdo_simd.h"
diff --git a/av1/encoder/clpf_rdo_sse4_1.c b/av1/encoder/clpf_rdo_sse4_1.c
new file mode 100644
index 0000000..049f537
--- /dev/null
+++ b/av1/encoder/clpf_rdo_sse4_1.c
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include "aom_dsp/aom_simd.h"
+#define SIMD_FUNC(name) name##_sse4_1
+#include "./clpf_rdo_simd.h"
diff --git a/av1/encoder/clpf_rdo_ssse3.c b/av1/encoder/clpf_rdo_ssse3.c
new file mode 100644
index 0000000..35b23b2
--- /dev/null
+++ b/av1/encoder/clpf_rdo_ssse3.c
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include "aom_dsp/aom_simd.h"
+#define SIMD_FUNC(name) name##_ssse3
+#include "./clpf_rdo_simd.h"
diff --git a/av1/encoder/x86/dct_intrin_sse2.c b/av1/encoder/x86/dct_intrin_sse2.c
index 15dd66e..ad61fd3 100644
--- a/av1/encoder/x86/dct_intrin_sse2.c
+++ b/av1/encoder/x86/dct_intrin_sse2.c
@@ -2593,7 +2593,41 @@
 }
 
 #if CONFIG_EXT_TX
-static INLINE void scale_sqrt2_8x8(__m128i *in) {
+static INLINE void scale_sqrt2_8x4(__m128i *in) {
+  // Implements fdct_round_shift(input * Sqrt2), which is equivalent to
+  // ROUND_POWER_OF_TWO(input * Sqrt2, DCT_CONST_BITS),
+  // for 32 consecutive elements.
+  const __m128i v_scale_w = _mm_set1_epi16(Sqrt2);
+
+  const __m128i v_p0l_w = _mm_mullo_epi16(in[0], v_scale_w);
+  const __m128i v_p0h_w = _mm_mulhi_epi16(in[0], v_scale_w);
+  const __m128i v_p1l_w = _mm_mullo_epi16(in[1], v_scale_w);
+  const __m128i v_p1h_w = _mm_mulhi_epi16(in[1], v_scale_w);
+  const __m128i v_p2l_w = _mm_mullo_epi16(in[2], v_scale_w);
+  const __m128i v_p2h_w = _mm_mulhi_epi16(in[2], v_scale_w);
+  const __m128i v_p3l_w = _mm_mullo_epi16(in[3], v_scale_w);
+  const __m128i v_p3h_w = _mm_mulhi_epi16(in[3], v_scale_w);
+
+  const __m128i v_p0a_d = _mm_unpacklo_epi16(v_p0l_w, v_p0h_w);
+  const __m128i v_p0b_d = _mm_unpackhi_epi16(v_p0l_w, v_p0h_w);
+  const __m128i v_p1a_d = _mm_unpacklo_epi16(v_p1l_w, v_p1h_w);
+  const __m128i v_p1b_d = _mm_unpackhi_epi16(v_p1l_w, v_p1h_w);
+  const __m128i v_p2a_d = _mm_unpacklo_epi16(v_p2l_w, v_p2h_w);
+  const __m128i v_p2b_d = _mm_unpackhi_epi16(v_p2l_w, v_p2h_w);
+  const __m128i v_p3a_d = _mm_unpacklo_epi16(v_p3l_w, v_p3h_w);
+  const __m128i v_p3b_d = _mm_unpackhi_epi16(v_p3l_w, v_p3h_w);
+
+  in[0] = _mm_packs_epi32(xx_roundn_epi32_unsigned(v_p0a_d, DCT_CONST_BITS),
+                          xx_roundn_epi32_unsigned(v_p0b_d, DCT_CONST_BITS));
+  in[1] = _mm_packs_epi32(xx_roundn_epi32_unsigned(v_p1a_d, DCT_CONST_BITS),
+                          xx_roundn_epi32_unsigned(v_p1b_d, DCT_CONST_BITS));
+  in[2] = _mm_packs_epi32(xx_roundn_epi32_unsigned(v_p2a_d, DCT_CONST_BITS),
+                          xx_roundn_epi32_unsigned(v_p2b_d, DCT_CONST_BITS));
+  in[3] = _mm_packs_epi32(xx_roundn_epi32_unsigned(v_p3a_d, DCT_CONST_BITS),
+                          xx_roundn_epi32_unsigned(v_p3b_d, DCT_CONST_BITS));
+}
+
+static INLINE void scale_sqrt2_8x8_signed(__m128i *in) {
   // Implements 'ROUND_POWER_OF_TWO_SIGNED(input * Sqrt2, DCT_CONST_BITS)'
   // for each element
   const __m128i v_scale_w = _mm_set1_epi16(Sqrt2);
@@ -2650,6 +2684,419 @@
                           xx_roundn_epi32(v_p7b_d, DCT_CONST_BITS));
 }
 
+// Load input into the left-hand half of in (ie, into lanes 0..3 of
+// each element of in). The right hand half (lanes 4..7) should be
+// treated as being filled with "don't care" values.
+static INLINE void load_buffer_4x8(const int16_t *input, __m128i *in,
+                                   int stride, int flipud, int fliplr) {
+  if (!flipud) {
+    in[0] = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
+    in[1] = _mm_loadl_epi64((const __m128i *)(input + 1 * stride));
+    in[2] = _mm_loadl_epi64((const __m128i *)(input + 2 * stride));
+    in[3] = _mm_loadl_epi64((const __m128i *)(input + 3 * stride));
+    in[4] = _mm_loadl_epi64((const __m128i *)(input + 4 * stride));
+    in[5] = _mm_loadl_epi64((const __m128i *)(input + 5 * stride));
+    in[6] = _mm_loadl_epi64((const __m128i *)(input + 6 * stride));
+    in[7] = _mm_loadl_epi64((const __m128i *)(input + 7 * stride));
+  } else {
+    in[0] = _mm_loadl_epi64((const __m128i *)(input + 7 * stride));
+    in[1] = _mm_loadl_epi64((const __m128i *)(input + 6 * stride));
+    in[2] = _mm_loadl_epi64((const __m128i *)(input + 5 * stride));
+    in[3] = _mm_loadl_epi64((const __m128i *)(input + 4 * stride));
+    in[4] = _mm_loadl_epi64((const __m128i *)(input + 3 * stride));
+    in[5] = _mm_loadl_epi64((const __m128i *)(input + 2 * stride));
+    in[6] = _mm_loadl_epi64((const __m128i *)(input + 1 * stride));
+    in[7] = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
+  }
+
+  if (fliplr) {
+    in[0] = _mm_shufflelo_epi16(in[0], 0x1b);
+    in[1] = _mm_shufflelo_epi16(in[1], 0x1b);
+    in[2] = _mm_shufflelo_epi16(in[2], 0x1b);
+    in[3] = _mm_shufflelo_epi16(in[3], 0x1b);
+    in[4] = _mm_shufflelo_epi16(in[4], 0x1b);
+    in[5] = _mm_shufflelo_epi16(in[5], 0x1b);
+    in[6] = _mm_shufflelo_epi16(in[6], 0x1b);
+    in[7] = _mm_shufflelo_epi16(in[7], 0x1b);
+  }
+
+  in[0] = _mm_slli_epi16(in[0], 3);
+  in[1] = _mm_slli_epi16(in[1], 3);
+  in[2] = _mm_slli_epi16(in[2], 3);
+  in[3] = _mm_slli_epi16(in[3], 3);
+  in[4] = _mm_slli_epi16(in[4], 3);
+  in[5] = _mm_slli_epi16(in[5], 3);
+  in[6] = _mm_slli_epi16(in[6], 3);
+  in[7] = _mm_slli_epi16(in[7], 3);
+
+  scale_sqrt2_8x4(in);
+  scale_sqrt2_8x4(in + 4);
+}
+
+static INLINE void write_buffer_4x8(tran_low_t *output, __m128i *res) {
+  const __m128i kOne = _mm_set1_epi16(1);
+  __m128i in01 = _mm_unpacklo_epi64(res[0], res[1]);
+  __m128i in23 = _mm_unpacklo_epi64(res[2], res[3]);
+  __m128i in45 = _mm_unpacklo_epi64(res[4], res[5]);
+  __m128i in67 = _mm_unpacklo_epi64(res[6], res[7]);
+
+  __m128i out01 = _mm_add_epi16(in01, kOne);
+  __m128i out23 = _mm_add_epi16(in23, kOne);
+  __m128i out45 = _mm_add_epi16(in45, kOne);
+  __m128i out67 = _mm_add_epi16(in67, kOne);
+
+  out01 = _mm_srai_epi16(out01, 2);
+  out23 = _mm_srai_epi16(out23, 2);
+  out45 = _mm_srai_epi16(out45, 2);
+  out67 = _mm_srai_epi16(out67, 2);
+
+  store_output(&out01, (output + 0 * 8));
+  store_output(&out23, (output + 1 * 8));
+  store_output(&out45, (output + 2 * 8));
+  store_output(&out67, (output + 3 * 8));
+}
+
+void av1_fht4x8_sse2(const int16_t *input, tran_low_t *output, int stride,
+                     int tx_type) {
+  __m128i in[8];
+
+  switch (tx_type) {
+    case DCT_DCT:
+      load_buffer_4x8(input, in, stride, 0, 0);
+      fdct8_sse2(in);
+      // Repack data into two 4x4 blocks so we can reuse the 4x4 transforms
+      // The other cases (and the 8x4 transforms) all behave similarly
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fdct4_sse2(in);
+      fdct4_sse2(in + 4);
+      break;
+    case ADST_DCT:
+      load_buffer_4x8(input, in, stride, 0, 0);
+      fadst8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fdct4_sse2(in);
+      fdct4_sse2(in + 4);
+      break;
+    case DCT_ADST:
+      load_buffer_4x8(input, in, stride, 0, 0);
+      fdct8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      break;
+    case ADST_ADST:
+      load_buffer_4x8(input, in, stride, 0, 0);
+      fadst8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      break;
+#if CONFIG_EXT_TX
+    case FLIPADST_DCT:
+      load_buffer_4x8(input, in, stride, 1, 0);
+      fadst8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fdct4_sse2(in);
+      fdct4_sse2(in + 4);
+      break;
+    case DCT_FLIPADST:
+      load_buffer_4x8(input, in, stride, 0, 1);
+      fdct8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      break;
+    case FLIPADST_FLIPADST:
+      load_buffer_4x8(input, in, stride, 1, 1);
+      fadst8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      break;
+    case ADST_FLIPADST:
+      load_buffer_4x8(input, in, stride, 0, 1);
+      fadst8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      break;
+    case FLIPADST_ADST:
+      load_buffer_4x8(input, in, stride, 1, 0);
+      fadst8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      break;
+    case IDTX:
+      load_buffer_4x8(input, in, stride, 0, 0);
+      fidtx8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fidtx4_sse2(in);
+      fidtx4_sse2(in + 4);
+      break;
+    case V_DCT:
+      load_buffer_4x8(input, in, stride, 0, 0);
+      fdct8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fidtx4_sse2(in);
+      fidtx4_sse2(in + 4);
+      break;
+    case H_DCT:
+      load_buffer_4x8(input, in, stride, 0, 0);
+      fidtx8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fdct4_sse2(in);
+      fdct4_sse2(in + 4);
+      break;
+    case V_ADST:
+      load_buffer_4x8(input, in, stride, 0, 0);
+      fadst8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fidtx4_sse2(in);
+      fidtx4_sse2(in + 4);
+      break;
+    case H_ADST:
+      load_buffer_4x8(input, in, stride, 0, 0);
+      fidtx8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      break;
+    case V_FLIPADST:
+      load_buffer_4x8(input, in, stride, 1, 0);
+      fadst8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fidtx4_sse2(in);
+      fidtx4_sse2(in + 4);
+      break;
+    case H_FLIPADST:
+      load_buffer_4x8(input, in, stride, 0, 1);
+      fidtx8_sse2(in);
+      in[4] = _mm_shuffle_epi32(in[0], 0xe);
+      in[5] = _mm_shuffle_epi32(in[1], 0xe);
+      in[6] = _mm_shuffle_epi32(in[2], 0xe);
+      in[7] = _mm_shuffle_epi32(in[3], 0xe);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      break;
+#endif  // CONFIG_EXT_TX
+    default: assert(0); break;
+  }
+  write_buffer_4x8(output, in);
+}
+
+// Load input into the left-hand half of in (ie, into lanes 0..3 of
+// each element of in). The right hand half (lanes 4..7) should be
+// treated as being filled with "don't care" values.
+// The input is split horizontally into two 4x4
+// chunks 'l' and 'r'. Then 'l' is stored in the top-left 4x4
+// block of 'in' and 'r' is stored in the bottom-left block.
+// This is to allow us to reuse 4x4 transforms.
+static INLINE void load_buffer_8x4(const int16_t *input, __m128i *in,
+                                   int stride, int flipud, int fliplr) {
+  if (!flipud) {
+    in[0] = _mm_loadu_si128((const __m128i *)(input + 0 * stride));
+    in[1] = _mm_loadu_si128((const __m128i *)(input + 1 * stride));
+    in[2] = _mm_loadu_si128((const __m128i *)(input + 2 * stride));
+    in[3] = _mm_loadu_si128((const __m128i *)(input + 3 * stride));
+  } else {
+    in[0] = _mm_loadu_si128((const __m128i *)(input + 3 * stride));
+    in[1] = _mm_loadu_si128((const __m128i *)(input + 2 * stride));
+    in[2] = _mm_loadu_si128((const __m128i *)(input + 1 * stride));
+    in[3] = _mm_loadu_si128((const __m128i *)(input + 0 * stride));
+  }
+
+  if (fliplr) {
+    in[0] = mm_reverse_epi16(in[0]);
+    in[1] = mm_reverse_epi16(in[1]);
+    in[2] = mm_reverse_epi16(in[2]);
+    in[3] = mm_reverse_epi16(in[3]);
+  }
+
+  in[0] = _mm_slli_epi16(in[0], 3);
+  in[1] = _mm_slli_epi16(in[1], 3);
+  in[2] = _mm_slli_epi16(in[2], 3);
+  in[3] = _mm_slli_epi16(in[3], 3);
+
+  scale_sqrt2_8x4(in);
+
+  in[4] = _mm_shuffle_epi32(in[0], 0xe);
+  in[5] = _mm_shuffle_epi32(in[1], 0xe);
+  in[6] = _mm_shuffle_epi32(in[2], 0xe);
+  in[7] = _mm_shuffle_epi32(in[3], 0xe);
+}
+
+static INLINE void write_buffer_8x4(tran_low_t *output, __m128i *res) {
+  const __m128i kOne = _mm_set1_epi16(1);
+
+  __m128i out0 = _mm_add_epi16(res[0], kOne);
+  __m128i out1 = _mm_add_epi16(res[1], kOne);
+  __m128i out2 = _mm_add_epi16(res[2], kOne);
+  __m128i out3 = _mm_add_epi16(res[3], kOne);
+  out0 = _mm_srai_epi16(out0, 2);
+  out1 = _mm_srai_epi16(out1, 2);
+  out2 = _mm_srai_epi16(out2, 2);
+  out3 = _mm_srai_epi16(out3, 2);
+
+  store_output(&out0, (output + 0 * 8));
+  store_output(&out1, (output + 1 * 8));
+  store_output(&out2, (output + 2 * 8));
+  store_output(&out3, (output + 3 * 8));
+}
+
+void av1_fht8x4_sse2(const int16_t *input, tran_low_t *output, int stride,
+                     int tx_type) {
+  __m128i in[8];
+
+  switch (tx_type) {
+    case DCT_DCT:
+      load_buffer_8x4(input, in, stride, 0, 0);
+      fdct4_sse2(in);
+      fdct4_sse2(in + 4);
+      fdct8_sse2(in);
+      break;
+    case ADST_DCT:
+      load_buffer_8x4(input, in, stride, 0, 0);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      fdct8_sse2(in);
+      break;
+    case DCT_ADST:
+      load_buffer_8x4(input, in, stride, 0, 0);
+      fdct4_sse2(in);
+      fdct4_sse2(in + 4);
+      fadst8_sse2(in);
+      break;
+    case ADST_ADST:
+      load_buffer_8x4(input, in, stride, 0, 0);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      fadst8_sse2(in);
+      break;
+#if CONFIG_EXT_TX
+    case FLIPADST_DCT:
+      load_buffer_8x4(input, in, stride, 1, 0);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      fdct8_sse2(in);
+      break;
+    case DCT_FLIPADST:
+      load_buffer_8x4(input, in, stride, 0, 1);
+      fdct4_sse2(in);
+      fdct4_sse2(in + 4);
+      fadst8_sse2(in);
+      break;
+    case FLIPADST_FLIPADST:
+      load_buffer_8x4(input, in, stride, 1, 1);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      fadst8_sse2(in);
+      break;
+    case ADST_FLIPADST:
+      load_buffer_8x4(input, in, stride, 0, 1);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      fadst8_sse2(in);
+      break;
+    case FLIPADST_ADST:
+      load_buffer_8x4(input, in, stride, 1, 0);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      fadst8_sse2(in);
+      break;
+    case IDTX:
+      load_buffer_8x4(input, in, stride, 0, 0);
+      fidtx4_sse2(in);
+      fidtx4_sse2(in + 4);
+      fidtx8_sse2(in);
+      break;
+    case V_DCT:
+      load_buffer_8x4(input, in, stride, 0, 0);
+      fdct4_sse2(in);
+      fdct4_sse2(in + 4);
+      fidtx8_sse2(in);
+      break;
+    case H_DCT:
+      load_buffer_8x4(input, in, stride, 0, 0);
+      fidtx4_sse2(in);
+      fidtx4_sse2(in + 4);
+      fdct8_sse2(in);
+      break;
+    case V_ADST:
+      load_buffer_8x4(input, in, stride, 0, 0);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      fidtx8_sse2(in);
+      break;
+    case H_ADST:
+      load_buffer_8x4(input, in, stride, 0, 0);
+      fidtx4_sse2(in);
+      fidtx4_sse2(in + 4);
+      fadst8_sse2(in);
+      break;
+    case V_FLIPADST:
+      load_buffer_8x4(input, in, stride, 1, 0);
+      fadst4_sse2(in);
+      fadst4_sse2(in + 4);
+      fidtx8_sse2(in);
+      break;
+    case H_FLIPADST:
+      load_buffer_8x4(input, in, stride, 0, 1);
+      fidtx4_sse2(in);
+      fidtx4_sse2(in + 4);
+      fadst8_sse2(in);
+      break;
+#endif  // CONFIG_EXT_TX
+    default: assert(0); break;
+  }
+  write_buffer_8x4(output, in);
+}
+
 static INLINE void load_buffer_8x16(const int16_t *input, __m128i *in,
                                     int stride, int flipud, int fliplr) {
   // Load 2 8x8 blocks
@@ -2663,9 +3110,9 @@
   }
 
   load_buffer_8x8(t, in, stride, flipud, fliplr);
-  scale_sqrt2_8x8(in);
+  scale_sqrt2_8x8_signed(in);
   load_buffer_8x8(b, in + 8, stride, flipud, fliplr);
-  scale_sqrt2_8x8(in + 8);
+  scale_sqrt2_8x8_signed(in + 8);
 }
 
 void av1_fht8x16_sse2(const int16_t *input, tran_low_t *output, int stride,
@@ -2828,9 +3275,9 @@
 
   // load first 8 columns
   load_buffer_8x8(l, in, stride, flipud, fliplr);
-  scale_sqrt2_8x8(in);
+  scale_sqrt2_8x8_signed(in);
   load_buffer_8x8(r, in + 8, stride, flipud, fliplr);
-  scale_sqrt2_8x8(in + 8);
+  scale_sqrt2_8x8_signed(in + 8);
 }
 
 void av1_fht16x8_sse2(const int16_t *input, tran_low_t *output, int stride,
diff --git a/test/av1_fht4x8_test.cc b/test/av1_fht4x8_test.cc
new file mode 100644
index 0000000..a344532
--- /dev/null
+++ b/test/av1_fht4x8_test.cc
@@ -0,0 +1,101 @@
+/*
+ *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./aom_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+
+#include "aom_ports/mem.h"
+#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
+#include "test/transform_test_base.h"
+#include "test/util.h"
+
+using libaom_test::ACMRandom;
+
+namespace {
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+                        int tx_type);
+using std::tr1::tuple;
+using libaom_test::FhtFunc;
+typedef tuple<FhtFunc, IhtFunc, int, aom_bit_depth_t, int> Ht4x8Param;
+
+void fht4x8_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
+  av1_fht4x8_c(in, out, stride, tx_type);
+}
+
+void iht4x8_ref(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+  av1_iht4x8_32_add_c(in, out, stride, tx_type);
+}
+
+class AV1Trans4x8HT : public libaom_test::TransformTestBase,
+                      public ::testing::TestWithParam<Ht4x8Param> {
+ public:
+  virtual ~AV1Trans4x8HT() {}
+
+  virtual void SetUp() {
+    fwd_txfm_ = GET_PARAM(0);
+    inv_txfm_ = GET_PARAM(1);
+    tx_type_ = GET_PARAM(2);
+    pitch_ = 4;
+    fwd_txfm_ref = fht4x8_ref;
+    inv_txfm_ref = iht4x8_ref;
+    bit_depth_ = GET_PARAM(3);
+    mask_ = (1 << bit_depth_) - 1;
+    num_coeffs_ = GET_PARAM(4);
+  }
+  virtual void TearDown() { libaom_test::ClearSystemState(); }
+
+ protected:
+  void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
+    fwd_txfm_(in, out, stride, tx_type_);
+  }
+
+  void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
+    inv_txfm_(out, dst, stride, tx_type_);
+  }
+
+  FhtFunc fwd_txfm_;
+  IhtFunc inv_txfm_;
+};
+
+TEST_P(AV1Trans4x8HT, CoeffCheck) { RunCoeffCheck(); }
+TEST_P(AV1Trans4x8HT, InvCoeffCheck) { RunInvCoeffCheck(); }
+
+using std::tr1::make_tuple;
+
+#if HAVE_SSE2
+const Ht4x8Param kArrayHt4x8Param_sse2[] = {
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 0, AOM_BITS_8, 32),
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 1, AOM_BITS_8, 32),
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 2, AOM_BITS_8, 32),
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 3, AOM_BITS_8, 32),
+#if CONFIG_EXT_TX
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 4, AOM_BITS_8, 32),
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 5, AOM_BITS_8, 32),
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 6, AOM_BITS_8, 32),
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 7, AOM_BITS_8, 32),
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 8, AOM_BITS_8, 32),
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 9, AOM_BITS_8, 32),
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 10, AOM_BITS_8, 32),
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 11, AOM_BITS_8, 32),
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 12, AOM_BITS_8, 32),
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 13, AOM_BITS_8, 32),
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 14, AOM_BITS_8, 32),
+  make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 15, AOM_BITS_8, 32)
+#endif  // CONFIG_EXT_TX
+};
+INSTANTIATE_TEST_CASE_P(SSE2, AV1Trans4x8HT,
+                        ::testing::ValuesIn(kArrayHt4x8Param_sse2));
+#endif  // HAVE_SSE2
+
+}  // namespace
diff --git a/test/av1_fht8x4_test.cc b/test/av1_fht8x4_test.cc
new file mode 100644
index 0000000..ee89e96
--- /dev/null
+++ b/test/av1_fht8x4_test.cc
@@ -0,0 +1,101 @@
+/*
+ *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./aom_dsp_rtcd.h"
+#include "./av1_rtcd.h"
+
+#include "aom_ports/mem.h"
+#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
+#include "test/transform_test_base.h"
+#include "test/util.h"
+
+using libaom_test::ACMRandom;
+
+namespace {
+typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
+                        int tx_type);
+using std::tr1::tuple;
+using libaom_test::FhtFunc;
+typedef tuple<FhtFunc, IhtFunc, int, aom_bit_depth_t, int> Ht8x4Param;
+
+void fht8x4_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
+  av1_fht8x4_c(in, out, stride, tx_type);
+}
+
+void iht8x4_ref(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
+  av1_iht8x4_32_add_c(in, out, stride, tx_type);
+}
+
+class AV1Trans8x4HT : public libaom_test::TransformTestBase,
+                      public ::testing::TestWithParam<Ht8x4Param> {
+ public:
+  virtual ~AV1Trans8x4HT() {}
+
+  virtual void SetUp() {
+    fwd_txfm_ = GET_PARAM(0);
+    inv_txfm_ = GET_PARAM(1);
+    tx_type_ = GET_PARAM(2);
+    pitch_ = 8;
+    fwd_txfm_ref = fht8x4_ref;
+    inv_txfm_ref = iht8x4_ref;
+    bit_depth_ = GET_PARAM(3);
+    mask_ = (1 << bit_depth_) - 1;
+    num_coeffs_ = GET_PARAM(4);
+  }
+  virtual void TearDown() { libaom_test::ClearSystemState(); }
+
+ protected:
+  void RunFwdTxfm(const int16_t *in, tran_low_t *out, int stride) {
+    fwd_txfm_(in, out, stride, tx_type_);
+  }
+
+  void RunInvTxfm(const tran_low_t *out, uint8_t *dst, int stride) {
+    inv_txfm_(out, dst, stride, tx_type_);
+  }
+
+  FhtFunc fwd_txfm_;
+  IhtFunc inv_txfm_;
+};
+
+TEST_P(AV1Trans8x4HT, CoeffCheck) { RunCoeffCheck(); }
+TEST_P(AV1Trans8x4HT, InvCoeffCheck) { RunInvCoeffCheck(); }
+
+using std::tr1::make_tuple;
+
+#if HAVE_SSE2
+const Ht8x4Param kArrayHt8x4Param_sse2[] = {
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 0, AOM_BITS_8, 32),
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 1, AOM_BITS_8, 32),
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 2, AOM_BITS_8, 32),
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 3, AOM_BITS_8, 32),
+#if CONFIG_EXT_TX
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 4, AOM_BITS_8, 32),
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 5, AOM_BITS_8, 32),
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 6, AOM_BITS_8, 32),
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 7, AOM_BITS_8, 32),
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 8, AOM_BITS_8, 32),
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 9, AOM_BITS_8, 32),
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 10, AOM_BITS_8, 32),
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 11, AOM_BITS_8, 32),
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 12, AOM_BITS_8, 32),
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 13, AOM_BITS_8, 32),
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 14, AOM_BITS_8, 32),
+  make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 15, AOM_BITS_8, 32)
+#endif  // CONFIG_EXT_TX
+};
+INSTANTIATE_TEST_CASE_P(SSE2, AV1Trans8x4HT,
+                        ::testing::ValuesIn(kArrayHt8x4Param_sse2));
+#endif  // HAVE_SSE2
+
+}  // namespace
diff --git a/test/clpf_test.cc b/test/clpf_test.cc
new file mode 100644
index 0000000..786180b
--- /dev/null
+++ b/test/clpf_test.cc
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+*/
+
+#include <cstdlib>
+#include <string>
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom_ports/aom_timer.h"
+#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
+#include "test/util.h"
+
+using libaom_test::ACMRandom;
+
+namespace {
+
+typedef void (*clpf_block_t)(const uint8_t *src, uint8_t *dst, int stride,
+                             int x0, int y0, int sizex, int sizey, int width,
+                             int height, unsigned int strength);
+
+typedef std::tr1::tuple<clpf_block_t, clpf_block_t, int, int>
+    clpf_block_param_t;
+
+class ClpfBlockTest : public ::testing::TestWithParam<clpf_block_param_t> {
+ public:
+  virtual ~ClpfBlockTest() {}
+  virtual void SetUp() {
+    clpf = GET_PARAM(0);
+    ref_clpf = GET_PARAM(1);
+    sizex = GET_PARAM(2);
+    sizey = GET_PARAM(3);
+  }
+
+  virtual void TearDown() { libaom_test::ClearSystemState(); }
+
+ protected:
+  int sizex;
+  int sizey;
+  clpf_block_t clpf;
+  clpf_block_t ref_clpf;
+};
+
+typedef ClpfBlockTest ClpfSpeedTest;
+
+TEST_P(ClpfBlockTest, TestSIMDNoMismatch) {
+  int w = sizex;
+  int h = sizey;
+  const int size = 32;
+  ACMRandom rnd(ACMRandom::DeterministicSeed());
+  DECLARE_ALIGNED(16, uint8_t, s[size * size]);
+  DECLARE_ALIGNED(16, uint8_t, d[size * size]);
+  DECLARE_ALIGNED(16, uint8_t, ref_d[size * size]);
+  memset(ref_d, 0, size * size);
+  memset(d, 0, size * size);
+
+  int error = 0;
+  int pos = 0;
+  int strength = 0;
+  int xpos = 0, ypos = 0;
+  int bits;
+  int level;
+
+  // Test every combination of:
+  // * Input with 1-8 bits of noise
+  // * Noise level around every value from 0 to 255
+  // * Blocks anywhere in the frame (along all edges and also fully inside)
+  // * All strengths
+  for (level = 0; level < 256 && !error; level++) {
+    for (bits = 1; bits < 9 && !error; bits++) {
+      for (int i = 0; i < size * size; i++)
+        s[i] = clamp((rnd.Rand8() & ((1 << bits) - 1)) + level, 0, 255);
+
+      for (ypos = 0; ypos < size && !error; ypos += h * !error) {
+        for (xpos = 0; xpos < size && !error; xpos += w * !error) {
+          for (strength = 0; strength < 3 && !error; strength += !error) {
+            ref_clpf(s, ref_d, size, xpos, ypos, w, h, size, size,
+                     1 << strength);
+            ASM_REGISTER_STATE_CHECK(
+                clpf(s, d, size, xpos, ypos, w, h, size, size, 1 << strength));
+
+            for (pos = 0; pos < size * size && !error; pos++) {
+              error = ref_d[pos] != d[pos];
+            }
+          }
+        }
+      }
+    }
+  }
+
+  EXPECT_EQ(0, error)
+      << "Error: ClpfBlockTest, SIMD and C mismatch." << std::endl
+      << "First error at " << pos % size << "," << pos / size << " ("
+      << (int16_t)ref_d[pos] << " != " << (int16_t)d[pos] << ") " << std::endl
+      << "strength: " << (1 << strength) << std::endl
+      << "xpos: " << xpos << std::endl
+      << "ypos: " << ypos << std::endl
+      << "A=" << (pos > size ? (int16_t)s[pos - size] : -1) << std::endl
+      << "B=" << (pos % size - 2 >= 0 ? (int16_t)s[pos - 2] : -1) << std::endl
+      << "C=" << (pos % size - 1 >= 0 ? (int16_t)s[pos - 1] : -1) << std::endl
+      << "X=" << (int16_t)s[pos] << std::endl
+      << "D=" << (pos % size + 1 < size ? (int16_t)s[pos + 1] : -1) << std::endl
+      << "E=" << (pos % size + 2 < size ? (int16_t)s[pos + 2] : -1) << std::endl
+      << "F=" << (pos + size < size * size ? (int16_t)s[pos + size] : -1)
+      << std::endl;
+}
+
+TEST_P(ClpfSpeedTest, TestSpeed) {
+  int w = sizex;
+  int h = sizey;
+  const int size = 32;
+  ACMRandom rnd(ACMRandom::DeterministicSeed());
+  DECLARE_ALIGNED(16, uint8_t, s[size * size]);
+  DECLARE_ALIGNED(16, uint8_t, d[size * size]);
+
+  int strength;
+  int xpos, ypos;
+
+  for (int i = 0; i < size * size; i++) s[i] = rnd.Rand8();
+
+  aom_usec_timer ref_timer;
+  aom_usec_timer timer;
+
+  aom_usec_timer_start(&ref_timer);
+  for (int c = 0; c < 65536; c++) {
+    for (ypos = 0; ypos < size; ypos += h) {
+      for (xpos = 0; xpos < size; xpos += w) {
+        for (strength = 0; strength < 3; strength++) {
+          ref_clpf(s, d, size, xpos, ypos, w, h, size, size, 1 << strength);
+        }
+      }
+    }
+  }
+  aom_usec_timer_mark(&ref_timer);
+  int ref_elapsed_time = aom_usec_timer_elapsed(&ref_timer);
+
+  aom_usec_timer_start(&timer);
+  for (int c = 0; c < 65536; c++) {
+    for (ypos = 0; ypos < size; ypos += h) {
+      for (xpos = 0; xpos < size; xpos += w) {
+        for (strength = 0; strength < 3; strength++) {
+          clpf(s, d, size, xpos, ypos, w, h, size, size, 1 << strength);
+        }
+      }
+    }
+  }
+  aom_usec_timer_mark(&timer);
+  int elapsed_time = aom_usec_timer_elapsed(&timer);
+
+#if 0
+  std::cout << "[          ] C time = " << ref_elapsed_time / 1000
+            << " ms, SIMD time = " << elapsed_time / 1000 << " ms" << std::endl;
+#endif
+
+  EXPECT_GT(ref_elapsed_time, elapsed_time)
+      << "Error: ClpfSpeedTest, SIMD slower than C." << std::endl
+      << "C time: " << ref_elapsed_time << "ms" << std::endl
+      << "SIMD time: " << elapsed_time << "ms" << std::endl;
+}
+
+using std::tr1::make_tuple;
+
+// Test all supported architectures and block sizes
+#if HAVE_SSE2
+INSTANTIATE_TEST_CASE_P(
+    SSE2, ClpfBlockTest,
+    ::testing::Values(make_tuple(&aom_clpf_block_sse2, &aom_clpf_block_c, 8, 8),
+                      make_tuple(&aom_clpf_block_sse2, &aom_clpf_block_c, 8, 4),
+                      make_tuple(&aom_clpf_block_sse2, &aom_clpf_block_c, 4, 8),
+                      make_tuple(&aom_clpf_block_sse2, &aom_clpf_block_c, 4,
+                                 4)));
+#endif
+
+#if HAVE_SSSE3
+INSTANTIATE_TEST_CASE_P(
+    SSSE3, ClpfBlockTest,
+    ::testing::Values(
+        make_tuple(&aom_clpf_block_ssse3, &aom_clpf_block_c, 8, 8),
+        make_tuple(&aom_clpf_block_ssse3, &aom_clpf_block_c, 8, 4),
+        make_tuple(&aom_clpf_block_ssse3, &aom_clpf_block_c, 4, 8),
+        make_tuple(&aom_clpf_block_ssse3, &aom_clpf_block_c, 4, 4)));
+#endif
+
+#if HAVE_SSE4_1
+// Instantiation label fixed: was "SSSE4_1", which misreports the ISA
+// under test in gtest output and test filters (--gtest_filter=SSE4_1.*).
+INSTANTIATE_TEST_CASE_P(
+    SSE4_1, ClpfBlockTest,
+    ::testing::Values(
+        make_tuple(&aom_clpf_block_sse4_1, &aom_clpf_block_c, 8, 8),
+        make_tuple(&aom_clpf_block_sse4_1, &aom_clpf_block_c, 8, 4),
+        make_tuple(&aom_clpf_block_sse4_1, &aom_clpf_block_c, 4, 8),
+        make_tuple(&aom_clpf_block_sse4_1, &aom_clpf_block_c, 4, 4)));
+#endif
+
+#if HAVE_NEON
+INSTANTIATE_TEST_CASE_P(
+    NEON, ClpfBlockTest,
+    ::testing::Values(make_tuple(&aom_clpf_block_neon, &aom_clpf_block_c, 8, 8),
+                      make_tuple(&aom_clpf_block_neon, &aom_clpf_block_c, 8, 4),
+                      make_tuple(&aom_clpf_block_neon, &aom_clpf_block_c, 4, 8),
+                      make_tuple(&aom_clpf_block_neon, &aom_clpf_block_c, 4,
+                                 4)));
+#endif
+
+// Test speed for all supported architectures
+#if HAVE_SSE2
+INSTANTIATE_TEST_CASE_P(SSE2, ClpfSpeedTest,
+                        ::testing::Values(make_tuple(&aom_clpf_block_sse2,
+                                                     &aom_clpf_block_c, 8, 8)));
+#endif
+
+#if HAVE_SSSE3
+INSTANTIATE_TEST_CASE_P(SSSE3, ClpfSpeedTest,
+                        ::testing::Values(make_tuple(&aom_clpf_block_ssse3,
+                                                     &aom_clpf_block_c, 8, 8)));
+#endif
+
+#if HAVE_SSE4_1
+// Two fixes: the label was misspelled "SSSE4_1", and the speed test was
+// wired to aom_clpf_block_ssse3 under the HAVE_SSE4_1 guard — so the
+// SSE4_1 kernel was never speed-tested, and the build would fail to link
+// when SSE4_1 is enabled but SSSE3 is not.
+INSTANTIATE_TEST_CASE_P(SSE4_1, ClpfSpeedTest,
+                        ::testing::Values(make_tuple(&aom_clpf_block_sse4_1,
+                                                     &aom_clpf_block_c, 8, 8)));
+#endif
+
+#if HAVE_NEON
+INSTANTIATE_TEST_CASE_P(NEON, ClpfSpeedTest,
+                        ::testing::Values(make_tuple(&aom_clpf_block_neon,
+                                                     &aom_clpf_block_c, 8, 8)));
+#endif
+}  // namespace
diff --git a/test/test.mk b/test/test.mk
index d3cc3d0..c071cea 100644
--- a/test/test.mk
+++ b/test/test.mk
@@ -112,6 +112,7 @@
 
 #LIBAOM_TEST_SRCS-yes                   += convolve_test.cc
 LIBAOM_TEST_SRCS-yes                   += lpf_8_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_CLPF)        += clpf_test.cc
 LIBAOM_TEST_SRCS-yes                   += intrapred_test.cc
 #LIBAOM_TEST_SRCS-$(CONFIG_AV1_DECODER) += av1_thread_test.cc
 LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += dct16x16_test.cc
@@ -137,6 +138,8 @@
 LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_fht8x8_test.cc
 LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_fht16x16_test.cc
 ifeq ($(CONFIG_EXT_TX),yes)
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_fht4x8_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_fht8x4_test.cc
 LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_fht8x16_test.cc
 LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_fht16x8_test.cc
 LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_iht8x16_test.cc