Add SSSE3 warp filter + const-ify warp filters

The SSSE3 filter is very similar to the SSE2 filter, but the
horizontal pass is sped up by using the 8-bit x 8-bit -> 16-bit
multiplies added in SSSE3.
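
(Illustrative sketch, not part of this change: the 8x8->16 multiply
in question is SSSE3's PMADDUBSW / _mm_maddubs_epi16, which multiplies
unsigned 8-bit pixels by signed 8-bit coefficients and sums adjacent
products into 16-bit lanes, replacing the SSE2 unpack-to-16-bit +
_mm_madd_epi16 sequence. The values below are made up purely to show
that the two forms agree.)

  #include <stdint.h>
  #include <stdio.h>
  #include <tmmintrin.h> /* SSSE3 */

  int main(void) {
    const uint8_t px[16] = { 10, 20,  30,  40,  50,  60,  70,  80,
                             90, 100, 110, 120, 130, 140, 150, 160 };
    const int8_t co[16] = { 1, -2, 3, -4, 5, -6, 7, -8,
                            1, -2, 3, -4, 5, -6, 7, -8 };
    const __m128i src = _mm_loadu_si128((const __m128i *)px);
    const __m128i coeff = _mm_loadu_si128((const __m128i *)co);

    /* SSE2 style: widen pixels and coefficients to 16 bits, then
       multiply-add adjacent pairs with pmaddwd (4 x int32 results) */
    const __m128i zero = _mm_setzero_si128();
    const __m128i src16 = _mm_unpacklo_epi8(src, zero);
    const __m128i co16 = _mm_srai_epi16(_mm_unpacklo_epi8(zero, coeff), 8);
    const __m128i res_sse2 = _mm_madd_epi16(src16, co16);

    /* SSSE3 style: one instruction does the u8 x s8 multiplies and the
       pairwise additions, giving 8 x int16 results */
    const __m128i res_ssse3 = _mm_maddubs_epi16(src, coeff);

    int32_t a[4];
    int16_t b[8];
    _mm_storeu_si128((__m128i *)a, res_sse2);
    _mm_storeu_si128((__m128i *)b, res_ssse3);
    /* For these small inputs both halves print -30 -70 -110 -150 */
    printf("%d %d %d %d | %d %d %d %d\n", a[0], a[1], a[2], a[3], b[0],
           b[1], b[2], b[3]);
    return 0;
  }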

Also apply const-correctness to all versions of the filter.

The timings of the existing filters are unchanged, and the
lowbd SSSE3 filter is ~17% faster than the lowbd SSE2 filter.

Timings per 8x8 block:
lowbd SSE2: 320ns
lowbd SSSE3: 273ns
highbd SSSE3: 300ns

Filter output is unchanged.

Change-Id: Ifb428a33b106d900cde1b080794796c0754ae182
diff --git a/av1/av1.cmake b/av1/av1.cmake
index 00f687a..f45206f 100644
--- a/av1/av1.cmake
+++ b/av1/av1.cmake
@@ -151,8 +151,6 @@
     "${AOM_ROOT}/av1/encoder/variance_tree.h")
 
 set(AOM_AV1_COMMON_INTRIN_SSE2
-    # Requires CONFIG_GLOBAL_MOTION or CONFIG_WARPED_MOTION
-    #"${AOM_ROOT}/av1/common/x86/warp_plane_sse2.c"
     "${AOM_ROOT}/av1/common/x86/idct_intrin_sse2.c")
 
 set(AOM_AV1_COMMON_INTRIN_SSSE3
@@ -382,7 +380,7 @@
     endif ()
 endif ()
 
-if (CONFIG_WARPED_MOTION)
+if (CONFIG_WARPED_MOTION OR CONFIG_GLOBAL_MOTION)
   set(AOM_AV1_COMMON_SOURCES
       ${AOM_AV1_COMMON_SOURCES}
       "${AOM_ROOT}/av1/common/warped_motion.c"
@@ -391,6 +389,16 @@
   set(AOM_AV1_COMMON_INTRIN_SSE2
       ${AOM_AV1_COMMON_INTRIN_SSE2}
       "${AOM_ROOT}/av1/common/x86/warp_plane_sse2.c")
+
+  set(AOM_AV1_COMMON_INTRIN_SSSE3
+      ${AOM_AV1_COMMON_INTRIN_SSSE3}
+      "${AOM_ROOT}/av1/common/x86/warp_plane_ssse3.c")
+
+  if (CONFIG_HIGHBITDEPTH)
+    set(AOM_AV1_COMMON_INTRIN_SSSE3
+        ${AOM_AV1_COMMON_INTRIN_SSSE3}
+        "${AOM_ROOT}/av1/common/x86/highbd_warp_plane_ssse3.c")
+  endif ()
 endif ()
 
 # Setup AV1 common/decoder/encoder targets. The libaom target must exist before
diff --git a/av1/av1_common.mk b/av1/av1_common.mk
index 6b9a289..35cda5e 100644
--- a/av1/av1_common.mk
+++ b/av1/av1_common.mk
@@ -172,6 +172,7 @@
 
 ifneq ($(findstring yes,$(CONFIG_GLOBAL_MOTION) $(CONFIG_WARPED_MOTION)),)
 AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/warp_plane_sse2.c
+AV1_COMMON_SRCS-$(HAVE_SSSE3) += common/x86/warp_plane_ssse3.c
 ifeq ($(CONFIG_HIGHBITDEPTH),yes)
 AV1_COMMON_SRCS-$(HAVE_SSSE3) += common/x86/highbd_warp_plane_ssse3.c
 endif
diff --git a/av1/common/av1_rtcd_defs.pl b/av1/common/av1_rtcd_defs.pl
index 1dca10c..ec682cf 100755
--- a/av1/common/av1_rtcd_defs.pl
+++ b/av1/common/av1_rtcd_defs.pl
@@ -608,11 +608,11 @@
 
 if ((aom_config("CONFIG_WARPED_MOTION") eq "yes") ||
     (aom_config("CONFIG_GLOBAL_MOTION") eq "yes")) {
-  add_proto qw/void av1_warp_affine/, "int32_t *mat, uint8_t *ref, int width, int height, int stride, uint8_t *pred, int p_col, int p_row, int p_width, int p_height, int p_stride, int subsampling_x, int subsampling_y, int ref_frm, int16_t alpha, int16_t beta, int16_t gamma, int16_t delta";
-  specialize qw/av1_warp_affine sse2/;
+  add_proto qw/void av1_warp_affine/, "const int32_t *mat, const uint8_t *ref, int width, int height, int stride, uint8_t *pred, int p_col, int p_row, int p_width, int p_height, int p_stride, int subsampling_x, int subsampling_y, int ref_frm, int16_t alpha, int16_t beta, int16_t gamma, int16_t delta";
+  specialize qw/av1_warp_affine sse2 ssse3/;
 
   if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
-    add_proto qw/void av1_highbd_warp_affine/, "int32_t *mat, uint16_t *ref, int width, int height, int stride, uint16_t *pred, int p_col, int p_row, int p_width, int p_height, int p_stride, int subsampling_x, int subsampling_y, int bd, int ref_frm, int16_t alpha, int16_t beta, int16_t gamma, int16_t delta";
+    add_proto qw/void av1_highbd_warp_affine/, "const int32_t *mat, const uint16_t *ref, int width, int height, int stride, uint16_t *pred, int p_col, int p_row, int p_width, int p_height, int p_stride, int subsampling_x, int subsampling_y, int bd, int ref_frm, int16_t alpha, int16_t beta, int16_t gamma, int16_t delta";
     specialize qw/av1_highbd_warp_affine ssse3/;
   }
 }
diff --git a/av1/common/warped_motion.c b/av1/common/warped_motion.c
index ada2e4e..7e27985 100644
--- a/av1/common/warped_motion.c
+++ b/av1/common/warped_motion.c
@@ -949,9 +949,9 @@
 //
 // So, as long as HORSHEAR_REDUCE_PREC_BITS >= 5, we can safely use a 16-bit
 // intermediate array.
-void av1_highbd_warp_affine_c(int32_t *mat, uint16_t *ref, int width,
-                              int height, int stride, uint16_t *pred, int p_col,
-                              int p_row, int p_width, int p_height,
+void av1_highbd_warp_affine_c(const int32_t *mat, const uint16_t *ref,
+                              int width, int height, int stride, uint16_t *pred,
+                              int p_col, int p_row, int p_width, int p_height,
                               int p_stride, int subsampling_x,
                               int subsampling_y, int bd, int ref_frm,
                               int16_t alpha, int16_t beta, int16_t gamma,
@@ -1046,7 +1046,7 @@
       // Vertical filter
       for (k = -4; k < AOMMIN(4, p_row + p_height - i - 4); ++k) {
         int sy = sy4 + gamma * (-4) + delta * k;
-        for (l = -4; l < 4; ++l) {
+        for (l = -4; l < AOMMIN(4, p_col + p_width - j - 4); ++l) {
           uint16_t *p =
               &pred[(i - p_row + k + 4) * p_stride + (j - p_col + l + 4)];
           const int offs = ROUND_POWER_OF_TWO(sy, WARPEDDIFF_PREC_BITS) +
@@ -1199,9 +1199,9 @@
 
    TODO(david.barker): Maybe support scaled references?
 */
-void av1_warp_affine_c(int32_t *mat, uint8_t *ref, int width, int height,
-                       int stride, uint8_t *pred, int p_col, int p_row,
-                       int p_width, int p_height, int p_stride,
+void av1_warp_affine_c(const int32_t *mat, const uint8_t *ref, int width,
+                       int height, int stride, uint8_t *pred, int p_col,
+                       int p_row, int p_width, int p_height, int p_stride,
                        int subsampling_x, int subsampling_y, int ref_frm,
                        int16_t alpha, int16_t beta, int16_t gamma,
                        int16_t delta) {
diff --git a/av1/common/x86/highbd_warp_plane_ssse3.c b/av1/common/x86/highbd_warp_plane_ssse3.c
index 1d6af4f..4762340 100644
--- a/av1/common/x86/highbd_warp_plane_ssse3.c
+++ b/av1/common/x86/highbd_warp_plane_ssse3.c
@@ -14,16 +14,14 @@
 #include "./av1_rtcd.h"
 #include "av1/common/warped_motion.h"
 
-static const __m128i *const filter = (const __m128i *const)warped_filter;
-
-/* SSE2 version of the rotzoom/affine warp filter */
-void av1_highbd_warp_affine_ssse3(int32_t *mat, uint16_t *ref, int width,
-                                  int height, int stride, uint16_t *pred,
-                                  int p_col, int p_row, int p_width,
-                                  int p_height, int p_stride, int subsampling_x,
-                                  int subsampling_y, int bd, int ref_frm,
-                                  int16_t alpha, int16_t beta, int16_t gamma,
-                                  int16_t delta) {
+/* SSSE3 version of the rotzoom/affine warp filter */
+void av1_highbd_warp_affine_ssse3(const int32_t *mat, const uint16_t *ref,
+                                  int width, int height, int stride,
+                                  uint16_t *pred, int p_col, int p_row,
+                                  int p_width, int p_height, int p_stride,
+                                  int subsampling_x, int subsampling_y, int bd,
+                                  int ref_frm, int16_t alpha, int16_t beta,
+                                  int16_t gamma, int16_t delta) {
 #if HORSHEAR_REDUCE_PREC_BITS >= 5
   __m128i tmp[15];
 #else
@@ -47,8 +45,8 @@
     for (j = 0; j < p_width; j += 8) {
       // (x, y) coordinates of the center of this block in the destination
       // image
-      int32_t dst_x = p_col + j + 4;
-      int32_t dst_y = p_row + i + 4;
+      const int32_t dst_x = p_col + j + 4;
+      const int32_t dst_y = p_row + i + 4;
 
       int32_t x4, y4, ix4, sx4, iy4, sy4;
       if (subsampling_x)
@@ -92,55 +90,59 @@
               ref[iy * stride + (width - 1)] *
               (1 << (WARPEDPIXEL_FILTER_BITS - HORSHEAR_REDUCE_PREC_BITS)));
         } else {
-          int sx = sx4 + alpha * (-4) + beta * k +
-                   // Include rounding and offset here
-                   (1 << (WARPEDDIFF_PREC_BITS - 1)) +
-                   (WARPEDPIXEL_PREC_SHIFTS << WARPEDDIFF_PREC_BITS);
+          const int sx = sx4 + alpha * (-4) + beta * k +
+                         // Include rounding and offset here
+                         (1 << (WARPEDDIFF_PREC_BITS - 1)) +
+                         (WARPEDPIXEL_PREC_SHIFTS << WARPEDDIFF_PREC_BITS);
 
           // Load source pixels
-          __m128i src =
+          const __m128i src =
               _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
-          __m128i src2 =
+          const __m128i src2 =
               _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 + 1));
 
           // Filter even-index pixels
-          __m128i tmp_0 = _mm_loadu_si128(
-              (__m128i *)(filter + ((sx + 0 * alpha) >> WARPEDDIFF_PREC_BITS)));
-          __m128i tmp_2 = _mm_loadu_si128(
-              (__m128i *)(filter + ((sx + 2 * alpha) >> WARPEDDIFF_PREC_BITS)));
-          __m128i tmp_4 = _mm_loadu_si128(
-              (__m128i *)(filter + ((sx + 4 * alpha) >> WARPEDDIFF_PREC_BITS)));
-          __m128i tmp_6 = _mm_loadu_si128(
-              (__m128i *)(filter + ((sx + 6 * alpha) >> WARPEDDIFF_PREC_BITS)));
+          const __m128i tmp_0 = _mm_loadu_si128(
+              (__m128i *)(warped_filter +
+                          ((sx + 0 * alpha) >> WARPEDDIFF_PREC_BITS)));
+          const __m128i tmp_2 = _mm_loadu_si128(
+              (__m128i *)(warped_filter +
+                          ((sx + 2 * alpha) >> WARPEDDIFF_PREC_BITS)));
+          const __m128i tmp_4 = _mm_loadu_si128(
+              (__m128i *)(warped_filter +
+                          ((sx + 4 * alpha) >> WARPEDDIFF_PREC_BITS)));
+          const __m128i tmp_6 = _mm_loadu_si128(
+              (__m128i *)(warped_filter +
+                          ((sx + 6 * alpha) >> WARPEDDIFF_PREC_BITS)));
 
           // coeffs 0 1 0 1 2 3 2 3 for pixels 0, 2
-          __m128i tmp_8 = _mm_unpacklo_epi32(tmp_0, tmp_2);
+          const __m128i tmp_8 = _mm_unpacklo_epi32(tmp_0, tmp_2);
           // coeffs 0 1 0 1 2 3 2 3 for pixels 4, 6
-          __m128i tmp_10 = _mm_unpacklo_epi32(tmp_4, tmp_6);
+          const __m128i tmp_10 = _mm_unpacklo_epi32(tmp_4, tmp_6);
           // coeffs 4 5 4 5 6 7 6 7 for pixels 0, 2
-          __m128i tmp_12 = _mm_unpackhi_epi32(tmp_0, tmp_2);
+          const __m128i tmp_12 = _mm_unpackhi_epi32(tmp_0, tmp_2);
           // coeffs 4 5 4 5 6 7 6 7 for pixels 4, 6
-          __m128i tmp_14 = _mm_unpackhi_epi32(tmp_4, tmp_6);
+          const __m128i tmp_14 = _mm_unpackhi_epi32(tmp_4, tmp_6);
 
           // coeffs 0 1 0 1 0 1 0 1 for pixels 0, 2, 4, 6
-          __m128i coeff_0 = _mm_unpacklo_epi64(tmp_8, tmp_10);
+          const __m128i coeff_0 = _mm_unpacklo_epi64(tmp_8, tmp_10);
           // coeffs 2 3 2 3 2 3 2 3 for pixels 0, 2, 4, 6
-          __m128i coeff_2 = _mm_unpackhi_epi64(tmp_8, tmp_10);
+          const __m128i coeff_2 = _mm_unpackhi_epi64(tmp_8, tmp_10);
           // coeffs 4 5 4 5 4 5 4 5 for pixels 0, 2, 4, 6
-          __m128i coeff_4 = _mm_unpacklo_epi64(tmp_12, tmp_14);
+          const __m128i coeff_4 = _mm_unpacklo_epi64(tmp_12, tmp_14);
           // coeffs 6 7 6 7 6 7 6 7 for pixels 0, 2, 4, 6
-          __m128i coeff_6 = _mm_unpackhi_epi64(tmp_12, tmp_14);
+          const __m128i coeff_6 = _mm_unpackhi_epi64(tmp_12, tmp_14);
 
-          __m128i round_const =
+          const __m128i round_const =
               _mm_set1_epi32((1 << HORSHEAR_REDUCE_PREC_BITS) >> 1);
 
           // Calculate filtered results
-          __m128i res_0 = _mm_madd_epi16(src, coeff_0);
-          __m128i res_2 =
+          const __m128i res_0 = _mm_madd_epi16(src, coeff_0);
+          const __m128i res_2 =
               _mm_madd_epi16(_mm_alignr_epi8(src2, src, 4), coeff_2);
-          __m128i res_4 =
+          const __m128i res_4 =
               _mm_madd_epi16(_mm_alignr_epi8(src2, src, 8), coeff_4);
-          __m128i res_6 =
+          const __m128i res_6 =
               _mm_madd_epi16(_mm_alignr_epi8(src2, src, 12), coeff_6);
 
           __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_4),
@@ -149,32 +151,36 @@
                                     HORSHEAR_REDUCE_PREC_BITS);
 
           // Filter odd-index pixels
-          __m128i tmp_1 = _mm_loadu_si128(
-              (__m128i *)(filter + ((sx + 1 * alpha) >> WARPEDDIFF_PREC_BITS)));
-          __m128i tmp_3 = _mm_loadu_si128(
-              (__m128i *)(filter + ((sx + 3 * alpha) >> WARPEDDIFF_PREC_BITS)));
-          __m128i tmp_5 = _mm_loadu_si128(
-              (__m128i *)(filter + ((sx + 5 * alpha) >> WARPEDDIFF_PREC_BITS)));
-          __m128i tmp_7 = _mm_loadu_si128(
-              (__m128i *)(filter + ((sx + 7 * alpha) >> WARPEDDIFF_PREC_BITS)));
+          const __m128i tmp_1 = _mm_loadu_si128(
+              (__m128i *)(warped_filter +
+                          ((sx + 1 * alpha) >> WARPEDDIFF_PREC_BITS)));
+          const __m128i tmp_3 = _mm_loadu_si128(
+              (__m128i *)(warped_filter +
+                          ((sx + 3 * alpha) >> WARPEDDIFF_PREC_BITS)));
+          const __m128i tmp_5 = _mm_loadu_si128(
+              (__m128i *)(warped_filter +
+                          ((sx + 5 * alpha) >> WARPEDDIFF_PREC_BITS)));
+          const __m128i tmp_7 = _mm_loadu_si128(
+              (__m128i *)(warped_filter +
+                          ((sx + 7 * alpha) >> WARPEDDIFF_PREC_BITS)));
 
-          __m128i tmp_9 = _mm_unpacklo_epi32(tmp_1, tmp_3);
-          __m128i tmp_11 = _mm_unpacklo_epi32(tmp_5, tmp_7);
-          __m128i tmp_13 = _mm_unpackhi_epi32(tmp_1, tmp_3);
-          __m128i tmp_15 = _mm_unpackhi_epi32(tmp_5, tmp_7);
+          const __m128i tmp_9 = _mm_unpacklo_epi32(tmp_1, tmp_3);
+          const __m128i tmp_11 = _mm_unpacklo_epi32(tmp_5, tmp_7);
+          const __m128i tmp_13 = _mm_unpackhi_epi32(tmp_1, tmp_3);
+          const __m128i tmp_15 = _mm_unpackhi_epi32(tmp_5, tmp_7);
 
-          __m128i coeff_1 = _mm_unpacklo_epi64(tmp_9, tmp_11);
-          __m128i coeff_3 = _mm_unpackhi_epi64(tmp_9, tmp_11);
-          __m128i coeff_5 = _mm_unpacklo_epi64(tmp_13, tmp_15);
-          __m128i coeff_7 = _mm_unpackhi_epi64(tmp_13, tmp_15);
+          const __m128i coeff_1 = _mm_unpacklo_epi64(tmp_9, tmp_11);
+          const __m128i coeff_3 = _mm_unpackhi_epi64(tmp_9, tmp_11);
+          const __m128i coeff_5 = _mm_unpacklo_epi64(tmp_13, tmp_15);
+          const __m128i coeff_7 = _mm_unpackhi_epi64(tmp_13, tmp_15);
 
-          __m128i res_1 =
+          const __m128i res_1 =
               _mm_madd_epi16(_mm_alignr_epi8(src2, src, 2), coeff_1);
-          __m128i res_3 =
+          const __m128i res_3 =
               _mm_madd_epi16(_mm_alignr_epi8(src2, src, 6), coeff_3);
-          __m128i res_5 =
+          const __m128i res_5 =
               _mm_madd_epi16(_mm_alignr_epi8(src2, src, 10), coeff_5);
-          __m128i res_7 =
+          const __m128i res_7 =
               _mm_madd_epi16(_mm_alignr_epi8(src2, src, 14), coeff_7);
 
           __m128i res_odd = _mm_add_epi32(_mm_add_epi32(res_1, res_5),
@@ -191,100 +197,108 @@
 
       // Vertical filter
       for (k = -4; k < AOMMIN(4, p_height - i - 4); ++k) {
-        int sy = sy4 + gamma * (-4) + delta * k +
-                 (1 << (WARPEDDIFF_PREC_BITS - 1)) +
-                 (WARPEDPIXEL_PREC_SHIFTS << WARPEDDIFF_PREC_BITS);
+        const int sy = sy4 + gamma * (-4) + delta * k +
+                       (1 << (WARPEDDIFF_PREC_BITS - 1)) +
+                       (WARPEDPIXEL_PREC_SHIFTS << WARPEDDIFF_PREC_BITS);
 
         // Load from tmp and rearrange pairs of consecutive rows into the
         // column order 0 0 2 2 4 4 6 6; 1 1 3 3 5 5 7 7
-        __m128i *src = tmp + (k + 4);
-        __m128i src_0 = _mm_unpacklo_epi16(src[0], src[1]);
-        __m128i src_2 = _mm_unpacklo_epi16(src[2], src[3]);
-        __m128i src_4 = _mm_unpacklo_epi16(src[4], src[5]);
-        __m128i src_6 = _mm_unpacklo_epi16(src[6], src[7]);
+        const __m128i *src = tmp + (k + 4);
+        const __m128i src_0 = _mm_unpacklo_epi16(src[0], src[1]);
+        const __m128i src_2 = _mm_unpacklo_epi16(src[2], src[3]);
+        const __m128i src_4 = _mm_unpacklo_epi16(src[4], src[5]);
+        const __m128i src_6 = _mm_unpacklo_epi16(src[6], src[7]);
 
         // Filter even-index pixels
-        __m128i tmp_0 = _mm_loadu_si128(
-            (__m128i *)(filter + ((sy + 0 * gamma) >> WARPEDDIFF_PREC_BITS)));
-        __m128i tmp_2 = _mm_loadu_si128(
-            (__m128i *)(filter + ((sy + 2 * gamma) >> WARPEDDIFF_PREC_BITS)));
-        __m128i tmp_4 = _mm_loadu_si128(
-            (__m128i *)(filter + ((sy + 4 * gamma) >> WARPEDDIFF_PREC_BITS)));
-        __m128i tmp_6 = _mm_loadu_si128(
-            (__m128i *)(filter + ((sy + 6 * gamma) >> WARPEDDIFF_PREC_BITS)));
+        const __m128i tmp_0 = _mm_loadu_si128(
+            (__m128i *)(warped_filter +
+                        ((sy + 0 * gamma) >> WARPEDDIFF_PREC_BITS)));
+        const __m128i tmp_2 = _mm_loadu_si128(
+            (__m128i *)(warped_filter +
+                        ((sy + 2 * gamma) >> WARPEDDIFF_PREC_BITS)));
+        const __m128i tmp_4 = _mm_loadu_si128(
+            (__m128i *)(warped_filter +
+                        ((sy + 4 * gamma) >> WARPEDDIFF_PREC_BITS)));
+        const __m128i tmp_6 = _mm_loadu_si128(
+            (__m128i *)(warped_filter +
+                        ((sy + 6 * gamma) >> WARPEDDIFF_PREC_BITS)));
 
-        __m128i tmp_8 = _mm_unpacklo_epi32(tmp_0, tmp_2);
-        __m128i tmp_10 = _mm_unpacklo_epi32(tmp_4, tmp_6);
-        __m128i tmp_12 = _mm_unpackhi_epi32(tmp_0, tmp_2);
-        __m128i tmp_14 = _mm_unpackhi_epi32(tmp_4, tmp_6);
+        const __m128i tmp_8 = _mm_unpacklo_epi32(tmp_0, tmp_2);
+        const __m128i tmp_10 = _mm_unpacklo_epi32(tmp_4, tmp_6);
+        const __m128i tmp_12 = _mm_unpackhi_epi32(tmp_0, tmp_2);
+        const __m128i tmp_14 = _mm_unpackhi_epi32(tmp_4, tmp_6);
 
-        __m128i coeff_0 = _mm_unpacklo_epi64(tmp_8, tmp_10);
-        __m128i coeff_2 = _mm_unpackhi_epi64(tmp_8, tmp_10);
-        __m128i coeff_4 = _mm_unpacklo_epi64(tmp_12, tmp_14);
-        __m128i coeff_6 = _mm_unpackhi_epi64(tmp_12, tmp_14);
+        const __m128i coeff_0 = _mm_unpacklo_epi64(tmp_8, tmp_10);
+        const __m128i coeff_2 = _mm_unpackhi_epi64(tmp_8, tmp_10);
+        const __m128i coeff_4 = _mm_unpacklo_epi64(tmp_12, tmp_14);
+        const __m128i coeff_6 = _mm_unpackhi_epi64(tmp_12, tmp_14);
 
-        __m128i res_0 = _mm_madd_epi16(src_0, coeff_0);
-        __m128i res_2 = _mm_madd_epi16(src_2, coeff_2);
-        __m128i res_4 = _mm_madd_epi16(src_4, coeff_4);
-        __m128i res_6 = _mm_madd_epi16(src_6, coeff_6);
+        const __m128i res_0 = _mm_madd_epi16(src_0, coeff_0);
+        const __m128i res_2 = _mm_madd_epi16(src_2, coeff_2);
+        const __m128i res_4 = _mm_madd_epi16(src_4, coeff_4);
+        const __m128i res_6 = _mm_madd_epi16(src_6, coeff_6);
 
-        __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_2),
-                                         _mm_add_epi32(res_4, res_6));
+        const __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_2),
+                                               _mm_add_epi32(res_4, res_6));
 
         // Filter odd-index pixels
-        __m128i src_1 = _mm_unpackhi_epi16(src[0], src[1]);
-        __m128i src_3 = _mm_unpackhi_epi16(src[2], src[3]);
-        __m128i src_5 = _mm_unpackhi_epi16(src[4], src[5]);
-        __m128i src_7 = _mm_unpackhi_epi16(src[6], src[7]);
+        const __m128i src_1 = _mm_unpackhi_epi16(src[0], src[1]);
+        const __m128i src_3 = _mm_unpackhi_epi16(src[2], src[3]);
+        const __m128i src_5 = _mm_unpackhi_epi16(src[4], src[5]);
+        const __m128i src_7 = _mm_unpackhi_epi16(src[6], src[7]);
 
-        __m128i tmp_1 = _mm_loadu_si128(
-            (__m128i *)(filter + ((sy + 1 * gamma) >> WARPEDDIFF_PREC_BITS)));
-        __m128i tmp_3 = _mm_loadu_si128(
-            (__m128i *)(filter + ((sy + 3 * gamma) >> WARPEDDIFF_PREC_BITS)));
-        __m128i tmp_5 = _mm_loadu_si128(
-            (__m128i *)(filter + ((sy + 5 * gamma) >> WARPEDDIFF_PREC_BITS)));
-        __m128i tmp_7 = _mm_loadu_si128(
-            (__m128i *)(filter + ((sy + 7 * gamma) >> WARPEDDIFF_PREC_BITS)));
+        const __m128i tmp_1 = _mm_loadu_si128(
+            (__m128i *)(warped_filter +
+                        ((sy + 1 * gamma) >> WARPEDDIFF_PREC_BITS)));
+        const __m128i tmp_3 = _mm_loadu_si128(
+            (__m128i *)(warped_filter +
+                        ((sy + 3 * gamma) >> WARPEDDIFF_PREC_BITS)));
+        const __m128i tmp_5 = _mm_loadu_si128(
+            (__m128i *)(warped_filter +
+                        ((sy + 5 * gamma) >> WARPEDDIFF_PREC_BITS)));
+        const __m128i tmp_7 = _mm_loadu_si128(
+            (__m128i *)(warped_filter +
+                        ((sy + 7 * gamma) >> WARPEDDIFF_PREC_BITS)));
 
-        __m128i tmp_9 = _mm_unpacklo_epi32(tmp_1, tmp_3);
-        __m128i tmp_11 = _mm_unpacklo_epi32(tmp_5, tmp_7);
-        __m128i tmp_13 = _mm_unpackhi_epi32(tmp_1, tmp_3);
-        __m128i tmp_15 = _mm_unpackhi_epi32(tmp_5, tmp_7);
+        const __m128i tmp_9 = _mm_unpacklo_epi32(tmp_1, tmp_3);
+        const __m128i tmp_11 = _mm_unpacklo_epi32(tmp_5, tmp_7);
+        const __m128i tmp_13 = _mm_unpackhi_epi32(tmp_1, tmp_3);
+        const __m128i tmp_15 = _mm_unpackhi_epi32(tmp_5, tmp_7);
 
-        __m128i coeff_1 = _mm_unpacklo_epi64(tmp_9, tmp_11);
-        __m128i coeff_3 = _mm_unpackhi_epi64(tmp_9, tmp_11);
-        __m128i coeff_5 = _mm_unpacklo_epi64(tmp_13, tmp_15);
-        __m128i coeff_7 = _mm_unpackhi_epi64(tmp_13, tmp_15);
+        const __m128i coeff_1 = _mm_unpacklo_epi64(tmp_9, tmp_11);
+        const __m128i coeff_3 = _mm_unpackhi_epi64(tmp_9, tmp_11);
+        const __m128i coeff_5 = _mm_unpacklo_epi64(tmp_13, tmp_15);
+        const __m128i coeff_7 = _mm_unpackhi_epi64(tmp_13, tmp_15);
 
-        __m128i res_1 = _mm_madd_epi16(src_1, coeff_1);
-        __m128i res_3 = _mm_madd_epi16(src_3, coeff_3);
-        __m128i res_5 = _mm_madd_epi16(src_5, coeff_5);
-        __m128i res_7 = _mm_madd_epi16(src_7, coeff_7);
+        const __m128i res_1 = _mm_madd_epi16(src_1, coeff_1);
+        const __m128i res_3 = _mm_madd_epi16(src_3, coeff_3);
+        const __m128i res_5 = _mm_madd_epi16(src_5, coeff_5);
+        const __m128i res_7 = _mm_madd_epi16(src_7, coeff_7);
 
-        __m128i res_odd = _mm_add_epi32(_mm_add_epi32(res_1, res_3),
-                                        _mm_add_epi32(res_5, res_7));
+        const __m128i res_odd = _mm_add_epi32(_mm_add_epi32(res_1, res_3),
+                                              _mm_add_epi32(res_5, res_7));
 
         // Rearrange pixels back into the order 0 ... 7
-        __m128i res_lo = _mm_unpacklo_epi32(res_even, res_odd);
-        __m128i res_hi = _mm_unpackhi_epi32(res_even, res_odd);
+        const __m128i res_lo = _mm_unpacklo_epi32(res_even, res_odd);
+        const __m128i res_hi = _mm_unpackhi_epi32(res_even, res_odd);
 
         // Round and pack into 8 bits
-        __m128i round_const =
+        const __m128i round_const =
             _mm_set1_epi32((1 << VERSHEAR_REDUCE_PREC_BITS) >> 1);
 
-        __m128i res_lo_round = _mm_srai_epi32(
+        const __m128i res_lo_round = _mm_srai_epi32(
             _mm_add_epi32(res_lo, round_const), VERSHEAR_REDUCE_PREC_BITS);
-        __m128i res_hi_round = _mm_srai_epi32(
+        const __m128i res_hi_round = _mm_srai_epi32(
             _mm_add_epi32(res_hi, round_const), VERSHEAR_REDUCE_PREC_BITS);
 
         __m128i res_16bit = _mm_packs_epi32(res_lo_round, res_hi_round);
         // Clamp res_16bit to the range [0, 2^bd - 1]
-        __m128i max_val = _mm_set1_epi16((1 << bd) - 1);
-        __m128i zero = _mm_setzero_si128();
+        const __m128i max_val = _mm_set1_epi16((1 << bd) - 1);
+        const __m128i zero = _mm_setzero_si128();
         res_16bit = _mm_max_epi16(_mm_min_epi16(res_16bit, max_val), zero);
 
         // Store, blending with 'pred' if needed
-        __m128i *p = (__m128i *)&pred[(i + k + 4) * p_stride + j];
+        __m128i *const p = (__m128i *)&pred[(i + k + 4) * p_stride + j];
 
         // Note: If we're outputting a 4x4 block, we need to be very careful
         // to only output 4 pixels at this point, to avoid encode/decode
diff --git a/av1/common/x86/warp_plane_sse2.c b/av1/common/x86/warp_plane_sse2.c
index 925e465..81145b6 100644
--- a/av1/common/x86/warp_plane_sse2.c
+++ b/av1/common/x86/warp_plane_sse2.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ * Copyright (c) 2017, Alliance for Open Media. All rights reserved
  *
  * This source code is subject to the terms of the BSD 2 Clause License and
  * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
@@ -14,12 +14,10 @@
 #include "./av1_rtcd.h"
 #include "av1/common/warped_motion.h"
 
-static const __m128i *const filter = (const __m128i *const)warped_filter;
-
 /* SSE2 version of the rotzoom/affine warp filter */
-void av1_warp_affine_sse2(int32_t *mat, uint8_t *ref, int width, int height,
-                          int stride, uint8_t *pred, int p_col, int p_row,
-                          int p_width, int p_height, int p_stride,
+void av1_warp_affine_sse2(const int32_t *mat, const uint8_t *ref, int width,
+                          int height, int stride, uint8_t *pred, int p_col,
+                          int p_row, int p_width, int p_height, int p_stride,
                           int subsampling_x, int subsampling_y, int ref_frm,
                           int16_t alpha, int16_t beta, int16_t gamma,
                           int16_t delta) {
@@ -42,8 +40,8 @@
     for (j = 0; j < p_width; j += 8) {
       // (x, y) coordinates of the center of this block in the destination
       // image
-      int32_t dst_x = p_col + j + 4;
-      int32_t dst_y = p_row + i + 4;
+      const int32_t dst_x = p_col + j + 4;
+      const int32_t dst_y = p_row + i + 4;
 
       int32_t x4, y4, ix4, sx4, iy4, sy4;
       if (subsampling_x)
@@ -87,56 +85,60 @@
               ref[iy * stride + (width - 1)] *
               (1 << (WARPEDPIXEL_FILTER_BITS - HORSHEAR_REDUCE_PREC_BITS)));
         } else {
-          int sx = sx4 + alpha * (-4) + beta * k +
-                   // Include rounding and offset here
-                   (1 << (WARPEDDIFF_PREC_BITS - 1)) +
-                   (WARPEDPIXEL_PREC_SHIFTS << WARPEDDIFF_PREC_BITS);
+          const int sx = sx4 + alpha * (-4) + beta * k +
+                         // Include rounding and offset here
+                         (1 << (WARPEDDIFF_PREC_BITS - 1)) +
+                         (WARPEDPIXEL_PREC_SHIFTS << WARPEDDIFF_PREC_BITS);
 
           // Load source pixels
-          __m128i zero = _mm_setzero_si128();
-          __m128i src =
+          const __m128i zero = _mm_setzero_si128();
+          const __m128i src =
               _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
 
           // Filter even-index pixels
-          __m128i tmp_0 = _mm_loadu_si128(
-              (__m128i *)(filter + ((sx + 0 * alpha) >> WARPEDDIFF_PREC_BITS)));
-          __m128i tmp_2 = _mm_loadu_si128(
-              (__m128i *)(filter + ((sx + 2 * alpha) >> WARPEDDIFF_PREC_BITS)));
-          __m128i tmp_4 = _mm_loadu_si128(
-              (__m128i *)(filter + ((sx + 4 * alpha) >> WARPEDDIFF_PREC_BITS)));
-          __m128i tmp_6 = _mm_loadu_si128(
-              (__m128i *)(filter + ((sx + 6 * alpha) >> WARPEDDIFF_PREC_BITS)));
+          const __m128i tmp_0 = _mm_loadu_si128(
+              (__m128i *)(warped_filter +
+                          ((sx + 0 * alpha) >> WARPEDDIFF_PREC_BITS)));
+          const __m128i tmp_2 = _mm_loadu_si128(
+              (__m128i *)(warped_filter +
+                          ((sx + 2 * alpha) >> WARPEDDIFF_PREC_BITS)));
+          const __m128i tmp_4 = _mm_loadu_si128(
+              (__m128i *)(warped_filter +
+                          ((sx + 4 * alpha) >> WARPEDDIFF_PREC_BITS)));
+          const __m128i tmp_6 = _mm_loadu_si128(
+              (__m128i *)(warped_filter +
+                          ((sx + 6 * alpha) >> WARPEDDIFF_PREC_BITS)));
 
           // coeffs 0 1 0 1 2 3 2 3 for pixels 0, 2
-          __m128i tmp_8 = _mm_unpacklo_epi32(tmp_0, tmp_2);
+          const __m128i tmp_8 = _mm_unpacklo_epi32(tmp_0, tmp_2);
           // coeffs 0 1 0 1 2 3 2 3 for pixels 4, 6
-          __m128i tmp_10 = _mm_unpacklo_epi32(tmp_4, tmp_6);
+          const __m128i tmp_10 = _mm_unpacklo_epi32(tmp_4, tmp_6);
           // coeffs 4 5 4 5 6 7 6 7 for pixels 0, 2
-          __m128i tmp_12 = _mm_unpackhi_epi32(tmp_0, tmp_2);
+          const __m128i tmp_12 = _mm_unpackhi_epi32(tmp_0, tmp_2);
           // coeffs 4 5 4 5 6 7 6 7 for pixels 4, 6
-          __m128i tmp_14 = _mm_unpackhi_epi32(tmp_4, tmp_6);
+          const __m128i tmp_14 = _mm_unpackhi_epi32(tmp_4, tmp_6);
 
           // coeffs 0 1 0 1 0 1 0 1 for pixels 0, 2, 4, 6
-          __m128i coeff_0 = _mm_unpacklo_epi64(tmp_8, tmp_10);
+          const __m128i coeff_0 = _mm_unpacklo_epi64(tmp_8, tmp_10);
           // coeffs 2 3 2 3 2 3 2 3 for pixels 0, 2, 4, 6
-          __m128i coeff_2 = _mm_unpackhi_epi64(tmp_8, tmp_10);
+          const __m128i coeff_2 = _mm_unpackhi_epi64(tmp_8, tmp_10);
           // coeffs 4 5 4 5 4 5 4 5 for pixels 0, 2, 4, 6
-          __m128i coeff_4 = _mm_unpacklo_epi64(tmp_12, tmp_14);
+          const __m128i coeff_4 = _mm_unpacklo_epi64(tmp_12, tmp_14);
           // coeffs 6 7 6 7 6 7 6 7 for pixels 0, 2, 4, 6
-          __m128i coeff_6 = _mm_unpackhi_epi64(tmp_12, tmp_14);
+          const __m128i coeff_6 = _mm_unpackhi_epi64(tmp_12, tmp_14);
 
-          __m128i round_const =
+          const __m128i round_const =
               _mm_set1_epi32((1 << HORSHEAR_REDUCE_PREC_BITS) >> 1);
 
           // Calculate filtered results
-          __m128i src_0 = _mm_unpacklo_epi8(src, zero);
-          __m128i res_0 = _mm_madd_epi16(src_0, coeff_0);
-          __m128i src_2 = _mm_unpacklo_epi8(_mm_srli_si128(src, 2), zero);
-          __m128i res_2 = _mm_madd_epi16(src_2, coeff_2);
-          __m128i src_4 = _mm_unpacklo_epi8(_mm_srli_si128(src, 4), zero);
-          __m128i res_4 = _mm_madd_epi16(src_4, coeff_4);
-          __m128i src_6 = _mm_unpacklo_epi8(_mm_srli_si128(src, 6), zero);
-          __m128i res_6 = _mm_madd_epi16(src_6, coeff_6);
+          const __m128i src_0 = _mm_unpacklo_epi8(src, zero);
+          const __m128i res_0 = _mm_madd_epi16(src_0, coeff_0);
+          const __m128i src_2 = _mm_unpacklo_epi8(_mm_srli_si128(src, 2), zero);
+          const __m128i res_2 = _mm_madd_epi16(src_2, coeff_2);
+          const __m128i src_4 = _mm_unpacklo_epi8(_mm_srli_si128(src, 4), zero);
+          const __m128i res_4 = _mm_madd_epi16(src_4, coeff_4);
+          const __m128i src_6 = _mm_unpacklo_epi8(_mm_srli_si128(src, 6), zero);
+          const __m128i res_6 = _mm_madd_epi16(src_6, coeff_6);
 
           __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_4),
                                            _mm_add_epi32(res_2, res_6));
@@ -144,33 +146,37 @@
                                     HORSHEAR_REDUCE_PREC_BITS);
 
           // Filter odd-index pixels
-          __m128i tmp_1 = _mm_loadu_si128(
-              (__m128i *)(filter + ((sx + 1 * alpha) >> WARPEDDIFF_PREC_BITS)));
-          __m128i tmp_3 = _mm_loadu_si128(
-              (__m128i *)(filter + ((sx + 3 * alpha) >> WARPEDDIFF_PREC_BITS)));
-          __m128i tmp_5 = _mm_loadu_si128(
-              (__m128i *)(filter + ((sx + 5 * alpha) >> WARPEDDIFF_PREC_BITS)));
-          __m128i tmp_7 = _mm_loadu_si128(
-              (__m128i *)(filter + ((sx + 7 * alpha) >> WARPEDDIFF_PREC_BITS)));
+          const __m128i tmp_1 = _mm_loadu_si128(
+              (__m128i *)(warped_filter +
+                          ((sx + 1 * alpha) >> WARPEDDIFF_PREC_BITS)));
+          const __m128i tmp_3 = _mm_loadu_si128(
+              (__m128i *)(warped_filter +
+                          ((sx + 3 * alpha) >> WARPEDDIFF_PREC_BITS)));
+          const __m128i tmp_5 = _mm_loadu_si128(
+              (__m128i *)(warped_filter +
+                          ((sx + 5 * alpha) >> WARPEDDIFF_PREC_BITS)));
+          const __m128i tmp_7 = _mm_loadu_si128(
+              (__m128i *)(warped_filter +
+                          ((sx + 7 * alpha) >> WARPEDDIFF_PREC_BITS)));
 
-          __m128i tmp_9 = _mm_unpacklo_epi32(tmp_1, tmp_3);
-          __m128i tmp_11 = _mm_unpacklo_epi32(tmp_5, tmp_7);
-          __m128i tmp_13 = _mm_unpackhi_epi32(tmp_1, tmp_3);
-          __m128i tmp_15 = _mm_unpackhi_epi32(tmp_5, tmp_7);
+          const __m128i tmp_9 = _mm_unpacklo_epi32(tmp_1, tmp_3);
+          const __m128i tmp_11 = _mm_unpacklo_epi32(tmp_5, tmp_7);
+          const __m128i tmp_13 = _mm_unpackhi_epi32(tmp_1, tmp_3);
+          const __m128i tmp_15 = _mm_unpackhi_epi32(tmp_5, tmp_7);
 
-          __m128i coeff_1 = _mm_unpacklo_epi64(tmp_9, tmp_11);
-          __m128i coeff_3 = _mm_unpackhi_epi64(tmp_9, tmp_11);
-          __m128i coeff_5 = _mm_unpacklo_epi64(tmp_13, tmp_15);
-          __m128i coeff_7 = _mm_unpackhi_epi64(tmp_13, tmp_15);
+          const __m128i coeff_1 = _mm_unpacklo_epi64(tmp_9, tmp_11);
+          const __m128i coeff_3 = _mm_unpackhi_epi64(tmp_9, tmp_11);
+          const __m128i coeff_5 = _mm_unpacklo_epi64(tmp_13, tmp_15);
+          const __m128i coeff_7 = _mm_unpackhi_epi64(tmp_13, tmp_15);
 
-          __m128i src_1 = _mm_unpacklo_epi8(_mm_srli_si128(src, 1), zero);
-          __m128i res_1 = _mm_madd_epi16(src_1, coeff_1);
-          __m128i src_3 = _mm_unpacklo_epi8(_mm_srli_si128(src, 3), zero);
-          __m128i res_3 = _mm_madd_epi16(src_3, coeff_3);
-          __m128i src_5 = _mm_unpacklo_epi8(_mm_srli_si128(src, 5), zero);
-          __m128i res_5 = _mm_madd_epi16(src_5, coeff_5);
-          __m128i src_7 = _mm_unpacklo_epi8(_mm_srli_si128(src, 7), zero);
-          __m128i res_7 = _mm_madd_epi16(src_7, coeff_7);
+          const __m128i src_1 = _mm_unpacklo_epi8(_mm_srli_si128(src, 1), zero);
+          const __m128i res_1 = _mm_madd_epi16(src_1, coeff_1);
+          const __m128i src_3 = _mm_unpacklo_epi8(_mm_srli_si128(src, 3), zero);
+          const __m128i res_3 = _mm_madd_epi16(src_3, coeff_3);
+          const __m128i src_5 = _mm_unpacklo_epi8(_mm_srli_si128(src, 5), zero);
+          const __m128i res_5 = _mm_madd_epi16(src_5, coeff_5);
+          const __m128i src_7 = _mm_unpacklo_epi8(_mm_srli_si128(src, 7), zero);
+          const __m128i res_7 = _mm_madd_epi16(src_7, coeff_7);
 
           __m128i res_odd = _mm_add_epi32(_mm_add_epi32(res_1, res_5),
                                           _mm_add_epi32(res_3, res_7));
@@ -186,97 +192,105 @@
 
       // Vertical filter
       for (k = -4; k < AOMMIN(4, p_height - i - 4); ++k) {
-        int sy = sy4 + gamma * (-4) + delta * k +
-                 (1 << (WARPEDDIFF_PREC_BITS - 1)) +
-                 (WARPEDPIXEL_PREC_SHIFTS << WARPEDDIFF_PREC_BITS);
+        const int sy = sy4 + gamma * (-4) + delta * k +
+                       (1 << (WARPEDDIFF_PREC_BITS - 1)) +
+                       (WARPEDPIXEL_PREC_SHIFTS << WARPEDDIFF_PREC_BITS);
 
         // Load from tmp and rearrange pairs of consecutive rows into the
         // column order 0 0 2 2 4 4 6 6; 1 1 3 3 5 5 7 7
-        __m128i *src = tmp + (k + 4);
-        __m128i src_0 = _mm_unpacklo_epi16(src[0], src[1]);
-        __m128i src_2 = _mm_unpacklo_epi16(src[2], src[3]);
-        __m128i src_4 = _mm_unpacklo_epi16(src[4], src[5]);
-        __m128i src_6 = _mm_unpacklo_epi16(src[6], src[7]);
+        const __m128i *src = tmp + (k + 4);
+        const __m128i src_0 = _mm_unpacklo_epi16(src[0], src[1]);
+        const __m128i src_2 = _mm_unpacklo_epi16(src[2], src[3]);
+        const __m128i src_4 = _mm_unpacklo_epi16(src[4], src[5]);
+        const __m128i src_6 = _mm_unpacklo_epi16(src[6], src[7]);
 
         // Filter even-index pixels
-        __m128i tmp_0 = _mm_loadu_si128(
-            (__m128i *)(filter + ((sy + 0 * gamma) >> WARPEDDIFF_PREC_BITS)));
-        __m128i tmp_2 = _mm_loadu_si128(
-            (__m128i *)(filter + ((sy + 2 * gamma) >> WARPEDDIFF_PREC_BITS)));
-        __m128i tmp_4 = _mm_loadu_si128(
-            (__m128i *)(filter + ((sy + 4 * gamma) >> WARPEDDIFF_PREC_BITS)));
-        __m128i tmp_6 = _mm_loadu_si128(
-            (__m128i *)(filter + ((sy + 6 * gamma) >> WARPEDDIFF_PREC_BITS)));
+        const __m128i tmp_0 = _mm_loadu_si128(
+            (__m128i *)(warped_filter +
+                        ((sy + 0 * gamma) >> WARPEDDIFF_PREC_BITS)));
+        const __m128i tmp_2 = _mm_loadu_si128(
+            (__m128i *)(warped_filter +
+                        ((sy + 2 * gamma) >> WARPEDDIFF_PREC_BITS)));
+        const __m128i tmp_4 = _mm_loadu_si128(
+            (__m128i *)(warped_filter +
+                        ((sy + 4 * gamma) >> WARPEDDIFF_PREC_BITS)));
+        const __m128i tmp_6 = _mm_loadu_si128(
+            (__m128i *)(warped_filter +
+                        ((sy + 6 * gamma) >> WARPEDDIFF_PREC_BITS)));
 
-        __m128i tmp_8 = _mm_unpacklo_epi32(tmp_0, tmp_2);
-        __m128i tmp_10 = _mm_unpacklo_epi32(tmp_4, tmp_6);
-        __m128i tmp_12 = _mm_unpackhi_epi32(tmp_0, tmp_2);
-        __m128i tmp_14 = _mm_unpackhi_epi32(tmp_4, tmp_6);
+        const __m128i tmp_8 = _mm_unpacklo_epi32(tmp_0, tmp_2);
+        const __m128i tmp_10 = _mm_unpacklo_epi32(tmp_4, tmp_6);
+        const __m128i tmp_12 = _mm_unpackhi_epi32(tmp_0, tmp_2);
+        const __m128i tmp_14 = _mm_unpackhi_epi32(tmp_4, tmp_6);
 
-        __m128i coeff_0 = _mm_unpacklo_epi64(tmp_8, tmp_10);
-        __m128i coeff_2 = _mm_unpackhi_epi64(tmp_8, tmp_10);
-        __m128i coeff_4 = _mm_unpacklo_epi64(tmp_12, tmp_14);
-        __m128i coeff_6 = _mm_unpackhi_epi64(tmp_12, tmp_14);
+        const __m128i coeff_0 = _mm_unpacklo_epi64(tmp_8, tmp_10);
+        const __m128i coeff_2 = _mm_unpackhi_epi64(tmp_8, tmp_10);
+        const __m128i coeff_4 = _mm_unpacklo_epi64(tmp_12, tmp_14);
+        const __m128i coeff_6 = _mm_unpackhi_epi64(tmp_12, tmp_14);
 
-        __m128i res_0 = _mm_madd_epi16(src_0, coeff_0);
-        __m128i res_2 = _mm_madd_epi16(src_2, coeff_2);
-        __m128i res_4 = _mm_madd_epi16(src_4, coeff_4);
-        __m128i res_6 = _mm_madd_epi16(src_6, coeff_6);
+        const __m128i res_0 = _mm_madd_epi16(src_0, coeff_0);
+        const __m128i res_2 = _mm_madd_epi16(src_2, coeff_2);
+        const __m128i res_4 = _mm_madd_epi16(src_4, coeff_4);
+        const __m128i res_6 = _mm_madd_epi16(src_6, coeff_6);
 
-        __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_2),
-                                         _mm_add_epi32(res_4, res_6));
+        const __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_2),
+                                               _mm_add_epi32(res_4, res_6));
 
         // Filter odd-index pixels
-        __m128i src_1 = _mm_unpackhi_epi16(src[0], src[1]);
-        __m128i src_3 = _mm_unpackhi_epi16(src[2], src[3]);
-        __m128i src_5 = _mm_unpackhi_epi16(src[4], src[5]);
-        __m128i src_7 = _mm_unpackhi_epi16(src[6], src[7]);
+        const __m128i src_1 = _mm_unpackhi_epi16(src[0], src[1]);
+        const __m128i src_3 = _mm_unpackhi_epi16(src[2], src[3]);
+        const __m128i src_5 = _mm_unpackhi_epi16(src[4], src[5]);
+        const __m128i src_7 = _mm_unpackhi_epi16(src[6], src[7]);
 
-        __m128i tmp_1 = _mm_loadu_si128(
-            (__m128i *)(filter + ((sy + 1 * gamma) >> WARPEDDIFF_PREC_BITS)));
-        __m128i tmp_3 = _mm_loadu_si128(
-            (__m128i *)(filter + ((sy + 3 * gamma) >> WARPEDDIFF_PREC_BITS)));
-        __m128i tmp_5 = _mm_loadu_si128(
-            (__m128i *)(filter + ((sy + 5 * gamma) >> WARPEDDIFF_PREC_BITS)));
-        __m128i tmp_7 = _mm_loadu_si128(
-            (__m128i *)(filter + ((sy + 7 * gamma) >> WARPEDDIFF_PREC_BITS)));
+        const __m128i tmp_1 = _mm_loadu_si128(
+            (__m128i *)(warped_filter +
+                        ((sy + 1 * gamma) >> WARPEDDIFF_PREC_BITS)));
+        const __m128i tmp_3 = _mm_loadu_si128(
+            (__m128i *)(warped_filter +
+                        ((sy + 3 * gamma) >> WARPEDDIFF_PREC_BITS)));
+        const __m128i tmp_5 = _mm_loadu_si128(
+            (__m128i *)(warped_filter +
+                        ((sy + 5 * gamma) >> WARPEDDIFF_PREC_BITS)));
+        const __m128i tmp_7 = _mm_loadu_si128(
+            (__m128i *)(warped_filter +
+                        ((sy + 7 * gamma) >> WARPEDDIFF_PREC_BITS)));
 
-        __m128i tmp_9 = _mm_unpacklo_epi32(tmp_1, tmp_3);
-        __m128i tmp_11 = _mm_unpacklo_epi32(tmp_5, tmp_7);
-        __m128i tmp_13 = _mm_unpackhi_epi32(tmp_1, tmp_3);
-        __m128i tmp_15 = _mm_unpackhi_epi32(tmp_5, tmp_7);
+        const __m128i tmp_9 = _mm_unpacklo_epi32(tmp_1, tmp_3);
+        const __m128i tmp_11 = _mm_unpacklo_epi32(tmp_5, tmp_7);
+        const __m128i tmp_13 = _mm_unpackhi_epi32(tmp_1, tmp_3);
+        const __m128i tmp_15 = _mm_unpackhi_epi32(tmp_5, tmp_7);
 
-        __m128i coeff_1 = _mm_unpacklo_epi64(tmp_9, tmp_11);
-        __m128i coeff_3 = _mm_unpackhi_epi64(tmp_9, tmp_11);
-        __m128i coeff_5 = _mm_unpacklo_epi64(tmp_13, tmp_15);
-        __m128i coeff_7 = _mm_unpackhi_epi64(tmp_13, tmp_15);
+        const __m128i coeff_1 = _mm_unpacklo_epi64(tmp_9, tmp_11);
+        const __m128i coeff_3 = _mm_unpackhi_epi64(tmp_9, tmp_11);
+        const __m128i coeff_5 = _mm_unpacklo_epi64(tmp_13, tmp_15);
+        const __m128i coeff_7 = _mm_unpackhi_epi64(tmp_13, tmp_15);
 
-        __m128i res_1 = _mm_madd_epi16(src_1, coeff_1);
-        __m128i res_3 = _mm_madd_epi16(src_3, coeff_3);
-        __m128i res_5 = _mm_madd_epi16(src_5, coeff_5);
-        __m128i res_7 = _mm_madd_epi16(src_7, coeff_7);
+        const __m128i res_1 = _mm_madd_epi16(src_1, coeff_1);
+        const __m128i res_3 = _mm_madd_epi16(src_3, coeff_3);
+        const __m128i res_5 = _mm_madd_epi16(src_5, coeff_5);
+        const __m128i res_7 = _mm_madd_epi16(src_7, coeff_7);
 
-        __m128i res_odd = _mm_add_epi32(_mm_add_epi32(res_1, res_3),
-                                        _mm_add_epi32(res_5, res_7));
+        const __m128i res_odd = _mm_add_epi32(_mm_add_epi32(res_1, res_3),
+                                              _mm_add_epi32(res_5, res_7));
 
         // Rearrange pixels back into the order 0 ... 7
-        __m128i res_lo = _mm_unpacklo_epi32(res_even, res_odd);
-        __m128i res_hi = _mm_unpackhi_epi32(res_even, res_odd);
+        const __m128i res_lo = _mm_unpacklo_epi32(res_even, res_odd);
+        const __m128i res_hi = _mm_unpackhi_epi32(res_even, res_odd);
 
         // Round and pack into 8 bits
-        __m128i round_const =
+        const __m128i round_const =
             _mm_set1_epi32((1 << VERSHEAR_REDUCE_PREC_BITS) >> 1);
 
-        __m128i res_lo_round = _mm_srai_epi32(
+        const __m128i res_lo_round = _mm_srai_epi32(
             _mm_add_epi32(res_lo, round_const), VERSHEAR_REDUCE_PREC_BITS);
-        __m128i res_hi_round = _mm_srai_epi32(
+        const __m128i res_hi_round = _mm_srai_epi32(
             _mm_add_epi32(res_hi, round_const), VERSHEAR_REDUCE_PREC_BITS);
 
-        __m128i res_16bit = _mm_packs_epi32(res_lo_round, res_hi_round);
+        const __m128i res_16bit = _mm_packs_epi32(res_lo_round, res_hi_round);
         __m128i res_8bit = _mm_packus_epi16(res_16bit, res_16bit);
 
         // Store, blending with 'pred' if needed
-        __m128i *p = (__m128i *)&pred[(i + k + 4) * p_stride + j];
+        __m128i *const p = (__m128i *)&pred[(i + k + 4) * p_stride + j];
 
         // Note: If we're outputting a 4x4 block, we need to be very careful
         // to only output 4 pixels at this point, to avoid encode/decode
diff --git a/av1/common/x86/warp_plane_ssse3.c b/av1/common/x86/warp_plane_ssse3.c
new file mode 100644
index 0000000..f6cc2d6
--- /dev/null
+++ b/av1/common/x86/warp_plane_ssse3.c
@@ -0,0 +1,488 @@
+/*
+ * Copyright (c) 2017, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <tmmintrin.h>
+
+#include "./av1_rtcd.h"
+#include "av1/common/warped_motion.h"
+
+/* This is a modified version of 'warped_filter' from warped_motion.c:
+   * Each coefficient is stored in 8 bits instead of 16 bits
+   * The coefficients are rearranged in the column order 0, 2, 4, 6, 1, 3, 5, 7
+
+     This is done in order to avoid overflow: Since the tap with the largest
+     coefficient could be any of taps 2, 3, 4 or 5, we can't use the summation
+     order ((0 + 1) + (4 + 5)) + ((2 + 3) + (6 + 7)) used in the regular
+     convolve functions.
+
+     Instead, we use the summation order
+     ((0 + 2) + (4 + 6)) + ((1 + 3) + (5 + 7)).
+     The rearrangement of coefficients in this table is so that we can get the
+     coefficients into the correct order more quickly.
+*/
+/* clang-format off */
+DECLARE_ALIGNED(8, static const int8_t,
+                filter_8bit[WARPEDPIXEL_PREC_SHIFTS * 3 + 1][8]) = {
+#if WARPEDPIXEL_PREC_BITS == 6
+  // [-1, 0)
+  { 0, 127,   0, 0,   0,   1, 0, 0}, { 0, 127,   0, 0,  -1,   2, 0, 0},
+  { 1, 127,  -1, 0,  -3,   4, 0, 0}, { 1, 126,  -2, 0,  -4,   6, 1, 0},
+  { 1, 126,  -3, 0,  -5,   8, 1, 0}, { 1, 125,  -4, 0,  -6,  11, 1, 0},
+  { 1, 124,  -4, 0,  -7,  13, 1, 0}, { 2, 123,  -5, 0,  -8,  15, 1, 0},
+  { 2, 122,  -6, 0,  -9,  18, 1, 0}, { 2, 121,  -6, 0, -10,  20, 1, 0},
+  { 2, 120,  -7, 0, -11,  22, 2, 0}, { 2, 119,  -8, 0, -12,  25, 2, 0},
+  { 3, 117,  -8, 0, -13,  27, 2, 0}, { 3, 116,  -9, 0, -13,  29, 2, 0},
+  { 3, 114, -10, 0, -14,  32, 3, 0}, { 3, 113, -10, 0, -15,  35, 2, 0},
+  { 3, 111, -11, 0, -15,  37, 3, 0}, { 3, 109, -11, 0, -16,  40, 3, 0},
+  { 3, 108, -12, 0, -16,  42, 3, 0}, { 4, 106, -13, 0, -17,  45, 3, 0},
+  { 4, 104, -13, 0, -17,  47, 3, 0}, { 4, 102, -14, 0, -17,  50, 3, 0},
+  { 4, 100, -14, 0, -17,  52, 3, 0}, { 4,  98, -15, 0, -18,  55, 4, 0},
+  { 4,  96, -15, 0, -18,  58, 3, 0}, { 4,  94, -16, 0, -18,  60, 4, 0},
+  { 4,  91, -16, 0, -18,  63, 4, 0}, { 4,  89, -16, 0, -18,  65, 4, 0},
+  { 4,  87, -17, 0, -18,  68, 4, 0}, { 4,  85, -17, 0, -18,  70, 4, 0},
+  { 4,  82, -17, 0, -18,  73, 4, 0}, { 4,  80, -17, 0, -18,  75, 4, 0},
+  { 4,  78, -18, 0, -18,  78, 4, 0}, { 4,  75, -18, 0, -17,  80, 4, 0},
+  { 4,  73, -18, 0, -17,  82, 4, 0}, { 4,  70, -18, 0, -17,  85, 4, 0},
+  { 4,  68, -18, 0, -17,  87, 4, 0}, { 4,  65, -18, 0, -16,  89, 4, 0},
+  { 4,  63, -18, 0, -16,  91, 4, 0}, { 4,  60, -18, 0, -16,  94, 4, 0},
+  { 3,  58, -18, 0, -15,  96, 4, 0}, { 4,  55, -18, 0, -15,  98, 4, 0},
+  { 3,  52, -17, 0, -14, 100, 4, 0}, { 3,  50, -17, 0, -14, 102, 4, 0},
+  { 3,  47, -17, 0, -13, 104, 4, 0}, { 3,  45, -17, 0, -13, 106, 4, 0},
+  { 3,  42, -16, 0, -12, 108, 3, 0}, { 3,  40, -16, 0, -11, 109, 3, 0},
+  { 3,  37, -15, 0, -11, 111, 3, 0}, { 2,  35, -15, 0, -10, 113, 3, 0},
+  { 3,  32, -14, 0, -10, 114, 3, 0}, { 2,  29, -13, 0,  -9, 116, 3, 0},
+  { 2,  27, -13, 0,  -8, 117, 3, 0}, { 2,  25, -12, 0,  -8, 119, 2, 0},
+  { 2,  22, -11, 0,  -7, 120, 2, 0}, { 1,  20, -10, 0,  -6, 121, 2, 0},
+  { 1,  18,  -9, 0,  -6, 122, 2, 0}, { 1,  15,  -8, 0,  -5, 123, 2, 0},
+  { 1,  13,  -7, 0,  -4, 124, 1, 0}, { 1,  11,  -6, 0,  -4, 125, 1, 0},
+  { 1,   8,  -5, 0,  -3, 126, 1, 0}, { 1,   6,  -4, 0,  -2, 126, 1, 0},
+  { 0,   4,  -3, 0,  -1, 127, 1, 0}, { 0,   2,  -1, 0,   0, 127, 0, 0},
+  // [0, 1)
+  { 0,   0,   1, 0, 0, 127,   0,  0}, { 0,  -1,   2, 0, 0, 127,   0,  0},
+  { 0,  -3,   4, 1, 1, 127,  -2,  0}, { 0,  -5,   6, 1, 1, 127,  -2,  0},
+  { 0,  -6,   8, 1, 2, 126,  -3,  0}, {-1,  -7,  11, 2, 2, 126,  -4, -1},
+  {-1,  -8,  13, 2, 3, 125,  -5, -1}, {-1, -10,  16, 3, 3, 124,  -6, -1},
+  {-1, -11,  18, 3, 4, 123,  -7, -1}, {-1, -12,  20, 3, 4, 122,  -7, -1},
+  {-1, -13,  23, 3, 4, 121,  -8, -1}, {-2, -14,  25, 4, 5, 120,  -9, -1},
+  {-1, -15,  27, 4, 5, 119, -10, -1}, {-1, -16,  30, 4, 5, 118, -11, -1},
+  {-2, -17,  33, 5, 6, 116, -12, -1}, {-2, -17,  35, 5, 6, 114, -12, -1},
+  {-2, -18,  38, 5, 6, 113, -13, -1}, {-2, -19,  41, 6, 7, 111, -14, -2},
+  {-2, -19,  43, 6, 7, 110, -15, -2}, {-2, -20,  46, 6, 7, 108, -15, -2},
+  {-2, -20,  49, 6, 7, 106, -16, -2}, {-2, -21,  51, 7, 7, 104, -16, -2},
+  {-2, -21,  54, 7, 7, 102, -17, -2}, {-2, -21,  56, 7, 8, 100, -18, -2},
+  {-2, -22,  59, 7, 8,  98, -18, -2}, {-2, -22,  62, 7, 8,  96, -19, -2},
+  {-2, -22,  64, 7, 8,  94, -19, -2}, {-2, -22,  67, 8, 8,  91, -20, -2},
+  {-2, -22,  69, 8, 8,  89, -20, -2}, {-2, -22,  72, 8, 8,  87, -21, -2},
+  {-2, -21,  74, 8, 8,  84, -21, -2}, {-2, -22,  77, 8, 8,  82, -21, -2},
+  {-2, -21,  79, 8, 8,  79, -21, -2}, {-2, -21,  82, 8, 8,  77, -22, -2},
+  {-2, -21,  84, 8, 8,  74, -21, -2}, {-2, -21,  87, 8, 8,  72, -22, -2},
+  {-2, -20,  89, 8, 8,  69, -22, -2}, {-2, -20,  91, 8, 8,  67, -22, -2},
+  {-2, -19,  94, 8, 7,  64, -22, -2}, {-2, -19,  96, 8, 7,  62, -22, -2},
+  {-2, -18,  98, 8, 7,  59, -22, -2}, {-2, -18, 100, 8, 7,  56, -21, -2},
+  {-2, -17, 102, 7, 7,  54, -21, -2}, {-2, -16, 104, 7, 7,  51, -21, -2},
+  {-2, -16, 106, 7, 6,  49, -20, -2}, {-2, -15, 108, 7, 6,  46, -20, -2},
+  {-2, -15, 110, 7, 6,  43, -19, -2}, {-2, -14, 111, 7, 6,  41, -19, -2},
+  {-1, -13, 113, 6, 5,  38, -18, -2}, {-1, -12, 114, 6, 5,  35, -17, -2},
+  {-1, -12, 116, 6, 5,  33, -17, -2}, {-1, -11, 118, 5, 4,  30, -16, -1},
+  {-1, -10, 119, 5, 4,  27, -15, -1}, {-1,  -9, 120, 5, 4,  25, -14, -2},
+  {-1,  -8, 121, 4, 3,  23, -13, -1}, {-1,  -7, 122, 4, 3,  20, -12, -1},
+  {-1,  -7, 123, 4, 3,  18, -11, -1}, {-1,  -6, 124, 3, 3,  16, -10, -1},
+  {-1,  -5, 125, 3, 2,  13,  -8, -1}, {-1,  -4, 126, 2, 2,  11,  -7, -1},
+  { 0,  -3, 126, 2, 1,   8,  -6,  0}, { 0,  -2, 127, 1, 1,   6,  -5,  0},
+  { 0,  -2, 127, 1, 1,   4,  -3,  0}, { 0,   0, 127, 0, 0,   2,  -1,  0},
+  // [1, 2)
+  { 0, 0, 127,   0, 0,   1,   0, 0}, { 0, 0, 127,   0, 0,  -1,   2, 0},
+  { 0, 1, 127,  -1, 0,  -3,   4, 0}, { 0, 1, 126,  -2, 0,  -4,   6, 1},
+  { 0, 1, 126,  -3, 0,  -5,   8, 1}, { 0, 1, 125,  -4, 0,  -6,  11, 1},
+  { 0, 1, 124,  -4, 0,  -7,  13, 1}, { 0, 2, 123,  -5, 0,  -8,  15, 1},
+  { 0, 2, 122,  -6, 0,  -9,  18, 1}, { 0, 2, 121,  -6, 0, -10,  20, 1},
+  { 0, 2, 120,  -7, 0, -11,  22, 2}, { 0, 2, 119,  -8, 0, -12,  25, 2},
+  { 0, 3, 117,  -8, 0, -13,  27, 2}, { 0, 3, 116,  -9, 0, -13,  29, 2},
+  { 0, 3, 114, -10, 0, -14,  32, 3}, { 0, 3, 113, -10, 0, -15,  35, 2},
+  { 0, 3, 111, -11, 0, -15,  37, 3}, { 0, 3, 109, -11, 0, -16,  40, 3},
+  { 0, 3, 108, -12, 0, -16,  42, 3}, { 0, 4, 106, -13, 0, -17,  45, 3},
+  { 0, 4, 104, -13, 0, -17,  47, 3}, { 0, 4, 102, -14, 0, -17,  50, 3},
+  { 0, 4, 100, -14, 0, -17,  52, 3}, { 0, 4,  98, -15, 0, -18,  55, 4},
+  { 0, 4,  96, -15, 0, -18,  58, 3}, { 0, 4,  94, -16, 0, -18,  60, 4},
+  { 0, 4,  91, -16, 0, -18,  63, 4}, { 0, 4,  89, -16, 0, -18,  65, 4},
+  { 0, 4,  87, -17, 0, -18,  68, 4}, { 0, 4,  85, -17, 0, -18,  70, 4},
+  { 0, 4,  82, -17, 0, -18,  73, 4}, { 0, 4,  80, -17, 0, -18,  75, 4},
+  { 0, 4,  78, -18, 0, -18,  78, 4}, { 0, 4,  75, -18, 0, -17,  80, 4},
+  { 0, 4,  73, -18, 0, -17,  82, 4}, { 0, 4,  70, -18, 0, -17,  85, 4},
+  { 0, 4,  68, -18, 0, -17,  87, 4}, { 0, 4,  65, -18, 0, -16,  89, 4},
+  { 0, 4,  63, -18, 0, -16,  91, 4}, { 0, 4,  60, -18, 0, -16,  94, 4},
+  { 0, 3,  58, -18, 0, -15,  96, 4}, { 0, 4,  55, -18, 0, -15,  98, 4},
+  { 0, 3,  52, -17, 0, -14, 100, 4}, { 0, 3,  50, -17, 0, -14, 102, 4},
+  { 0, 3,  47, -17, 0, -13, 104, 4}, { 0, 3,  45, -17, 0, -13, 106, 4},
+  { 0, 3,  42, -16, 0, -12, 108, 3}, { 0, 3,  40, -16, 0, -11, 109, 3},
+  { 0, 3,  37, -15, 0, -11, 111, 3}, { 0, 2,  35, -15, 0, -10, 113, 3},
+  { 0, 3,  32, -14, 0, -10, 114, 3}, { 0, 2,  29, -13, 0,  -9, 116, 3},
+  { 0, 2,  27, -13, 0,  -8, 117, 3}, { 0, 2,  25, -12, 0,  -8, 119, 2},
+  { 0, 2,  22, -11, 0,  -7, 120, 2}, { 0, 1,  20, -10, 0,  -6, 121, 2},
+  { 0, 1,  18,  -9, 0,  -6, 122, 2}, { 0, 1,  15,  -8, 0,  -5, 123, 2},
+  { 0, 1,  13,  -7, 0,  -4, 124, 1}, { 0, 1,  11,  -6, 0,  -4, 125, 1},
+  { 0, 1,   8,  -5, 0,  -3, 126, 1}, { 0, 1,   6,  -4, 0,  -2, 126, 1},
+  { 0, 0,   4,  -3, 0,  -1, 127, 1}, { 0, 0,   2,  -1, 0,   0, 127, 0},
+  // dummy (replicate row index 191)
+  { 0, 0,   2,  -1, 0,   0, 127, 0},
+
+#else
+  // [-1, 0)
+  { 0, 127,   0, 0,   0,   1, 0, 0}, { 1, 127,  -1, 0,  -3,   4, 0, 0},
+  { 1, 126,  -3, 0,  -5,   8, 1, 0}, { 1, 124,  -4, 0,  -7,  13, 1, 0},
+  { 2, 122,  -6, 0,  -9,  18, 1, 0}, { 2, 120,  -7, 0, -11,  22, 2, 0},
+  { 3, 117,  -8, 0, -13,  27, 2, 0}, { 3, 114, -10, 0, -14,  32, 3, 0},
+  { 3, 111, -11, 0, -15,  37, 3, 0}, { 3, 108, -12, 0, -16,  42, 3, 0},
+  { 4, 104, -13, 0, -17,  47, 3, 0}, { 4, 100, -14, 0, -17,  52, 3, 0},
+  { 4,  96, -15, 0, -18,  58, 3, 0}, { 4,  91, -16, 0, -18,  63, 4, 0},
+  { 4,  87, -17, 0, -18,  68, 4, 0}, { 4,  82, -17, 0, -18,  73, 4, 0},
+  { 4,  78, -18, 0, -18,  78, 4, 0}, { 4,  73, -18, 0, -17,  82, 4, 0},
+  { 4,  68, -18, 0, -17,  87, 4, 0}, { 4,  63, -18, 0, -16,  91, 4, 0},
+  { 3,  58, -18, 0, -15,  96, 4, 0}, { 3,  52, -17, 0, -14, 100, 4, 0},
+  { 3,  47, -17, 0, -13, 104, 4, 0}, { 3,  42, -16, 0, -12, 108, 3, 0},
+  { 3,  37, -15, 0, -11, 111, 3, 0}, { 3,  32, -14, 0, -10, 114, 3, 0},
+  { 2,  27, -13, 0,  -8, 117, 3, 0}, { 2,  22, -11, 0,  -7, 120, 2, 0},
+  { 1,  18,  -9, 0,  -6, 122, 2, 0}, { 1,  13,  -7, 0,  -4, 124, 1, 0},
+  { 1,   8,  -5, 0,  -3, 126, 1, 0}, { 0,   4,  -3, 0,  -1, 127, 1, 0},
+  // [0, 1)
+  { 0,   0,   1, 0, 0, 127,   0,  0}, { 0,  -3,   4, 1, 1, 127,  -2,  0},
+  { 0,  -6,   8, 1, 2, 126,  -3,  0}, {-1,  -8,  13, 2, 3, 125,  -5, -1},
+  {-1, -11,  18, 3, 4, 123,  -7, -1}, {-1, -13,  23, 3, 4, 121,  -8, -1},
+  {-1, -15,  27, 4, 5, 119, -10, -1}, {-2, -17,  33, 5, 6, 116, -12, -1},
+  {-2, -18,  38, 5, 6, 113, -13, -1}, {-2, -19,  43, 6, 7, 110, -15, -2},
+  {-2, -20,  49, 6, 7, 106, -16, -2}, {-2, -21,  54, 7, 7, 102, -17, -2},
+  {-2, -22,  59, 7, 8,  98, -18, -2}, {-2, -22,  64, 7, 8,  94, -19, -2},
+  {-2, -22,  69, 8, 8,  89, -20, -2}, {-2, -21,  74, 8, 8,  84, -21, -2},
+  {-2, -21,  79, 8, 8,  79, -21, -2}, {-2, -21,  84, 8, 8,  74, -21, -2},
+  {-2, -20,  89, 8, 8,  69, -22, -2}, {-2, -19,  94, 8, 7,  64, -22, -2},
+  {-2, -18,  98, 8, 7,  59, -22, -2}, {-2, -17, 102, 7, 7,  54, -21, -2},
+  {-2, -16, 106, 7, 6,  49, -20, -2}, {-2, -15, 110, 7, 6,  43, -19, -2},
+  {-1, -13, 113, 6, 5,  38, -18, -2}, {-1, -12, 116, 6, 5,  33, -17, -2},
+  {-1, -10, 119, 5, 4,  27, -15, -1}, {-1,  -8, 121, 4, 3,  23, -13, -1},
+  {-1,  -7, 123, 4, 3,  18, -11, -1}, {-1,  -5, 125, 3, 2,  13,  -8, -1},
+  { 0,  -3, 126, 2, 1,   8,  -6,  0}, { 0,  -2, 127, 1, 1,   4,  -3,  0},
+  // [1, 2)
+  { 0,  0, 127,   0, 0,   1,   0, 0}, { 0, 1, 127,  -1, 0,  -3,   4, 0},
+  { 0,  1, 126,  -3, 0,  -5,   8, 1}, { 0, 1, 124,  -4, 0,  -7,  13, 1},
+  { 0,  2, 122,  -6, 0,  -9,  18, 1}, { 0, 2, 120,  -7, 0, -11,  22, 2},
+  { 0,  3, 117,  -8, 0, -13,  27, 2}, { 0, 3, 114, -10, 0, -14,  32, 3},
+  { 0,  3, 111, -11, 0, -15,  37, 3}, { 0, 3, 108, -12, 0, -16,  42, 3},
+  { 0,  4, 104, -13, 0, -17,  47, 3}, { 0, 4, 100, -14, 0, -17,  52, 3},
+  { 0,  4,  96, -15, 0, -18,  58, 3}, { 0, 4,  91, -16, 0, -18,  63, 4},
+  { 0,  4,  87, -17, 0, -18,  68, 4}, { 0, 4,  82, -17, 0, -18,  73, 4},
+  { 0,  4,  78, -18, 0, -18,  78, 4}, { 0, 4,  73, -18, 0, -17,  82, 4},
+  { 0,  4,  68, -18, 0, -17,  87, 4}, { 0, 4,  63, -18, 0, -16,  91, 4},
+  { 0,  3,  58, -18, 0, -15,  96, 4}, { 0, 3,  52, -17, 0, -14, 100, 4},
+  { 0,  3,  47, -17, 0, -13, 104, 4}, { 0, 3,  42, -16, 0, -12, 108, 3},
+  { 0,  3,  37, -15, 0, -11, 111, 3}, { 0, 3,  32, -14, 0, -10, 114, 3},
+  { 0,  2,  27, -13, 0,  -8, 117, 3}, { 0, 2,  22, -11, 0,  -7, 120, 2},
+  { 0,  1,  18,  -9, 0,  -6, 122, 2}, { 0, 1,  13,  -7, 0,  -4, 124, 1},
+  { 0,  1,   8,  -5, 0,  -3, 126, 1}, { 0, 0,   4,  -3, 0,  -1, 127, 1},
+  // dummy (replicate row index 95)
+  { 0, 0,   4,  -3, 0,  -1, 127, 1},
+#endif  // WARPEDPIXEL_PREC_BITS == 6
+};
+/* clang-format on */
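+
+// Note: each row of filter_8bit above holds eight signed 8-bit taps which
+// sum to 128. This is what allows the horizontal pass below to use
+// _mm_maddubs_epi16 (unsigned source bytes * signed 8-bit taps) rather than
+// 16-bit multiplies.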
+
+// Shuffle masks: we want to convert a sequence of bytes 0, 1, 2, ..., 15
+// in an SSE register into two sequences:
+// 0, 2, 2, 4, ..., 12, 12, 14, 14, <don't care>
+// 1, 3, 3, 5, ..., 13, 13, 15, 15, <don't care>
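+// These pairings place each sample next to the one two positions to its
+// right, so the horizontal filter can apply taps {0,2}, {4,6}, {1,3} and
+// {5,7} with one _mm_maddubs_epi16 each.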
+static const uint8_t even_mask[16] = { 0, 2,  2,  4,  4,  6,  6,  8,
+                                       8, 10, 10, 12, 12, 14, 14, 0 };
+static const uint8_t odd_mask[16] = { 1, 3,  3,  5,  5,  7,  7,  9,
+                                      9, 11, 11, 13, 13, 15, 15, 0 };
+
+/* SSSE3 version of the rotzoom/affine warp filter */
+void av1_warp_affine_ssse3(const int32_t *mat, const uint8_t *ref, int width,
+                           int height, int stride, uint8_t *pred, int p_col,
+                           int p_row, int p_width, int p_height, int p_stride,
+                           int subsampling_x, int subsampling_y, int ref_frm,
+                           int16_t alpha, int16_t beta, int16_t gamma,
+                           int16_t delta) {
+  __m128i tmp[15];
+  int i, j, k;
+
+  /* Note: For this code to work, the left/right frame borders need to be
+     extended by at least 13 pixels each. By the time we get here, other
+     code will have set up this border, but the commented-out check below
+     can be re-enabled for debugging.
+  */
+  /*for (i = 0; i < height; ++i) {
+    for (j = 0; j < 13; ++j) {
+      assert(ref[i * stride - 13 + j] == ref[i * stride]);
+      assert(ref[i * stride + width + j] == ref[i * stride + (width - 1)]);
+    }
+  }*/
+
+  for (i = 0; i < p_height; i += 8) {
+    for (j = 0; j < p_width; j += 8) {
+      // (x, y) coordinates of the center of this block in the destination
+      // image
+      const int32_t dst_x = p_col + j + 4;
+      const int32_t dst_y = p_row + i + 4;
+
+      int32_t x4, y4, ix4, sx4, iy4, sy4;
+      if (subsampling_x)
+        x4 = ROUND_POWER_OF_TWO_SIGNED(
+            mat[2] * 2 * dst_x + mat[3] * 2 * dst_y + mat[0] +
+                (mat[2] + mat[3] - (1 << WARPEDMODEL_PREC_BITS)) / 2,
+            1);
+      else
+        x4 = mat[2] * dst_x + mat[3] * dst_y + mat[0];
+
+      if (subsampling_y)
+        y4 = ROUND_POWER_OF_TWO_SIGNED(
+            mat[4] * 2 * dst_x + mat[5] * 2 * dst_y + mat[1] +
+                (mat[4] + mat[5] - (1 << WARPEDMODEL_PREC_BITS)) / 2,
+            1);
+      else
+        y4 = mat[4] * dst_x + mat[5] * dst_y + mat[1];
+
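+      // ix4/iy4 are the integer parts of the source position for the block
+      // center; sx4/sy4 are the remaining WARPEDMODEL_PREC_BITS fractional
+      // bits.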
+      ix4 = x4 >> WARPEDMODEL_PREC_BITS;
+      sx4 = x4 & ((1 << WARPEDMODEL_PREC_BITS) - 1);
+      iy4 = y4 >> WARPEDMODEL_PREC_BITS;
+      sy4 = y4 & ((1 << WARPEDMODEL_PREC_BITS) - 1);
+
+      // Horizontal filter
+      for (k = -7; k < AOMMIN(8, p_height - i); ++k) {
+        int iy = iy4 + k;
+        if (iy < 0)
+          iy = 0;
+        else if (iy > height - 1)
+          iy = height - 1;
+
+        // If the block is aligned such that, after clamping, every sample
+        // would be taken from the leftmost/rightmost column, then we can
+        // skip the expensive horizontal filter.
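+        // In that case every tap reads the same replicated border pixel, so
+        // we just scale that pixel by
+        // 1 << (WARPEDPIXEL_FILTER_BITS - HORSHEAR_REDUCE_PREC_BITS) to match
+        // the precision the filter would have produced for a constant row.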
+        if (ix4 <= -7) {
+          tmp[k + 7] = _mm_set1_epi16(
+              ref[iy * stride] *
+              (1 << (WARPEDPIXEL_FILTER_BITS - HORSHEAR_REDUCE_PREC_BITS)));
+        } else if (ix4 >= width + 6) {
+          tmp[k + 7] = _mm_set1_epi16(
+              ref[iy * stride + (width - 1)] *
+              (1 << (WARPEDPIXEL_FILTER_BITS - HORSHEAR_REDUCE_PREC_BITS)));
+        } else {
+          const int sx = sx4 + alpha * (-4) + beta * k +
+                         // Include rounding and offset here
+                         (1 << (WARPEDDIFF_PREC_BITS - 1)) +
+                         (WARPEDPIXEL_PREC_SHIFTS << WARPEDDIFF_PREC_BITS);
+
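+          // The constant offset added to sx above shifts it so that
+          // (sx + x * alpha) >> WARPEDDIFF_PREC_BITS indexes directly into
+          // filter_8bit, whose rows are grouped by the bracketed offset
+          // ranges marked in the table.
+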
+          // Load source pixels
+          const __m128i src =
+              _mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
+          const __m128i src_even =
+              _mm_shuffle_epi8(src, _mm_loadu_si128((__m128i *)even_mask));
+          const __m128i src_odd =
+              _mm_shuffle_epi8(src, _mm_loadu_si128((__m128i *)odd_mask));
+
+          // Filter even-index pixels
+          const __m128i tmp_0 = _mm_loadl_epi64((
+              __m128i *)&filter_8bit[(sx + 0 * alpha) >> WARPEDDIFF_PREC_BITS]);
+          const __m128i tmp_1 = _mm_loadl_epi64((
+              __m128i *)&filter_8bit[(sx + 1 * alpha) >> WARPEDDIFF_PREC_BITS]);
+          const __m128i tmp_2 = _mm_loadl_epi64((
+              __m128i *)&filter_8bit[(sx + 2 * alpha) >> WARPEDDIFF_PREC_BITS]);
+          const __m128i tmp_3 = _mm_loadl_epi64((
+              __m128i *)&filter_8bit[(sx + 3 * alpha) >> WARPEDDIFF_PREC_BITS]);
+          const __m128i tmp_4 = _mm_loadl_epi64((
+              __m128i *)&filter_8bit[(sx + 4 * alpha) >> WARPEDDIFF_PREC_BITS]);
+          const __m128i tmp_5 = _mm_loadl_epi64((
+              __m128i *)&filter_8bit[(sx + 5 * alpha) >> WARPEDDIFF_PREC_BITS]);
+          const __m128i tmp_6 = _mm_loadl_epi64((
+              __m128i *)&filter_8bit[(sx + 6 * alpha) >> WARPEDDIFF_PREC_BITS]);
+          const __m128i tmp_7 = _mm_loadl_epi64((
+              __m128i *)&filter_8bit[(sx + 7 * alpha) >> WARPEDDIFF_PREC_BITS]);
+
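+          // tmp_0 ... tmp_7 each hold the eight 8-bit taps for one output
+          // pixel. The unpacks below transpose them so that each coeff_*
+          // register ends up holding one pair of taps for all eight output
+          // pixels, matching the byte pairing set up by the shuffle masks.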
+          // Coeffs 0 2 0 2 4 6 4 6 1 3 1 3 5 7 5 7 for pixels 0 2
+          const __m128i tmp_8 = _mm_unpacklo_epi16(tmp_0, tmp_2);
+          // Coeffs 0 2 0 2 4 6 4 6 1 3 1 3 5 7 5 7 for pixels 1 3
+          const __m128i tmp_9 = _mm_unpacklo_epi16(tmp_1, tmp_3);
+          // Coeffs 0 2 0 2 4 6 4 6 1 3 1 3 5 7 5 7 for pixels 4 6
+          const __m128i tmp_10 = _mm_unpacklo_epi16(tmp_4, tmp_6);
+          // Coeffs 0 2 0 2 4 6 4 6 1 3 1 3 5 7 5 7 for pixels 5 7
+          const __m128i tmp_11 = _mm_unpacklo_epi16(tmp_5, tmp_7);
+
+          // Coeffs 0 2 0 2 0 2 0 2 4 6 4 6 4 6 4 6 for pixels 0 2 4 6
+          const __m128i tmp_12 = _mm_unpacklo_epi32(tmp_8, tmp_10);
+          // Coeffs 1 3 1 3 1 3 1 3 5 7 5 7 5 7 5 7 for pixels 0 2 4 6
+          const __m128i tmp_13 = _mm_unpackhi_epi32(tmp_8, tmp_10);
+          // Coeffs 0 2 0 2 0 2 0 2 4 6 4 6 4 6 4 6 for pixels 1 3 5 7
+          const __m128i tmp_14 = _mm_unpacklo_epi32(tmp_9, tmp_11);
+          // Coeffs 1 3 1 3 1 3 1 3 5 7 5 7 5 7 5 7 for pixels 1 3 5 7
+          const __m128i tmp_15 = _mm_unpackhi_epi32(tmp_9, tmp_11);
+
+          // Coeffs 0 2 for pixels 0 2 4 6 1 3 5 7
+          const __m128i coeff_02 = _mm_unpacklo_epi64(tmp_12, tmp_14);
+          // Coeffs 4 6 for pixels 0 2 4 6 1 3 5 7
+          const __m128i coeff_46 = _mm_unpackhi_epi64(tmp_12, tmp_14);
+          // Coeffs 1 3 for pixels 0 2 4 6 1 3 5 7
+          const __m128i coeff_13 = _mm_unpacklo_epi64(tmp_13, tmp_15);
+          // Coeffs 5 7 for pixels 0 2 4 6 1 3 5 7
+          const __m128i coeff_57 = _mm_unpackhi_epi64(tmp_13, tmp_15);
+
+          // The pixel order we need for 'src' is:
+          // 0 2 2 4 4 6 6 8 1 3 3 5 5 7 7 9
+          const __m128i src_02 = _mm_unpacklo_epi64(src_even, src_odd);
+          const __m128i res_02 = _mm_maddubs_epi16(src_02, coeff_02);
+          // 4 6 6 8 8 10 10 12 5 7 7 9 9 11 11 13
+          const __m128i src_46 = _mm_unpacklo_epi64(_mm_srli_si128(src_even, 4),
+                                                    _mm_srli_si128(src_odd, 4));
+          const __m128i res_46 = _mm_maddubs_epi16(src_46, coeff_46);
+          // 1 3 3 5 5 7 7 9 2 4 4 6 6 8 8 10
+          const __m128i src_13 =
+              _mm_unpacklo_epi64(src_odd, _mm_srli_si128(src_even, 2));
+          const __m128i res_13 = _mm_maddubs_epi16(src_13, coeff_13);
+          // 5 7 7 9 9 11 11 13 6 8 8 10 10 12 12 14
+          const __m128i src_57 = _mm_unpacklo_epi64(
+              _mm_srli_si128(src_odd, 4), _mm_srli_si128(src_even, 6));
+          const __m128i res_57 = _mm_maddubs_epi16(src_57, coeff_57);
+
+          const __m128i round_const =
+              _mm_set1_epi16((1 << HORSHEAR_REDUCE_PREC_BITS) >> 1);
+
+          // Note: res_02 + res_46 and res_13 + res_57 are always in the range
+          // [-6120, 32640]. This gives us enough room to add the rounding
+          // constant to res_a, *as long as HORSHEAR_REDUCE_PREC_BITS <= 6*
+          const __m128i res_a =
+              _mm_add_epi16(_mm_add_epi16(res_02, res_46), round_const);
+          const __m128i res_b = _mm_add_epi16(res_13, res_57);
+
+          // Calculate (res_a + res_b) >> 1 while avoiding overflow
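+          // using the identity a + b == (a ^ b) + 2 * (a & b), i.e.
+          // (a + b) >> 1 == (a & b) + ((a ^ b) >> 1), which stays within
+          // 16 bits.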
+          const __m128i t1 = _mm_and_si128(res_a, res_b);
+          const __m128i t2 = _mm_srai_epi16(_mm_xor_si128(res_a, res_b), 1);
+
+          const __m128i res = _mm_srai_epi16(_mm_add_epi16(t1, t2),
+                                             HORSHEAR_REDUCE_PREC_BITS - 1);
+          tmp[k + 7] = res;
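+          // Note: each row of tmp holds its eight filtered samples in the
+          // order 0 2 4 6 1 3 5 7; the vertical pass below relies on this
+          // layout.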
+        }
+      }
+
+      // Vertical filter
+      for (k = -4; k < AOMMIN(4, p_height - i - 4); ++k) {
+        const int sy = sy4 + gamma * (-4) + delta * k +
+                       (1 << (WARPEDDIFF_PREC_BITS - 1)) +
+                       (WARPEDPIXEL_PREC_SHIFTS << WARPEDDIFF_PREC_BITS);
+
+        // Load from tmp and rearrange pairs of consecutive rows into the
+        // column order 0 0 2 2 4 4 6 6; 1 1 3 3 5 5 7 7
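+        // With that interleaving, each _mm_madd_epi16 below applies two
+        // vertical taps (one pair of adjacent rows) per 32-bit lane in a
+        // single instruction.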
+        const __m128i *src = tmp + (k + 4);
+        const __m128i src_0 = _mm_unpacklo_epi16(src[0], src[1]);
+        const __m128i src_2 = _mm_unpacklo_epi16(src[2], src[3]);
+        const __m128i src_4 = _mm_unpacklo_epi16(src[4], src[5]);
+        const __m128i src_6 = _mm_unpacklo_epi16(src[6], src[7]);
+
+        // Filter even-index pixels
+        const __m128i tmp_0 = _mm_loadu_si128(
+            (__m128i *)(warped_filter +
+                        ((sy + 0 * gamma) >> WARPEDDIFF_PREC_BITS)));
+        const __m128i tmp_2 = _mm_loadu_si128(
+            (__m128i *)(warped_filter +
+                        ((sy + 2 * gamma) >> WARPEDDIFF_PREC_BITS)));
+        const __m128i tmp_4 = _mm_loadu_si128(
+            (__m128i *)(warped_filter +
+                        ((sy + 4 * gamma) >> WARPEDDIFF_PREC_BITS)));
+        const __m128i tmp_6 = _mm_loadu_si128(
+            (__m128i *)(warped_filter +
+                        ((sy + 6 * gamma) >> WARPEDDIFF_PREC_BITS)));
+
+        const __m128i tmp_8 = _mm_unpacklo_epi32(tmp_0, tmp_2);
+        const __m128i tmp_10 = _mm_unpacklo_epi32(tmp_4, tmp_6);
+        const __m128i tmp_12 = _mm_unpackhi_epi32(tmp_0, tmp_2);
+        const __m128i tmp_14 = _mm_unpackhi_epi32(tmp_4, tmp_6);
+
+        const __m128i coeff_0 = _mm_unpacklo_epi64(tmp_8, tmp_10);
+        const __m128i coeff_2 = _mm_unpackhi_epi64(tmp_8, tmp_10);
+        const __m128i coeff_4 = _mm_unpacklo_epi64(tmp_12, tmp_14);
+        const __m128i coeff_6 = _mm_unpackhi_epi64(tmp_12, tmp_14);
+
+        const __m128i res_0 = _mm_madd_epi16(src_0, coeff_0);
+        const __m128i res_2 = _mm_madd_epi16(src_2, coeff_2);
+        const __m128i res_4 = _mm_madd_epi16(src_4, coeff_4);
+        const __m128i res_6 = _mm_madd_epi16(src_6, coeff_6);
+
+        const __m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_2),
+                                               _mm_add_epi32(res_4, res_6));
+
+        // Filter odd-index pixels
+        const __m128i src_1 = _mm_unpackhi_epi16(src[0], src[1]);
+        const __m128i src_3 = _mm_unpackhi_epi16(src[2], src[3]);
+        const __m128i src_5 = _mm_unpackhi_epi16(src[4], src[5]);
+        const __m128i src_7 = _mm_unpackhi_epi16(src[6], src[7]);
+
+        const __m128i tmp_1 = _mm_loadu_si128(
+            (__m128i *)(warped_filter +
+                        ((sy + 1 * gamma) >> WARPEDDIFF_PREC_BITS)));
+        const __m128i tmp_3 = _mm_loadu_si128(
+            (__m128i *)(warped_filter +
+                        ((sy + 3 * gamma) >> WARPEDDIFF_PREC_BITS)));
+        const __m128i tmp_5 = _mm_loadu_si128(
+            (__m128i *)(warped_filter +
+                        ((sy + 5 * gamma) >> WARPEDDIFF_PREC_BITS)));
+        const __m128i tmp_7 = _mm_loadu_si128(
+            (__m128i *)(warped_filter +
+                        ((sy + 7 * gamma) >> WARPEDDIFF_PREC_BITS)));
+
+        const __m128i tmp_9 = _mm_unpacklo_epi32(tmp_1, tmp_3);
+        const __m128i tmp_11 = _mm_unpacklo_epi32(tmp_5, tmp_7);
+        const __m128i tmp_13 = _mm_unpackhi_epi32(tmp_1, tmp_3);
+        const __m128i tmp_15 = _mm_unpackhi_epi32(tmp_5, tmp_7);
+
+        const __m128i coeff_1 = _mm_unpacklo_epi64(tmp_9, tmp_11);
+        const __m128i coeff_3 = _mm_unpackhi_epi64(tmp_9, tmp_11);
+        const __m128i coeff_5 = _mm_unpacklo_epi64(tmp_13, tmp_15);
+        const __m128i coeff_7 = _mm_unpackhi_epi64(tmp_13, tmp_15);
+
+        const __m128i res_1 = _mm_madd_epi16(src_1, coeff_1);
+        const __m128i res_3 = _mm_madd_epi16(src_3, coeff_3);
+        const __m128i res_5 = _mm_madd_epi16(src_5, coeff_5);
+        const __m128i res_7 = _mm_madd_epi16(src_7, coeff_7);
+
+        const __m128i res_odd = _mm_add_epi32(_mm_add_epi32(res_1, res_3),
+                                              _mm_add_epi32(res_5, res_7));
+
+        // Rearrange pixels back into the order 0 ... 7
+        const __m128i res_lo = _mm_unpacklo_epi32(res_even, res_odd);
+        const __m128i res_hi = _mm_unpackhi_epi32(res_even, res_odd);
+
+        // Round and pack into 8 bits
+        const __m128i round_const =
+            _mm_set1_epi32((1 << VERSHEAR_REDUCE_PREC_BITS) >> 1);
+
+        const __m128i res_lo_round = _mm_srai_epi32(
+            _mm_add_epi32(res_lo, round_const), VERSHEAR_REDUCE_PREC_BITS);
+        const __m128i res_hi_round = _mm_srai_epi32(
+            _mm_add_epi32(res_hi, round_const), VERSHEAR_REDUCE_PREC_BITS);
+
+        const __m128i res_16bit = _mm_packs_epi32(res_lo_round, res_hi_round);
+        __m128i res_8bit = _mm_packus_epi16(res_16bit, res_16bit);
+
+        // Store, blending with 'pred' if needed
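+        // (if ref_frm is set, the result is averaged with the existing
+        // contents of 'pred' using _mm_avg_epu8, which rounds to nearest,
+        // rather than overwriting them)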
+        __m128i *const p = (__m128i *)&pred[(i + k + 4) * p_stride + j];
+
+        // Note: If we're outputting a 4x4 block, we need to be very careful
+        // to only output 4 pixels at this point, to avoid encode/decode
+        // mismatches when encoding with multiple threads.
+        if (p_width == 4) {
+          if (ref_frm) {
+            const __m128i orig = _mm_cvtsi32_si128(*(uint32_t *)p);
+            res_8bit = _mm_avg_epu8(res_8bit, orig);
+          }
+          *(uint32_t *)p = _mm_cvtsi128_si32(res_8bit);
+        } else {
+          if (ref_frm) res_8bit = _mm_avg_epu8(res_8bit, _mm_loadl_epi64(p));
+          _mm_storel_epi64(p, res_8bit);
+        }
+      }
+    }
+  }
+}
diff --git a/test/test.mk b/test/test.mk
index fb0ab37..c01a7e2 100644
--- a/test/test.mk
+++ b/test/test.mk
@@ -20,7 +20,6 @@
 LIBAOM_TEST_SRCS-yes += video_source.h
 LIBAOM_TEST_SRCS-yes += transform_test_base.h
 LIBAOM_TEST_SRCS-yes += function_equivalence_test.h
-LIBAOM_TEST_SRCS-yes += warp_filter_test_util.h
 
 ##
 ## BLACK BOX TESTS
@@ -226,7 +225,8 @@
 LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_inv_txfm2d_test.cc
 LIBAOM_TEST_SRCS-$(CONFIG_AV1) += av1_convolve_test.cc
 LIBAOM_TEST_SRCS-$(CONFIG_AV1) += av1_convolve_optimz_test.cc
-ifneq ($(findstring yes,$(CONFIG_GLOBAL_MOTION) $(CONFIG_WARPED_MOTION)),)
+ifneq ($(findstring yes,$(CONFIG_GLOBAL_MOTION)$(CONFIG_WARPED_MOTION)),)
+LIBAOM_TEST_SRCS-$(HAVE_SSE2) += warp_filter_test_util.h
 LIBAOM_TEST_SRCS-$(HAVE_SSE2) += warp_filter_test.cc warp_filter_test_util.cc
 endif
 ifeq ($(CONFIG_LOOP_RESTORATION),yes)
diff --git a/test/warp_filter_test.cc b/test/warp_filter_test.cc
index fd6608b..2e4e6c3 100644
--- a/test/warp_filter_test.cc
+++ b/test/warp_filter_test.cc
@@ -22,12 +22,17 @@
 
 namespace {
 
-TEST_P(AV1WarpFilterTest, CheckOutput) { RunCheckOutput(av1_warp_affine_sse2); }
+TEST_P(AV1WarpFilterTest, CheckOutput) { RunCheckOutput(GET_PARAM(3)); }
 
-INSTANTIATE_TEST_CASE_P(SSE2, AV1WarpFilterTest,
-                        libaom_test::AV1WarpFilter::GetDefaultParams());
+INSTANTIATE_TEST_CASE_P(
+    SSE2, AV1WarpFilterTest,
+    libaom_test::AV1WarpFilter::BuildParams(av1_warp_affine_sse2));
 
-#if CONFIG_HIGHBITDEPTH
+INSTANTIATE_TEST_CASE_P(
+    SSSE3, AV1WarpFilterTest,
+    libaom_test::AV1WarpFilter::BuildParams(av1_warp_affine_ssse3));
+
+#if CONFIG_HIGHBITDEPTH && HAVE_SSSE3
 TEST_P(AV1HighbdWarpFilterTest, CheckOutput) {
   RunCheckOutput(av1_highbd_warp_affine_ssse3);
 }
diff --git a/test/warp_filter_test_util.cc b/test/warp_filter_test_util.cc
index 1ce265b..5e8e3c5 100644
--- a/test/warp_filter_test_util.cc
+++ b/test/warp_filter_test_util.cc
@@ -23,13 +23,13 @@
 #endif
 
 ::testing::internal::ParamGenerator<WarpTestParam>
-libaom_test::AV1WarpFilter::GetDefaultParams() {
-  const WarpTestParam defaultParams[] = {
-    make_tuple(4, 4, 50000),  make_tuple(8, 8, 50000),
-    make_tuple(64, 64, 1000), make_tuple(4, 16, 20000),
-    make_tuple(32, 8, 10000),
+libaom_test::AV1WarpFilter::BuildParams(warp_affine_func filter) {
+  const WarpTestParam params[] = {
+    make_tuple(4, 4, 50000, filter),  make_tuple(8, 8, 50000, filter),
+    make_tuple(64, 64, 1000, filter), make_tuple(4, 16, 20000, filter),
+    make_tuple(32, 8, 10000, filter),
   };
-  return ::testing::ValuesIn(defaultParams);
+  return ::testing::ValuesIn(params);
 }
 
 AV1WarpFilterTest::~AV1WarpFilterTest() {}
diff --git a/test/warp_filter_test_util.h b/test/warp_filter_test_util.h
index 6a87e46..651a9f8 100644
--- a/test/warp_filter_test_util.h
+++ b/test/warp_filter_test_util.h
@@ -26,16 +26,18 @@
 
 namespace AV1WarpFilter {
 
-typedef void (*warp_affine_func)(int32_t *mat, uint8_t *ref, int width,
-                                 int height, int stride, uint8_t *pred,
-                                 int p_col, int p_row, int p_width,
-                                 int p_height, int p_stride, int subsampling_x,
-                                 int subsampling_y, int ref_frm, int16_t alpha,
-                                 int16_t beta, int16_t gamma, int16_t delta);
+typedef void (*warp_affine_func)(const int32_t *mat, const uint8_t *ref,
+                                 int width, int height, int stride,
+                                 uint8_t *pred, int p_col, int p_row,
+                                 int p_width, int p_height, int p_stride,
+                                 int subsampling_x, int subsampling_y,
+                                 int ref_frm, int16_t alpha, int16_t beta,
+                                 int16_t gamma, int16_t delta);
 
-typedef std::tr1::tuple<int, int, int> WarpTestParam;
+typedef std::tr1::tuple<int, int, int, warp_affine_func> WarpTestParam;
 
-::testing::internal::ParamGenerator<WarpTestParam> GetDefaultParams();
+::testing::internal::ParamGenerator<WarpTestParam> BuildParams(
+    warp_affine_func filter);
 
 class AV1WarpFilterTest : public ::testing::TestWithParam<WarpTestParam> {
  public:
@@ -59,7 +61,7 @@
 #if CONFIG_HIGHBITDEPTH
 namespace AV1HighbdWarpFilter {
 typedef void (*highbd_warp_affine_func)(
-    int32_t *mat, uint16_t *ref, int width, int height, int stride,
+    const int32_t *mat, const uint16_t *ref, int width, int height, int stride,
     uint16_t *pred, int p_col, int p_row, int p_width, int p_height,
     int p_stride, int subsampling_x, int subsampling_y, int bd, int ref_frm,
     int16_t alpha, int16_t beta, int16_t gamma, int16_t delta);