Retune the CLPF kernel

CLPF performance had degraded by about 0.5% over the past six months,
which isn't totally surprising since the codec is a moving target.
About half of that degradation comes from the improved 7 bit filter
coefficients.  Therefore, CLPF needs to be retuned for the current
codec.

This patch makes two (normative) changes to the CLPF kernel:

* The clipping function was changed from clamp(x, -s, s) to
      sign(x) * max(0, abs(x) - max(0, abs(x) - s +
             (abs(x) >> (bitdepth - 3 - log2(s)))))
  This adds a rampdown to 0 so that large differences are ignored:
  the response reaches 0 at ±32 for 8 bit input, ±128 for 10 bit,
  and so on.

* 8 taps instead of 6 taps (each column below is one tap weight;
  the centre position is the filtered pixel itself):
               1
    4          3
  1 3  ->  1 3   3 1
    4          3
               1

AWCY results: low delay  high delay
PSNR:           -0.40%     -0.47%
PSNR HVS:        0.00%     -0.11%
SSIM:           -0.31%     -0.39%
CIEDE 2000:     -0.22%     -0.31%
APSNR:          -0.40%     -0.48%
MS SSIM:         0.01%     -0.12%

About 3/4 of the gains come from the new clipping function.

Change-Id: Idad9dc4004e71a9c7ec81ba62ebd12fb76fb044a
diff --git a/aom_dsp/aom_dsp_rtcd_defs.pl b/aom_dsp/aom_dsp_rtcd_defs.pl
index a5c492c..cd3e613 100644
--- a/aom_dsp/aom_dsp_rtcd_defs.pl
+++ b/aom_dsp/aom_dsp_rtcd_defs.pl
@@ -853,9 +853,9 @@
 
 if (aom_config("CONFIG_CLPF") eq "yes") {
   if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
-    add_proto qw/void aom_clpf_block_hbd/, "const uint16_t *src, uint16_t *dst, int sstride, int dstride, int x0, int y0, int sizex, int sizey, unsigned int strength, BOUNDARY_TYPE bt";
-    add_proto qw/void aom_clpf_detect_hbd/, "const uint16_t *rec, const uint16_t *org, int rstride, int ostride, int x0, int y0, int width, int height, int *sum0, int *sum1, unsigned int strength, int shift, int size";
-    add_proto qw/void aom_clpf_detect_multi_hbd/, "const uint16_t *rec, const uint16_t *org, int rstride, int ostride, int x0, int y0, int width, int height, int *sum, int shift, int size";
+    add_proto qw/void aom_clpf_block_hbd/, "const uint16_t *src, uint16_t *dst, int sstride, int dstride, int x0, int y0, int sizex, int sizey, unsigned int strength, BOUNDARY_TYPE bt, unsigned int bd";
+    add_proto qw/void aom_clpf_detect_hbd/, "const uint16_t *rec, const uint16_t *org, int rstride, int ostride, int x0, int y0, int width, int height, int *sum0, int *sum1, unsigned int strength, int size, unsigned int bd";
+    add_proto qw/void aom_clpf_detect_multi_hbd/, "const uint16_t *rec, const uint16_t *org, int rstride, int ostride, int x0, int y0, int width, int height, int *sum, int size, unsigned int bd";
     # VS compiling for 32 bit targets does not support vector types in
     # structs as arguments, which makes the v256 type of the intrinsics
     # hard to support, so optimizations for this target are disabled.
@@ -865,9 +865,9 @@
       specialize qw/aom_clpf_detect_multi_hbd sse2 ssse3 sse4_1 neon/;
     }
   }
-  add_proto qw/void aom_clpf_block/, "const uint8_t *src, uint8_t *dst, int sstride, int dstride, int x0, int y0, int sizex, int sizey, unsigned int strength, BOUNDARY_TYPE bt";
-  add_proto qw/void aom_clpf_detect/, "const uint8_t *rec, const uint8_t *org, int rstride, int ostride, int x0, int y0, int width, int height, int *sum0, int *sum1, unsigned int strength, int size";
-  add_proto qw/void aom_clpf_detect_multi/, "const uint8_t *rec, const uint8_t *org, int rstride, int ostride, int x0, int y0, int width, int height, int *sum, int size";
+  add_proto qw/void aom_clpf_block/, "const uint8_t *src, uint8_t *dst, int sstride, int dstride, int x0, int y0, int sizex, int sizey, unsigned int strength, BOUNDARY_TYPE bt, unsigned int bd";
+  add_proto qw/void aom_clpf_detect/, "const uint8_t *rec, const uint8_t *org, int rstride, int ostride, int x0, int y0, int width, int height, int *sum0, int *sum1, unsigned int strength, int size, unsigned int bd";
+  add_proto qw/void aom_clpf_detect_multi/, "const uint8_t *rec, const uint8_t *org, int rstride, int ostride, int x0, int y0, int width, int height, int *sum, int size, unsigned int bd";
   # VS compiling for 32 bit targets does not support vector types in
   # structs as arguments, which makes the v256 type of the intrinsics
   # hard to support, so optimizations for this target are disabled.
diff --git a/av1/av1_common.mk b/av1/av1_common.mk
index abc812a..5b5f6c9 100644
--- a/av1/av1_common.mk
+++ b/av1/av1_common.mk
@@ -85,6 +85,7 @@
 AV1_COMMON_SRCS-yes += common/clpf.c
 AV1_COMMON_SRCS-yes += common/clpf.h
 AV1_COMMON_SRCS-yes += common/clpf_simd.h
+AV1_COMMON_SRCS-yes += common/clpf_simd_kernel.h
 AV1_COMMON_SRCS-$(HAVE_SSE2) += common/clpf_sse2.c
 AV1_COMMON_SRCS-$(HAVE_SSSE3) += common/clpf_ssse3.c
 AV1_COMMON_SRCS-$(HAVE_SSE4_1) += common/clpf_sse4.c
diff --git a/av1/common/clpf.c b/av1/common/clpf.c
index b1306a3..8dfe5c0 100644
--- a/av1/common/clpf.c
+++ b/av1/common/clpf.c
@@ -14,32 +14,46 @@
 #include "aom/aom_image.h"
 #include "aom_dsp/aom_dsp_common.h"
 
-int av1_clpf_sample(int X, int A, int B, int C, int D, int E, int F, int b) {
-  int delta = 4 * clamp(A - X, -b, b) + clamp(B - X, -b, b) +
-              3 * clamp(C - X, -b, b) + 3 * clamp(D - X, -b, b) +
-              clamp(E - X, -b, b) + 4 * clamp(F - X, -b, b);
+int sign(int i) { return i < 0 ? -1 : 1; }
+
+int constrain(int x, int s, unsigned int bitdepth) {
+  return sign(x) *
+         AOMMAX(0, abs(x) - AOMMAX(0, abs(x) - s + (abs(x) >> (bitdepth - 3 -
+                                                               get_msb(s)))));
+}
+
+int av1_clpf_sample(int X, int A, int B, int C, int D, int E, int F, int G,
+                    int H, int s, unsigned int bd) {
+  int delta = 1 * constrain(A - X, s, bd) + 3 * constrain(B - X, s, bd) +
+              1 * constrain(C - X, s, bd) + 3 * constrain(D - X, s, bd) +
+              3 * constrain(E - X, s, bd) + 1 * constrain(F - X, s, bd) +
+              3 * constrain(G - X, s, bd) + 1 * constrain(H - X, s, bd);
   return (8 + delta - (delta < 0)) >> 4;
 }
 
 void aom_clpf_block_c(const uint8_t *src, uint8_t *dst, int sstride,
                       int dstride, int x0, int y0, int sizex, int sizey,
-                      unsigned int strength, BOUNDARY_TYPE bt) {
+                      unsigned int strength, BOUNDARY_TYPE bt,
+                      unsigned int bitdepth) {
   int x, y;
-  int xmin = x0 - !(bt & TILE_LEFT_BOUNDARY) * 2;
-  int ymin = y0 - !(bt & TILE_ABOVE_BOUNDARY);
-  int xmax = x0 + sizex + !(bt & TILE_RIGHT_BOUNDARY) * 2 - 1;
-  int ymax = y0 + sizey + !(bt & TILE_BOTTOM_BOUNDARY) - 1;
+  const int xmin = x0 - !(bt & TILE_LEFT_BOUNDARY) * 2;
+  const int ymin = y0 - !(bt & TILE_ABOVE_BOUNDARY) * 2;
+  const int xmax = x0 + sizex + !(bt & TILE_RIGHT_BOUNDARY) * 2 - 1;
+  const int ymax = y0 + sizey + !(bt & TILE_BOTTOM_BOUNDARY) * 2 - 1;
+
   for (y = y0; y < y0 + sizey; y++) {
     for (x = x0; x < x0 + sizex; x++) {
-      int X = src[y * sstride + x];
-      int A = src[AOMMAX(ymin, y - 1) * sstride + x];
-      int B = src[y * sstride + AOMMAX(xmin, x - 2)];
-      int C = src[y * sstride + AOMMAX(xmin, x - 1)];
-      int D = src[y * sstride + AOMMIN(xmax, x + 1)];
-      int E = src[y * sstride + AOMMIN(xmax, x + 2)];
-      int F = src[AOMMIN(ymax, y + 1) * sstride + x];
-      int delta;
-      delta = av1_clpf_sample(X, A, B, C, D, E, F, strength);
+      const int X = src[y * sstride + x];
+      const int A = src[AOMMAX(ymin, y - 2) * sstride + x];
+      const int B = src[AOMMAX(ymin, y - 1) * sstride + x];
+      const int C = src[y * sstride + AOMMAX(xmin, x - 2)];
+      const int D = src[y * sstride + AOMMAX(xmin, x - 1)];
+      const int E = src[y * sstride + AOMMIN(xmax, x + 1)];
+      const int F = src[y * sstride + AOMMIN(xmax, x + 2)];
+      const int G = src[AOMMIN(ymax, y + 1) * sstride + x];
+      const int H = src[AOMMIN(ymax, y + 2) * sstride + x];
+      const int delta =
+          av1_clpf_sample(X, A, B, C, D, E, F, G, H, strength, bitdepth);
       dst[y * dstride + x] = X + delta;
     }
   }
@@ -49,24 +63,27 @@
 // Identical to aom_clpf_block_c() apart from "src" and "dst".
 void aom_clpf_block_hbd_c(const uint16_t *src, uint16_t *dst, int sstride,
                           int dstride, int x0, int y0, int sizex, int sizey,
-                          unsigned int strength, BOUNDARY_TYPE bt) {
+                          unsigned int strength, BOUNDARY_TYPE bt,
+                          unsigned int bitdepth) {
   int x, y;
-  int xmin = x0 - !(bt & TILE_LEFT_BOUNDARY) * 2;
-  int ymin = y0 - !(bt & TILE_ABOVE_BOUNDARY);
-  int xmax = x0 + sizex + !(bt & TILE_RIGHT_BOUNDARY) * 2 - 1;
-  int ymax = y0 + sizey + !(bt & TILE_BOTTOM_BOUNDARY) - 1;
+  const int xmin = x0 - !(bt & TILE_LEFT_BOUNDARY) * 2;
+  const int ymin = y0 - !(bt & TILE_ABOVE_BOUNDARY) * 2;
+  const int xmax = x0 + sizex + !(bt & TILE_RIGHT_BOUNDARY) * 2 - 1;
+  const int ymax = y0 + sizey + !(bt & TILE_BOTTOM_BOUNDARY) * 2 - 1;
 
   for (y = y0; y < y0 + sizey; y++) {
     for (x = x0; x < x0 + sizex; x++) {
-      int X = src[y * sstride + x];
-      int A = src[AOMMAX(ymin, y - 1) * sstride + x];
-      int B = src[y * sstride + AOMMAX(xmin, x - 2)];
-      int C = src[y * sstride + AOMMAX(xmin, x - 1)];
-      int D = src[y * sstride + AOMMIN(xmax, x + 1)];
-      int E = src[y * sstride + AOMMIN(xmax, x + 2)];
-      int F = src[AOMMIN(ymax, y + 1) * sstride + x];
-      int delta;
-      delta = av1_clpf_sample(X, A, B, C, D, E, F, strength);
+      const int X = src[y * sstride + x];
+      const int A = src[AOMMAX(ymin, y - 2) * sstride + x];
+      const int B = src[AOMMAX(ymin, y - 1) * sstride + x];
+      const int C = src[y * sstride + AOMMAX(xmin, x - 2)];
+      const int D = src[y * sstride + AOMMAX(xmin, x - 1)];
+      const int E = src[y * sstride + AOMMIN(xmax, x + 1)];
+      const int F = src[y * sstride + AOMMIN(xmax, x + 2)];
+      const int G = src[AOMMIN(ymax, y + 1) * sstride + x];
+      const int H = src[AOMMIN(ymax, y + 2) * sstride + x];
+      const int delta =
+          av1_clpf_sample(X, A, B, C, D, E, F, G, H, strength, bitdepth);
       dst[y * dstride + x] = X + delta;
     }
   }
@@ -243,14 +260,16 @@
                 aom_clpf_block_hbd(CONVERT_TO_SHORTPTR(src_buffer),
                                    CONVERT_TO_SHORTPTR(dst_buffer), sstride,
                                    dstride, xpos, ypos, sizex, sizey, strength,
-                                   boundary_type);
+                                   boundary_type, cm->bit_depth);
               } else {
                 aom_clpf_block(src_buffer, dst_buffer, sstride, dstride, xpos,
-                               ypos, sizex, sizey, strength, boundary_type);
+                               ypos, sizex, sizey, strength, boundary_type,
+                               cm->bit_depth);
               }
 #else
               aom_clpf_block(src_buffer, dst_buffer, sstride, dstride, xpos,
-                             ypos, sizex, sizey, strength, boundary_type);
+                             ypos, sizex, sizey, strength, boundary_type,
+                             cm->bit_depth);
 #endif
             }
           }
diff --git a/av1/common/clpf.h b/av1/common/clpf.h
index fc74f2c..1642fb3 100644
--- a/av1/common/clpf.h
+++ b/av1/common/clpf.h
@@ -18,7 +18,8 @@
 #define MAX_FB_SIZE (1 << MAX_FB_SIZE_LOG2)
 #define MIN_FB_SIZE (1 << MIN_FB_SIZE_LOG2)
 
-int av1_clpf_sample(int X, int A, int B, int C, int D, int E, int F, int b);
+int av1_clpf_sample(int X, int A, int B, int C, int D, int E, int F, int G,
+                    int H, int b, unsigned int bd);
 void av1_clpf_frame(const YV12_BUFFER_CONFIG *frame,
                     const YV12_BUFFER_CONFIG *org, AV1_COMMON *cm,
                     int enable_fb_flag, unsigned int strength,
diff --git a/av1/common/clpf_simd.h b/av1/common/clpf_simd.h
index 7cf9486..efc579c 100644
--- a/av1/common/clpf_simd.h
+++ b/av1/common/clpf_simd.h
@@ -11,62 +11,25 @@
 
 #include "./aom_dsp_rtcd.h"
 #include "aom_ports/mem.h"
-
-// delta = 4/16 * clamp(a - o, -s, s) + 1/16 * clamp(b - o, -s, s) +
-//         3/16 * clamp(c - o, -s, s) + 3/16 * clamp(d - o, -s, s) +
-//         1/16 * clamp(e - o, -s, s) + 4/16 * clamp(f - o, -s, s)
-SIMD_INLINE v128 calc_delta(v128 o, v128 a, v128 b, v128 c, v128 d, v128 e,
-                            v128 f, v128 sp, v128 sm) {
-  // The difference will be 9 bit, offset by 128 so we can use saturated
-  // sub to avoid going to 16 bit temporarily before "strength" clipping.
-  const v128 c128 = v128_dup_8(128);
-  const v128 x = v128_add_8(c128, o);
-  const v128 c8 = v128_dup_8(8);
-  const v128 tmp = v128_add_8(
-      v128_max_s8(v128_min_s8(v128_ssub_s8(v128_add_8(c128, c), x), sp), sm),
-      v128_max_s8(v128_min_s8(v128_ssub_s8(v128_add_8(c128, d), x), sp), sm));
-  const v128 delta = v128_add_8(
-      v128_add_8(
-          v128_shl_8(
-              v128_add_8(
-                  v128_max_s8(
-                      v128_min_s8(v128_ssub_s8(v128_add_8(c128, a), x), sp),
-                      sm),
-                  v128_max_s8(
-                      v128_min_s8(v128_ssub_s8(v128_add_8(c128, f), x), sp),
-                      sm)),
-              2),
-          v128_add_8(
-              v128_max_s8(v128_min_s8(v128_ssub_s8(v128_add_8(c128, b), x), sp),
-                          sm),
-              v128_max_s8(v128_min_s8(v128_ssub_s8(v128_add_8(c128, e), x), sp),
-                          sm))),
-      v128_add_8(v128_add_8(tmp, tmp), tmp));
-  return v128_add_8(
-      o,
-      v128_shr_s8(
-          v128_add_8(c8, v128_add_8(delta, v128_cmplt_s8(delta, v128_zero()))),
-          4));
-}
+#include "aom_ports/bitops.h"
+#include "av1/common/clpf_simd_kernel.h"
 
 // Process blocks of width 8, two lines at a time, 8 bit.
 static void clpf_block8(const uint8_t *src, uint8_t *dst, int sstride,
                         int dstride, int x0, int y0, int sizey,
-                        unsigned int strength, BOUNDARY_TYPE bt) {
+                        BOUNDARY_TYPE bt, unsigned int strength) {
   const int bottom = bt & TILE_BOTTOM_BOUNDARY ? sizey - 2 : -1;
   const int right = !(bt & TILE_RIGHT_BOUNDARY);
   const int left = !(bt & TILE_LEFT_BOUNDARY);
   const int top = bt & TILE_ABOVE_BOUNDARY ? y0 : -1;
-  const v128 sp = v128_dup_8(strength);
-  const v128 sm = v128_dup_8(-(int)strength);
   DECLARE_ALIGNED(16, static const uint64_t,
-                  b_shuff[]) = { 0x0504030201000000LL, 0x0d0c0b0a09080808LL };
+                  c_shuff[]) = { 0x0504030201000000LL, 0x0d0c0b0a09080808LL };
   DECLARE_ALIGNED(16, static const uint64_t,
-                  c_shuff[]) = { 0x0605040302010000LL, 0x0e0d0c0b0a090808LL };
+                  d_shuff[]) = { 0x0605040302010000LL, 0x0e0d0c0b0a090808LL };
   DECLARE_ALIGNED(16, static const uint64_t,
-                  d_shuff[]) = { 0x0707060504030201LL, 0x0f0f0e0d0c0b0a09LL };
+                  e_shuff[]) = { 0x0707060504030201LL, 0x0f0f0e0d0c0b0a09LL };
   DECLARE_ALIGNED(16, static const uint64_t,
-                  e_shuff[]) = { 0x0707070605040302LL, 0x0f0f0f0e0d0c0b0aLL };
+                  f_shuff[]) = { 0x0707070605040302LL, 0x0f0f0f0e0d0c0b0aLL };
   int y;
 
   dst += x0 + y0 * dstride;
@@ -75,33 +38,37 @@
   for (y = 0; y < sizey; y += 2) {
     const v64 l1 = v64_load_aligned(src);
     const v64 l2 = v64_load_aligned(src + sstride);
+    const v64 l3 = v64_load_aligned(src - (y != top) * sstride);
+    const v64 l4 = v64_load_aligned(src + ((y != bottom) + 1) * sstride);
     v128 o = v128_from_v64(l1, l2);
     const v128 a =
-        v128_from_v64(v64_load_aligned(src - (y != top) * sstride), l1);
-    const v128 f = v128_from_v64(
-        l2, v64_load_aligned(src + ((y != bottom) + 1) * sstride));
-    v128 b, c, d, e;
+        v128_from_v64(v64_load_aligned(src - 2 * (y != top) * sstride), l3);
+    const v128 b = v128_from_v64(l3, l1);
+    const v128 g = v128_from_v64(l2, l4);
+    const v128 h = v128_from_v64(
+        l4, v64_load_aligned(src + (2 * (y != bottom) + 1) * sstride));
+    v128 c, d, e, f;
 
     if (left) {
-      b = v128_from_v64(v64_load_unaligned(src - 2),
+      c = v128_from_v64(v64_load_unaligned(src - 2),
                         v64_load_unaligned(src - 2 + sstride));
-      c = v128_from_v64(v64_load_unaligned(src - 1),
+      d = v128_from_v64(v64_load_unaligned(src - 1),
                         v64_load_unaligned(src - 1 + sstride));
     } else {  // Left clipping
-      b = v128_shuffle_8(o, v128_load_aligned(b_shuff));
       c = v128_shuffle_8(o, v128_load_aligned(c_shuff));
+      d = v128_shuffle_8(o, v128_load_aligned(d_shuff));
     }
     if (right) {
-      d = v128_from_v64(v64_load_unaligned(src + 1),
+      e = v128_from_v64(v64_load_unaligned(src + 1),
                         v64_load_unaligned(src + 1 + sstride));
-      e = v128_from_v64(v64_load_unaligned(src + 2),
+      f = v128_from_v64(v64_load_unaligned(src + 2),
                         v64_load_unaligned(src + 2 + sstride));
     } else {  // Right clipping
-      d = v128_shuffle_8(o, v128_load_aligned(d_shuff));
       e = v128_shuffle_8(o, v128_load_aligned(e_shuff));
+      f = v128_shuffle_8(o, v128_load_aligned(f_shuff));
     }
 
-    o = calc_delta(o, a, b, c, d, e, f, sp, sm);
+    o = calc_delta(o, a, b, c, d, e, f, g, h, strength);
     v64_store_aligned(dst, v128_high_v64(o));
     v64_store_aligned(dst + dstride, v128_low_v64(o));
     src += sstride * 2;
@@ -112,67 +79,70 @@
 // Process blocks of width 4, four lines at a time, 8 bit.
 static void clpf_block4(const uint8_t *src, uint8_t *dst, int sstride,
                         int dstride, int x0, int y0, int sizey,
-                        unsigned int strength, BOUNDARY_TYPE bt) {
-  const v128 sp = v128_dup_8(strength);
-  const v128 sm = v128_dup_8(-(int)strength);
+                        BOUNDARY_TYPE bt, unsigned int strength) {
   const int right = !(bt & TILE_RIGHT_BOUNDARY);
   const int bottom = bt & TILE_BOTTOM_BOUNDARY ? sizey - 4 : -1;
   const int left = !(bt & TILE_LEFT_BOUNDARY);
   const int top = bt & TILE_ABOVE_BOUNDARY ? y0 : -1;
 
   DECLARE_ALIGNED(16, static const uint64_t,
-                  b_shuff[]) = { 0x0504040401000000LL, 0x0d0c0c0c09080808LL };
+                  c_shuff[]) = { 0x0504040401000000LL, 0x0d0c0c0c09080808LL };
   DECLARE_ALIGNED(16, static const uint64_t,
-                  c_shuff[]) = { 0x0605040402010000LL, 0x0e0d0c0c0a090808LL };
+                  d_shuff[]) = { 0x0605040402010000LL, 0x0e0d0c0c0a090808LL };
   DECLARE_ALIGNED(16, static const uint64_t,
-                  d_shuff[]) = { 0x0707060503030201LL, 0x0f0f0e0d0b0b0a09LL };
+                  e_shuff[]) = { 0x0707060503030201LL, 0x0f0f0e0d0b0b0a09LL };
   DECLARE_ALIGNED(16, static const uint64_t,
-                  e_shuff[]) = { 0x0707070603030302LL, 0x0f0f0f0e0b0b0b0aLL };
+                  f_shuff[]) = { 0x0707070603030302LL, 0x0f0f0f0e0b0b0b0aLL };
   int y;
 
   dst += x0 + y0 * dstride;
   src += x0 + y0 * sstride;
 
   for (y = 0; y < sizey; y += 4) {
-    const uint32_t l0 = u32_load_aligned(src - (y != top) * sstride);
-    const uint32_t l1 = u32_load_aligned(src);
-    const uint32_t l2 = u32_load_aligned(src + sstride);
-    const uint32_t l3 = u32_load_aligned(src + 2 * sstride);
-    const uint32_t l4 = u32_load_aligned(src + 3 * sstride);
-    const uint32_t l5 = u32_load_aligned(src + ((y != bottom) + 3) * sstride);
-    v128 o = v128_from_32(l1, l2, l3, l4);
+    const uint32_t l0 = u32_load_aligned(src - 2 * (y != top) * sstride);
+    const uint32_t l1 = u32_load_aligned(src - (y != top) * sstride);
+    const uint32_t l2 = u32_load_aligned(src);
+    const uint32_t l3 = u32_load_aligned(src + sstride);
+    const uint32_t l4 = u32_load_aligned(src + 2 * sstride);
+    const uint32_t l5 = u32_load_aligned(src + 3 * sstride);
+    const uint32_t l6 = u32_load_aligned(src + ((y != bottom) + 3) * sstride);
+    const uint32_t l7 =
+        u32_load_aligned(src + (2 * (y != bottom) + 3) * sstride);
+    v128 o = v128_from_32(l2, l3, l4, l5);
     const v128 a = v128_from_32(l0, l1, l2, l3);
-    const v128 f = v128_from_32(l2, l3, l4, l5);
-    v128 b, c, d, e;
+    const v128 b = v128_from_32(l1, l2, l3, l4);
+    const v128 g = v128_from_32(l3, l4, l5, l6);
+    const v128 h = v128_from_32(l4, l5, l6, l7);
+    v128 c, d, e, f;
 
     if (left) {
-      b = v128_from_32(u32_load_unaligned(src - 2),
+      c = v128_from_32(u32_load_unaligned(src - 2),
                        u32_load_unaligned(src + sstride - 2),
                        u32_load_unaligned(src + 2 * sstride - 2),
                        u32_load_unaligned(src + 3 * sstride - 2));
-      c = v128_from_32(u32_load_unaligned(src - 1),
+      d = v128_from_32(u32_load_unaligned(src - 1),
                        u32_load_unaligned(src + sstride - 1),
                        u32_load_unaligned(src + 2 * sstride - 1),
                        u32_load_unaligned(src + 3 * sstride - 1));
     } else {  // Left clipping
-      b = v128_shuffle_8(o, v128_load_aligned(b_shuff));
       c = v128_shuffle_8(o, v128_load_aligned(c_shuff));
+      d = v128_shuffle_8(o, v128_load_aligned(d_shuff));
     }
     if (right) {
-      d = v128_from_32(u32_load_unaligned(src + 1),
+      e = v128_from_32(u32_load_unaligned(src + 1),
                        u32_load_unaligned(src + sstride + 1),
                        u32_load_unaligned(src + 2 * sstride + 1),
                        u32_load_unaligned(src + 3 * sstride + 1));
-      e = v128_from_32(u32_load_unaligned(src + 2 * !!right),
+      f = v128_from_32(u32_load_unaligned(src + 2),
                        u32_load_unaligned(src + sstride + 2),
                        u32_load_unaligned(src + 2 * sstride + 2),
                        u32_load_unaligned(src + 3 * sstride + 2));
     } else {  // Right clipping
-      d = v128_shuffle_8(o, v128_load_aligned(d_shuff));
       e = v128_shuffle_8(o, v128_load_aligned(e_shuff));
+      f = v128_shuffle_8(o, v128_load_aligned(f_shuff));
     }
 
-    o = calc_delta(o, a, b, c, d, e, f, sp, sm);
+    o = calc_delta(o, a, b, c, d, e, f, g, h, strength);
     u32_store_aligned(dst, v128_low_u32(v128_shr_n_byte(o, 12)));
     u32_store_aligned(dst + dstride, v128_low_u32(v128_shr_n_byte(o, 8)));
     u32_store_aligned(dst + 2 * dstride, v128_low_u32(v128_shr_n_byte(o, 4)));
@@ -186,79 +156,93 @@
 void SIMD_FUNC(aom_clpf_block)(const uint8_t *src, uint8_t *dst, int sstride,
                                int dstride, int x0, int y0, int sizex,
                                int sizey, unsigned int strength,
-                               BOUNDARY_TYPE bt) {
+                               BOUNDARY_TYPE bt, unsigned int bd) {
   if ((sizex != 4 && sizex != 8) || ((sizey & 3) && sizex == 4)) {
     // Fallback to C for odd sizes:
     // * block widths not 4 or 8
     // * block heights not a multiple of 4 if the block width is 4
     aom_clpf_block_c(src, dst, sstride, dstride, x0, y0, sizex, sizey, strength,
-                     bt);
+                     bt, bd);
   } else {
     (sizex == 4 ? clpf_block4 : clpf_block8)(src, dst, sstride, dstride, x0, y0,
-                                             sizey, strength, bt);
+                                             sizey, bt, strength);
   }
 }
 
 #if CONFIG_AOM_HIGHBITDEPTH
-// delta = 4/16 * clamp(a - o, -s, s) + 1/16 * clamp(b - o, -s, s) +
-//         3/16 * clamp(c - o, -s, s) + 3/16 * clamp(d - o, -s, s) +
-//         1/16 * clamp(e - o, -s, s) + 4/16 * clamp(f - o, -s, s)
-SIMD_INLINE v128 calc_delta_hbd(v128 o, v128 a, v128 b, v128 c, v128 d, v128 e,
-                                v128 f, v128 sp, v128 sm) {
-  const v128 c8 = v128_dup_16(8);
-  const v128 tmp =
-      v128_add_16(v128_max_s16(v128_min_s16(v128_sub_16(c, o), sp), sm),
-                  v128_max_s16(v128_min_s16(v128_sub_16(d, o), sp), sm));
+// sign(a - b) * max(0, abs(a - b) - max(0, abs(a - b) -
+// strength + (abs(a - b) >> (bd - 3 - log2(s)))))
+SIMD_INLINE v128 constrain_hbd(v128 a, v128 b, unsigned int strength,
+                               unsigned int bd) {
+  const v128 diff = v128_sub_16(v128_max_s16(a, b), v128_min_s16(a, b));
+  const v128 sign = v128_cmpeq_16(v128_min_s16(a, b), a);  // -(a <= b)
+  const v128 zero = v128_zero();
+  const v128 s = v128_max_s16(
+      zero, v128_sub_16(v128_dup_16(strength),
+                        v128_shr_u16(diff, bd - 3 - get_msb(strength))));
+  return v128_sub_16(
+      v128_xor(sign,
+               v128_max_s16(
+                   zero, v128_sub_16(
+                             diff, v128_max_s16(zero, v128_sub_16(diff, s))))),
+      sign);
+}
+
+// delta = 1/16 * constrain(a, x, s, bd) + 3/16 * constrain(b, x, s, bd) +
+//         1/16 * constrain(c, x, s, bd) + 3/16 * constrain(d, x, s, bd) +
+//         3/16 * constrain(e, x, s, bd) + 1/16 * constrain(f, x, s, bd) +
+//         3/16 * constrain(g, x, s, bd) + 1/16 * constrain(h, x, s, bd)
+SIMD_INLINE v128 calc_delta_hbd(v128 x, v128 a, v128 b, v128 c, v128 d, v128 e,
+                                v128 f, v128 g, v128 h, unsigned int s,
+                                unsigned int bd) {
+  const v128 bdeg = v128_add_16(
+      v128_add_16(constrain_hbd(b, x, s, bd), constrain_hbd(d, x, s, bd)),
+      v128_add_16(constrain_hbd(e, x, s, bd), constrain_hbd(g, x, s, bd)));
   const v128 delta = v128_add_16(
       v128_add_16(
-          v128_shl_16(
-              v128_add_16(
-                  v128_max_s16(v128_min_s16(v128_sub_16(a, o), sp), sm),
-                  v128_max_s16(v128_min_s16(v128_sub_16(f, o), sp), sm)),
-              2),
-          v128_add_16(v128_max_s16(v128_min_s16(v128_sub_16(b, o), sp), sm),
-                      v128_max_s16(v128_min_s16(v128_sub_16(e, o), sp), sm))),
-      v128_add_16(v128_add_16(tmp, tmp), tmp));
+          v128_add_16(constrain_hbd(a, x, s, bd), constrain_hbd(c, x, s, bd)),
+          v128_add_16(constrain_hbd(f, x, s, bd), constrain_hbd(h, x, s, bd))),
+      v128_add_16(v128_add_16(bdeg, bdeg), bdeg));
   return v128_add_16(
-      o, v128_shr_s16(
-             v128_add_16(
-                 c8, v128_add_16(delta, v128_cmplt_s16(delta, v128_zero()))),
-             4));
+      x,
+      v128_shr_s16(
+          v128_add_16(v128_dup_16(8),
+                      v128_add_16(delta, v128_cmplt_s16(delta, v128_zero()))),
+          4));
 }
 
 static void calc_delta_hbd4(v128 o, v128 a, v128 b, v128 c, v128 d, v128 e,
-                            v128 f, uint16_t *dst, v128 sp, v128 sm,
-                            int dstride) {
-  o = calc_delta_hbd(o, a, b, c, d, e, f, sp, sm);
+                            v128 f, v128 g, v128 h, uint16_t *dst,
+                            unsigned int s, unsigned int bd, int dstride) {
+  o = calc_delta_hbd(o, a, b, c, d, e, f, g, h, s, bd);
   v64_store_aligned(dst, v128_high_v64(o));
   v64_store_aligned(dst + dstride, v128_low_v64(o));
 }
 
 static void calc_delta_hbd8(v128 o, v128 a, v128 b, v128 c, v128 d, v128 e,
-                            v128 f, uint16_t *dst, v128 sp, v128 sm) {
-  v128_store_aligned(dst, calc_delta_hbd(o, a, b, c, d, e, f, sp, sm));
+                            v128 f, v128 g, v128 h, uint16_t *dst,
+                            unsigned int s, unsigned int bd) {
+  v128_store_aligned(dst, calc_delta_hbd(o, a, b, c, d, e, f, g, h, s, bd));
 }
 
 // Process blocks of width 4, two lines at time.
 SIMD_INLINE void clpf_block_hbd4(const uint16_t *src, uint16_t *dst,
                                  int sstride, int dstride, int x0, int y0,
                                  int sizey, unsigned int strength,
-                                 BOUNDARY_TYPE bt) {
-  const v128 sp = v128_dup_16(strength);
-  const v128 sm = v128_dup_16(-(int)strength);
+                                 BOUNDARY_TYPE bt, unsigned int bd) {
   const int right = !(bt & TILE_RIGHT_BOUNDARY);
   const int bottom = bt & TILE_BOTTOM_BOUNDARY ? sizey - 2 : -1;
   const int left = !(bt & TILE_LEFT_BOUNDARY);
   const int top = bt & TILE_ABOVE_BOUNDARY ? y0 : -1;
 
   DECLARE_ALIGNED(16, static const uint64_t,
-                  b_shuff[]) = { 0x0302010001000100LL, 0x0b0a090809080908LL };
+                  c_shuff[]) = { 0x0302010001000100LL, 0x0b0a090809080908LL };
   DECLARE_ALIGNED(16, static const uint64_t,
-                  c_shuff[]) = { 0x0504030201000100LL, 0x0d0c0b0a09080908LL };
+                  d_shuff[]) = { 0x0504030201000100LL, 0x0d0c0b0a09080908LL };
   DECLARE_ALIGNED(16, static const uint64_t,
-                  d_shuff[]) = { 0x0706070605040302LL, 0x0f0e0f0e0d0c0b0aLL };
+                  e_shuff[]) = { 0x0706070605040302LL, 0x0f0e0f0e0d0c0b0aLL };
   DECLARE_ALIGNED(16, static const uint64_t,
-                  e_shuff[]) = { 0x0706070607060504LL, 0x0f0e0f0e0f0e0d0cLL };
+                  f_shuff[]) = { 0x0706070607060504LL, 0x0f0e0f0e0f0e0d0cLL };
   int y;
 
   dst += x0 + y0 * dstride;
@@ -267,32 +251,36 @@
   for (y = 0; y < sizey; y += 2) {
     const v64 l1 = v64_load_aligned(src);
     const v64 l2 = v64_load_aligned(src + sstride);
+    const v64 l3 = v64_load_aligned(src - (y != top) * sstride);
+    const v64 l4 = v64_load_aligned(src + ((y != bottom) + 1) * sstride);
     v128 o = v128_from_v64(l1, l2);
     const v128 a =
-        v128_from_v64(v64_load_aligned(src - (y != top) * sstride), l1);
-    const v128 f = v128_from_v64(
-        l2, v64_load_aligned(src + ((y != bottom) + 1) * sstride));
-    v128 b, c, d, e;
+        v128_from_v64(v64_load_aligned(src - 2 * (y != top) * sstride), l3);
+    const v128 b = v128_from_v64(l3, l1);
+    const v128 g = v128_from_v64(l2, l4);
+    const v128 h = v128_from_v64(
+        l4, v64_load_aligned(src + (2 * (y != bottom) + 1) * sstride));
+    v128 c, d, e, f;
 
     if (left) {
-      b = v128_from_v64(v64_load_unaligned(src - 2),
+      c = v128_from_v64(v64_load_unaligned(src - 2),
                         v64_load_unaligned(src - 2 + sstride));
-      c = v128_from_v64(v64_load_unaligned(src - 1),
+      d = v128_from_v64(v64_load_unaligned(src - 1),
                         v64_load_unaligned(src - 1 + sstride));
     } else {  // Left clipping
-      b = v128_shuffle_8(o, v128_load_aligned(b_shuff));
       c = v128_shuffle_8(o, v128_load_aligned(c_shuff));
+      d = v128_shuffle_8(o, v128_load_aligned(d_shuff));
     }
     if (right) {
-      d = v128_from_v64(v64_load_unaligned(src + 1),
+      e = v128_from_v64(v64_load_unaligned(src + 1),
                         v64_load_unaligned(src + 1 + sstride));
-      e = v128_from_v64(v64_load_unaligned(src + 2),
+      f = v128_from_v64(v64_load_unaligned(src + 2),
                         v64_load_unaligned(src + 2 + sstride));
     } else {  // Right clipping
-      d = v128_shuffle_8(o, v128_load_aligned(d_shuff));
       e = v128_shuffle_8(o, v128_load_aligned(e_shuff));
+      f = v128_shuffle_8(o, v128_load_aligned(f_shuff));
     }
-    calc_delta_hbd4(o, a, b, c, d, e, f, dst, sp, sm, dstride);
+    calc_delta_hbd4(o, a, b, c, d, e, f, g, h, dst, strength, bd, dstride);
     src += sstride * 2;
     dst += dstride * 2;
   }
@@ -301,22 +289,21 @@
 // The most simple case.  Start here if you need to understand the functions.
 SIMD_INLINE void clpf_block_hbd(const uint16_t *src, uint16_t *dst, int sstride,
                                 int dstride, int x0, int y0, int sizey,
-                                unsigned int strength, BOUNDARY_TYPE bt) {
-  const v128 sp = v128_dup_16(strength);
-  const v128 sm = v128_dup_16(-(int)strength);
+                                unsigned int strength, BOUNDARY_TYPE bt,
+                                unsigned int bd) {
   const int right = !(bt & TILE_RIGHT_BOUNDARY);
-  const int bottom = bt & TILE_BOTTOM_BOUNDARY ? sizey - 2 : -2;
   const int left = !(bt & TILE_LEFT_BOUNDARY);
-  const int top = bt & TILE_ABOVE_BOUNDARY ? y0 : -1;
+  const int ymin = -!(bt & TILE_ABOVE_BOUNDARY) * 2;
+  const int ymax = sizey + !(bt & TILE_BOTTOM_BOUNDARY) * 2 - 1;
 
   DECLARE_ALIGNED(16, static const uint64_t,
-                  b_shuff[]) = { 0x0302010001000100LL, 0x0b0a090807060504LL };
+                  c_shuff[]) = { 0x0302010001000100LL, 0x0b0a090807060504LL };
   DECLARE_ALIGNED(16, static const uint64_t,
-                  c_shuff[]) = { 0x0504030201000100LL, 0x0d0c0b0a09080706LL };
+                  d_shuff[]) = { 0x0504030201000100LL, 0x0d0c0b0a09080706LL };
   DECLARE_ALIGNED(16, static const uint64_t,
-                  d_shuff[]) = { 0x0908070605040302LL, 0x0f0e0f0e0d0c0b0aLL };
+                  e_shuff[]) = { 0x0908070605040302LL, 0x0f0e0f0e0d0c0b0aLL };
   DECLARE_ALIGNED(16, static const uint64_t,
-                  e_shuff[]) = { 0x0b0a090807060504LL, 0x0f0e0f0e0f0e0d0cLL };
+                  f_shuff[]) = { 0x0b0a090807060504LL, 0x0f0e0f0e0f0e0d0cLL };
   int y;
 
   dst += x0 + y0 * dstride;
@@ -327,27 +314,28 @@
   // Clipping along the left and right edges is handled by shuffle
   // instructions doing shift and pad.
   for (y = 0; y < sizey; y++) {
-    const v128 o = v128_load_aligned(src);
-    const v128 a = v128_load_aligned(src - (y != top) * sstride);
-    const v128 f = v128_load_aligned(src + (y - 1 != bottom) * sstride);
-    v128 b, c, d, e;
+    const v128 o = v128_load_aligned(src + y * sstride);
+    const v128 a = v128_load_aligned(src + AOMMAX(ymin, y - 2) * sstride);
+    const v128 b = v128_load_aligned(src + AOMMAX(ymin, y - 1) * sstride);
+    const v128 g = v128_load_aligned(src + AOMMIN(ymax, y + 1) * sstride);
+    const v128 h = v128_load_aligned(src + AOMMIN(ymax, y + 2) * sstride);
+    v128 c, d, e, f;
 
     if (left) {
-      b = v128_load_unaligned(src - 2);
-      c = v128_load_unaligned(src - 1);
+      c = v128_load_unaligned(src + y * sstride - 2);
+      d = v128_load_unaligned(src + y * sstride - 1);
     } else {  // Left clipping
-      b = v128_shuffle_8(o, v128_load_aligned(b_shuff));
       c = v128_shuffle_8(o, v128_load_aligned(c_shuff));
+      d = v128_shuffle_8(o, v128_load_aligned(d_shuff));
     }
     if (right) {
-      d = v128_load_unaligned(src + 1);
-      e = v128_load_unaligned(src + 2);
+      e = v128_load_unaligned(src + y * sstride + 1);
+      f = v128_load_unaligned(src + y * sstride + 2);
     } else {  // Right clipping
-      d = v128_shuffle_8(o, v128_load_aligned(d_shuff));
       e = v128_shuffle_8(o, v128_load_aligned(e_shuff));
+      f = v128_shuffle_8(o, v128_load_aligned(f_shuff));
     }
-    calc_delta_hbd8(o, a, b, c, d, e, f, dst, sp, sm);
-    src += sstride;
+    calc_delta_hbd8(o, a, b, c, d, e, f, g, h, dst, strength, bd);
     dst += dstride;
   }
 }
@@ -355,16 +343,16 @@
 void SIMD_FUNC(aom_clpf_block_hbd)(const uint16_t *src, uint16_t *dst,
                                    int sstride, int dstride, int x0, int y0,
                                    int sizex, int sizey, unsigned int strength,
-                                   BOUNDARY_TYPE bt) {
+                                   BOUNDARY_TYPE bt, unsigned int bd) {
   if ((sizex != 4 && sizex != 8) || ((sizey & 1) && sizex == 4)) {
     // Fallback to C for odd sizes:
     // * block width not 4 or 8
     // * block heights not a multiple of 2 if the block width is 4
     aom_clpf_block_hbd_c(src, dst, sstride, dstride, x0, y0, sizex, sizey,
-                         strength, bt);
+                         strength, bt, bd);
   } else {
     (sizex == 4 ? clpf_block_hbd4 : clpf_block_hbd)(
-        src, dst, sstride, dstride, x0, y0, sizey, strength, bt);
+        src, dst, sstride, dstride, x0, y0, sizey, strength, bt, bd);
   }
 }
 #endif
diff --git a/av1/common/clpf_simd_kernel.h b/av1/common/clpf_simd_kernel.h
new file mode 100644
index 0000000..5412746
--- /dev/null
+++ b/av1/common/clpf_simd_kernel.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef AV1_COMMON_CLPF_SIMD_KERNEL_H_
+#define AV1_COMMON_CLPF_SIMD_KERNEL_H_
+
+#include "aom_dsp/aom_simd.h"
+
+// sign(a - b) * max(0, abs(a - b) - max(0, abs(a - b) -
+// strength + (abs(a - b) >> (5 - log2(strength)))))
+SIMD_INLINE v128 constrain(v128 a, v128 b, unsigned int strength) {
+  const v128 diff = v128_sub_8(v128_max_u8(a, b), v128_min_u8(a, b));
+  const v128 sign = v128_cmpeq_8(v128_min_u8(a, b), a);  // -(a <= b)
+  const v128 s = v128_ssub_u8(v128_dup_8(strength),
+                              v128_shr_u8(diff, 5 - get_msb(strength)));
+  return v128_sub_8(v128_xor(sign, v128_ssub_u8(diff, v128_ssub_u8(diff, s))),
+                    sign);
+}
+
+// delta = 1/16 * constrain(a, x, s) + 3/16 * constrain(b, x, s) +
+//         1/16 * constrain(c, x, s) + 3/16 * constrain(d, x, s) +
+//         3/16 * constrain(e, x, s) + 1/16 * constrain(f, x, s) +
+//         3/16 * constrain(g, x, s) + 1/16 * constrain(h, x, s)
+SIMD_INLINE v128 calc_delta(v128 x, v128 a, v128 b, v128 c, v128 d, v128 e,
+                            v128 f, v128 g, v128 h, unsigned int s) {
+  const v128 bdeg =
+      v128_add_8(v128_add_8(constrain(b, x, s), constrain(d, x, s)),
+                 v128_add_8(constrain(e, x, s), constrain(g, x, s)));
+  const v128 delta =
+      v128_add_8(v128_add_8(v128_add_8(constrain(a, x, s), constrain(c, x, s)),
+                            v128_add_8(constrain(f, x, s), constrain(h, x, s))),
+                 v128_add_8(v128_add_8(bdeg, bdeg), bdeg));
+  return v128_add_8(
+      x, v128_shr_s8(
+             v128_add_8(v128_dup_8(8),
+                        v128_add_8(delta, v128_cmplt_s8(delta, v128_zero()))),
+             4));
+}
+
+#endif
diff --git a/av1/encoder/clpf_rdo.c b/av1/encoder/clpf_rdo.c
index 84e97dc..4ea3989 100644
--- a/av1/encoder/clpf_rdo.c
+++ b/av1/encoder/clpf_rdo.c
@@ -18,20 +18,24 @@
 // Calculate the error of a filtered and unfiltered block
 void aom_clpf_detect_c(const uint8_t *rec, const uint8_t *org, int rstride,
                        int ostride, int x0, int y0, int width, int height,
-                       int *sum0, int *sum1, unsigned int strength, int size) {
+                       int *sum0, int *sum1, unsigned int strength, int size,
+                       unsigned int bd) {
   int x, y;
   for (y = y0; y < y0 + size; y++) {
     for (x = x0; x < x0 + size; x++) {
-      int O = org[y * ostride + x];
-      int X = rec[y * rstride + x];
-      int A = rec[AOMMAX(0, y - 1) * rstride + x];
-      int B = rec[y * rstride + AOMMAX(0, x - 2)];
-      int C = rec[y * rstride + AOMMAX(0, x - 1)];
-      int D = rec[y * rstride + AOMMIN(width - 1, x + 1)];
-      int E = rec[y * rstride + AOMMIN(width - 1, x + 2)];
-      int F = rec[AOMMIN(height - 1, y + 1) * rstride + x];
-      int delta = av1_clpf_sample(X, A, B, C, D, E, F, strength);
-      int Y = X + delta;
+      const int O = org[y * ostride + x];
+      const int X = rec[y * rstride + x];
+      const int A = rec[AOMMAX(0, y - 2) * rstride + x];
+      const int B = rec[AOMMAX(0, y - 1) * rstride + x];
+      const int C = rec[y * rstride + AOMMAX(0, x - 2)];
+      const int D = rec[y * rstride + AOMMAX(0, x - 1)];
+      const int E = rec[y * rstride + AOMMIN(width - 1, x + 1)];
+      const int F = rec[y * rstride + AOMMIN(width - 1, x + 2)];
+      const int G = rec[AOMMIN(height - 1, y + 1) * rstride + x];
+      const int H = rec[AOMMIN(height - 1, y + 2) * rstride + x];
+      const int delta =
+          av1_clpf_sample(X, A, B, C, D, E, F, G, H, strength, bd);
+      const int Y = X + delta;
       *sum0 += (O - X) * (O - X);
       *sum1 += (O - Y) * (O - Y);
     }
@@ -40,25 +44,28 @@
 
 void aom_clpf_detect_multi_c(const uint8_t *rec, const uint8_t *org,
                              int rstride, int ostride, int x0, int y0,
-                             int width, int height, int *sum, int size) {
+                             int width, int height, int *sum, int size,
+                             unsigned int bd) {
   int x, y;
 
   for (y = y0; y < y0 + size; y++) {
     for (x = x0; x < x0 + size; x++) {
-      int O = org[y * ostride + x];
-      int X = rec[y * rstride + x];
-      int A = rec[AOMMAX(0, y - 1) * rstride + x];
-      int B = rec[y * rstride + AOMMAX(0, x - 2)];
-      int C = rec[y * rstride + AOMMAX(0, x - 1)];
-      int D = rec[y * rstride + AOMMIN(width - 1, x + 1)];
-      int E = rec[y * rstride + AOMMIN(width - 1, x + 2)];
-      int F = rec[AOMMIN(height - 1, y + 1) * rstride + x];
-      int delta1 = av1_clpf_sample(X, A, B, C, D, E, F, 1);
-      int delta2 = av1_clpf_sample(X, A, B, C, D, E, F, 2);
-      int delta3 = av1_clpf_sample(X, A, B, C, D, E, F, 4);
-      int F1 = X + delta1;
-      int F2 = X + delta2;
-      int F3 = X + delta3;
+      const int O = org[y * ostride + x];
+      const int X = rec[y * rstride + x];
+      const int A = rec[AOMMAX(0, y - 2) * rstride + x];
+      const int B = rec[AOMMAX(0, y - 1) * rstride + x];
+      const int C = rec[y * rstride + AOMMAX(0, x - 2)];
+      const int D = rec[y * rstride + AOMMAX(0, x - 1)];
+      const int E = rec[y * rstride + AOMMIN(width - 1, x + 1)];
+      const int F = rec[y * rstride + AOMMIN(width - 1, x + 2)];
+      const int G = rec[AOMMIN(height - 1, y + 1) * rstride + x];
+      const int H = rec[AOMMIN(height - 1, y + 2) * rstride + x];
+      const int delta1 = av1_clpf_sample(X, A, B, C, D, E, F, G, H, 1, bd);
+      const int delta2 = av1_clpf_sample(X, A, B, C, D, E, F, G, H, 2, bd);
+      const int delta3 = av1_clpf_sample(X, A, B, C, D, E, F, G, H, 4, bd);
+      const int F1 = X + delta1;
+      const int F2 = X + delta2;
+      const int F3 = X + delta3;
       sum[0] += (O - X) * (O - X);
       sum[1] += (O - F1) * (O - F1);
       sum[2] += (O - F2) * (O - F2);
@@ -72,20 +79,24 @@
 void aom_clpf_detect_hbd_c(const uint16_t *rec, const uint16_t *org,
                            int rstride, int ostride, int x0, int y0, int width,
                            int height, int *sum0, int *sum1,
-                           unsigned int strength, int shift, int size) {
+                           unsigned int strength, int size, unsigned int bd) {
+  const int shift = bd - 8;
   int x, y;
   for (y = y0; y < y0 + size; y++) {
     for (x = x0; x < x0 + size; x++) {
-      int O = org[y * ostride + x] >> shift;
-      int X = rec[y * rstride + x] >> shift;
-      int A = rec[AOMMAX(0, y - 1) * rstride + x] >> shift;
-      int B = rec[y * rstride + AOMMAX(0, x - 2)] >> shift;
-      int C = rec[y * rstride + AOMMAX(0, x - 1)] >> shift;
-      int D = rec[y * rstride + AOMMIN(width - 1, x + 1)] >> shift;
-      int E = rec[y * rstride + AOMMIN(width - 1, x + 2)] >> shift;
-      int F = rec[AOMMIN(height - 1, y + 1) * rstride + x] >> shift;
-      int delta = av1_clpf_sample(X, A, B, C, D, E, F, strength >> shift);
-      int Y = X + delta;
+      const int O = org[y * ostride + x] >> shift;
+      const int X = rec[y * rstride + x] >> shift;
+      const int A = rec[AOMMAX(0, y - 2) * rstride + x] >> shift;
+      const int B = rec[AOMMAX(0, y - 1) * rstride + x] >> shift;
+      const int C = rec[y * rstride + AOMMAX(0, x - 2)] >> shift;
+      const int D = rec[y * rstride + AOMMAX(0, x - 1)] >> shift;
+      const int E = rec[y * rstride + AOMMIN(width - 1, x + 1)] >> shift;
+      const int F = rec[y * rstride + AOMMIN(width - 1, x + 2)] >> shift;
+      const int G = rec[AOMMIN(height - 1, y + 1) * rstride + x] >> shift;
+      const int H = rec[AOMMIN(height - 1, y + 2) * rstride + x] >> shift;
+      const int delta = av1_clpf_sample(X, A, B, C, D, E, F, G, H,
+                                        strength >> shift, bd - shift);
+      const int Y = X + delta;
       *sum0 += (O - X) * (O - X);
       *sum1 += (O - Y) * (O - Y);
     }
@@ -95,26 +106,32 @@
 // aom_clpf_detect_multi_c() apart from "rec" and "org".
 void aom_clpf_detect_multi_hbd_c(const uint16_t *rec, const uint16_t *org,
                                  int rstride, int ostride, int x0, int y0,
-                                 int width, int height, int *sum, int shift,
-                                 int size) {
+                                 int width, int height, int *sum, int size,
+                                 unsigned int bd) {
+  const int shift = bd - 8;
   int x, y;
 
   for (y = y0; y < y0 + size; y++) {
     for (x = x0; x < x0 + size; x++) {
       int O = org[y * ostride + x] >> shift;
       int X = rec[y * rstride + x] >> shift;
-      int A = rec[AOMMAX(0, y - 1) * rstride + x] >> shift;
-      int B = rec[y * rstride + AOMMAX(0, x - 2)] >> shift;
-      int C = rec[y * rstride + AOMMAX(0, x - 1)] >> shift;
-      int D = rec[y * rstride + AOMMIN(width - 1, x + 1)] >> shift;
-      int E = rec[y * rstride + AOMMIN(width - 1, x + 2)] >> shift;
-      int F = rec[AOMMIN(height - 1, y + 1) * rstride + x] >> shift;
-      int delta1 = av1_clpf_sample(X, A, B, C, D, E, F, 1);
-      int delta2 = av1_clpf_sample(X, A, B, C, D, E, F, 2);
-      int delta3 = av1_clpf_sample(X, A, B, C, D, E, F, 4);
-      int F1 = X + delta1;
-      int F2 = X + delta2;
-      int F3 = X + delta3;
+      const int A = rec[AOMMAX(0, y - 2) * rstride + x] >> shift;
+      const int B = rec[AOMMAX(0, y - 1) * rstride + x] >> shift;
+      const int C = rec[y * rstride + AOMMAX(0, x - 2)] >> shift;
+      const int D = rec[y * rstride + AOMMAX(0, x - 1)] >> shift;
+      const int E = rec[y * rstride + AOMMIN(width - 1, x + 1)] >> shift;
+      const int F = rec[y * rstride + AOMMIN(width - 1, x + 2)] >> shift;
+      const int G = rec[AOMMIN(height - 1, y + 1) * rstride + x] >> shift;
+      const int H = rec[AOMMIN(height - 1, y + 2) * rstride + x] >> shift;
+      const int delta1 =
+          av1_clpf_sample(X, A, B, C, D, E, F, G, H, 1, bd - shift);
+      const int delta2 =
+          av1_clpf_sample(X, A, B, C, D, E, F, G, H, 2, bd - shift);
+      const int delta3 =
+          av1_clpf_sample(X, A, B, C, D, E, F, G, H, 4, bd - shift);
+      const int F1 = X + delta1;
+      const int F2 = X + delta2;
+      const int F3 = X + delta3;
       sum[0] += (O - X) * (O - X);
       sum[1] += (O - F1) * (O - F1);
       sum[2] += (O - F2) * (O - F2);
@@ -143,17 +160,18 @@
                               CONVERT_TO_SHORTPTR(org->y_buffer), rec->y_stride,
                               org->y_stride, xpos, ypos, rec->y_crop_width,
                               rec->y_crop_height, &sum0, &sum1, strength,
-                              cm->bit_depth - 8, block_size);
+                              block_size, cm->bit_depth);
         } else {
           aom_clpf_detect(rec->y_buffer, org->y_buffer, rec->y_stride,
                           org->y_stride, xpos, ypos, rec->y_crop_width,
                           rec->y_crop_height, &sum0, &sum1, strength,
-                          block_size);
+                          block_size, cm->bit_depth);
         }
 #else
         aom_clpf_detect(rec->y_buffer, org->y_buffer, rec->y_stride,
                         org->y_stride, xpos, ypos, rec->y_crop_width,
-                        rec->y_crop_height, &sum0, &sum1, strength, block_size);
+                        rec->y_crop_height, &sum0, &sum1, strength, block_size,
+                        cm->bit_depth);
 #endif
       }
     }
@@ -255,16 +273,16 @@
         aom_clpf_detect_multi_hbd(CONVERT_TO_SHORTPTR(rec_buffer),
                                   CONVERT_TO_SHORTPTR(org_buffer), rec_stride,
                                   org_stride, xpos, ypos, rec_width, rec_height,
-                                  sum + skip, cm->bit_depth - 8, block_size);
+                                  sum + skip, block_size, cm->bit_depth);
       } else {
         aom_clpf_detect_multi(rec_buffer, org_buffer, rec_stride, org_stride,
                               xpos, ypos, rec_width, rec_height, sum + skip,
-                              block_size);
+                              block_size, cm->bit_depth);
       }
 #else
       aom_clpf_detect_multi(rec_buffer, org_buffer, rec_stride, org_stride,
                             xpos, ypos, rec_width, rec_height, sum + skip,
-                            block_size);
+                            block_size, cm->bit_depth);
 #endif
       filtered |= !skip;
     }
diff --git a/av1/encoder/clpf_rdo_simd.h b/av1/encoder/clpf_rdo_simd.h
index 7c07329..177359f 100644
--- a/av1/encoder/clpf_rdo_simd.h
+++ b/av1/encoder/clpf_rdo_simd.h
@@ -12,64 +12,27 @@
 #include "./aom_dsp_rtcd.h"
 #include "aom_dsp/aom_simd.h"
 #include "aom_ports/mem.h"
+#include "aom_ports/bitops.h"
+#include "av1/common/clpf_simd_kernel.h"
 
-SIMD_INLINE void calc_diff(v128 o, v128 *a, v128 *b, v128 *c, v128 *d, v128 *e,
-                           v128 *f) {
-  // The difference will be 9 bit, offset by 128 so we can use saturated
-  // sub to avoid going to 16 bit temporarily before "strength" clipping.
-  const v128 c128 = v128_dup_8(128);
-  v128 x = v128_add_8(c128, o);
-  *a = v128_ssub_s8(v128_add_8(c128, *a), x);
-  *b = v128_ssub_s8(v128_add_8(c128, *b), x);
-  *c = v128_ssub_s8(v128_add_8(c128, *c), x);
-  *d = v128_ssub_s8(v128_add_8(c128, *d), x);
-  *e = v128_ssub_s8(v128_add_8(c128, *e), x);
-  *f = v128_ssub_s8(v128_add_8(c128, *f), x);
-}
-
-SIMD_INLINE v128 delta_kernel(v128 o, v128 a, v128 b, v128 c, v128 d, v128 e,
-                              v128 f, v128 sp, v128 sm) {
-  const v128 tmp = v128_add_8(v128_max_s8(v128_min_s8(c, sp), sm),
-                              v128_max_s8(v128_min_s8(d, sp), sm));
-  const v128 delta = v128_add_8(
-      v128_add_8(v128_shl_8(v128_add_8(v128_max_s8(v128_min_s8(a, sp), sm),
-                                       v128_max_s8(v128_min_s8(f, sp), sm)),
-                            2),
-                 v128_add_8(v128_max_s8(v128_min_s8(b, sp), sm),
-                            v128_max_s8(v128_min_s8(e, sp), sm))),
-      v128_add_8(v128_add_8(tmp, tmp), tmp));
-
-  return v128_add_8(
-      o, v128_shr_s8(
-             v128_add_8(v128_dup_8(8),
-                        v128_add_8(delta, v128_cmplt_s8(delta, v128_zero()))),
-             4));
-}
-
-SIMD_INLINE v128 calc_delta(v128 o, v128 a, v128 b, v128 c, v128 d, v128 e,
-                            v128 f, v128 sp, v128 sm) {
-  calc_diff(o, &a, &b, &c, &d, &e, &f);
-  return delta_kernel(o, a, b, c, d, e, f, sp, sm);
-}
-
-SIMD_INLINE void clip_sides(v128 *b, v128 *c, v128 *d, v128 *e, int left,
+SIMD_INLINE void clip_sides(v128 *c, v128 *d, v128 *e, v128 *f, int left,
                             int right) {
   DECLARE_ALIGNED(16, static const uint64_t,
-                  b_shuff[]) = { 0x0504030201000000LL, 0x0d0c0b0a09080808LL };
+                  c_shuff[]) = { 0x0504030201000000LL, 0x0d0c0b0a09080808LL };
   DECLARE_ALIGNED(16, static const uint64_t,
-                  c_shuff[]) = { 0x0605040302010000LL, 0x0e0d0c0b0a090808LL };
+                  d_shuff[]) = { 0x0605040302010000LL, 0x0e0d0c0b0a090808LL };
   DECLARE_ALIGNED(16, static const uint64_t,
-                  d_shuff[]) = { 0x0707060504030201LL, 0x0f0f0e0d0c0b0a09LL };
+                  e_shuff[]) = { 0x0707060504030201LL, 0x0f0f0e0d0c0b0a09LL };
   DECLARE_ALIGNED(16, static const uint64_t,
-                  e_shuff[]) = { 0x0707070605040302LL, 0x0f0f0f0e0d0c0b0aLL };
+                  f_shuff[]) = { 0x0707070605040302LL, 0x0f0f0f0e0d0c0b0aLL };
 
   if (!left) {  // Left clipping
-    *b = v128_shuffle_8(*b, v128_load_aligned(b_shuff));
     *c = v128_shuffle_8(*c, v128_load_aligned(c_shuff));
+    *d = v128_shuffle_8(*d, v128_load_aligned(d_shuff));
   }
   if (!right) {  // Right clipping
-    *d = v128_shuffle_8(*d, v128_load_aligned(d_shuff));
     *e = v128_shuffle_8(*e, v128_load_aligned(e_shuff));
+    *f = v128_shuffle_8(*f, v128_load_aligned(f_shuff));
   }
 }
 
@@ -77,41 +40,45 @@
                                 int rstride, int ostride, int x0, int y0,
                                 int bottom, int right, int y, v128 *o, v128 *r,
                                 v128 *a, v128 *b, v128 *c, v128 *d, v128 *e,
-                                v128 *f) {
+                                v128 *f, v128 *g, v128 *h) {
   const v64 k1 = v64_load_aligned(org);
   const v64 k2 = v64_load_aligned(org + ostride);
   const v64 l1 = v64_load_aligned(rec);
   const v64 l2 = v64_load_aligned(rec + rstride);
+  const v64 l3 = v64_load_aligned(rec - (y != -y0) * rstride);
+  const v64 l4 = v64_load_aligned(rec + ((y != bottom) + 1) * rstride);
   *o = v128_from_v64(k1, k2);
   *r = v128_from_v64(l1, l2);
-  *a = v128_from_v64(v64_load_aligned(rec - (y != -y0) * rstride), l1);
-  *f = v128_from_v64(l2, v64_load_aligned(rec + ((y != bottom) + 1) * rstride));
-  *b = v128_from_v64(v64_load_unaligned(rec - 2 * !!x0),
+  *a = v128_from_v64(v64_load_aligned(rec - 2 * (y != -y0) * rstride), l3);
+  *b = v128_from_v64(l3, l1);
+  *g = v128_from_v64(l2, l4);
+  *h = v128_from_v64(l4,
+                     v64_load_aligned(rec + (2 * (y != bottom) + 1) * rstride));
+  *c = v128_from_v64(v64_load_unaligned(rec - 2 * !!x0),
                      v64_load_unaligned(rec - 2 * !!x0 + rstride));
-  *c = v128_from_v64(v64_load_unaligned(rec - !!x0),
+  *d = v128_from_v64(v64_load_unaligned(rec - !!x0),
                      v64_load_unaligned(rec - !!x0 + rstride));
-  *d = v128_from_v64(v64_load_unaligned(rec + !!right),
+  *e = v128_from_v64(v64_load_unaligned(rec + !!right),
                      v64_load_unaligned(rec + !!right + rstride));
-  *e = v128_from_v64(v64_load_unaligned(rec + 2 * !!right),
+  *f = v128_from_v64(v64_load_unaligned(rec + 2 * !!right),
                      v64_load_unaligned(rec + 2 * !!right + rstride));
-  clip_sides(b, c, d, e, x0, right);
+  clip_sides(c, d, e, f, x0, right);
 }
 
 void SIMD_FUNC(aom_clpf_detect)(const uint8_t *rec, const uint8_t *org,
                                 int rstride, int ostride, int x0, int y0,
                                 int width, int height, int *sum0, int *sum1,
-                                unsigned int strength, int size) {
-  const v128 sp = v128_dup_8(strength);
-  const v128 sm = v128_dup_8(-(int)strength);
-  const int right = width - 8 - x0;
+                                unsigned int strength, int size,
+                                unsigned int bd) {
   const int bottom = height - 2 - y0;
+  const int right = width - 8 - x0;
   ssd128_internal ssd0 = v128_ssd_u8_init();
   ssd128_internal ssd1 = v128_ssd_u8_init();
   int y;
 
   if (size != 8) {  // Fallback to plain C
     aom_clpf_detect_c(rec, org, rstride, ostride, x0, y0, width, height, sum0,
-                      sum1, strength, size);
+                      sum1, strength, size, bd);
     return;
   }
 
@@ -119,11 +86,12 @@
   org += x0 + y0 * ostride;
 
   for (y = 0; y < 8; y += 2) {
-    v128 a, b, c, d, e, f, o, r;
+    v128 a, b, c, d, e, f, g, h, o, r;
     read_two_lines(rec, org, rstride, ostride, x0, y0, bottom, right, y, &o, &r,
-                   &a, &b, &c, &d, &e, &f);
+                   &a, &b, &c, &d, &e, &f, &g, &h);
     ssd0 = v128_ssd_u8(ssd0, o, r);
-    ssd1 = v128_ssd_u8(ssd1, o, calc_delta(r, a, b, c, d, e, f, sp, sm));
+    ssd1 =
+        v128_ssd_u8(ssd1, o, calc_delta(r, a, b, c, d, e, f, g, h, strength));
     rec += rstride * 2;
     org += ostride * 2;
   }
@@ -132,23 +100,19 @@
 }
 
 SIMD_INLINE void calc_delta_multi(v128 r, v128 o, v128 a, v128 b, v128 c,
-                                  v128 d, v128 e, v128 f, ssd128_internal *ssd1,
-                                  ssd128_internal *ssd2,
+                                  v128 d, v128 e, v128 f, v128 g, v128 h,
+                                  ssd128_internal *ssd1, ssd128_internal *ssd2,
                                   ssd128_internal *ssd3) {
-  calc_diff(r, &a, &b, &c, &d, &e, &f);
-  *ssd1 = v128_ssd_u8(*ssd1, o, delta_kernel(r, a, b, c, d, e, f, v128_dup_8(1),
-                                             v128_dup_8(-1)));
-  *ssd2 = v128_ssd_u8(*ssd2, o, delta_kernel(r, a, b, c, d, e, f, v128_dup_8(2),
-                                             v128_dup_8(-2)));
-  *ssd3 = v128_ssd_u8(*ssd3, o, delta_kernel(r, a, b, c, d, e, f, v128_dup_8(4),
-                                             v128_dup_8(-4)));
+  *ssd1 = v128_ssd_u8(*ssd1, o, calc_delta(r, a, b, c, d, e, f, g, h, 1));
+  *ssd2 = v128_ssd_u8(*ssd2, o, calc_delta(r, a, b, c, d, e, f, g, h, 2));
+  *ssd3 = v128_ssd_u8(*ssd3, o, calc_delta(r, a, b, c, d, e, f, g, h, 4));
 }
 
 // Test multiple filter strengths at once.
 void SIMD_FUNC(aom_clpf_detect_multi)(const uint8_t *rec, const uint8_t *org,
                                       int rstride, int ostride, int x0, int y0,
-                                      int width, int height, int *sum,
-                                      int size) {
+                                      int width, int height, int *sum, int size,
+                                      unsigned int bd) {
   const int bottom = height - 2 - y0;
   const int right = width - 8 - x0;
   ssd128_internal ssd0 = v128_ssd_u8_init();
@@ -159,7 +123,7 @@
 
   if (size != 8) {  // Fallback to plain C
     aom_clpf_detect_multi_c(rec, org, rstride, ostride, x0, y0, width, height,
-                            sum, size);
+                            sum, size, bd);
     return;
   }
 
@@ -167,11 +131,11 @@
   org += x0 + y0 * ostride;
 
   for (y = 0; y < 8; y += 2) {
-    v128 a, b, c, d, e, f, o, r;
+    v128 a, b, c, d, e, f, g, h, o, r;
     read_two_lines(rec, org, rstride, ostride, x0, y0, bottom, right, y, &o, &r,
-                   &a, &b, &c, &d, &e, &f);
+                   &a, &b, &c, &d, &e, &f, &g, &h);
     ssd0 = v128_ssd_u8(ssd0, o, r);
-    calc_delta_multi(r, o, a, b, c, d, e, f, &ssd1, &ssd2, &ssd3);
+    calc_delta_multi(r, o, a, b, c, d, e, f, g, h, &ssd1, &ssd2, &ssd3);
     rec += 2 * rstride;
     org += 2 * ostride;
   }
@@ -186,39 +150,48 @@
                                     int rstride, int ostride, int x0, int y0,
                                     int bottom, int right, int y, v128 *o,
                                     v128 *r, v128 *a, v128 *b, v128 *c, v128 *d,
-                                    v128 *e, v128 *f, int shift) {
-  const v128 n1 = v128_shr_u16(v128_load_aligned(rec), shift);
-  const v128 n2 = v128_shr_u16(v128_load_aligned(rec + rstride), shift);
-  *o = v128_unziplo_8(v128_shr_u16(v128_load_aligned(org), shift),
-                      v128_shr_u16(v128_load_aligned(org + ostride), shift));
-  *r = v128_unziplo_8(n1, n2);
+                                    v128 *e, v128 *f, v128 *g, v128 *h,
+                                    int shift) {
+  const v128 k1 = v128_shr_u16(v128_load_aligned(org), shift);
+  const v128 k2 = v128_shr_u16(v128_load_aligned(org + ostride), shift);
+  const v128 l1 = v128_shr_u16(v128_load_aligned(rec), shift);
+  const v128 l2 = v128_shr_u16(v128_load_aligned(rec + rstride), shift);
+  const v128 l3 =
+      v128_shr_u16(v128_load_aligned(rec - (y != -y0) * rstride), shift);
+  const v128 l4 = v128_shr_u16(
+      v128_load_aligned(rec + ((y != bottom) + 1) * rstride), shift);
+  *o = v128_unziplo_8(k1, k2);
+  *r = v128_unziplo_8(l1, l2);
   *a = v128_unziplo_8(
-      v128_shr_u16(v128_load_aligned(rec - (y != -y0) * rstride), shift), n1);
-  *f = v128_unziplo_8(
-      n2, v128_shr_u16(v128_load_unaligned(rec + ((y != bottom) + 1) * rstride),
-                       shift));
-  *b = v128_unziplo_8(
+      v128_shr_u16(v128_load_aligned(rec - 2 * (y != -y0) * rstride), shift),
+      l3);
+  *b = v128_unziplo_8(l3, l1);
+  *g = v128_unziplo_8(l2, l4);
+  *h = v128_unziplo_8(
+      l4, v128_shr_u16(
+              v128_load_aligned(rec + (2 * (y != bottom) + 1) * rstride),
+              shift));
+  *c = v128_unziplo_8(
       v128_shr_u16(v128_load_unaligned(rec - 2 * !!x0), shift),
       v128_shr_u16(v128_load_unaligned(rec - 2 * !!x0 + rstride), shift));
-  *c = v128_unziplo_8(
+  *d = v128_unziplo_8(
       v128_shr_u16(v128_load_unaligned(rec - !!x0), shift),
       v128_shr_u16(v128_load_unaligned(rec - !!x0 + rstride), shift));
-  *d = v128_unziplo_8(
+  *e = v128_unziplo_8(
       v128_shr_u16(v128_load_unaligned(rec + !!right), shift),
       v128_shr_u16(v128_load_unaligned(rec + !!right + rstride), shift));
-  *e = v128_unziplo_8(
+  *f = v128_unziplo_8(
       v128_shr_u16(v128_load_unaligned(rec + 2 * !!right), shift),
       v128_shr_u16(v128_load_unaligned(rec + 2 * !!right + rstride), shift));
-  clip_sides(b, c, d, e, x0, right);
+  clip_sides(c, d, e, f, x0, right);
 }
 
 void SIMD_FUNC(aom_clpf_detect_hbd)(const uint16_t *rec, const uint16_t *org,
                                     int rstride, int ostride, int x0, int y0,
                                     int width, int height, int *sum0, int *sum1,
-                                    unsigned int strength, int shift,
-                                    int size) {
-  const v128 sp = v128_dup_8(strength >> shift);
-  const v128 sm = v128_dup_8(-(int)(strength >> shift));
+                                    unsigned int strength, int size,
+                                    unsigned int bitdepth) {
+  const int shift = bitdepth - 8;
   const int bottom = height - 2 - y0;
   const int right = width - 8 - x0;
   ssd128_internal ssd0 = v128_ssd_u8_init();
@@ -227,7 +200,7 @@
 
   if (size != 8) {  // Fallback to plain C
     aom_clpf_detect_hbd_c(rec, org, rstride, ostride, x0, y0, width, height,
-                          sum0, sum1, strength, shift, size);
+                          sum0, sum1, strength, size, bitdepth);
     return;
   }
 
@@ -235,11 +208,12 @@
   org += x0 + y0 * ostride;
 
   for (y = 0; y < 8; y += 2) {
-    v128 a, b, c, d, e, f, o, r;
+    v128 a, b, c, d, e, f, g, h, o, r;
     read_two_lines_hbd(rec, org, rstride, ostride, x0, y0, bottom, right, y, &o,
-                       &r, &a, &b, &c, &d, &e, &f, shift);
+                       &r, &a, &b, &c, &d, &e, &f, &g, &h, shift);
     ssd0 = v128_ssd_u8(ssd0, o, r);
-    ssd1 = v128_ssd_u8(ssd1, o, calc_delta(r, a, b, c, d, e, f, sp, sm));
+    ssd1 = v128_ssd_u8(
+        ssd1, o, calc_delta(r, a, b, c, d, e, f, g, h, strength >> shift));
     rec += rstride * 2;
     org += ostride * 2;
   }
@@ -251,7 +225,7 @@
                                           const uint16_t *org, int rstride,
                                           int ostride, int x0, int y0,
                                           int width, int height, int *sum,
-                                          int shift, int size) {
+                                          int size, unsigned int bitdepth) {
   const int bottom = height - 2 - y0;
   const int right = width - 8 - x0;
   ssd128_internal ssd0 = v128_ssd_u8_init();
@@ -262,7 +236,7 @@
 
   if (size != 8) {  // Fallback to plain C
     aom_clpf_detect_multi_hbd_c(rec, org, rstride, ostride, x0, y0, width,
-                                height, sum, shift, size);
+                                height, sum, size, bitdepth);
     return;
   }
 
@@ -270,13 +244,13 @@
   org += x0 + y0 * ostride;
 
   for (y = 0; y < 8; y += 2) {
-    v128 a, b, c, d, e, f, o, r;
+    v128 a, b, c, d, e, f, g, h, o, r;
     read_two_lines_hbd(rec, org, rstride, ostride, x0, y0, bottom, right, y, &o,
-                       &r, &a, &b, &c, &d, &e, &f, shift);
+                       &r, &a, &b, &c, &d, &e, &f, &g, &h, bitdepth - 8);
     ssd0 = v128_ssd_u8(ssd0, o, r);
-    calc_delta_multi(r, o, a, b, c, d, e, f, &ssd1, &ssd2, &ssd3);
-    rec += 2 * rstride;
-    org += 2 * ostride;
+    calc_delta_multi(r, o, a, b, c, d, e, f, g, h, &ssd1, &ssd2, &ssd3);
+    rec += rstride * 2;
+    org += ostride * 2;
   }
   sum[0] += v128_ssd_u8_sum(ssd0);
   sum[1] += v128_ssd_u8_sum(ssd1);
diff --git a/test/clpf_test.cc b/test/clpf_test.cc
index 2e34f45..8d6dd75 100644
--- a/test/clpf_test.cc
+++ b/test/clpf_test.cc
@@ -28,7 +28,8 @@
 
 typedef void (*clpf_block_t)(const uint8_t *src, uint8_t *dst, int sstride,
                              int dstride, int x0, int y0, int sizex, int sizey,
-                             unsigned int strength, BOUNDARY_TYPE bt);
+                             unsigned int strength, BOUNDARY_TYPE bt,
+                             unsigned int bitdepth);
 
 typedef std::tr1::tuple<clpf_block_t, clpf_block_t, int, int>
     clpf_block_param_t;
@@ -58,7 +59,7 @@
 typedef void (*clpf_block_hbd_t)(const uint16_t *src, uint16_t *dst,
                                  int sstride, int dstride, int x0, int y0,
                                  int sizex, int sizey, unsigned int strength,
-                                 BOUNDARY_TYPE bt);
+                                 BOUNDARY_TYPE bt, unsigned int bitdepth);
 
 typedef std::tr1::tuple<clpf_block_hbd_t, clpf_block_hbd_t, int, int>
     clpf_block_hbd_param_t;
@@ -90,11 +91,12 @@
 void test_clpf(int w, int h, int depth, int iterations,
                void (*clpf)(const pixel *src, pixel *dst, int sstride,
                             int dstride, int x0, int y0, int sizex, int sizey,
-                            unsigned int strength, BOUNDARY_TYPE bt),
+                            unsigned int strength, BOUNDARY_TYPE bt,
+                            unsigned int bitdepth),
                void (*ref_clpf)(const pixel *src, pixel *dst, int sstride,
                                 int dstride, int x0, int y0, int sizex,
                                 int sizey, unsigned int strength,
-                                BOUNDARY_TYPE bt)) {
+                                BOUNDARY_TYPE bt, unsigned int bitdepth)) {
   const int size = 24;
   ACMRandom rnd(ACMRandom::DeterministicSeed());
   DECLARE_ALIGNED(16, pixel, s[size * size]);
@@ -129,10 +131,10 @@
                                 (TILE_RIGHT_BOUNDARY & -(xpos + w == size)) |
                                 (TILE_BOTTOM_BOUNDARY & -(ypos + h == size)));
               ref_clpf(s, ref_d, size, size, xpos, ypos, w, h, 1 << strength,
-                       bt);
+                       bt, depth);
               if (clpf != ref_clpf)
                 ASM_REGISTER_STATE_CHECK(clpf(s, d, size, size, xpos, ypos, w,
-                                              h, 1 << strength, bt));
+                                              h, 1 << strength, bt, depth));
               if (ref_clpf != clpf)
                 for (pos = 0; pos < size * size && !error; pos++) {
                   error = ref_d[pos] != d[pos];
@@ -154,13 +156,17 @@
       << "ypos: " << ypos << std::endl
       << "w: " << w << std::endl
       << "h: " << h << std::endl
-      << "A=" << (pos > size ? (int16_t)s[pos - size] : -1) << std::endl
-      << "B=" << (pos % size - 2 >= 0 ? (int16_t)s[pos - 2] : -1) << std::endl
-      << "C=" << (pos % size - 1 >= 0 ? (int16_t)s[pos - 1] : -1) << std::endl
+      << "A=" << (pos > 2 * size ? (int16_t)s[pos - 2 * size] : -1) << std::endl
+      << "B=" << (pos > size ? (int16_t)s[pos - size] : -1) << std::endl
+      << "C=" << (pos % size - 2 >= 0 ? (int16_t)s[pos - 2] : -1) << std::endl
+      << "D=" << (pos % size - 1 >= 0 ? (int16_t)s[pos - 1] : -1) << std::endl
       << "X=" << (int16_t)s[pos] << std::endl
-      << "D=" << (pos % size + 1 < size ? (int16_t)s[pos + 1] : -1) << std::endl
-      << "E=" << (pos % size + 2 < size ? (int16_t)s[pos + 2] : -1) << std::endl
-      << "F=" << (pos + size < size * size ? (int16_t)s[pos + size] : -1)
+      << "E=" << (pos % size + 1 < size ? (int16_t)s[pos + 1] : -1) << std::endl
+      << "F=" << (pos % size + 2 < size ? (int16_t)s[pos + 2] : -1) << std::endl
+      << "G=" << (pos + size < size * size ? (int16_t)s[pos + size] : -1)
+      << std::endl
+      << "H="
+      << (pos + 2 * size < size * size ? (int16_t)s[pos + 2 * size] : -1)
       << std::endl;
 }
 
@@ -169,11 +175,12 @@
                      void (*clpf)(const pixel *src, pixel *dst, int sstride,
                                   int dstride, int x0, int y0, int sizex,
                                   int sizey, unsigned int strength,
-                                  BOUNDARY_TYPE bt),
+                                  BOUNDARY_TYPE bt, unsigned int bitdepth),
                      void (*ref_clpf)(const pixel *src, pixel *dst, int sstride,
                                       int dstride, int x0, int y0, int sizex,
                                       int sizey, unsigned int strength,
-                                      BOUNDARY_TYPE bt)) {
+                                      BOUNDARY_TYPE bt,
+                                      unsigned int bitdepth)) {
   aom_usec_timer ref_timer;
   aom_usec_timer timer;