Retune the CLPF kernel

CLPF performance has degraded by about 0.5% over the past six months,
which isn't totally surprising since the codec is a moving target.
About half of that degradation comes from the improved 7-bit filter
coefficients.  Therefore, CLPF needs to be retuned for the current
codec.

This patch makes two (normative) changes to the CLPF kernel:

* The clipping function was changed from clamp(x, -s, s) to
      sign(x) * max(0, abs(x) - max(0, abs(x) - s +
                                       (abs(x) >> (bitdepth - 3 - log2(s)))))
  This adds a ramp down to 0 at -32 and 32 for 8-bit input (-128 and
  128 for 10-bit, etc.), so large differences are ignored.  See the
  scalar sketch after this list.

* 8 taps instead of 6 taps (tap weights out of 16, "." marks the
  pixel being filtered; see the scalar kernel model below):
                       1
        4              3
    1 3 . 3 1  ->  1 3 . 3 1
        4              3
                       1
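
For intuition, here is a scalar sketch of the new clipping function
(the helper names msb and constrain are illustrative; the SIMD
equivalent added by this patch is constrain_hbd()):

  #include <stdlib.h> /* abs() */

  /* log2 of the power-of-two strength; the patch uses get_msb() from
     aom_ports/bitops.h for this. */
  static int msb(unsigned int x) {
    int n = 0;
    while (x >>= 1) n++;
    return n;
  }

  /* Clip the difference x to [-s, s], then ramp the magnitude back
     down to 0 as abs(x) approaches 1 << (bitdepth - 3). */
  static int constrain(int x, int s, unsigned int bitdepth) {
    const int ax = abs(x);
    const int ramp = ax - s + (ax >> (bitdepth - 3 - msb(s)));
    const int mag = ax - (ramp > 0 ? ramp : 0);
    return (x < 0 ? -1 : 1) * (mag > 0 ? mag : 0);
  }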

AWCY results: low delay  high delay
PSNR:           -0.40%     -0.47%
PSNR HVS:        0.00%     -0.11%
SSIM:           -0.31%     -0.39%
CIEDE 2000:     -0.22%     -0.31%
APSNR:          -0.40%     -0.48%
MS SSIM:         0.01%     -0.12%

About 3/4 of the gains come from the new clipping function.
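
The tap weights and the constraint combine per pixel as follows, in a
scalar model of the new kernel (clpf_pixel is an illustrative name;
the tap names match the calc_delta_hbd() operands in the patch):

  /* x is the pixel being filtered; a/b lie 2/1 rows above, g/h 1/2
     rows below, c/d 2/1 pixels to the left, e/f 1/2 to the right. */
  static int clpf_pixel(int x, int a, int b, int c, int d, int e,
                        int f, int g, int h, int s,
                        unsigned int bitdepth) {
    const int delta = 1 * constrain(a - x, s, bitdepth) +
                      3 * constrain(b - x, s, bitdepth) +
                      1 * constrain(c - x, s, bitdepth) +
                      3 * constrain(d - x, s, bitdepth) +
                      3 * constrain(e - x, s, bitdepth) +
                      1 * constrain(f - x, s, bitdepth) +
                      3 * constrain(g - x, s, bitdepth) +
                      1 * constrain(h - x, s, bitdepth);
    /* Round delta/16 to nearest, ties away from zero; the (delta < 0)
       term mirrors the v128_cmplt_s16() correction in the SIMD code. */
    return x + ((8 + delta - (delta < 0)) >> 4);
  }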

Change-Id: Idad9dc4004e71a9c7ec81ba62ebd12fb76fb044a
diff --git a/av1/common/clpf_simd.h b/av1/common/clpf_simd.h
index 7cf9486..efc579c 100644
--- a/av1/common/clpf_simd.h
+++ b/av1/common/clpf_simd.h
@@ -11,62 +11,25 @@
 
 #include "./aom_dsp_rtcd.h"
 #include "aom_ports/mem.h"
-
-// delta = 4/16 * clamp(a - o, -s, s) + 1/16 * clamp(b - o, -s, s) +
-//         3/16 * clamp(c - o, -s, s) + 3/16 * clamp(d - o, -s, s) +
-//         1/16 * clamp(e - o, -s, s) + 4/16 * clamp(f - o, -s, s)
-SIMD_INLINE v128 calc_delta(v128 o, v128 a, v128 b, v128 c, v128 d, v128 e,
-                            v128 f, v128 sp, v128 sm) {
-  // The difference will be 9 bit, offset by 128 so we can use saturated
-  // sub to avoid going to 16 bit temporarily before "strength" clipping.
-  const v128 c128 = v128_dup_8(128);
-  const v128 x = v128_add_8(c128, o);
-  const v128 c8 = v128_dup_8(8);
-  const v128 tmp = v128_add_8(
-      v128_max_s8(v128_min_s8(v128_ssub_s8(v128_add_8(c128, c), x), sp), sm),
-      v128_max_s8(v128_min_s8(v128_ssub_s8(v128_add_8(c128, d), x), sp), sm));
-  const v128 delta = v128_add_8(
-      v128_add_8(
-          v128_shl_8(
-              v128_add_8(
-                  v128_max_s8(
-                      v128_min_s8(v128_ssub_s8(v128_add_8(c128, a), x), sp),
-                      sm),
-                  v128_max_s8(
-                      v128_min_s8(v128_ssub_s8(v128_add_8(c128, f), x), sp),
-                      sm)),
-              2),
-          v128_add_8(
-              v128_max_s8(v128_min_s8(v128_ssub_s8(v128_add_8(c128, b), x), sp),
-                          sm),
-              v128_max_s8(v128_min_s8(v128_ssub_s8(v128_add_8(c128, e), x), sp),
-                          sm))),
-      v128_add_8(v128_add_8(tmp, tmp), tmp));
-  return v128_add_8(
-      o,
-      v128_shr_s8(
-          v128_add_8(c8, v128_add_8(delta, v128_cmplt_s8(delta, v128_zero()))),
-          4));
-}
+#include "aom_ports/bitops.h"
+#include "av1/common/clpf_simd_kernel.h"
 
 // Process blocks of width 8, two lines at a time, 8 bit.
 static void clpf_block8(const uint8_t *src, uint8_t *dst, int sstride,
                         int dstride, int x0, int y0, int sizey,
-                        unsigned int strength, BOUNDARY_TYPE bt) {
+                        BOUNDARY_TYPE bt, unsigned int strength) {
   const int bottom = bt & TILE_BOTTOM_BOUNDARY ? sizey - 2 : -1;
   const int right = !(bt & TILE_RIGHT_BOUNDARY);
   const int left = !(bt & TILE_LEFT_BOUNDARY);
   const int top = bt & TILE_ABOVE_BOUNDARY ? y0 : -1;
-  const v128 sp = v128_dup_8(strength);
-  const v128 sm = v128_dup_8(-(int)strength);
   DECLARE_ALIGNED(16, static const uint64_t,
-                  b_shuff[]) = { 0x0504030201000000LL, 0x0d0c0b0a09080808LL };
+                  c_shuff[]) = { 0x0504030201000000LL, 0x0d0c0b0a09080808LL };
   DECLARE_ALIGNED(16, static const uint64_t,
-                  c_shuff[]) = { 0x0605040302010000LL, 0x0e0d0c0b0a090808LL };
+                  d_shuff[]) = { 0x0605040302010000LL, 0x0e0d0c0b0a090808LL };
   DECLARE_ALIGNED(16, static const uint64_t,
-                  d_shuff[]) = { 0x0707060504030201LL, 0x0f0f0e0d0c0b0a09LL };
+                  e_shuff[]) = { 0x0707060504030201LL, 0x0f0f0e0d0c0b0a09LL };
   DECLARE_ALIGNED(16, static const uint64_t,
-                  e_shuff[]) = { 0x0707070605040302LL, 0x0f0f0f0e0d0c0b0aLL };
+                  f_shuff[]) = { 0x0707070605040302LL, 0x0f0f0f0e0d0c0b0aLL };
   int y;
 
   dst += x0 + y0 * dstride;
@@ -75,33 +38,37 @@
   for (y = 0; y < sizey; y += 2) {
     const v64 l1 = v64_load_aligned(src);
     const v64 l2 = v64_load_aligned(src + sstride);
+    const v64 l3 = v64_load_aligned(src - (y != top) * sstride);
+    const v64 l4 = v64_load_aligned(src + ((y != bottom) + 1) * sstride);
     v128 o = v128_from_v64(l1, l2);
     const v128 a =
-        v128_from_v64(v64_load_aligned(src - (y != top) * sstride), l1);
-    const v128 f = v128_from_v64(
-        l2, v64_load_aligned(src + ((y != bottom) + 1) * sstride));
-    v128 b, c, d, e;
+        v128_from_v64(v64_load_aligned(src - 2 * (y != top) * sstride), l3);
+    const v128 b = v128_from_v64(l3, l1);
+    const v128 g = v128_from_v64(l2, l4);
+    const v128 h = v128_from_v64(
+        l4, v64_load_aligned(src + (2 * (y != bottom) + 1) * sstride));
+    v128 c, d, e, f;
 
     if (left) {
-      b = v128_from_v64(v64_load_unaligned(src - 2),
+      c = v128_from_v64(v64_load_unaligned(src - 2),
                         v64_load_unaligned(src - 2 + sstride));
-      c = v128_from_v64(v64_load_unaligned(src - 1),
+      d = v128_from_v64(v64_load_unaligned(src - 1),
                         v64_load_unaligned(src - 1 + sstride));
     } else {  // Left clipping
-      b = v128_shuffle_8(o, v128_load_aligned(b_shuff));
       c = v128_shuffle_8(o, v128_load_aligned(c_shuff));
+      d = v128_shuffle_8(o, v128_load_aligned(d_shuff));
     }
     if (right) {
-      d = v128_from_v64(v64_load_unaligned(src + 1),
+      e = v128_from_v64(v64_load_unaligned(src + 1),
                         v64_load_unaligned(src + 1 + sstride));
-      e = v128_from_v64(v64_load_unaligned(src + 2),
+      f = v128_from_v64(v64_load_unaligned(src + 2),
                         v64_load_unaligned(src + 2 + sstride));
     } else {  // Right clipping
-      d = v128_shuffle_8(o, v128_load_aligned(d_shuff));
       e = v128_shuffle_8(o, v128_load_aligned(e_shuff));
+      f = v128_shuffle_8(o, v128_load_aligned(f_shuff));
     }
 
-    o = calc_delta(o, a, b, c, d, e, f, sp, sm);
+    o = calc_delta(o, a, b, c, d, e, f, g, h, strength);
     v64_store_aligned(dst, v128_high_v64(o));
     v64_store_aligned(dst + dstride, v128_low_v64(o));
     src += sstride * 2;
@@ -112,67 +79,70 @@
 // Process blocks of width 4, four lines at a time, 8 bit.
 static void clpf_block4(const uint8_t *src, uint8_t *dst, int sstride,
                         int dstride, int x0, int y0, int sizey,
-                        unsigned int strength, BOUNDARY_TYPE bt) {
-  const v128 sp = v128_dup_8(strength);
-  const v128 sm = v128_dup_8(-(int)strength);
+                        BOUNDARY_TYPE bt, unsigned int strength) {
   const int right = !(bt & TILE_RIGHT_BOUNDARY);
   const int bottom = bt & TILE_BOTTOM_BOUNDARY ? sizey - 4 : -1;
   const int left = !(bt & TILE_LEFT_BOUNDARY);
   const int top = bt & TILE_ABOVE_BOUNDARY ? y0 : -1;
 
   DECLARE_ALIGNED(16, static const uint64_t,
-                  b_shuff[]) = { 0x0504040401000000LL, 0x0d0c0c0c09080808LL };
+                  c_shuff[]) = { 0x0504040401000000LL, 0x0d0c0c0c09080808LL };
   DECLARE_ALIGNED(16, static const uint64_t,
-                  c_shuff[]) = { 0x0605040402010000LL, 0x0e0d0c0c0a090808LL };
+                  d_shuff[]) = { 0x0605040402010000LL, 0x0e0d0c0c0a090808LL };
   DECLARE_ALIGNED(16, static const uint64_t,
-                  d_shuff[]) = { 0x0707060503030201LL, 0x0f0f0e0d0b0b0a09LL };
+                  e_shuff[]) = { 0x0707060503030201LL, 0x0f0f0e0d0b0b0a09LL };
   DECLARE_ALIGNED(16, static const uint64_t,
-                  e_shuff[]) = { 0x0707070603030302LL, 0x0f0f0f0e0b0b0b0aLL };
+                  f_shuff[]) = { 0x0707070603030302LL, 0x0f0f0f0e0b0b0b0aLL };
   int y;
 
   dst += x0 + y0 * dstride;
   src += x0 + y0 * sstride;
 
   for (y = 0; y < sizey; y += 4) {
-    const uint32_t l0 = u32_load_aligned(src - (y != top) * sstride);
-    const uint32_t l1 = u32_load_aligned(src);
-    const uint32_t l2 = u32_load_aligned(src + sstride);
-    const uint32_t l3 = u32_load_aligned(src + 2 * sstride);
-    const uint32_t l4 = u32_load_aligned(src + 3 * sstride);
-    const uint32_t l5 = u32_load_aligned(src + ((y != bottom) + 3) * sstride);
-    v128 o = v128_from_32(l1, l2, l3, l4);
+    const uint32_t l0 = u32_load_aligned(src - 2 * (y != top) * sstride);
+    const uint32_t l1 = u32_load_aligned(src - (y != top) * sstride);
+    const uint32_t l2 = u32_load_aligned(src);
+    const uint32_t l3 = u32_load_aligned(src + sstride);
+    const uint32_t l4 = u32_load_aligned(src + 2 * sstride);
+    const uint32_t l5 = u32_load_aligned(src + 3 * sstride);
+    const uint32_t l6 = u32_load_aligned(src + ((y != bottom) + 3) * sstride);
+    const uint32_t l7 =
+        u32_load_aligned(src + (2 * (y != bottom) + 3) * sstride);
+    v128 o = v128_from_32(l2, l3, l4, l5);
     const v128 a = v128_from_32(l0, l1, l2, l3);
-    const v128 f = v128_from_32(l2, l3, l4, l5);
-    v128 b, c, d, e;
+    const v128 b = v128_from_32(l1, l2, l3, l4);
+    const v128 g = v128_from_32(l3, l4, l5, l6);
+    const v128 h = v128_from_32(l4, l5, l6, l7);
+    v128 c, d, e, f;
 
     if (left) {
-      b = v128_from_32(u32_load_unaligned(src - 2),
+      c = v128_from_32(u32_load_unaligned(src - 2),
                        u32_load_unaligned(src + sstride - 2),
                        u32_load_unaligned(src + 2 * sstride - 2),
                        u32_load_unaligned(src + 3 * sstride - 2));
-      c = v128_from_32(u32_load_unaligned(src - 1),
+      d = v128_from_32(u32_load_unaligned(src - 1),
                        u32_load_unaligned(src + sstride - 1),
                        u32_load_unaligned(src + 2 * sstride - 1),
                        u32_load_unaligned(src + 3 * sstride - 1));
     } else {  // Left clipping
-      b = v128_shuffle_8(o, v128_load_aligned(b_shuff));
       c = v128_shuffle_8(o, v128_load_aligned(c_shuff));
+      d = v128_shuffle_8(o, v128_load_aligned(d_shuff));
     }
     if (right) {
-      d = v128_from_32(u32_load_unaligned(src + 1),
+      e = v128_from_32(u32_load_unaligned(src + 1),
                        u32_load_unaligned(src + sstride + 1),
                        u32_load_unaligned(src + 2 * sstride + 1),
                        u32_load_unaligned(src + 3 * sstride + 1));
-      e = v128_from_32(u32_load_unaligned(src + 2 * !!right),
+      f = v128_from_32(u32_load_unaligned(src + 2),
                        u32_load_unaligned(src + sstride + 2),
                        u32_load_unaligned(src + 2 * sstride + 2),
                        u32_load_unaligned(src + 3 * sstride + 2));
     } else {  // Right clipping
-      d = v128_shuffle_8(o, v128_load_aligned(d_shuff));
       e = v128_shuffle_8(o, v128_load_aligned(e_shuff));
+      f = v128_shuffle_8(o, v128_load_aligned(f_shuff));
     }
 
-    o = calc_delta(o, a, b, c, d, e, f, sp, sm);
+    o = calc_delta(o, a, b, c, d, e, f, g, h, strength);
     u32_store_aligned(dst, v128_low_u32(v128_shr_n_byte(o, 12)));
     u32_store_aligned(dst + dstride, v128_low_u32(v128_shr_n_byte(o, 8)));
     u32_store_aligned(dst + 2 * dstride, v128_low_u32(v128_shr_n_byte(o, 4)));
@@ -186,79 +156,93 @@
 void SIMD_FUNC(aom_clpf_block)(const uint8_t *src, uint8_t *dst, int sstride,
                                int dstride, int x0, int y0, int sizex,
                                int sizey, unsigned int strength,
-                               BOUNDARY_TYPE bt) {
+                               BOUNDARY_TYPE bt, unsigned int bd) {
   if ((sizex != 4 && sizex != 8) || ((sizey & 3) && sizex == 4)) {
     // Fallback to C for odd sizes:
     // * block widths not 4 or 8
     // * block heights not a multiple of 4 if the block width is 4
     aom_clpf_block_c(src, dst, sstride, dstride, x0, y0, sizex, sizey, strength,
-                     bt);
+                     bt, bd);
   } else {
     (sizex == 4 ? clpf_block4 : clpf_block8)(src, dst, sstride, dstride, x0, y0,
-                                             sizey, strength, bt);
+                                             sizey, bt, strength);
   }
 }
 
 #if CONFIG_AOM_HIGHBITDEPTH
-// delta = 4/16 * clamp(a - o, -s, s) + 1/16 * clamp(b - o, -s, s) +
-//         3/16 * clamp(c - o, -s, s) + 3/16 * clamp(d - o, -s, s) +
-//         1/16 * clamp(e - o, -s, s) + 4/16 * clamp(f - o, -s, s)
-SIMD_INLINE v128 calc_delta_hbd(v128 o, v128 a, v128 b, v128 c, v128 d, v128 e,
-                                v128 f, v128 sp, v128 sm) {
-  const v128 c8 = v128_dup_16(8);
-  const v128 tmp =
-      v128_add_16(v128_max_s16(v128_min_s16(v128_sub_16(c, o), sp), sm),
-                  v128_max_s16(v128_min_s16(v128_sub_16(d, o), sp), sm));
+// sign(a - b) * max(0, abs(a - b) - max(0, abs(a - b) -
+// strength + (abs(a - b) >> (bd - 3 - log2(s)))))
+SIMD_INLINE v128 constrain_hbd(v128 a, v128 b, unsigned int strength,
+                               unsigned int bd) {
+  const v128 diff = v128_sub_16(v128_max_s16(a, b), v128_min_s16(a, b));
+  const v128 sign = v128_cmpeq_16(v128_min_s16(a, b), a);  // -(a <= b)
+  const v128 zero = v128_zero();
+  const v128 s = v128_max_s16(
+      zero, v128_sub_16(v128_dup_16(strength),
+                        v128_shr_u16(diff, bd - 3 - get_msb(strength))));
+  return v128_sub_16(
+      v128_xor(sign,
+               v128_max_s16(
+                   zero, v128_sub_16(
+                             diff, v128_max_s16(zero, v128_sub_16(diff, s))))),
+      sign);
+}
+
+// delta = 1/16 * constrain(a, x, s, bd) + 3/16 * constrain(b, x, s, bd) +
+//         1/16 * constrain(c, x, s, bd) + 3/16 * constrain(d, x, s, bd) +
+//         3/16 * constrain(e, x, s, bd) + 1/16 * constrain(f, x, s, bd) +
+//         3/16 * constrain(g, x, s, bd) + 1/16 * constrain(h, x, s, bd)
+SIMD_INLINE v128 calc_delta_hbd(v128 x, v128 a, v128 b, v128 c, v128 d, v128 e,
+                                v128 f, v128 g, v128 h, unsigned int s,
+                                unsigned int bd) {
+  const v128 bdeg = v128_add_16(
+      v128_add_16(constrain_hbd(b, x, s, bd), constrain_hbd(d, x, s, bd)),
+      v128_add_16(constrain_hbd(e, x, s, bd), constrain_hbd(g, x, s, bd)));
   const v128 delta = v128_add_16(
       v128_add_16(
-          v128_shl_16(
-              v128_add_16(
-                  v128_max_s16(v128_min_s16(v128_sub_16(a, o), sp), sm),
-                  v128_max_s16(v128_min_s16(v128_sub_16(f, o), sp), sm)),
-              2),
-          v128_add_16(v128_max_s16(v128_min_s16(v128_sub_16(b, o), sp), sm),
-                      v128_max_s16(v128_min_s16(v128_sub_16(e, o), sp), sm))),
-      v128_add_16(v128_add_16(tmp, tmp), tmp));
+          v128_add_16(constrain_hbd(a, x, s, bd), constrain_hbd(c, x, s, bd)),
+          v128_add_16(constrain_hbd(f, x, s, bd), constrain_hbd(h, x, s, bd))),
+      v128_add_16(v128_add_16(bdeg, bdeg), bdeg));
   return v128_add_16(
-      o, v128_shr_s16(
-             v128_add_16(
-                 c8, v128_add_16(delta, v128_cmplt_s16(delta, v128_zero()))),
-             4));
+      x,
+      v128_shr_s16(
+          v128_add_16(v128_dup_16(8),
+                      v128_add_16(delta, v128_cmplt_s16(delta, v128_zero()))),
+          4));
 }
 
 static void calc_delta_hbd4(v128 o, v128 a, v128 b, v128 c, v128 d, v128 e,
-                            v128 f, uint16_t *dst, v128 sp, v128 sm,
-                            int dstride) {
-  o = calc_delta_hbd(o, a, b, c, d, e, f, sp, sm);
+                            v128 f, v128 g, v128 h, uint16_t *dst,
+                            unsigned int s, unsigned int bd, int dstride) {
+  o = calc_delta_hbd(o, a, b, c, d, e, f, g, h, s, bd);
   v64_store_aligned(dst, v128_high_v64(o));
   v64_store_aligned(dst + dstride, v128_low_v64(o));
 }
 
 static void calc_delta_hbd8(v128 o, v128 a, v128 b, v128 c, v128 d, v128 e,
-                            v128 f, uint16_t *dst, v128 sp, v128 sm) {
-  v128_store_aligned(dst, calc_delta_hbd(o, a, b, c, d, e, f, sp, sm));
+                            v128 f, v128 g, v128 h, uint16_t *dst,
+                            unsigned int s, unsigned int bd) {
+  v128_store_aligned(dst, calc_delta_hbd(o, a, b, c, d, e, f, g, h, s, bd));
 }
 
 // Process blocks of width 4, two lines at a time.
 SIMD_INLINE void clpf_block_hbd4(const uint16_t *src, uint16_t *dst,
                                  int sstride, int dstride, int x0, int y0,
                                  int sizey, unsigned int strength,
-                                 BOUNDARY_TYPE bt) {
-  const v128 sp = v128_dup_16(strength);
-  const v128 sm = v128_dup_16(-(int)strength);
+                                 BOUNDARY_TYPE bt, unsigned int bd) {
   const int right = !(bt & TILE_RIGHT_BOUNDARY);
   const int bottom = bt & TILE_BOTTOM_BOUNDARY ? sizey - 2 : -1;
   const int left = !(bt & TILE_LEFT_BOUNDARY);
   const int top = bt & TILE_ABOVE_BOUNDARY ? y0 : -1;
 
   DECLARE_ALIGNED(16, static const uint64_t,
-                  b_shuff[]) = { 0x0302010001000100LL, 0x0b0a090809080908LL };
+                  c_shuff[]) = { 0x0302010001000100LL, 0x0b0a090809080908LL };
   DECLARE_ALIGNED(16, static const uint64_t,
-                  c_shuff[]) = { 0x0504030201000100LL, 0x0d0c0b0a09080908LL };
+                  d_shuff[]) = { 0x0504030201000100LL, 0x0d0c0b0a09080908LL };
   DECLARE_ALIGNED(16, static const uint64_t,
-                  d_shuff[]) = { 0x0706070605040302LL, 0x0f0e0f0e0d0c0b0aLL };
+                  e_shuff[]) = { 0x0706070605040302LL, 0x0f0e0f0e0d0c0b0aLL };
   DECLARE_ALIGNED(16, static const uint64_t,
-                  e_shuff[]) = { 0x0706070607060504LL, 0x0f0e0f0e0f0e0d0cLL };
+                  f_shuff[]) = { 0x0706070607060504LL, 0x0f0e0f0e0f0e0d0cLL };
   int y;
 
   dst += x0 + y0 * dstride;
@@ -267,32 +251,36 @@
   for (y = 0; y < sizey; y += 2) {
     const v64 l1 = v64_load_aligned(src);
     const v64 l2 = v64_load_aligned(src + sstride);
+    const v64 l3 = v64_load_aligned(src - (y != top) * sstride);
+    const v64 l4 = v64_load_aligned(src + ((y != bottom) + 1) * sstride);
     v128 o = v128_from_v64(l1, l2);
     const v128 a =
-        v128_from_v64(v64_load_aligned(src - (y != top) * sstride), l1);
-    const v128 f = v128_from_v64(
-        l2, v64_load_aligned(src + ((y != bottom) + 1) * sstride));
-    v128 b, c, d, e;
+        v128_from_v64(v64_load_aligned(src - 2 * (y != top) * sstride), l3);
+    const v128 b = v128_from_v64(l3, l1);
+    const v128 g = v128_from_v64(l2, l4);
+    const v128 h = v128_from_v64(
+        l4, v64_load_aligned(src + (2 * (y != bottom) + 1) * sstride));
+    v128 c, d, e, f;
 
     if (left) {
-      b = v128_from_v64(v64_load_unaligned(src - 2),
+      c = v128_from_v64(v64_load_unaligned(src - 2),
                         v64_load_unaligned(src - 2 + sstride));
-      c = v128_from_v64(v64_load_unaligned(src - 1),
+      d = v128_from_v64(v64_load_unaligned(src - 1),
                         v64_load_unaligned(src - 1 + sstride));
     } else {  // Left clipping
-      b = v128_shuffle_8(o, v128_load_aligned(b_shuff));
       c = v128_shuffle_8(o, v128_load_aligned(c_shuff));
+      d = v128_shuffle_8(o, v128_load_aligned(d_shuff));
     }
     if (right) {
-      d = v128_from_v64(v64_load_unaligned(src + 1),
+      e = v128_from_v64(v64_load_unaligned(src + 1),
                         v64_load_unaligned(src + 1 + sstride));
-      e = v128_from_v64(v64_load_unaligned(src + 2),
+      f = v128_from_v64(v64_load_unaligned(src + 2),
                         v64_load_unaligned(src + 2 + sstride));
     } else {  // Right clipping
-      d = v128_shuffle_8(o, v128_load_aligned(d_shuff));
       e = v128_shuffle_8(o, v128_load_aligned(e_shuff));
+      f = v128_shuffle_8(o, v128_load_aligned(f_shuff));
     }
-    calc_delta_hbd4(o, a, b, c, d, e, f, dst, sp, sm, dstride);
+    calc_delta_hbd4(o, a, b, c, d, e, f, g, h, dst, strength, bd, dstride);
     src += sstride * 2;
     dst += dstride * 2;
   }
@@ -301,22 +289,21 @@
 // The simplest case.  Start here if you need to understand the functions.
 SIMD_INLINE void clpf_block_hbd(const uint16_t *src, uint16_t *dst, int sstride,
                                 int dstride, int x0, int y0, int sizey,
-                                unsigned int strength, BOUNDARY_TYPE bt) {
-  const v128 sp = v128_dup_16(strength);
-  const v128 sm = v128_dup_16(-(int)strength);
+                                unsigned int strength, BOUNDARY_TYPE bt,
+                                unsigned int bd) {
   const int right = !(bt & TILE_RIGHT_BOUNDARY);
-  const int bottom = bt & TILE_BOTTOM_BOUNDARY ? sizey - 2 : -2;
   const int left = !(bt & TILE_LEFT_BOUNDARY);
-  const int top = bt & TILE_ABOVE_BOUNDARY ? y0 : -1;
+  const int ymin = -!(bt & TILE_ABOVE_BOUNDARY) * 2;
+  const int ymax = sizey + !(bt & TILE_BOTTOM_BOUNDARY) * 2 - 1;
 
   DECLARE_ALIGNED(16, static const uint64_t,
-                  b_shuff[]) = { 0x0302010001000100LL, 0x0b0a090807060504LL };
+                  c_shuff[]) = { 0x0302010001000100LL, 0x0b0a090807060504LL };
   DECLARE_ALIGNED(16, static const uint64_t,
-                  c_shuff[]) = { 0x0504030201000100LL, 0x0d0c0b0a09080706LL };
+                  d_shuff[]) = { 0x0504030201000100LL, 0x0d0c0b0a09080706LL };
   DECLARE_ALIGNED(16, static const uint64_t,
-                  d_shuff[]) = { 0x0908070605040302LL, 0x0f0e0f0e0d0c0b0aLL };
+                  e_shuff[]) = { 0x0908070605040302LL, 0x0f0e0f0e0d0c0b0aLL };
   DECLARE_ALIGNED(16, static const uint64_t,
-                  e_shuff[]) = { 0x0b0a090807060504LL, 0x0f0e0f0e0f0e0d0cLL };
+                  f_shuff[]) = { 0x0b0a090807060504LL, 0x0f0e0f0e0f0e0d0cLL };
   int y;
 
   dst += x0 + y0 * dstride;
@@ -327,27 +314,28 @@
   // Clipping along the left and right edges is handled by shuffle
   // instructions doing shift and pad.
   for (y = 0; y < sizey; y++) {
-    const v128 o = v128_load_aligned(src);
-    const v128 a = v128_load_aligned(src - (y != top) * sstride);
-    const v128 f = v128_load_aligned(src + (y - 1 != bottom) * sstride);
-    v128 b, c, d, e;
+    const v128 o = v128_load_aligned(src + y * sstride);
+    const v128 a = v128_load_aligned(src + AOMMAX(ymin, y - 2) * sstride);
+    const v128 b = v128_load_aligned(src + AOMMAX(ymin, y - 1) * sstride);
+    const v128 g = v128_load_aligned(src + AOMMIN(ymax, y + 1) * sstride);
+    const v128 h = v128_load_aligned(src + AOMMIN(ymax, y + 2) * sstride);
+    v128 c, d, e, f;
 
     if (left) {
-      b = v128_load_unaligned(src - 2);
-      c = v128_load_unaligned(src - 1);
+      c = v128_load_unaligned(src + y * sstride - 2);
+      d = v128_load_unaligned(src + y * sstride - 1);
     } else {  // Left clipping
-      b = v128_shuffle_8(o, v128_load_aligned(b_shuff));
       c = v128_shuffle_8(o, v128_load_aligned(c_shuff));
+      d = v128_shuffle_8(o, v128_load_aligned(d_shuff));
     }
     if (right) {
-      d = v128_load_unaligned(src + 1);
-      e = v128_load_unaligned(src + 2);
+      e = v128_load_unaligned(src + y * sstride + 1);
+      f = v128_load_unaligned(src + y * sstride + 2);
     } else {  // Right clipping
-      d = v128_shuffle_8(o, v128_load_aligned(d_shuff));
       e = v128_shuffle_8(o, v128_load_aligned(e_shuff));
+      f = v128_shuffle_8(o, v128_load_aligned(f_shuff));
     }
-    calc_delta_hbd8(o, a, b, c, d, e, f, dst, sp, sm);
-    src += sstride;
+    calc_delta_hbd8(o, a, b, c, d, e, f, g, h, dst, strength, bd);
     dst += dstride;
   }
 }
@@ -355,16 +343,16 @@
 void SIMD_FUNC(aom_clpf_block_hbd)(const uint16_t *src, uint16_t *dst,
                                    int sstride, int dstride, int x0, int y0,
                                    int sizex, int sizey, unsigned int strength,
-                                   BOUNDARY_TYPE bt) {
+                                   BOUNDARY_TYPE bt, unsigned int bd) {
   if ((sizex != 4 && sizex != 8) || ((sizey & 1) && sizex == 4)) {
     // Fallback to C for odd sizes:
     // * block width not 4 or 8
     // * block heights not a multiple of 2 if the block width is 4
     aom_clpf_block_hbd_c(src, dst, sstride, dstride, x0, y0, sizex, sizey,
-                         strength, bt);
+                         strength, bt, bd);
   } else {
     (sizex == 4 ? clpf_block_hbd4 : clpf_block_hbd)(
-        src, dst, sstride, dstride, x0, y0, sizey, strength, bt);
+        src, dst, sstride, dstride, x0, y0, sizey, strength, bt, bd);
   }
 }
 #endif