Highbd loop filter AVX2

- Speed test (ms) on i7-6700, Linux x86_64
  FUNCTION             SSE2    AVX2
  horizontal_edge_16   55      28
  vertical_16_dual     84      47
  horizontal_4_dual    27      13
  horizontal_8_dual    36      15
  vertical_4_dual      38      25
  vertical_8_dual      44      27
- Decoder frame rate improves by around 1.2% - 2.8%.
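- The dual and 16-wide variants process two adjacent 8-pixel blocks in a
  single 256-bit register (one block per 128-bit lane), which is where
  most of the gain over the SSE2 code comes from.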

Change-Id: I9c4123869bac9b6d32e626173c2a8e7eb0cf49e7
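
Below is a minimal usage sketch (illustrative only, not part of the patch)
driving the new dual 4-tap kernel on a hypothetical 16x16, 10-bit buffer.
The kernel loads its uint8_t threshold arrays with _mm_load_si128, so they
must be 16-byte aligned; the values 60/10/30 are placeholders.

    #include <string.h>
    #include "./aom_dsp_rtcd.h"
    #include "aom/aom_integer.h"
    #include "aom_ports/mem.h"

    static void filter_two_blocks_10bit(uint16_t *buf /* 16 rows, pitch 16 */) {
      DECLARE_ALIGNED(16, uint8_t, blimit[16]);
      DECLARE_ALIGNED(16, uint8_t, limit[16]);
      DECLARE_ALIGNED(16, uint8_t, thresh[16]);
      memset(blimit, 60, sizeof(blimit));  // placeholder thresholds
      memset(limit, 10, sizeof(limit));
      memset(thresh, 30, sizeof(thresh));
      // One call filters the horizontal edge between rows 7 and 8 for two
      // adjacent 8-pixel blocks (32 bytes per row = one 256-bit load).
      aom_highbd_lpf_horizontal_4_dual_avx2(buf + 8 * 16, /*pitch=*/16,
                                            blimit, limit, thresh, blimit,
                                            limit, thresh, /*bd=*/10);
    }
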
diff --git a/aom_dsp/aom_dsp.cmake b/aom_dsp/aom_dsp.cmake
index 3ce6761..4828345 100644
--- a/aom_dsp/aom_dsp.cmake
+++ b/aom_dsp/aom_dsp.cmake
@@ -46,6 +46,7 @@
     "${AOM_ROOT}/aom_dsp/x86/aom_asm_stubs.c"
     "${AOM_ROOT}/aom_dsp/x86/convolve.h"
     "${AOM_ROOT}/aom_dsp/x86/txfm_common_sse2.h"
+    "${AOM_ROOT}/aom_dsp/x86/lpf_common_sse2.h"
     "${AOM_ROOT}/aom_dsp/x86/loopfilter_sse2.c")
 
 set(AOM_DSP_COMMON_ASM_SSSE3
@@ -66,6 +67,7 @@
     "${AOM_ROOT}/aom_dsp/x86/aom_subpixel_8t_intrin_avx2.c"
     "${AOM_ROOT}/aom_dsp/x86/loopfilter_avx2.c"
     "${AOM_ROOT}/aom_dsp/x86/inv_txfm_avx2.c"
+    "${AOM_ROOT}/aom_dsp/x86/common_avx2.h"
     "${AOM_ROOT}/aom_dsp/x86/inv_txfm_common_avx2.h"
     "${AOM_ROOT}/aom_dsp/x86/txfm_common_avx2.h")
 
@@ -189,7 +191,8 @@
 
   set(AOM_DSP_COMMON_INTRIN_AVX2
       ${AOM_DSP_COMMON_INTRIN_AVX2}
-      "${AOM_ROOT}/aom_dsp/x86/highbd_convolve_avx2.c")
+      "${AOM_ROOT}/aom_dsp/x86/highbd_convolve_avx2.c"
+      "${AOM_ROOT}/aom_dsp/x86/highbd_loopfilter_avx2.c")
 else ()
   set(AOM_DSP_COMMON_INTRIN_DSPR2
       ${AOM_DSP_COMMON_INTRIN_DSPR2}
diff --git a/aom_dsp/aom_dsp.mk b/aom_dsp/aom_dsp.mk
index f9d675a..bf856ac 100644
--- a/aom_dsp/aom_dsp.mk
+++ b/aom_dsp/aom_dsp.mk
@@ -165,6 +165,7 @@
 
 DSP_SRCS-$(ARCH_X86)$(ARCH_X86_64)   += x86/loopfilter_sse2.c
 DSP_SRCS-$(HAVE_AVX2)                += x86/loopfilter_avx2.c
+DSP_SRCS-$(HAVE_SSE2)                += x86/lpf_common_sse2.h
 
 DSP_SRCS-$(HAVE_NEON)   += arm/loopfilter_neon.c
 ifeq ($(HAVE_NEON_ASM),yes)
@@ -194,10 +195,12 @@
 
 ifeq ($(CONFIG_HIGHBITDEPTH),yes)
 DSP_SRCS-$(HAVE_SSE2)   += x86/highbd_loopfilter_sse2.c
+DSP_SRCS-$(HAVE_AVX2)   += x86/highbd_loopfilter_avx2.c
 endif  # CONFIG_HIGHBITDEPTH
 
 DSP_SRCS-yes            += txfm_common.h
 DSP_SRCS-yes            += x86/txfm_common_intrin.h
+DSP_SRCS-$(HAVE_AVX2)   += x86/common_avx2.h
 DSP_SRCS-$(HAVE_SSE2)   += x86/txfm_common_sse2.h
 DSP_SRCS-$(HAVE_SSSE3)  += x86/obmc_intrinsic_ssse3.h
 DSP_SRCS-$(HAVE_MSA)    += mips/txfm_macros_msa.h
diff --git a/aom_dsp/aom_dsp_rtcd_defs.pl b/aom_dsp/aom_dsp_rtcd_defs.pl
index f54b5150..187babe 100755
--- a/aom_dsp/aom_dsp_rtcd_defs.pl
+++ b/aom_dsp/aom_dsp_rtcd_defs.pl
@@ -305,37 +305,37 @@
   specialize qw/aom_highbd_lpf_vertical_16 sse2/;
 
   add_proto qw/void aom_highbd_lpf_vertical_16_dual/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
-  specialize qw/aom_highbd_lpf_vertical_16_dual sse2/;
+  specialize qw/aom_highbd_lpf_vertical_16_dual sse2 avx2/;
 
   add_proto qw/void aom_highbd_lpf_vertical_8/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
   specialize qw/aom_highbd_lpf_vertical_8 sse2/;
 
   add_proto qw/void aom_highbd_lpf_vertical_8_dual/, "uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd";
-  specialize qw/aom_highbd_lpf_vertical_8_dual sse2/;
+  specialize qw/aom_highbd_lpf_vertical_8_dual sse2 avx2/;
 
   add_proto qw/void aom_highbd_lpf_vertical_4/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
   specialize qw/aom_highbd_lpf_vertical_4 sse2/;
 
   add_proto qw/void aom_highbd_lpf_vertical_4_dual/, "uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd";
-  specialize qw/aom_highbd_lpf_vertical_4_dual sse2/;
+  specialize qw/aom_highbd_lpf_vertical_4_dual sse2 avx2/;
 
   add_proto qw/void aom_highbd_lpf_horizontal_edge_8/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
   specialize qw/aom_highbd_lpf_horizontal_edge_8 sse2/;
 
   add_proto qw/void aom_highbd_lpf_horizontal_edge_16/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
-  specialize qw/aom_highbd_lpf_horizontal_edge_16 sse2/;
+  specialize qw/aom_highbd_lpf_horizontal_edge_16 sse2 avx2/;
 
   add_proto qw/void aom_highbd_lpf_horizontal_8/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
   specialize qw/aom_highbd_lpf_horizontal_8 sse2/;
 
   add_proto qw/void aom_highbd_lpf_horizontal_8_dual/, "uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd";
-  specialize qw/aom_highbd_lpf_horizontal_8_dual sse2/;
+  specialize qw/aom_highbd_lpf_horizontal_8_dual sse2 avx2/;
 
   add_proto qw/void aom_highbd_lpf_horizontal_4/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
   specialize qw/aom_highbd_lpf_horizontal_4 sse2/;
 
   add_proto qw/void aom_highbd_lpf_horizontal_4_dual/, "uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd";
-  specialize qw/aom_highbd_lpf_horizontal_4_dual sse2/;
+  specialize qw/aom_highbd_lpf_horizontal_4_dual sse2 avx2/;
 }  # CONFIG_HIGHBITDEPTH
 
 #
diff --git a/aom_dsp/x86/common_avx2.h b/aom_dsp/x86/common_avx2.h
new file mode 100644
index 0000000..5f9596a
--- /dev/null
+++ b/aom_dsp/x86/common_avx2.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2017, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef AOM_DSP_X86_COMMON_AVX2_H
+#define AOM_DSP_X86_COMMON_AVX2_H
+
+#include <immintrin.h>
+
+#include "./aom_config.h"
+
+// Note: in and out may point to the same buffer.
+static INLINE void mm256_transpose_16x16(const __m256i *in, __m256i *out) {
+  __m256i tr0_0 = _mm256_unpacklo_epi16(in[0], in[1]);
+  __m256i tr0_1 = _mm256_unpackhi_epi16(in[0], in[1]);
+  __m256i tr0_2 = _mm256_unpacklo_epi16(in[2], in[3]);
+  __m256i tr0_3 = _mm256_unpackhi_epi16(in[2], in[3]);
+  __m256i tr0_4 = _mm256_unpacklo_epi16(in[4], in[5]);
+  __m256i tr0_5 = _mm256_unpackhi_epi16(in[4], in[5]);
+  __m256i tr0_6 = _mm256_unpacklo_epi16(in[6], in[7]);
+  __m256i tr0_7 = _mm256_unpackhi_epi16(in[6], in[7]);
+
+  __m256i tr0_8 = _mm256_unpacklo_epi16(in[8], in[9]);
+  __m256i tr0_9 = _mm256_unpackhi_epi16(in[8], in[9]);
+  __m256i tr0_a = _mm256_unpacklo_epi16(in[10], in[11]);
+  __m256i tr0_b = _mm256_unpackhi_epi16(in[10], in[11]);
+  __m256i tr0_c = _mm256_unpacklo_epi16(in[12], in[13]);
+  __m256i tr0_d = _mm256_unpackhi_epi16(in[12], in[13]);
+  __m256i tr0_e = _mm256_unpacklo_epi16(in[14], in[15]);
+  __m256i tr0_f = _mm256_unpackhi_epi16(in[14], in[15]);
+
+  // 00 10 01 11 02 12 03 13  08 18 09 19 0a 1a 0b 1b
+  // 04 14 05 15 06 16 07 17  0c 1c 0d 1d 0e 1e 0f 1f
+  // 20 30 21 31 22 32 23 33  28 38 29 39 2a 3a 2b 3b
+  // 24 34 25 35 26 36 27 37  2c 3c 2d 3d 2e 3e 2f 3f
+  // 40 50 41 51 42 52 43 53  48 58 49 59 4a 5a 4b 5b
+  // 44 54 45 55 46 56 47 57  4c 5c 4d 5d 4e 5e 4f 5f
+  // 60 70 61 71 62 72 63 73  68 78 69 79 6a 7a 6b 7b
+  // 64 74 65 75 66 76 67 77  6c 7c 6d 7d 6e 7e 6f 7f
+
+  // 80 90 81 91 82 92 83 93  88 98 89 99 8a 9a 8b 9b
+  // 84 94 85 95 86 96 87 97  8c 9c 8d 9d 8e 9e 8f 9f
+  // a0 b0 a1 b1 a2 b2 a3 b3  a8 b8 a9 b9 aa ba ab bb
+  // a4 b4 a5 b5 a6 b6 a7 b7  ac bc ad bd ae be af bf
+  // c0 d0 c1 d1 c2 d2 c3 d3  c8 d8 c9 d9 ca da cb db
+  // c4 d4 c5 d5 c6 d6 c7 d7  cc dc cd dd ce de cf df
+  // e0 f0 e1 f1 e2 f2 e3 f3  e8 f8 e9 f9 ea fa eb fb
+  // e4 f4 e5 f5 e6 f6 e7 f7  ec fc ed fd ee fe ef ff
+
+  __m256i tr1_0 = _mm256_unpacklo_epi32(tr0_0, tr0_2);
+  __m256i tr1_1 = _mm256_unpackhi_epi32(tr0_0, tr0_2);
+  __m256i tr1_2 = _mm256_unpacklo_epi32(tr0_1, tr0_3);
+  __m256i tr1_3 = _mm256_unpackhi_epi32(tr0_1, tr0_3);
+  __m256i tr1_4 = _mm256_unpacklo_epi32(tr0_4, tr0_6);
+  __m256i tr1_5 = _mm256_unpackhi_epi32(tr0_4, tr0_6);
+  __m256i tr1_6 = _mm256_unpacklo_epi32(tr0_5, tr0_7);
+  __m256i tr1_7 = _mm256_unpackhi_epi32(tr0_5, tr0_7);
+
+  __m256i tr1_8 = _mm256_unpacklo_epi32(tr0_8, tr0_a);
+  __m256i tr1_9 = _mm256_unpackhi_epi32(tr0_8, tr0_a);
+  __m256i tr1_a = _mm256_unpacklo_epi32(tr0_9, tr0_b);
+  __m256i tr1_b = _mm256_unpackhi_epi32(tr0_9, tr0_b);
+  __m256i tr1_c = _mm256_unpacklo_epi32(tr0_c, tr0_e);
+  __m256i tr1_d = _mm256_unpackhi_epi32(tr0_c, tr0_e);
+  __m256i tr1_e = _mm256_unpacklo_epi32(tr0_d, tr0_f);
+  __m256i tr1_f = _mm256_unpackhi_epi32(tr0_d, tr0_f);
+
+  // 00 10 20 30 01 11 21 31  08 18 28 38 09 19 29 39
+  // 02 12 22 32 03 13 23 33  0a 1a 2a 3a 0b 1b 2b 3b
+  // 04 14 24 34 05 15 25 35  0c 1c 2c 3c 0d 1d 2d 3d
+  // 06 16 26 36 07 17 27 37  0e 1e 2e 3e 0f 1f 2f 3f
+  // 40 50 60 70 41 51 61 71  48 58 68 78 49 59 69 79
+  // 42 52 62 72 43 53 63 73  4a 5a 6a 7a 4b 5b 6b 7b
+  // 44 54 64 74 45 55 65 75  4c 5c 6c 7c 4d 5d 6d 7d
+  // 46 56 66 76 47 57 67 77  4e 5e 6e 7e 4f 5f 6f 7f
+
+  // 80 90 a0 b0 81 91 a1 b1  88 98 a8 b8 89 99 a9 b9
+  // 82 92 a2 b2 83 93 a3 b3  8a 9a aa ba 8b 9b ab bb
+  // 84 94 a4 b4 85 95 a5 b5  8c 9c ac bc 8d 9d ad bd
+  // 86 96 a6 b6 87 97 a7 b7  8e 9e ae be 8f 9f af bf
+  // c0 d0 e0 f0 c1 d1 e1 f1  c8 d8 e8 f8 c9 d9 e9 f9
+  // c2 d2 e2 f2 c3 d3 e3 f3  ca da ea fa cb db eb fb
+  // c4 d4 e4 f4 c5 d5 e5 f5  cc dc ec fc cd dd ed fd
+  // c6 d6 e6 f6 c7 d7 e7 f7  ce de ee fe cf df ef ff
+
+  tr0_0 = _mm256_unpacklo_epi64(tr1_0, tr1_4);
+  tr0_1 = _mm256_unpackhi_epi64(tr1_0, tr1_4);
+  tr0_2 = _mm256_unpacklo_epi64(tr1_1, tr1_5);
+  tr0_3 = _mm256_unpackhi_epi64(tr1_1, tr1_5);
+  tr0_4 = _mm256_unpacklo_epi64(tr1_2, tr1_6);
+  tr0_5 = _mm256_unpackhi_epi64(tr1_2, tr1_6);
+  tr0_6 = _mm256_unpacklo_epi64(tr1_3, tr1_7);
+  tr0_7 = _mm256_unpackhi_epi64(tr1_3, tr1_7);
+
+  tr0_8 = _mm256_unpacklo_epi64(tr1_8, tr1_c);
+  tr0_9 = _mm256_unpackhi_epi64(tr1_8, tr1_c);
+  tr0_a = _mm256_unpacklo_epi64(tr1_9, tr1_d);
+  tr0_b = _mm256_unpackhi_epi64(tr1_9, tr1_d);
+  tr0_c = _mm256_unpacklo_epi64(tr1_a, tr1_e);
+  tr0_d = _mm256_unpackhi_epi64(tr1_a, tr1_e);
+  tr0_e = _mm256_unpacklo_epi64(tr1_b, tr1_f);
+  tr0_f = _mm256_unpackhi_epi64(tr1_b, tr1_f);
+
+  // 00 10 20 30 40 50 60 70  08 18 28 38 48 58 68 78
+  // 01 11 21 31 41 51 61 71  09 19 29 39 49 59 69 79
+  // 02 12 22 32 42 52 62 72  0a 1a 2a 3a 4a 5a 6a 7a
+  // 03 13 23 33 43 53 63 73  0b 1b 2b 3b 4b 5b 6b 7b
+  // 04 14 24 34 44 54 64 74  0c 1c 2c 3c 4c 5c 6c 7c
+  // 05 15 25 35 45 55 65 75  0d 1d 2d 3d 4d 5d 6d 7d
+  // 06 16 26 36 46 56 66 76  0e 1e 2e 3e 4e 5e 6e 7e
+  // 07 17 27 37 47 57 67 77  0f 1f 2f 3f 4f 5f 6f 7f
+
+  // 80 90 a0 b0 c0 d0 e0 f0  88 98 a8 b8 c8 d8 e8 f8
+  // 81 91 a1 b1 c1 d1 e1 f1  89 99 a9 b9 c9 d9 e9 f9
+  // 82 92 a2 b2 c2 d2 e2 f2  8a 9a aa ba ca da ea fa
+  // 83 93 a3 b3 c3 d3 e3 f3  8b 9b ab bb cb db eb fb
+  // 84 94 a4 b4 c4 d4 e4 f4  8c 9c ac bc cc dc ec fc
+  // 85 95 a5 b5 c5 d5 e5 f5  8d 9d ad bd cd dd ed fd
+  // 86 96 a6 b6 c6 d6 e6 f6  8e 9e ae be ce de ee fe
+  // 87 97 a7 b7 c7 d7 e7 f7  8f 9f af bf cf df ef ff
+
+  out[0] = _mm256_permute2x128_si256(tr0_0, tr0_8, 0x20);  // 0010 0000
+  out[8] = _mm256_permute2x128_si256(tr0_0, tr0_8, 0x31);  // 0011 0001
+  out[1] = _mm256_permute2x128_si256(tr0_1, tr0_9, 0x20);
+  out[9] = _mm256_permute2x128_si256(tr0_1, tr0_9, 0x31);
+  out[2] = _mm256_permute2x128_si256(tr0_2, tr0_a, 0x20);
+  out[10] = _mm256_permute2x128_si256(tr0_2, tr0_a, 0x31);
+  out[3] = _mm256_permute2x128_si256(tr0_3, tr0_b, 0x20);
+  out[11] = _mm256_permute2x128_si256(tr0_3, tr0_b, 0x31);
+
+  out[4] = _mm256_permute2x128_si256(tr0_4, tr0_c, 0x20);
+  out[12] = _mm256_permute2x128_si256(tr0_4, tr0_c, 0x31);
+  out[5] = _mm256_permute2x128_si256(tr0_5, tr0_d, 0x20);
+  out[13] = _mm256_permute2x128_si256(tr0_5, tr0_d, 0x31);
+  out[6] = _mm256_permute2x128_si256(tr0_6, tr0_e, 0x20);
+  out[14] = _mm256_permute2x128_si256(tr0_6, tr0_e, 0x31);
+  out[7] = _mm256_permute2x128_si256(tr0_7, tr0_f, 0x20);
+  out[15] = _mm256_permute2x128_si256(tr0_7, tr0_f, 0x31);
+}
+#endif  // AOM_DSP_X86_COMMON_AVX2_H
diff --git a/aom_dsp/x86/highbd_loopfilter_avx2.c b/aom_dsp/x86/highbd_loopfilter_avx2.c
new file mode 100644
index 0000000..b776897
--- /dev/null
+++ b/aom_dsp/x86/highbd_loopfilter_avx2.c
@@ -0,0 +1,824 @@
+/*
+ * Copyright (c) 2017, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <immintrin.h>
+
+#include "./aom_dsp_rtcd.h"
+#include "aom_dsp/x86/common_avx2.h"
+#include "aom_dsp/x86/lpf_common_sse2.h"
+#include "aom/aom_integer.h"
+
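+// Widen the 8-bit blimit/limit/thresh vectors to 16 bits, broadcast them to
+// both 128-bit lanes, and scale by (bd - 8) so they match the pixel range.
+// The loads use _mm_load_si128, so the caller must pass 16-byte-aligned
+// arrays.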
+static INLINE void get_limit(const uint8_t *bl, const uint8_t *l,
+                             const uint8_t *t, int bd, __m256i *blt,
+                             __m256i *lt, __m256i *thr) {
+  const int shift = bd - 8;
+  const __m128i zero = _mm_setzero_si128();
+
+  __m128i x = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)bl), zero);
+  __m256i y = _mm256_inserti128_si256(_mm256_castsi128_si256(x), x, 1);
+  *blt = _mm256_slli_epi16(y, shift);
+
+  x = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)l), zero);
+  y = _mm256_inserti128_si256(_mm256_castsi128_si256(x), x, 1);
+  *lt = _mm256_slli_epi16(y, shift);
+
+  x = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)t), zero);
+  y = _mm256_inserti128_si256(_mm256_castsi128_si256(x), x, 1);
+  *thr = _mm256_slli_epi16(y, shift);
+}
+
+static INLINE void load_highbd_pixel(const uint16_t *s, int size, int pitch,
+                                     __m256i *p, __m256i *q) {
+  int i;
+  for (i = 0; i < size; i++) {
+    p[i] = _mm256_loadu_si256((__m256i *)(s - (i + 1) * pitch));
+    q[i] = _mm256_loadu_si256((__m256i *)(s + i * pitch));
+  }
+}
+
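+// High edge variance: hev is all-ones in lanes where |p1 - p0| or |q1 - q0|
+// exceeds thresh; in those lanes the p1 - q1 term feeds the filter and
+// p1/q1 themselves are left unmodified.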
+static INLINE void highbd_hev_mask(const __m256i *p, const __m256i *q,
+                                   const __m256i *t, __m256i *hev) {
+  const __m256i abs_p1p0 = _mm256_abs_epi16(_mm256_sub_epi16(p[1], p[0]));
+  const __m256i abs_q1q0 = _mm256_abs_epi16(_mm256_sub_epi16(q[1], q[0]));
+  __m256i h = _mm256_max_epi16(abs_p1p0, abs_q1q0);
+  h = _mm256_subs_epu16(h, *t);
+
+  const __m256i ffff = _mm256_set1_epi16(0xFFFF);
+  const __m256i zero = _mm256_setzero_si256();
+  *hev = _mm256_xor_si256(_mm256_cmpeq_epi16(h, zero), ffff);
+}
+
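+// Filter mask: all-ones in lanes where abs(p0 - q0) * 2 + abs(p1 - q1) / 2
+// <= blimit and every neighboring delta within p0..p3 / q0..q3 is <= limit.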
+static INLINE void highbd_filter_mask(const __m256i *p, const __m256i *q,
+                                      const __m256i *l, const __m256i *bl,
+                                      __m256i *mask) {
+  __m256i abs_p0q0 = _mm256_abs_epi16(_mm256_sub_epi16(p[0], q[0]));
+  __m256i abs_p1q1 = _mm256_abs_epi16(_mm256_sub_epi16(p[1], q[1]));
+  abs_p0q0 = _mm256_adds_epu16(abs_p0q0, abs_p0q0);
+  abs_p1q1 = _mm256_srli_epi16(abs_p1q1, 1);
+
+  const __m256i zero = _mm256_setzero_si256();
+  const __m256i one = _mm256_set1_epi16(1);
+  const __m256i ffff = _mm256_set1_epi16(0xFFFF);
+  __m256i max = _mm256_subs_epu16(_mm256_adds_epu16(abs_p0q0, abs_p1q1), *bl);
+  max = _mm256_xor_si256(_mm256_cmpeq_epi16(max, zero), ffff);
+  max = _mm256_and_si256(max, _mm256_adds_epu16(*l, one));
+
+  int i;
+  for (i = 1; i < 4; ++i) {
+    max = _mm256_max_epi16(max,
+                           _mm256_abs_epi16(_mm256_sub_epi16(p[i], p[i - 1])));
+    max = _mm256_max_epi16(max,
+                           _mm256_abs_epi16(_mm256_sub_epi16(q[i], q[i - 1])));
+  }
+  max = _mm256_subs_epu16(max, *l);
+  *mask = _mm256_cmpeq_epi16(max, zero);  // all-ones where filtering applies
+}
+
+static INLINE void flat_mask_internal(const __m256i *th, const __m256i *p,
+                                      const __m256i *q, int bd, int start,
+                                      int end, __m256i *flat) {
+  __m256i max = _mm256_setzero_si256();
+  int i;
+  for (i = start; i < end; ++i) {
+    max = _mm256_max_epi16(max, _mm256_abs_epi16(_mm256_sub_epi16(p[i], p[0])));
+    max = _mm256_max_epi16(max, _mm256_abs_epi16(_mm256_sub_epi16(q[i], q[0])));
+  }
+
+  __m256i ft;
+  if (bd == 8)
+    ft = _mm256_subs_epu16(max, *th);
+  else if (bd == 10)
+    ft = _mm256_subs_epu16(max, _mm256_slli_epi16(*th, 2));
+  else  // bd == 12
+    ft = _mm256_subs_epu16(max, _mm256_slli_epi16(*th, 4));
+
+  const __m256i zero = _mm256_setzero_si256();
+  *flat = _mm256_cmpeq_epi16(ft, zero);
+}
+
+// Note:
+//  Accesses p[1..3] against p[0], and q[1..3] against q[0]
+static INLINE void highbd_flat_mask4(const __m256i *th, const __m256i *p,
+                                     const __m256i *q, __m256i *flat, int bd) {
+  // check the distance 1,2,3 against 0
+  flat_mask_internal(th, p, q, bd, 1, 4, flat);
+}
+
+// Note:
+//  Accesses p[4..7] against p[0], and q[4..7] against q[0]
+static INLINE void highbd_flat_mask5(const __m256i *th, const __m256i *p,
+                                     const __m256i *q, __m256i *flat, int bd) {
+  flat_mask_internal(th, p, q, bd, 4, 8, flat);
+}
+
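+// Clamp each 16-bit lane of *pixel to [*min, *max] via compare/select, since
+// the filtered values may fall outside the representable pixel range.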
+static INLINE void pixel_clamp(const __m256i *min, const __m256i *max,
+                               __m256i *pixel) {
+  __m256i clamped, mask;
+
+  mask = _mm256_cmpgt_epi16(*pixel, *max);
+  clamped = _mm256_andnot_si256(mask, *pixel);
+  mask = _mm256_and_si256(mask, *max);
+  clamped = _mm256_or_si256(mask, clamped);
+
+  mask = _mm256_cmpgt_epi16(clamped, *min);
+  clamped = _mm256_and_si256(mask, clamped);
+  mask = _mm256_andnot_si256(mask, *min);
+  *pixel = _mm256_or_si256(clamped, mask);
+}
+
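+// Standard 4-tap filter evaluated in the signed domain: pixels are biased by
+// -t80 (half the pixel range), the clamped filter taps are applied to
+// p1/p0/q0/q1, and the bias is added back before returning ps[]/qs[].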
+static INLINE void highbd_filter4(__m256i *p, __m256i *q, const __m256i *mask,
+                                  const __m256i *th, int bd, __m256i *ps,
+                                  __m256i *qs) {
+  __m256i t80;
+  if (bd == 8)
+    t80 = _mm256_set1_epi16(0x80);
+  else if (bd == 10)
+    t80 = _mm256_set1_epi16(0x200);
+  else  // bd == 12
+    t80 = _mm256_set1_epi16(0x800);
+
+  __m256i ps0 = _mm256_subs_epi16(p[0], t80);
+  __m256i ps1 = _mm256_subs_epi16(p[1], t80);
+  __m256i qs0 = _mm256_subs_epi16(q[0], t80);
+  __m256i qs1 = _mm256_subs_epi16(q[1], t80);
+
+  const __m256i one = _mm256_set1_epi16(1);
+  const __m256i pmax = _mm256_subs_epi16(
+      _mm256_subs_epi16(_mm256_slli_epi16(one, bd), one), t80);
+  const __m256i zero = _mm256_setzero_si256();
+  const __m256i pmin = _mm256_subs_epi16(zero, t80);
+
+  __m256i filter = _mm256_subs_epi16(ps1, qs1);
+  pixel_clamp(&pmin, &pmax, &filter);
+
+  __m256i hev;
+  highbd_hev_mask(p, q, th, &hev);
+  filter = _mm256_and_si256(filter, hev);
+
+  const __m256i x = _mm256_subs_epi16(qs0, ps0);
+  filter = _mm256_adds_epi16(filter, x);
+  filter = _mm256_adds_epi16(filter, x);
+  filter = _mm256_adds_epi16(filter, x);
+  pixel_clamp(&pmin, &pmax, &filter);
+  filter = _mm256_and_si256(filter, *mask);
+
+  const __m256i t3 = _mm256_set1_epi16(3);
+  const __m256i t4 = _mm256_set1_epi16(4);
+
+  __m256i filter1 = _mm256_adds_epi16(filter, t4);
+  __m256i filter2 = _mm256_adds_epi16(filter, t3);
+  pixel_clamp(&pmin, &pmax, &filter1);
+  pixel_clamp(&pmin, &pmax, &filter2);
+  filter1 = _mm256_srai_epi16(filter1, 3);
+  filter2 = _mm256_srai_epi16(filter2, 3);
+
+  qs0 = _mm256_subs_epi16(qs0, filter1);
+  pixel_clamp(&pmin, &pmax, &qs0);
+  ps0 = _mm256_adds_epi16(ps0, filter2);
+  pixel_clamp(&pmin, &pmax, &ps0);
+
+  qs[0] = _mm256_adds_epi16(qs0, t80);
+  ps[0] = _mm256_adds_epi16(ps0, t80);
+
+  filter = _mm256_adds_epi16(filter1, one);
+  filter = _mm256_srai_epi16(filter, 1);
+  filter = _mm256_andnot_si256(hev, filter);
+
+  qs1 = _mm256_subs_epi16(qs1, filter);
+  pixel_clamp(&pmin, &pmax, &qs1);
+  ps1 = _mm256_adds_epi16(ps1, filter);
+  pixel_clamp(&pmin, &pmax, &ps1);
+
+  qs[1] = _mm256_adds_epi16(qs1, t80);
+  ps[1] = _mm256_adds_epi16(ps1, t80);
+}
+
+void aom_highbd_lpf_horizontal_edge_16_avx2(uint16_t *s, int pitch,
+                                            const uint8_t *blt,
+                                            const uint8_t *lt,
+                                            const uint8_t *thr, int bd) {
+  __m256i blimit, limit, thresh;
+  get_limit(blt, lt, thr, bd, &blimit, &limit, &thresh);
+
+  __m256i p[8], q[8];
+  load_highbd_pixel(s, 8, pitch, p, q);
+
+  __m256i mask;
+  highbd_filter_mask(p, q, &limit, &blimit, &mask);
+
+  __m256i flat, flat2;
+  const __m256i one = _mm256_set1_epi16(1);
+  highbd_flat_mask4(&one, p, q, &flat, bd);
+  highbd_flat_mask5(&one, p, q, &flat2, bd);
+
+  flat = _mm256_and_si256(flat, mask);
+  flat2 = _mm256_and_si256(flat2, flat);
+
+  __m256i ps[2], qs[2];
+  highbd_filter4(p, q, &mask, &thresh, bd, ps, qs);
+
+  // flat and wide flat calculations
+  __m256i flat_p[3], flat_q[3];
+  __m256i flat2_p[7], flat2_q[7];
+  {
+    const __m256i eight = _mm256_set1_epi16(8);
+    const __m256i four = _mm256_set1_epi16(4);
+
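+    // Build the 15-tap (flat2, >>4) and 7-tap (flat, >>3) running sums once,
+    // then slide the window for each output tap by subtracting the sample
+    // that leaves and adding the one that enters.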
+    __m256i sum_p = _mm256_add_epi16(_mm256_add_epi16(p[6], p[5]),
+                                     _mm256_add_epi16(p[4], p[3]));
+    __m256i sum_q = _mm256_add_epi16(_mm256_add_epi16(q[6], q[5]),
+                                     _mm256_add_epi16(q[4], q[3]));
+
+    __m256i sum_lp = _mm256_add_epi16(p[0], _mm256_add_epi16(p[2], p[1]));
+    sum_p = _mm256_add_epi16(sum_p, sum_lp);
+
+    __m256i sum_lq = _mm256_add_epi16(q[0], _mm256_add_epi16(q[2], q[1]));
+    sum_q = _mm256_add_epi16(sum_q, sum_lq);
+    sum_p = _mm256_add_epi16(eight, _mm256_add_epi16(sum_p, sum_q));
+    sum_lp = _mm256_add_epi16(four, _mm256_add_epi16(sum_lp, sum_lq));
+
+    flat2_p[0] = _mm256_srli_epi16(
+        _mm256_add_epi16(sum_p, _mm256_add_epi16(p[7], p[0])), 4);
+    flat2_q[0] = _mm256_srli_epi16(
+        _mm256_add_epi16(sum_p, _mm256_add_epi16(q[7], q[0])), 4);
+    flat_p[0] = _mm256_srli_epi16(
+        _mm256_add_epi16(sum_lp, _mm256_add_epi16(p[3], p[0])), 3);
+    flat_q[0] = _mm256_srli_epi16(
+        _mm256_add_epi16(sum_lp, _mm256_add_epi16(q[3], q[0])), 3);
+
+    __m256i sum_p7 = _mm256_add_epi16(p[7], p[7]);
+    __m256i sum_q7 = _mm256_add_epi16(q[7], q[7]);
+    __m256i sum_p3 = _mm256_add_epi16(p[3], p[3]);
+    __m256i sum_q3 = _mm256_add_epi16(q[3], q[3]);
+
+    sum_q = _mm256_sub_epi16(sum_p, p[6]);
+    sum_p = _mm256_sub_epi16(sum_p, q[6]);
+    flat2_p[1] = _mm256_srli_epi16(
+        _mm256_add_epi16(sum_p, _mm256_add_epi16(sum_p7, p[1])), 4);
+    flat2_q[1] = _mm256_srli_epi16(
+        _mm256_add_epi16(sum_q, _mm256_add_epi16(sum_q7, q[1])), 4);
+
+    sum_lq = _mm256_sub_epi16(sum_lp, p[2]);
+    sum_lp = _mm256_sub_epi16(sum_lp, q[2]);
+    flat_p[1] = _mm256_srli_epi16(
+        _mm256_add_epi16(sum_lp, _mm256_add_epi16(sum_p3, p[1])), 3);
+    flat_q[1] = _mm256_srli_epi16(
+        _mm256_add_epi16(sum_lq, _mm256_add_epi16(sum_q3, q[1])), 3);
+
+    sum_p7 = _mm256_add_epi16(sum_p7, p[7]);
+    sum_q7 = _mm256_add_epi16(sum_q7, q[7]);
+    sum_p3 = _mm256_add_epi16(sum_p3, p[3]);
+    sum_q3 = _mm256_add_epi16(sum_q3, q[3]);
+
+    sum_p = _mm256_sub_epi16(sum_p, q[5]);
+    sum_q = _mm256_sub_epi16(sum_q, p[5]);
+    flat2_p[2] = _mm256_srli_epi16(
+        _mm256_add_epi16(sum_p, _mm256_add_epi16(sum_p7, p[2])), 4);
+    flat2_q[2] = _mm256_srli_epi16(
+        _mm256_add_epi16(sum_q, _mm256_add_epi16(sum_q7, q[2])), 4);
+
+    sum_lp = _mm256_sub_epi16(sum_lp, q[1]);
+    sum_lq = _mm256_sub_epi16(sum_lq, p[1]);
+    flat_p[2] = _mm256_srli_epi16(
+        _mm256_add_epi16(sum_lp, _mm256_add_epi16(sum_p3, p[2])), 3);
+    flat_q[2] = _mm256_srli_epi16(
+        _mm256_add_epi16(sum_lq, _mm256_add_epi16(sum_q3, q[2])), 3);
+
+    int i;
+    for (i = 3; i < 7; ++i) {
+      sum_p7 = _mm256_add_epi16(sum_p7, p[7]);
+      sum_q7 = _mm256_add_epi16(sum_q7, q[7]);
+      sum_p = _mm256_sub_epi16(sum_p, q[7 - i]);
+      sum_q = _mm256_sub_epi16(sum_q, p[7 - i]);
+      flat2_p[i] = _mm256_srli_epi16(
+          _mm256_add_epi16(sum_p, _mm256_add_epi16(sum_p7, p[i])), 4);
+      flat2_q[i] = _mm256_srli_epi16(
+          _mm256_add_epi16(sum_q, _mm256_add_epi16(sum_q7, q[i])), 4);
+    }
+  }
+
+  // highbd_filter8
+  p[2] = _mm256_andnot_si256(flat, p[2]);
+  //  p2 remains unchanged if !(flat && mask)
+  flat_p[2] = _mm256_and_si256(flat, flat_p[2]);
+  //  when (flat && mask)
+  p[2] = _mm256_or_si256(p[2], flat_p[2]);  // full list of p2 values
+  q[2] = _mm256_andnot_si256(flat, q[2]);
+  flat_q[2] = _mm256_and_si256(flat, flat_q[2]);
+  q[2] = _mm256_or_si256(q[2], flat_q[2]);  // full list of q2 values
+
+  int i;
+  for (i = 1; i >= 0; i--) {
+    ps[i] = _mm256_andnot_si256(flat, ps[i]);
+    flat_p[i] = _mm256_and_si256(flat, flat_p[i]);
+    p[i] = _mm256_or_si256(ps[i], flat_p[i]);
+    qs[i] = _mm256_andnot_si256(flat, qs[i]);
+    flat_q[i] = _mm256_and_si256(flat, flat_q[i]);
+    q[i] = _mm256_or_si256(qs[i], flat_q[i]);
+  }
+
+  // highbd_filter16
+
+  for (i = 6; i >= 0; i--) {
+    //  p[i] remains unchanged if !(flat2 && flat && mask)
+    p[i] = _mm256_andnot_si256(flat2, p[i]);
+    flat2_p[i] = _mm256_and_si256(flat2, flat2_p[i]);
+    //  get values for when (flat2 && flat && mask)
+    p[i] = _mm256_or_si256(p[i], flat2_p[i]);  // full list of p values
+
+    q[i] = _mm256_andnot_si256(flat2, q[i]);
+    flat2_q[i] = _mm256_and_si256(flat2, flat2_q[i]);
+    q[i] = _mm256_or_si256(q[i], flat2_q[i]);
+    _mm256_storeu_si256((__m256i *)(s - (i + 1) * pitch), p[i]);
+    _mm256_storeu_si256((__m256i *)(s + i * pitch), q[i]);
+  }
+}
+
+static INLINE void highbd_transpose16x16(uint16_t *src, int src_p,
+                                         uint16_t *dst, int dst_p) {
+  __m256i x[16];
+  int i;
+  for (i = 0; i < 16; ++i) {
+    x[i] = _mm256_loadu_si256((const __m256i *)src);
+    src += src_p;
+  }
+  mm256_transpose_16x16(x, x);
+  for (i = 0; i < 16; ++i) {
+    _mm256_storeu_si256((__m256i *)dst, x[i]);
+    dst += dst_p;
+  }
+}
+
+void aom_highbd_lpf_vertical_16_dual_avx2(uint16_t *s, int p,
+                                          const uint8_t *blimit,
+                                          const uint8_t *limit,
+                                          const uint8_t *thresh, int bd) {
+  DECLARE_ALIGNED(16, uint16_t, t_dst[256]);
+
+  //  Transpose 16x16
+  highbd_transpose16x16(s - 8, p, t_dst, 16);
+
+  //  Loop filtering
+  aom_highbd_lpf_horizontal_edge_16_avx2(t_dst + 8 * 16, 16, blimit, limit,
+                                         thresh, bd);
+
+  //  Transpose back
+  highbd_transpose16x16(t_dst, 16, s - 8, p);
+}
+
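+// Pack the limits of two adjacent 8-pixel blocks into one 256-bit register:
+// block 0 parameters in the low 128-bit lane, block 1 in the high lane, then
+// scale by (bd - 8) as in get_limit().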
+static INLINE void get_dual_limit(const uint8_t *b0, const uint8_t *l0,
+                                  const uint8_t *t0, const uint8_t *b1,
+                                  const uint8_t *l1, const uint8_t *t1, int bd,
+                                  __m256i *blt, __m256i *lt, __m256i *thr) {
+  const __m128i z128 = _mm_setzero_si128();
+  const __m128i blimit0 =
+      _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)b0), z128);
+  const __m128i limit0 =
+      _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)l0), z128);
+  const __m128i thresh0 =
+      _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)t0), z128);
+  const __m128i blimit1 =
+      _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)b1), z128);
+  const __m128i limit1 =
+      _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)l1), z128);
+  const __m128i thresh1 =
+      _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)t1), z128);
+
+  *blt = _mm256_inserti128_si256(_mm256_castsi128_si256(blimit0), blimit1, 1);
+  *lt = _mm256_inserti128_si256(_mm256_castsi128_si256(limit0), limit1, 1);
+  *thr = _mm256_inserti128_si256(_mm256_castsi128_si256(thresh0), thresh1, 1);
+
+  int shift = bd - 8;
+  *blt = _mm256_slli_epi16(*blt, shift);
+  *lt = _mm256_slli_epi16(*lt, shift);
+  *thr = _mm256_slli_epi16(*thr, shift);
+}
+
+void aom_highbd_lpf_horizontal_4_dual_avx2(
+    uint16_t *s, int p, const uint8_t *_blimit0, const uint8_t *_limit0,
+    const uint8_t *_thresh0, const uint8_t *_blimit1, const uint8_t *_limit1,
+    const uint8_t *_thresh1, int bd) {
+  __m256i p3 = _mm256_loadu_si256((__m256i *)(s - 4 * p));
+  __m256i p2 = _mm256_loadu_si256((__m256i *)(s - 3 * p));
+  __m256i p1 = _mm256_loadu_si256((__m256i *)(s - 2 * p));
+  __m256i p0 = _mm256_loadu_si256((__m256i *)(s - 1 * p));
+  __m256i q0 = _mm256_loadu_si256((__m256i *)(s + 0 * p));
+  __m256i q1 = _mm256_loadu_si256((__m256i *)(s + 1 * p));
+  __m256i q2 = _mm256_loadu_si256((__m256i *)(s + 2 * p));
+  __m256i q3 = _mm256_loadu_si256((__m256i *)(s + 3 * p));
+
+  const __m256i abs_p1p0 = _mm256_abs_epi16(_mm256_sub_epi16(p1, p0));
+  const __m256i abs_q1q0 = _mm256_abs_epi16(_mm256_sub_epi16(q1, q0));
+
+  __m256i abs_p0q0 = _mm256_abs_epi16(_mm256_sub_epi16(p0, q0));
+  __m256i abs_p1q1 = _mm256_abs_epi16(_mm256_sub_epi16(p1, q1));
+
+  __m256i blimit, limit, thresh;
+  get_dual_limit(_blimit0, _limit0, _thresh0, _blimit1, _limit1, _thresh1, bd,
+                 &blimit, &limit, &thresh);
+
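+  // Per-bit-depth constants: t80 is half the pixel range (0x80 << (bd - 8));
+  // t1f/t7f mask the magnitude bits and tffe0/tff80 restore the sign bits
+  // when emulating arithmetic >>3 and >>1 with logical shifts below.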
+  __m256i t80, tff80, tffe0, t1f, t7f;
+  if (bd == 8) {
+    t80 = _mm256_set1_epi16(0x80);
+    tff80 = _mm256_set1_epi16(0xff80);
+    tffe0 = _mm256_set1_epi16(0xffe0);
+    t1f = _mm256_srli_epi16(_mm256_set1_epi16(0x1fff), 8);
+    t7f = _mm256_srli_epi16(_mm256_set1_epi16(0x7fff), 8);
+  } else if (bd == 10) {
+    t80 = _mm256_slli_epi16(_mm256_set1_epi16(0x80), 2);
+    tff80 = _mm256_slli_epi16(_mm256_set1_epi16(0xff80), 2);
+    tffe0 = _mm256_slli_epi16(_mm256_set1_epi16(0xffe0), 2);
+    t1f = _mm256_srli_epi16(_mm256_set1_epi16(0x1fff), 6);
+    t7f = _mm256_srli_epi16(_mm256_set1_epi16(0x7fff), 6);
+  } else {  // bd == 12
+    t80 = _mm256_slli_epi16(_mm256_set1_epi16(0x80), 4);
+    tff80 = _mm256_slli_epi16(_mm256_set1_epi16(0xff80), 4);
+    tffe0 = _mm256_slli_epi16(_mm256_set1_epi16(0xffe0), 4);
+    t1f = _mm256_srli_epi16(_mm256_set1_epi16(0x1fff), 4);
+    t7f = _mm256_srli_epi16(_mm256_set1_epi16(0x7fff), 4);
+  }
+
+  __m256i ps1 =
+      _mm256_subs_epi16(_mm256_loadu_si256((__m256i *)(s - 2 * p)), t80);
+  __m256i ps0 =
+      _mm256_subs_epi16(_mm256_loadu_si256((__m256i *)(s - 1 * p)), t80);
+  __m256i qs0 =
+      _mm256_subs_epi16(_mm256_loadu_si256((__m256i *)(s + 0 * p)), t80);
+  __m256i qs1 =
+      _mm256_subs_epi16(_mm256_loadu_si256((__m256i *)(s + 1 * p)), t80);
+
+  // filter_mask and hev_mask
+  const __m256i zero = _mm256_setzero_si256();
+  __m256i flat = _mm256_max_epi16(abs_p1p0, abs_q1q0);
+  __m256i hev = _mm256_subs_epu16(flat, thresh);
+  const __m256i ffff = _mm256_set1_epi16(0xFFFF);
+  hev = _mm256_xor_si256(_mm256_cmpeq_epi16(hev, zero), ffff);
+
+  abs_p0q0 = _mm256_adds_epu16(abs_p0q0, abs_p0q0);
+  abs_p1q1 = _mm256_srli_epi16(abs_p1q1, 1);
+  __m256i mask =
+      _mm256_subs_epu16(_mm256_adds_epu16(abs_p0q0, abs_p1q1), blimit);
+  mask = _mm256_xor_si256(_mm256_cmpeq_epi16(mask, zero), ffff);
+  // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2  > blimit) * -1;
+  // So taking maximums continues to work:
+  const __m256i one = _mm256_set1_epi16(1);
+  mask = _mm256_and_si256(mask, _mm256_adds_epu16(limit, one));
+  mask = _mm256_max_epi16(flat, mask);
+  // mask |= (abs(p1 - p0) > limit) * -1;
+  // mask |= (abs(q1 - q0) > limit) * -1;
+  __m256i work = _mm256_max_epi16(
+      _mm256_or_si256(_mm256_subs_epu16(p2, p1), _mm256_subs_epu16(p1, p2)),
+      _mm256_or_si256(_mm256_subs_epu16(p3, p2), _mm256_subs_epu16(p2, p3)));
+  mask = _mm256_max_epi16(work, mask);
+  work = _mm256_max_epi16(
+      _mm256_or_si256(_mm256_subs_epu16(q2, q1), _mm256_subs_epu16(q1, q2)),
+      _mm256_or_si256(_mm256_subs_epu16(q3, q2), _mm256_subs_epu16(q2, q3)));
+  mask = _mm256_max_epi16(work, mask);
+  mask = _mm256_subs_epu16(mask, limit);
+  mask = _mm256_cmpeq_epi16(mask, zero);
+
+  // filter4
+  const __m256i pmax = _mm256_subs_epi16(
+      _mm256_subs_epi16(_mm256_slli_epi16(one, bd), one), t80);
+  const __m256i pmin = _mm256_subs_epi16(zero, t80);
+
+  __m256i filt = _mm256_subs_epi16(ps1, qs1);
+  pixel_clamp(&pmin, &pmax, &filt);
+  filt = _mm256_and_si256(filt, hev);
+  __m256i work_a = _mm256_subs_epi16(qs0, ps0);
+  filt = _mm256_adds_epi16(filt, work_a);
+  filt = _mm256_adds_epi16(filt, work_a);
+  filt = _mm256_adds_epi16(filt, work_a);
+  pixel_clamp(&pmin, &pmax, &filt);
+
+  // (aom_filter + 3 * (qs0 - ps0)) & mask
+  filt = _mm256_and_si256(filt, mask);
+
+  const __m256i t4 = _mm256_set1_epi16(4);
+  const __m256i t3 = _mm256_set1_epi16(3);
+
+  __m256i filter1 = _mm256_adds_epi16(filt, t4);
+  pixel_clamp(&pmin, &pmax, &filter1);
+  __m256i filter2 = _mm256_adds_epi16(filt, t3);
+  pixel_clamp(&pmin, &pmax, &filter2);
+
+  // Filter1 >> 3
+  work_a = _mm256_cmpgt_epi16(zero, filter1);  // get the values that are <0
+  filter1 = _mm256_srli_epi16(filter1, 3);
+  work_a = _mm256_and_si256(work_a, tffe0);    // sign bits for the values < 0
+  filter1 = _mm256_and_si256(filter1, t1f);    // clamp the range
+  filter1 = _mm256_or_si256(filter1, work_a);  // reinsert the sign bits
+
+  // Filter2 >> 3
+  work_a = _mm256_cmpgt_epi16(zero, filter2);
+  filter2 = _mm256_srli_epi16(filter2, 3);
+  work_a = _mm256_and_si256(work_a, tffe0);
+  filter2 = _mm256_and_si256(filter2, t1f);
+  filter2 = _mm256_or_si256(filter2, work_a);
+
+  // filt >> 1
+  // equivalent to shifting 0x1f left by bitdepth - 8
+  // and setting new bits to 1
+  filt = _mm256_adds_epi16(filter1, one);
+  work_a = _mm256_cmpgt_epi16(zero, filt);
+  filt = _mm256_srli_epi16(filt, 1);
+  work_a = _mm256_and_si256(work_a, tff80);
+  filt = _mm256_and_si256(filt, t7f);
+  filt = _mm256_or_si256(filt, work_a);
+
+  filt = _mm256_andnot_si256(hev, filt);
+
+  filter1 = _mm256_subs_epi16(qs0, filter1);
+  pixel_clamp(&pmin, &pmax, &filter1);
+  q0 = _mm256_adds_epi16(filter1, t80);
+
+  filter1 = _mm256_subs_epi16(qs1, filt);
+  pixel_clamp(&pmin, &pmax, &filter1);
+  q1 = _mm256_adds_epi16(filter1, t80);
+
+  filter2 = _mm256_adds_epi16(ps0, filter2);
+  pixel_clamp(&pmin, &pmax, &filter2);
+  p0 = _mm256_adds_epi16(filter2, t80);
+
+  filter2 = _mm256_adds_epi16(ps1, filt);
+  pixel_clamp(&pmin, &pmax, &filter2);
+  p1 = _mm256_adds_epi16(filter2, t80);
+
+  _mm256_storeu_si256((__m256i *)(s - 2 * p), p1);
+  _mm256_storeu_si256((__m256i *)(s - 1 * p), p0);
+  _mm256_storeu_si256((__m256i *)(s + 0 * p), q0);
+  _mm256_storeu_si256((__m256i *)(s + 1 * p), q1);
+}
+
+void aom_highbd_lpf_horizontal_8_dual_avx2(
+    uint16_t *s, int p, const uint8_t *_blimit0, const uint8_t *_limit0,
+    const uint8_t *_thresh0, const uint8_t *_blimit1, const uint8_t *_limit1,
+    const uint8_t *_thresh1, int bd) {
+  DECLARE_ALIGNED(16, uint16_t, flat_op2[16]);
+  DECLARE_ALIGNED(16, uint16_t, flat_op1[16]);
+  DECLARE_ALIGNED(16, uint16_t, flat_op0[16]);
+  DECLARE_ALIGNED(16, uint16_t, flat_oq2[16]);
+  DECLARE_ALIGNED(16, uint16_t, flat_oq1[16]);
+  DECLARE_ALIGNED(16, uint16_t, flat_oq0[16]);
+
+  __m256i p3 = _mm256_loadu_si256((__m256i *)(s - 4 * p));
+  __m256i q3 = _mm256_loadu_si256((__m256i *)(s + 3 * p));
+  __m256i p2 = _mm256_loadu_si256((__m256i *)(s - 3 * p));
+  __m256i q2 = _mm256_loadu_si256((__m256i *)(s + 2 * p));
+  __m256i p1 = _mm256_loadu_si256((__m256i *)(s - 2 * p));
+  __m256i q1 = _mm256_loadu_si256((__m256i *)(s + 1 * p));
+  __m256i p0 = _mm256_loadu_si256((__m256i *)(s - 1 * p));
+  __m256i q0 = _mm256_loadu_si256((__m256i *)(s + 0 * p));
+
+  __m256i blimit, limit, thresh;
+  get_dual_limit(_blimit0, _limit0, _thresh0, _blimit1, _limit1, _thresh1, bd,
+                 &blimit, &limit, &thresh);
+
+  __m256i t80;
+  if (bd == 8) {
+    t80 = _mm256_set1_epi16(0x80);
+  } else if (bd == 10) {
+    t80 = _mm256_set1_epi16(0x200);
+  } else {  // bd == 12
+    t80 = _mm256_set1_epi16(0x800);
+  }
+
+  __m256i ps1, ps0, qs0, qs1;
+  ps1 = _mm256_subs_epi16(p1, t80);
+  ps0 = _mm256_subs_epi16(p0, t80);
+  qs0 = _mm256_subs_epi16(q0, t80);
+  qs1 = _mm256_subs_epi16(q1, t80);
+
+  // filter_mask and hev_mask
+  __m256i abs_p1q1, abs_p0q0, abs_q1q0, abs_p1p0, work;
+  abs_p1p0 = _mm256_abs_epi16(_mm256_sub_epi16(p1, p0));
+  abs_q1q0 = _mm256_abs_epi16(_mm256_sub_epi16(q1, q0));
+
+  abs_p0q0 = _mm256_abs_epi16(_mm256_sub_epi16(p0, q0));
+  abs_p1q1 = _mm256_abs_epi16(_mm256_sub_epi16(p1, q1));
+  __m256i flat = _mm256_max_epi16(abs_p1p0, abs_q1q0);
+  __m256i hev = _mm256_subs_epu16(flat, thresh);
+  const __m256i zero = _mm256_setzero_si256();
+  const __m256i ffff = _mm256_set1_epi16(0xFFFF);
+  hev = _mm256_xor_si256(_mm256_cmpeq_epi16(hev, zero), ffff);
+
+  abs_p0q0 = _mm256_adds_epu16(abs_p0q0, abs_p0q0);
+  abs_p1q1 = _mm256_srli_epi16(abs_p1q1, 1);
+  __m256i mask =
+      _mm256_subs_epu16(_mm256_adds_epu16(abs_p0q0, abs_p1q1), blimit);
+  mask = _mm256_xor_si256(_mm256_cmpeq_epi16(mask, zero), ffff);
+  // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2  > blimit) * -1;
+  // So taking maximums continues to work:
+
+  const __m256i one = _mm256_set1_epi16(1);
+  mask = _mm256_and_si256(mask, _mm256_adds_epu16(limit, one));
+  mask = _mm256_max_epi16(abs_p1p0, mask);
+  // mask |= (abs(p1 - p0) > limit) * -1;
+  mask = _mm256_max_epi16(abs_q1q0, mask);
+  // mask |= (abs(q1 - q0) > limit) * -1;
+
+  work = _mm256_max_epi16(_mm256_abs_epi16(_mm256_sub_epi16(p2, p1)),
+                          _mm256_abs_epi16(_mm256_sub_epi16(q2, q1)));
+  mask = _mm256_max_epi16(work, mask);
+  work = _mm256_max_epi16(_mm256_abs_epi16(_mm256_sub_epi16(p3, p2)),
+                          _mm256_abs_epi16(_mm256_sub_epi16(q3, q2)));
+  mask = _mm256_max_epi16(work, mask);
+  mask = _mm256_subs_epu16(mask, limit);
+  mask = _mm256_cmpeq_epi16(mask, zero);
+
+  // flat_mask4
+  flat = _mm256_max_epi16(_mm256_abs_epi16(_mm256_sub_epi16(p2, p0)),
+                          _mm256_abs_epi16(_mm256_sub_epi16(q2, q0)));
+  work = _mm256_max_epi16(_mm256_abs_epi16(_mm256_sub_epi16(p3, p0)),
+                          _mm256_abs_epi16(_mm256_sub_epi16(q3, q0)));
+  flat = _mm256_max_epi16(work, flat);
+  flat = _mm256_max_epi16(abs_p1p0, flat);
+  flat = _mm256_max_epi16(abs_q1q0, flat);
+
+  if (bd == 8)
+    flat = _mm256_subs_epu16(flat, one);
+  else if (bd == 10)
+    flat = _mm256_subs_epu16(flat, _mm256_slli_epi16(one, 2));
+  else  // bd == 12
+    flat = _mm256_subs_epu16(flat, _mm256_slli_epi16(one, 4));
+
+  flat = _mm256_cmpeq_epi16(flat, zero);
+  flat = _mm256_and_si256(flat, mask);  // flat & mask
+
+  // Added before shift for rounding part of ROUND_POWER_OF_TWO
+  __m256i workp_a, workp_b, workp_shft;
+  workp_a =
+      _mm256_add_epi16(_mm256_add_epi16(p3, p3), _mm256_add_epi16(p2, p1));
+  const __m256i four = _mm256_set1_epi16(4);
+  workp_a = _mm256_add_epi16(_mm256_add_epi16(workp_a, four), p0);
+  workp_b = _mm256_add_epi16(_mm256_add_epi16(q0, p2), p3);
+  workp_shft = _mm256_srli_epi16(_mm256_add_epi16(workp_a, workp_b), 3);
+  _mm256_storeu_si256((__m256i *)&flat_op2[0], workp_shft);
+
+  workp_b = _mm256_add_epi16(_mm256_add_epi16(q0, q1), p1);
+  workp_shft = _mm256_srli_epi16(_mm256_add_epi16(workp_a, workp_b), 3);
+  _mm256_storeu_si256((__m256i *)&flat_op1[0], workp_shft);
+
+  workp_a = _mm256_add_epi16(_mm256_sub_epi16(workp_a, p3), q2);
+  workp_b = _mm256_add_epi16(_mm256_sub_epi16(workp_b, p1), p0);
+  workp_shft = _mm256_srli_epi16(_mm256_add_epi16(workp_a, workp_b), 3);
+  _mm256_storeu_si256((__m256i *)&flat_op0[0], workp_shft);
+
+  workp_a = _mm256_add_epi16(_mm256_sub_epi16(workp_a, p3), q3);
+  workp_b = _mm256_add_epi16(_mm256_sub_epi16(workp_b, p0), q0);
+  workp_shft = _mm256_srli_epi16(_mm256_add_epi16(workp_a, workp_b), 3);
+  _mm256_storeu_si256((__m256i *)&flat_oq0[0], workp_shft);
+
+  workp_a = _mm256_add_epi16(_mm256_sub_epi16(workp_a, p2), q3);
+  workp_b = _mm256_add_epi16(_mm256_sub_epi16(workp_b, q0), q1);
+  workp_shft = _mm256_srli_epi16(_mm256_add_epi16(workp_a, workp_b), 3);
+  _mm256_storeu_si256((__m256i *)&flat_oq1[0], workp_shft);
+
+  workp_a = _mm256_add_epi16(_mm256_sub_epi16(workp_a, p1), q3);
+  workp_b = _mm256_add_epi16(_mm256_sub_epi16(workp_b, q1), q2);
+  workp_shft = _mm256_srli_epi16(_mm256_add_epi16(workp_a, workp_b), 3);
+  _mm256_storeu_si256((__m256i *)&flat_oq2[0], workp_shft);
+
+  // lp filter
+  const __m256i pmax = _mm256_subs_epi16(
+      _mm256_subs_epi16(_mm256_slli_epi16(one, bd), one), t80);
+  const __m256i pmin = _mm256_subs_epi16(zero, t80);
+
+  __m256i filt, filter1, filter2, work_a;
+  filt = _mm256_subs_epi16(ps1, qs1);
+  pixel_clamp(&pmin, &pmax, &filt);
+  filt = _mm256_and_si256(filt, hev);
+  work_a = _mm256_subs_epi16(qs0, ps0);
+  filt = _mm256_adds_epi16(filt, work_a);
+  filt = _mm256_adds_epi16(filt, work_a);
+  filt = _mm256_adds_epi16(filt, work_a);
+  // (aom_filter + 3 * (qs0 - ps0)) & mask
+  pixel_clamp(&pmin, &pmax, &filt);
+  filt = _mm256_and_si256(filt, mask);
+
+  const __m256i t4 = _mm256_set1_epi16(4);
+  const __m256i t3 = _mm256_set1_epi16(3);
+
+  filter1 = _mm256_adds_epi16(filt, t4);
+  filter2 = _mm256_adds_epi16(filt, t3);
+
+  // Filter1 >> 3
+  pixel_clamp(&pmin, &pmax, &filter1);
+  filter1 = _mm256_srai_epi16(filter1, 3);
+
+  // Filter2 >> 3
+  pixel_clamp(&pmin, &pmax, &filter2);
+  filter2 = _mm256_srai_epi16(filter2, 3);
+
+  // filt >> 1
+  filt = _mm256_adds_epi16(filter1, one);
+  filt = _mm256_srai_epi16(filt, 1);
+  // filter = ROUND_POWER_OF_TWO(filter1, 1) & ~hev;
+  filt = _mm256_andnot_si256(hev, filt);
+
+  work_a = _mm256_subs_epi16(qs0, filter1);
+  pixel_clamp(&pmin, &pmax, &work_a);
+  work_a = _mm256_adds_epi16(work_a, t80);
+  q0 = _mm256_loadu_si256((__m256i *)flat_oq0);
+  work_a = _mm256_andnot_si256(flat, work_a);
+  q0 = _mm256_and_si256(flat, q0);
+  q0 = _mm256_or_si256(work_a, q0);
+
+  work_a = _mm256_subs_epi16(qs1, filt);
+  pixel_clamp(&pmin, &pmax, &work_a);
+  work_a = _mm256_adds_epi16(work_a, t80);
+  q1 = _mm256_loadu_si256((__m256i *)flat_oq1);
+  work_a = _mm256_andnot_si256(flat, work_a);
+  q1 = _mm256_and_si256(flat, q1);
+  q1 = _mm256_or_si256(work_a, q1);
+
+  work_a = _mm256_loadu_si256((__m256i *)(s + 2 * p));
+  q2 = _mm256_loadu_si256((__m256i *)flat_oq2);
+  work_a = _mm256_andnot_si256(flat, work_a);
+  q2 = _mm256_and_si256(flat, q2);
+  q2 = _mm256_or_si256(work_a, q2);
+
+  work_a = _mm256_adds_epi16(ps0, filter2);
+  pixel_clamp(&pmin, &pmax, &work_a);
+  work_a = _mm256_adds_epi16(work_a, t80);
+  p0 = _mm256_loadu_si256((__m256i *)flat_op0);
+  work_a = _mm256_andnot_si256(flat, work_a);
+  p0 = _mm256_and_si256(flat, p0);
+  p0 = _mm256_or_si256(work_a, p0);
+
+  work_a = _mm256_adds_epi16(ps1, filt);
+  pixel_clamp(&pmin, &pmax, &work_a);
+  work_a = _mm256_adds_epi16(work_a, t80);
+  p1 = _mm256_loadu_si256((__m256i *)flat_op1);
+  work_a = _mm256_andnot_si256(flat, work_a);
+  p1 = _mm256_and_si256(flat, p1);
+  p1 = _mm256_or_si256(work_a, p1);
+
+  work_a = _mm256_loadu_si256((__m256i *)(s - 3 * p));
+  p2 = _mm256_loadu_si256((__m256i *)flat_op2);
+  work_a = _mm256_andnot_si256(flat, work_a);
+  p2 = _mm256_and_si256(flat, p2);
+  p2 = _mm256_or_si256(work_a, p2);
+
+  _mm256_storeu_si256((__m256i *)(s - 3 * p), p2);
+  _mm256_storeu_si256((__m256i *)(s - 2 * p), p1);
+  _mm256_storeu_si256((__m256i *)(s - 1 * p), p0);
+  _mm256_storeu_si256((__m256i *)(s + 0 * p), q0);
+  _mm256_storeu_si256((__m256i *)(s + 1 * p), q1);
+  _mm256_storeu_si256((__m256i *)(s + 2 * p), q2);
+}
+
+void aom_highbd_lpf_vertical_4_dual_avx2(
+    uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0,
+    const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
+    const uint8_t *thresh1, int bd) {
+  DECLARE_ALIGNED(16, uint16_t, t_dst[16 * 8]);
+  uint16_t *src[2];
+  uint16_t *dst[2];
+
+  // Transpose 8x16
+  highbd_transpose8x16(s - 4, s - 4 + p * 8, p, t_dst, 16);
+
+  // Loop filtering
+  aom_highbd_lpf_horizontal_4_dual_avx2(t_dst + 4 * 16, 16, blimit0, limit0,
+                                        thresh0, blimit1, limit1, thresh1, bd);
+  src[0] = t_dst;
+  src[1] = t_dst + 8;
+  dst[0] = s - 4;
+  dst[1] = s - 4 + p * 8;
+
+  // Transpose back
+  highbd_transpose(src, 16, dst, p, 2);
+}
+
+void aom_highbd_lpf_vertical_8_dual_avx2(
+    uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0,
+    const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1,
+    const uint8_t *thresh1, int bd) {
+  DECLARE_ALIGNED(16, uint16_t, t_dst[16 * 8]);
+  uint16_t *src[2];
+  uint16_t *dst[2];
+
+  // Transpose 8x16
+  highbd_transpose8x16(s - 4, s - 4 + p * 8, p, t_dst, 16);
+
+  // Loop filtering
+  aom_highbd_lpf_horizontal_8_dual_avx2(t_dst + 4 * 16, 16, blimit0, limit0,
+                                        thresh0, blimit1, limit1, thresh1, bd);
+  src[0] = t_dst;
+  src[1] = t_dst + 8;
+
+  dst[0] = s - 4;
+  dst[1] = s - 4 + p * 8;
+
+  // Transpose back
+  highbd_transpose(src, 16, dst, p, 2);
+}
diff --git a/aom_dsp/x86/highbd_loopfilter_sse2.c b/aom_dsp/x86/highbd_loopfilter_sse2.c
index 7636987..b7f7d96 100644
--- a/aom_dsp/x86/highbd_loopfilter_sse2.c
+++ b/aom_dsp/x86/highbd_loopfilter_sse2.c
@@ -12,6 +12,7 @@
 #include <emmintrin.h>  // SSE2
 
 #include "./aom_dsp_rtcd.h"
+#include "aom_dsp/x86/lpf_common_sse2.h"
 #include "aom_ports/mem.h"
 #include "aom_ports/emmintrin_compat.h"
 
@@ -888,118 +889,6 @@
   aom_highbd_lpf_horizontal_4_sse2(s + 8, p, _blimit1, _limit1, _thresh1, bd);
 }
 
-static INLINE void highbd_transpose(uint16_t *src[], int in_p, uint16_t *dst[],
-                                    int out_p, int num_8x8_to_transpose) {
-  int idx8x8 = 0;
-  __m128i p0, p1, p2, p3, p4, p5, p6, p7, x0, x1, x2, x3, x4, x5, x6, x7;
-  do {
-    uint16_t *in = src[idx8x8];
-    uint16_t *out = dst[idx8x8];
-
-    p0 =
-        _mm_loadu_si128((__m128i *)(in + 0 * in_p));  // 00 01 02 03 04 05 06 07
-    p1 =
-        _mm_loadu_si128((__m128i *)(in + 1 * in_p));  // 10 11 12 13 14 15 16 17
-    p2 =
-        _mm_loadu_si128((__m128i *)(in + 2 * in_p));  // 20 21 22 23 24 25 26 27
-    p3 =
-        _mm_loadu_si128((__m128i *)(in + 3 * in_p));  // 30 31 32 33 34 35 36 37
-    p4 =
-        _mm_loadu_si128((__m128i *)(in + 4 * in_p));  // 40 41 42 43 44 45 46 47
-    p5 =
-        _mm_loadu_si128((__m128i *)(in + 5 * in_p));  // 50 51 52 53 54 55 56 57
-    p6 =
-        _mm_loadu_si128((__m128i *)(in + 6 * in_p));  // 60 61 62 63 64 65 66 67
-    p7 =
-        _mm_loadu_si128((__m128i *)(in + 7 * in_p));  // 70 71 72 73 74 75 76 77
-    // 00 10 01 11 02 12 03 13
-    x0 = _mm_unpacklo_epi16(p0, p1);
-    // 20 30 21 31 22 32 23 33
-    x1 = _mm_unpacklo_epi16(p2, p3);
-    // 40 50 41 51 42 52 43 53
-    x2 = _mm_unpacklo_epi16(p4, p5);
-    // 60 70 61 71 62 72 63 73
-    x3 = _mm_unpacklo_epi16(p6, p7);
-    // 00 10 20 30 01 11 21 31
-    x4 = _mm_unpacklo_epi32(x0, x1);
-    // 40 50 60 70 41 51 61 71
-    x5 = _mm_unpacklo_epi32(x2, x3);
-    // 00 10 20 30 40 50 60 70
-    x6 = _mm_unpacklo_epi64(x4, x5);
-    // 01 11 21 31 41 51 61 71
-    x7 = _mm_unpackhi_epi64(x4, x5);
-
-    _mm_storeu_si128((__m128i *)(out + 0 * out_p), x6);
-    // 00 10 20 30 40 50 60 70
-    _mm_storeu_si128((__m128i *)(out + 1 * out_p), x7);
-    // 01 11 21 31 41 51 61 71
-
-    // 02 12 22 32 03 13 23 33
-    x4 = _mm_unpackhi_epi32(x0, x1);
-    // 42 52 62 72 43 53 63 73
-    x5 = _mm_unpackhi_epi32(x2, x3);
-    // 02 12 22 32 42 52 62 72
-    x6 = _mm_unpacklo_epi64(x4, x5);
-    // 03 13 23 33 43 53 63 73
-    x7 = _mm_unpackhi_epi64(x4, x5);
-
-    _mm_storeu_si128((__m128i *)(out + 2 * out_p), x6);
-    // 02 12 22 32 42 52 62 72
-    _mm_storeu_si128((__m128i *)(out + 3 * out_p), x7);
-    // 03 13 23 33 43 53 63 73
-
-    // 04 14 05 15 06 16 07 17
-    x0 = _mm_unpackhi_epi16(p0, p1);
-    // 24 34 25 35 26 36 27 37
-    x1 = _mm_unpackhi_epi16(p2, p3);
-    // 44 54 45 55 46 56 47 57
-    x2 = _mm_unpackhi_epi16(p4, p5);
-    // 64 74 65 75 66 76 67 77
-    x3 = _mm_unpackhi_epi16(p6, p7);
-    // 04 14 24 34 05 15 25 35
-    x4 = _mm_unpacklo_epi32(x0, x1);
-    // 44 54 64 74 45 55 65 75
-    x5 = _mm_unpacklo_epi32(x2, x3);
-    // 04 14 24 34 44 54 64 74
-    x6 = _mm_unpacklo_epi64(x4, x5);
-    // 05 15 25 35 45 55 65 75
-    x7 = _mm_unpackhi_epi64(x4, x5);
-
-    _mm_storeu_si128((__m128i *)(out + 4 * out_p), x6);
-    // 04 14 24 34 44 54 64 74
-    _mm_storeu_si128((__m128i *)(out + 5 * out_p), x7);
-    // 05 15 25 35 45 55 65 75
-
-    // 06 16 26 36 07 17 27 37
-    x4 = _mm_unpackhi_epi32(x0, x1);
-    // 46 56 66 76 47 57 67 77
-    x5 = _mm_unpackhi_epi32(x2, x3);
-    // 06 16 26 36 46 56 66 76
-    x6 = _mm_unpacklo_epi64(x4, x5);
-    // 07 17 27 37 47 57 67 77
-    x7 = _mm_unpackhi_epi64(x4, x5);
-
-    _mm_storeu_si128((__m128i *)(out + 6 * out_p), x6);
-    // 06 16 26 36 46 56 66 76
-    _mm_storeu_si128((__m128i *)(out + 7 * out_p), x7);
-    // 07 17 27 37 47 57 67 77
-  } while (++idx8x8 < num_8x8_to_transpose);
-}
-
-static INLINE void highbd_transpose8x16(uint16_t *in0, uint16_t *in1, int in_p,
-                                        uint16_t *out, int out_p) {
-  uint16_t *src0[1];
-  uint16_t *src1[1];
-  uint16_t *dest0[1];
-  uint16_t *dest1[1];
-  src0[0] = in0;
-  src1[0] = in1;
-  dest0[0] = out;
-  dest1[0] = out + 8;
-  highbd_transpose(src0, in_p, dest0, out_p, 1);
-  highbd_transpose(src1, in_p, dest1, out_p, 1);
-}
-
 void aom_highbd_lpf_vertical_4_sse2(uint16_t *s, int p, const uint8_t *blimit,
                                     const uint8_t *limit, const uint8_t *thresh,
                                     int bd) {
diff --git a/aom_dsp/x86/lpf_common_sse2.h b/aom_dsp/x86/lpf_common_sse2.h
new file mode 100644
index 0000000..027c890
--- /dev/null
+++ b/aom_dsp/x86/lpf_common_sse2.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2017, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef AOM_DSP_X86_LPF_COMMON_X86_H
+#define AOM_DSP_X86_LPF_COMMON_X86_H
+
+#include <emmintrin.h>  // SSE2
+
+#include "./aom_config.h"
+
+static INLINE void highbd_transpose(uint16_t *src[], int in_p, uint16_t *dst[],
+                                    int out_p, int num_8x8_to_transpose) {
+  int idx8x8 = 0;
+  __m128i p0, p1, p2, p3, p4, p5, p6, p7, x0, x1, x2, x3, x4, x5, x6, x7;
+  do {
+    uint16_t *in = src[idx8x8];
+    uint16_t *out = dst[idx8x8];
+
+    p0 =
+        _mm_loadu_si128((__m128i *)(in + 0 * in_p));  // 00 01 02 03 04 05 06 07
+    p1 =
+        _mm_loadu_si128((__m128i *)(in + 1 * in_p));  // 10 11 12 13 14 15 16 17
+    p2 =
+        _mm_loadu_si128((__m128i *)(in + 2 * in_p));  // 20 21 22 23 24 25 26 27
+    p3 =
+        _mm_loadu_si128((__m128i *)(in + 3 * in_p));  // 30 31 32 33 34 35 36 37
+    p4 =
+        _mm_loadu_si128((__m128i *)(in + 4 * in_p));  // 40 41 42 43 44 45 46 47
+    p5 =
+        _mm_loadu_si128((__m128i *)(in + 5 * in_p));  // 50 51 52 53 54 55 56 57
+    p6 =
+        _mm_loadu_si128((__m128i *)(in + 6 * in_p));  // 60 61 62 63 64 65 66 67
+    p7 =
+        _mm_loadu_si128((__m128i *)(in + 7 * in_p));  // 70 71 72 73 74 75 76 77
+    // 00 10 01 11 02 12 03 13
+    x0 = _mm_unpacklo_epi16(p0, p1);
+    // 20 30 21 31 22 32 23 33
+    x1 = _mm_unpacklo_epi16(p2, p3);
+    // 40 50 41 51 42 52 43 53
+    x2 = _mm_unpacklo_epi16(p4, p5);
+    // 60 70 61 71 62 72 63 73
+    x3 = _mm_unpacklo_epi16(p6, p7);
+    // 00 10 20 30 01 11 21 31
+    x4 = _mm_unpacklo_epi32(x0, x1);
+    // 40 50 60 70 41 51 61 71
+    x5 = _mm_unpacklo_epi32(x2, x3);
+    // 00 10 20 30 40 50 60 70
+    x6 = _mm_unpacklo_epi64(x4, x5);
+    // 01 11 21 31 41 51 61 71
+    x7 = _mm_unpackhi_epi64(x4, x5);
+
+    _mm_storeu_si128((__m128i *)(out + 0 * out_p), x6);
+    // 00 10 20 30 40 50 60 70
+    _mm_storeu_si128((__m128i *)(out + 1 * out_p), x7);
+    // 01 11 21 31 41 51 61 71
+
+    // 02 12 22 32 03 13 23 33
+    x4 = _mm_unpackhi_epi32(x0, x1);
+    // 42 52 62 72 43 53 63 73
+    x5 = _mm_unpackhi_epi32(x2, x3);
+    // 02 12 22 32 42 52 62 72
+    x6 = _mm_unpacklo_epi64(x4, x5);
+    // 03 13 23 33 43 53 63 73
+    x7 = _mm_unpackhi_epi64(x4, x5);
+
+    _mm_storeu_si128((__m128i *)(out + 2 * out_p), x6);
+    // 02 12 22 32 42 52 62 72
+    _mm_storeu_si128((__m128i *)(out + 3 * out_p), x7);
+    // 03 13 23 33 43 53 63 73
+
+    // 04 14 05 15 06 16 07 17
+    x0 = _mm_unpackhi_epi16(p0, p1);
+    // 24 34 25 35 26 36 27 37
+    x1 = _mm_unpackhi_epi16(p2, p3);
+    // 44 54 45 55 46 56 47 57
+    x2 = _mm_unpackhi_epi16(p4, p5);
+    // 64 74 65 75 66 76 67 77
+    x3 = _mm_unpackhi_epi16(p6, p7);
+    // 04 14 24 34 05 15 25 35
+    x4 = _mm_unpacklo_epi32(x0, x1);
+    // 44 54 64 74 45 55 65 75
+    x5 = _mm_unpacklo_epi32(x2, x3);
+    // 04 14 24 34 44 54 64 74
+    x6 = _mm_unpacklo_epi64(x4, x5);
+    // 05 15 25 35 45 55 65 75
+    x7 = _mm_unpackhi_epi64(x4, x5);
+
+    _mm_storeu_si128((__m128i *)(out + 4 * out_p), x6);
+    // 04 14 24 34 44 54 64 74
+    _mm_storeu_si128((__m128i *)(out + 5 * out_p), x7);
+    // 05 15 25 35 45 55 65 75
+
+    // 06 16 26 36 07 17 27 37
+    x4 = _mm_unpackhi_epi32(x0, x1);
+    // 46 56 66 76 47 57 67 77
+    x5 = _mm_unpackhi_epi32(x2, x3);
+    // 06 16 26 36 46 56 66 76
+    x6 = _mm_unpacklo_epi64(x4, x5);
+    // 07 17 27 37 47 57 67 77
+    x7 = _mm_unpackhi_epi64(x4, x5);
+
+    _mm_storeu_si128((__m128i *)(out + 6 * out_p), x6);
+    // 06 16 26 36 46 56 66 76
+    _mm_storeu_si128((__m128i *)(out + 7 * out_p), x7);
+    // 07 17 27 37 47 57 67 77
+  } while (++idx8x8 < num_8x8_to_transpose);
+}
+
+static INLINE void highbd_transpose8x16(uint16_t *in0, uint16_t *in1, int in_p,
+                                        uint16_t *out, int out_p) {
+  uint16_t *src0[1];
+  uint16_t *src1[1];
+  uint16_t *dest0[1];
+  uint16_t *dest1[1];
+  src0[0] = in0;
+  src1[0] = in1;
+  dest0[0] = out;
+  dest1[0] = out + 8;
+  highbd_transpose(src0, in_p, dest0, out_p, 1);
+  highbd_transpose(src1, in_p, dest1, out_p, 1);
+}
+#endif  // AOM_DSP_X86_LPF_COMMON_X86_H
diff --git a/aom_dsp/x86/txfm_common_avx2.h b/aom_dsp/x86/txfm_common_avx2.h
index 4f7a60c..1a8fed7 100644
--- a/aom_dsp/x86/txfm_common_avx2.h
+++ b/aom_dsp/x86/txfm_common_avx2.h
@@ -15,6 +15,7 @@
 #include <immintrin.h>
 
 #include "aom_dsp/txfm_common.h"
+#include "aom_dsp/x86/common_avx2.h"
 
 #define pair256_set_epi16(a, b)                                            \
   _mm256_set_epi16((int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a), \
@@ -34,135 +35,6 @@
   *u = _mm256_permute2x128_si256(v, v, 1);
 }
 
-// Note: in and out could have the same value
-static INLINE void mm256_transpose_16x16(const __m256i *in, __m256i *out) {
-  __m256i tr0_0 = _mm256_unpacklo_epi16(in[0], in[1]);
-  __m256i tr0_1 = _mm256_unpackhi_epi16(in[0], in[1]);
-  __m256i tr0_2 = _mm256_unpacklo_epi16(in[2], in[3]);
-  __m256i tr0_3 = _mm256_unpackhi_epi16(in[2], in[3]);
-  __m256i tr0_4 = _mm256_unpacklo_epi16(in[4], in[5]);
-  __m256i tr0_5 = _mm256_unpackhi_epi16(in[4], in[5]);
-  __m256i tr0_6 = _mm256_unpacklo_epi16(in[6], in[7]);
-  __m256i tr0_7 = _mm256_unpackhi_epi16(in[6], in[7]);
-
-  __m256i tr0_8 = _mm256_unpacklo_epi16(in[8], in[9]);
-  __m256i tr0_9 = _mm256_unpackhi_epi16(in[8], in[9]);
-  __m256i tr0_a = _mm256_unpacklo_epi16(in[10], in[11]);
-  __m256i tr0_b = _mm256_unpackhi_epi16(in[10], in[11]);
-  __m256i tr0_c = _mm256_unpacklo_epi16(in[12], in[13]);
-  __m256i tr0_d = _mm256_unpackhi_epi16(in[12], in[13]);
-  __m256i tr0_e = _mm256_unpacklo_epi16(in[14], in[15]);
-  __m256i tr0_f = _mm256_unpackhi_epi16(in[14], in[15]);
-
-  // 00 10 01 11 02 12 03 13  08 18 09 19 0a 1a 0b 1b
-  // 04 14 05 15 06 16 07 17  0c 1c 0d 1d 0e 1e 0f 1f
-  // 20 30 21 31 22 32 23 33  28 38 29 39 2a 3a 2b 3b
-  // 24 34 25 35 26 36 27 37  2c 3c 2d 3d 2e 3e 2f 3f
-  // 40 50 41 51 42 52 43 53  48 58 49 59 4a 5a 4b 5b
-  // 44 54 45 55 46 56 47 57  4c 5c 4d 5d 4e 5e 4f 5f
-  // 60 70 61 71 62 72 63 73  68 78 69 79 6a 7a 6b 7b
-  // 64 74 65 75 66 76 67 77  6c 7c 6d 7d 6e 7e 6f 7f
-
-  // 80 90 81 91 82 92 83 93  88 98 89 99 8a 9a 8b 9b
-  // 84 94 85 95 86 96 87 97  8c 9c 8d 9d 8e 9e 8f 9f
-  // a0 b0 a1 b1 a2 b2 a3 b3  a8 b8 a9 b9 aa ba ab bb
-  // a4 b4 a5 b5 a6 b6 a7 b7  ac bc ad bd ae be af bf
-  // c0 d0 c1 d1 c2 d2 c3 d3  c8 d8 c9 d9 ca da cb db
-  // c4 d4 c5 d5 c6 d6 c7 d7  cc dc cd dd ce de cf df
-  // e0 f0 e1 f1 e2 f2 e3 f3  e8 f8 e9 f9 ea fa eb fb
-  // e4 f4 e5 f5 e6 f6 e7 f7  ec fc ed fd ee fe ef ff
-
-  __m256i tr1_0 = _mm256_unpacklo_epi32(tr0_0, tr0_2);
-  __m256i tr1_1 = _mm256_unpackhi_epi32(tr0_0, tr0_2);
-  __m256i tr1_2 = _mm256_unpacklo_epi32(tr0_1, tr0_3);
-  __m256i tr1_3 = _mm256_unpackhi_epi32(tr0_1, tr0_3);
-  __m256i tr1_4 = _mm256_unpacklo_epi32(tr0_4, tr0_6);
-  __m256i tr1_5 = _mm256_unpackhi_epi32(tr0_4, tr0_6);
-  __m256i tr1_6 = _mm256_unpacklo_epi32(tr0_5, tr0_7);
-  __m256i tr1_7 = _mm256_unpackhi_epi32(tr0_5, tr0_7);
-
-  __m256i tr1_8 = _mm256_unpacklo_epi32(tr0_8, tr0_a);
-  __m256i tr1_9 = _mm256_unpackhi_epi32(tr0_8, tr0_a);
-  __m256i tr1_a = _mm256_unpacklo_epi32(tr0_9, tr0_b);
-  __m256i tr1_b = _mm256_unpackhi_epi32(tr0_9, tr0_b);
-  __m256i tr1_c = _mm256_unpacklo_epi32(tr0_c, tr0_e);
-  __m256i tr1_d = _mm256_unpackhi_epi32(tr0_c, tr0_e);
-  __m256i tr1_e = _mm256_unpacklo_epi32(tr0_d, tr0_f);
-  __m256i tr1_f = _mm256_unpackhi_epi32(tr0_d, tr0_f);
-
-  // 00 10 20 30 01 11 21 31  08 18 28 38 09 19 29 39
-  // 02 12 22 32 03 13 23 33  0a 1a 2a 3a 0b 1b 2b 3b
-  // 04 14 24 34 05 15 25 35  0c 1c 2c 3c 0d 1d 2d 3d
-  // 06 16 26 36 07 17 27 37  0e 1e 2e 3e 0f 1f 2f 3f
-  // 40 50 60 70 41 51 61 71  48 58 68 78 49 59 69 79
-  // 42 52 62 72 43 53 63 73  4a 5a 6a 7a 4b 5b 6b 7b
-  // 44 54 64 74 45 55 65 75  4c 5c 6c 7c 4d 5d 6d 7d
-  // 46 56 66 76 47 57 67 77  4e 5e 6e 7e 4f 5f 6f 7f
-
-  // 80 90 a0 b0 81 91 a1 b1  88 98 a8 b8 89 99 a9 b9
-  // 82 92 a2 b2 83 93 a3 b3  8a 9a aa ba 8b 9b ab bb
-  // 84 94 a4 b4 85 95 a5 b5  8c 9c ac bc 8d 9d ad bd
-  // 86 96 a6 b6 87 97 a7 b7  8e ae 9e be 8f 9f af bf
-  // c0 d0 e0 f0 c1 d1 e1 f1  c8 d8 e8 f8 c9 d9 e9 f9
-  // c2 d2 e2 f2 c3 d3 e3 f3  ca da ea fa cb db eb fb
-  // c4 d4 e4 f4 c5 d5 e5 f5  cc dc ef fc cd dd ed fd
-  // c6 d6 e6 f6 c7 d7 e7 f7  ce de ee fe cf df ef ff
-
-  tr0_0 = _mm256_unpacklo_epi64(tr1_0, tr1_4);
-  tr0_1 = _mm256_unpackhi_epi64(tr1_0, tr1_4);
-  tr0_2 = _mm256_unpacklo_epi64(tr1_1, tr1_5);
-  tr0_3 = _mm256_unpackhi_epi64(tr1_1, tr1_5);
-  tr0_4 = _mm256_unpacklo_epi64(tr1_2, tr1_6);
-  tr0_5 = _mm256_unpackhi_epi64(tr1_2, tr1_6);
-  tr0_6 = _mm256_unpacklo_epi64(tr1_3, tr1_7);
-  tr0_7 = _mm256_unpackhi_epi64(tr1_3, tr1_7);
-
-  tr0_8 = _mm256_unpacklo_epi64(tr1_8, tr1_c);
-  tr0_9 = _mm256_unpackhi_epi64(tr1_8, tr1_c);
-  tr0_a = _mm256_unpacklo_epi64(tr1_9, tr1_d);
-  tr0_b = _mm256_unpackhi_epi64(tr1_9, tr1_d);
-  tr0_c = _mm256_unpacklo_epi64(tr1_a, tr1_e);
-  tr0_d = _mm256_unpackhi_epi64(tr1_a, tr1_e);
-  tr0_e = _mm256_unpacklo_epi64(tr1_b, tr1_f);
-  tr0_f = _mm256_unpackhi_epi64(tr1_b, tr1_f);
-
-  // 00 10 20 30 40 50 60 70  08 18 28 38 48 58 68 78
-  // 01 11 21 31 41 51 61 71  09 19 29 39 49 59 69 79
-  // 02 12 22 32 42 52 62 72  0a 1a 2a 3a 4a 5a 6a 7a
-  // 03 13 23 33 43 53 63 73  0b 1b 2b 3b 4b 5b 6b 7b
-  // 04 14 24 34 44 54 64 74  0c 1c 2c 3c 4c 5c 6c 7c
-  // 05 15 25 35 45 55 65 75  0d 1d 2d 3d 4d 5d 6d 7d
-  // 06 16 26 36 46 56 66 76  0e 1e 2e 3e 4e 5e 6e 7e
-  // 07 17 27 37 47 57 67 77  0f 1f 2f 3f 4f 5f 6f 7f
-
-  // 80 90 a0 b0 c0 d0 e0 f0  88 98 a8 b8 c8 d8 e8 f8
-  // 81 91 a1 b1 c1 d1 e1 f1  89 99 a9 b9 c9 d9 e9 f9
-  // 82 92 a2 b2 c2 d2 e2 f2  8a 9a aa ba ca da ea fa
-  // 83 93 a3 b3 c3 d3 e3 f3  8b 9b ab bb cb db eb fb
-  // 84 94 a4 b4 c4 d4 e4 f4  8c 9c ac bc cc dc ef fc
-  // 85 95 a5 b5 c5 d5 e5 f5  8d 9d ad bd cd dd ed fd
-  // 86 96 a6 b6 c6 d6 e6 f6  8e ae 9e be ce de ee fe
-  // 87 97 a7 b7 c7 d7 e7 f7  8f 9f af bf cf df ef ff
-
-  out[0] = _mm256_permute2x128_si256(tr0_0, tr0_8, 0x20);  // 0010 0000
-  out[8] = _mm256_permute2x128_si256(tr0_0, tr0_8, 0x31);  // 0011 0001
-  out[1] = _mm256_permute2x128_si256(tr0_1, tr0_9, 0x20);
-  out[9] = _mm256_permute2x128_si256(tr0_1, tr0_9, 0x31);
-  out[2] = _mm256_permute2x128_si256(tr0_2, tr0_a, 0x20);
-  out[10] = _mm256_permute2x128_si256(tr0_2, tr0_a, 0x31);
-  out[3] = _mm256_permute2x128_si256(tr0_3, tr0_b, 0x20);
-  out[11] = _mm256_permute2x128_si256(tr0_3, tr0_b, 0x31);
-
-  out[4] = _mm256_permute2x128_si256(tr0_4, tr0_c, 0x20);
-  out[12] = _mm256_permute2x128_si256(tr0_4, tr0_c, 0x31);
-  out[5] = _mm256_permute2x128_si256(tr0_5, tr0_d, 0x20);
-  out[13] = _mm256_permute2x128_si256(tr0_5, tr0_d, 0x31);
-  out[6] = _mm256_permute2x128_si256(tr0_6, tr0_e, 0x20);
-  out[14] = _mm256_permute2x128_si256(tr0_6, tr0_e, 0x31);
-  out[7] = _mm256_permute2x128_si256(tr0_7, tr0_f, 0x20);
-  out[15] = _mm256_permute2x128_si256(tr0_7, tr0_f, 0x31);
-}
-
 static INLINE __m256i butter_fly(const __m256i *a0, const __m256i *a1,
                                  const __m256i *cospi) {
   const __m256i dct_rounding = _mm256_set1_epi32(DCT_CONST_ROUNDING);
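
Note: the 16x16 transpose removed here is moved verbatim into the new
aom_dsp/x86/common_avx2.h (included above) so the highbd loop filter can
share it with the transforms. Its unpack rounds operate within each 128-bit
lane, so after the epi64 round register k holds the first half of output
row k in its low lane and of row k+8 in its high lane, while register k+8
holds the second halves; the final _mm256_permute2x128_si256 calls stitch
the lanes together. A sketch of those selector values (the helper name is
illustrative, not part of the patch):

    #include <immintrin.h>

    // imm 0x20 selects {a.lo128, b.lo128}; imm 0x31 selects {a.hi128,
    // b.hi128} -- exactly the pairing used for out[k] and out[k + 8].
    static inline void combine_lanes(__m256i a, __m256i b, __m256i *row_k,
                                     __m256i *row_k_plus_8) {
      *row_k = _mm256_permute2x128_si256(a, b, 0x20);
      *row_k_plus_8 = _mm256_permute2x128_si256(a, b, 0x31);
    }
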
diff --git a/test/lpf_8_test.cc b/test/lpf_8_test.cc
index f050718..30ef04a 100644
--- a/test/lpf_8_test.cc
+++ b/test/lpf_8_test.cc
@@ -35,6 +35,8 @@
 
 const int number_of_iterations = 10000;
 
+const int kSpeedTestNum = 500000;
+
 #if CONFIG_HIGHBITDEPTH
 typedef void (*loop_op_t)(uint16_t *s, int p, const uint8_t *blimit,
                           const uint8_t *limit, const uint8_t *thresh, int bd);
@@ -242,6 +244,43 @@
       << "First failed at test case " << first_failure;
 }
 
+TEST_P(Loop8Test6Param, DISABLED_Speed) {
+  ACMRandom rnd(ACMRandom::DeterministicSeed());
+  const int count_test_block = kSpeedTestNum;
+#if CONFIG_HIGHBITDEPTH
+  const int32_t bd = bit_depth_;
+  DECLARE_ALIGNED(16, uint16_t, s[kNumCoeffs]);
+#else
+  DECLARE_ALIGNED(8, uint8_t, s[kNumCoeffs]);
+#endif  // CONFIG_HIGHBITDEPTH
+
+  uint8_t tmp = static_cast<uint8_t>(rnd(3 * MAX_LOOP_FILTER + 4));
+  DECLARE_ALIGNED(16, const uint8_t,
+                  blimit[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                  tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
+  tmp = static_cast<uint8_t>(rnd(MAX_LOOP_FILTER));
+  DECLARE_ALIGNED(16, const uint8_t,
+                  limit[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                 tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
+  tmp = rnd.Rand8();
+  DECLARE_ALIGNED(16, const uint8_t,
+                  thresh[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                  tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
+
+  int32_t p = kNumCoeffs / 32;
+  for (int j = 0; j < kNumCoeffs; ++j) {
+    s[j] = rnd.Rand16() & mask_;
+  }
+
+  for (int i = 0; i < count_test_block; ++i) {
+#if CONFIG_HIGHBITDEPTH
+    loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, bd);
+#else
+    loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh);
+#endif  // CONFIG_HIGHBITDEPTH
+  }
+}
+
 TEST_P(Loop8Test9Param, OperationCheck) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
   const int count_test_block = number_of_iterations;
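
Note: the DISABLED_Speed test above (and its dual-filter counterpart below)
calls the filter kSpeedTestNum = 500000 times on fixed random input. gtest
skips DISABLED_ tests unless the binary is run with
--gtest_also_run_disabled_tests and a matching --gtest_filter; the ms
figures in the summary come from timing such runs. One way to time the loop
in-process, as a sketch (wrapping it with aom_usec_timer from
aom_ports/aom_timer.h is an assumption, not part of this patch; the other
names are the test's own locals and members):

    #include <stdio.h>
    #include "aom_ports/aom_timer.h"

    aom_usec_timer timer;
    aom_usec_timer_start(&timer);
    for (int i = 0; i < count_test_block; ++i)
      loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, bd);
    aom_usec_timer_mark(&timer);
    printf("loop filter speed: %d ms\n",
           (int)(aom_usec_timer_elapsed(&timer) / 1000));
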
@@ -408,6 +447,56 @@
       << "First failed at test case " << first_failure;
 }
 
+TEST_P(Loop8Test9Param, DISABLED_Speed) {
+  ACMRandom rnd(ACMRandom::DeterministicSeed());
+  const int count_test_block = kSpeedTestNum;
+#if CONFIG_HIGHBITDEPTH
+  DECLARE_ALIGNED(16, uint16_t, s[kNumCoeffs]);
+#else
+  DECLARE_ALIGNED(8, uint8_t, s[kNumCoeffs]);
+#endif  // CONFIG_HIGHBITDEPTH
+
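+  // Dual filters process two adjacent 8-pixel edges per call, so two
+  // independent blimit/limit/thresh sets are prepared below.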
+  uint8_t tmp = static_cast<uint8_t>(rnd(3 * MAX_LOOP_FILTER + 4));
+  DECLARE_ALIGNED(16, const uint8_t,
+                  blimit0[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                   tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
+  tmp = static_cast<uint8_t>(rnd(MAX_LOOP_FILTER));
+  DECLARE_ALIGNED(16, const uint8_t,
+                  limit0[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                  tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
+  tmp = rnd.Rand8();
+  DECLARE_ALIGNED(16, const uint8_t,
+                  thresh0[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                   tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
+  tmp = static_cast<uint8_t>(rnd(3 * MAX_LOOP_FILTER + 4));
+  DECLARE_ALIGNED(16, const uint8_t,
+                  blimit1[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                   tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
+  tmp = static_cast<uint8_t>(rnd(MAX_LOOP_FILTER));
+  DECLARE_ALIGNED(16, const uint8_t,
+                  limit1[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                  tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
+  tmp = rnd.Rand8();
+  DECLARE_ALIGNED(16, const uint8_t,
+                  thresh1[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                   tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
+  int32_t p = kNumCoeffs / 32;  // TODO(pdlf) can we have non-square here?
+  for (int j = 0; j < kNumCoeffs; ++j) {
+    s[j] = rnd.Rand16() & mask_;
+  }
+
+  for (int i = 0; i < count_test_block; ++i) {
+#if CONFIG_HIGHBITDEPTH
+    const int32_t bd = bit_depth_;
+    loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0, thresh0, blimit1, limit1,
+                   thresh1, bd);
+#else
+    loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0, thresh0, blimit1, limit1,
+                   thresh1);
+#endif  // CONFIG_HIGHBITDEPTH
+  }
+}
+
 using std::tr1::make_tuple;
 
 #if HAVE_SSE2 && (!CONFIG_PARALLEL_DEBLOCKING)
@@ -478,6 +567,30 @@
 #endif  // CONFIG_HIGHBITDEPTH
 #endif
 
+#if HAVE_AVX2 && (!CONFIG_PARALLEL_DEBLOCKING)
+#if CONFIG_HIGHBITDEPTH
+
+const loop8_param_t kHbdLoop8Test6Avx2[] = {
+  make_tuple(&aom_highbd_lpf_horizontal_edge_16_avx2,
+             &aom_highbd_lpf_horizontal_edge_16_c, 8),
+  make_tuple(&aom_highbd_lpf_horizontal_edge_16_avx2,
+             &aom_highbd_lpf_horizontal_edge_16_c, 10),
+  make_tuple(&aom_highbd_lpf_horizontal_edge_16_avx2,
+             &aom_highbd_lpf_horizontal_edge_16_c, 12),
+  make_tuple(&aom_highbd_lpf_vertical_16_dual_avx2,
+             &aom_highbd_lpf_vertical_16_dual_c, 8),
+  make_tuple(&aom_highbd_lpf_vertical_16_dual_avx2,
+             &aom_highbd_lpf_vertical_16_dual_c, 10),
+  make_tuple(&aom_highbd_lpf_vertical_16_dual_avx2,
+             &aom_highbd_lpf_vertical_16_dual_c, 12)
+};
+
+INSTANTIATE_TEST_CASE_P(AVX2, Loop8Test6Param,
+                        ::testing::ValuesIn(kHbdLoop8Test6Avx2));
+
+#endif  // CONFIG_HIGHBITDEPTH
+#endif  // HAVE_AVX2 && (!CONFIG_PARALLEL_DEBLOCKING)
+
 #if HAVE_AVX2 && (!CONFIG_HIGHBITDEPTH) && (!CONFIG_PARALLEL_DEBLOCKING)
 INSTANTIATE_TEST_CASE_P(
     AVX2, Loop8Test6Param,
@@ -531,6 +644,40 @@
 #endif  // CONFIG_HIGHBITDEPTH
 #endif
 
+#if HAVE_AVX2 && (!CONFIG_PARALLEL_DEBLOCKING)
+#if CONFIG_HIGHBITDEPTH
+const dualloop8_param_t kHbdLoop8Test9Avx2[] = {
+  make_tuple(&aom_highbd_lpf_horizontal_4_dual_avx2,
+             &aom_highbd_lpf_horizontal_4_dual_c, 8),
+  make_tuple(&aom_highbd_lpf_horizontal_4_dual_avx2,
+             &aom_highbd_lpf_horizontal_4_dual_c, 10),
+  make_tuple(&aom_highbd_lpf_horizontal_4_dual_avx2,
+             &aom_highbd_lpf_horizontal_4_dual_c, 12),
+  make_tuple(&aom_highbd_lpf_horizontal_8_dual_avx2,
+             &aom_highbd_lpf_horizontal_8_dual_c, 8),
+  make_tuple(&aom_highbd_lpf_horizontal_8_dual_avx2,
+             &aom_highbd_lpf_horizontal_8_dual_c, 10),
+  make_tuple(&aom_highbd_lpf_horizontal_8_dual_avx2,
+             &aom_highbd_lpf_horizontal_8_dual_c, 12),
+  make_tuple(&aom_highbd_lpf_vertical_4_dual_avx2,
+             &aom_highbd_lpf_vertical_4_dual_c, 8),
+  make_tuple(&aom_highbd_lpf_vertical_4_dual_avx2,
+             &aom_highbd_lpf_vertical_4_dual_c, 10),
+  make_tuple(&aom_highbd_lpf_vertical_4_dual_avx2,
+             &aom_highbd_lpf_vertical_4_dual_c, 12),
+  make_tuple(&aom_highbd_lpf_vertical_8_dual_avx2,
+             &aom_highbd_lpf_vertical_8_dual_c, 8),
+  make_tuple(&aom_highbd_lpf_vertical_8_dual_avx2,
+             &aom_highbd_lpf_vertical_8_dual_c, 10),
+  make_tuple(&aom_highbd_lpf_vertical_8_dual_avx2,
+             &aom_highbd_lpf_vertical_8_dual_c, 12),
+};
+
+INSTANTIATE_TEST_CASE_P(AVX2, Loop8Test9Param,
+                        ::testing::ValuesIn(kHbdLoop8Test9Avx2));
+#endif  // CONFIG_HIGHBITDEPTH
+#endif  // HAVE_AVX2 && (!CONFIG_PARALLEL_DEBLOCKING)
+
 #if HAVE_NEON && (!CONFIG_PARALLEL_DEBLOCKING)
 #if CONFIG_HIGHBITDEPTH
 // No neon high bitdepth functions.