Revert to old AVX2 code for better speed

Made a small change to the C code to accommodate memory initialization
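
For reference, the restored AVX2 kernels vectorize the two-tap edge
interpolation spelled out in the code comments below. A minimal scalar
sketch (illustrative only; interp_two_tap is a hypothetical helper and
not part of this patch):

  #include <stdint.h>

  /* pred = (a0 * 32 + 16 + (a1 - a0) * shift) >> 5, where a0 = above[x],
     a1 = above[x + 1] and shift is the subpel fraction in [0, 31]
     derived from the 6-bit fractional position. */
  static inline uint16_t interp_two_tap(uint16_t a0, uint16_t a1, int shift) {
    return (uint16_t)((a0 * 32 + 16 + (a1 - a0) * shift) >> 5);
  }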

Change-Id: I05b900fc14ca6f8e1d1aef675da88ec102401d9c
diff --git a/aom_dsp/x86/intrapred_avx2.c b/aom_dsp/x86/intrapred_avx2.c
index e2f802b..58789c3 100644
--- a/aom_dsp/x86/intrapred_avx2.c
+++ b/aom_dsp/x86/intrapred_avx2.c
@@ -10,6 +10,7 @@
  */
 
 #include <immintrin.h>
+
 #include "config/aom_dsp_rtcd.h"
 #include "aom_dsp/x86/intrapred_x86.h"
 #include "aom_dsp/x86/lpf_common_sse2.h"
@@ -64,6 +65,17 @@
   }
 }
 
+static DECLARE_ALIGNED(16, uint8_t, HighbdLoadMaskx[8][16]) = {
+  { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
+  { 0, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 },
+  { 0, 1, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 },
+  { 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
+  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7 },
+  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 4, 5 },
+  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3 },
+  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 },
+};
+
 static DECLARE_ALIGNED(16, uint8_t, HighbdEvenOddMaskx4[4][16]) = {
   { 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15 },
   { 0, 1, 2, 3, 6, 7, 10, 11, 14, 15, 4, 5, 8, 9, 12, 13 },
@@ -90,6 +102,37 @@
     0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 16, 17 }
 };
 
+static DECLARE_ALIGNED(32, uint16_t, HighbdBaseMask[17][16]) = {
+  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+  { 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+  { 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+  { 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+  { 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0 },
+  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0,
+    0, 0 },
+  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0,
+    0, 0, 0, 0 },
+  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0,
+    0, 0, 0, 0, 0, 0 },
+  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+    0xffff, 0, 0, 0, 0, 0, 0 },
+  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+    0xffff, 0xffff, 0, 0, 0, 0, 0 },
+  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+    0xffff, 0xffff, 0xffff, 0, 0, 0, 0 },
+  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+    0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0 },
+  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0 },
+  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0 },
+  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
+    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff }
+};
+
 static INLINE void highbd_transpose16x4_8x8_sse2(__m128i *x, __m128i *d) {
   __m128i r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15;
 
@@ -1914,857 +1957,937 @@
                                 dst + j * pitchDst + i, pitchDst);
 }
 
+static void highbd_dr_prediction_32bit_z2_Nx4_avx2(
+    int N, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
+    const uint16_t *left, int upsample_above, int upsample_left, int dx,
+    int dy) {
+  const int min_base_x = -(1 << upsample_above);
+  const int min_base_y = -(1 << upsample_left);
+  const int frac_bits_x = 6 - upsample_above;
+  const int frac_bits_y = 6 - upsample_left;
+
+  assert(dx > 0);
+  // pre-filter above pixels
+  // store in temp buffers:
+  //   above[x] * 32 + 16
+  //   above[x+1] - above[x]
+  // final pixels will be calculated as:
+  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
+  __m256i a0_x, a1_x, a32, a16;
+  __m256i diff;
+  __m128i c3f, min_base_y128;
+
+  a16 = _mm256_set1_epi32(16);
+  c3f = _mm_set1_epi32(0x3f);
+  min_base_y128 = _mm_set1_epi32(min_base_y);
+
+  for (int r = 0; r < N; r++) {
+    __m256i b, res, shift;
+    __m128i resx, resy, resxy;
+    __m128i a0_x128, a1_x128;
+    int y = r + 1;
+    int base_x = (-y * dx) >> frac_bits_x;
+    int base_shift = 0;
+    if (base_x < (min_base_x - 1)) {
+      base_shift = (min_base_x - base_x - 1) >> upsample_above;
+    }
+    int base_min_diff =
+        (min_base_x - base_x + upsample_above) >> upsample_above;
+    if (base_min_diff > 4) {
+      base_min_diff = 4;
+    } else {
+      if (base_min_diff < 0) base_min_diff = 0;
+    }
+
+    if (base_shift > 3) {
+      a0_x = _mm256_setzero_si256();
+      a1_x = _mm256_setzero_si256();
+      shift = _mm256_setzero_si256();
+    } else {
+      a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
+      if (upsample_above) {
+        a0_x128 = _mm_shuffle_epi8(a0_x128,
+                                   *(__m128i *)HighbdEvenOddMaskx4[base_shift]);
+        a1_x128 = _mm_srli_si128(a0_x128, 8);
+
+        shift = _mm256_castsi128_si256(_mm_srli_epi32(
+            _mm_and_si128(
+                _mm_slli_epi32(
+                    _mm_setr_epi32(-y * dx, (1 << 6) - y * dx,
+                                   (2 << 6) - y * dx, (3 << 6) - y * dx),
+                    upsample_above),
+                c3f),
+            1));
+      } else {
+        a0_x128 =
+            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
+        a1_x128 = _mm_srli_si128(a0_x128, 2);
+
+        shift = _mm256_castsi128_si256(_mm_srli_epi32(
+            _mm_and_si128(_mm_setr_epi32(-y * dx, (1 << 6) - y * dx,
+                                         (2 << 6) - y * dx, (3 << 6) - y * dx),
+                          c3f),
+            1));
+      }
+      a0_x = _mm256_cvtepu16_epi32(a0_x128);
+      a1_x = _mm256_cvtepu16_epi32(a1_x128);
+    }
+    // y calc
+    __m128i a0_y, a1_y, shifty;
+    if (base_x < min_base_x) {
+      __m128i r6, c1234, dy128, y_c128, base_y_c128, mask128;
+      DECLARE_ALIGNED(32, int, base_y_c[4]);
+      r6 = _mm_set1_epi32(r << 6);
+      dy128 = _mm_set1_epi32(dy);
+      c1234 = _mm_setr_epi32(1, 2, 3, 4);
+      y_c128 = _mm_sub_epi32(r6, _mm_mullo_epi32(c1234, dy128));
+      base_y_c128 = _mm_srai_epi32(y_c128, frac_bits_y);
+      mask128 = _mm_cmpgt_epi32(min_base_y128, base_y_c128);
+      base_y_c128 = _mm_andnot_si128(mask128, base_y_c128);
+      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
+
+      a0_y = _mm_setr_epi32(left[base_y_c[0]], left[base_y_c[1]],
+                            left[base_y_c[2]], left[base_y_c[3]]);
+      a1_y = _mm_setr_epi32(left[base_y_c[0] + 1], left[base_y_c[1] + 1],
+                            left[base_y_c[2] + 1], left[base_y_c[3] + 1]);
+
+      if (upsample_left) {
+        shifty = _mm_srli_epi32(
+            _mm_and_si128(_mm_slli_epi32(y_c128, upsample_left), c3f), 1);
+      } else {
+        shifty = _mm_srli_epi32(_mm_and_si128(y_c128, c3f), 1);
+      }
+      a0_x = _mm256_inserti128_si256(a0_x, a0_y, 1);
+      a1_x = _mm256_inserti128_si256(a1_x, a1_y, 1);
+      shift = _mm256_inserti128_si256(shift, shifty, 1);
+    }
+
+    diff = _mm256_sub_epi32(a1_x, a0_x);  // a[x+1] - a[x]
+    a32 = _mm256_slli_epi32(a0_x, 5);     // a[x] * 32
+    a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
+
+    b = _mm256_mullo_epi32(diff, shift);
+    res = _mm256_add_epi32(a32, b);
+    res = _mm256_srli_epi32(res, 5);
+
+    resx = _mm256_castsi256_si128(res);
+    resx = _mm_packus_epi32(resx, resx);
+
+    resy = _mm256_extracti128_si256(res, 1);
+    resy = _mm_packus_epi32(resy, resy);
+
+    resxy =
+        _mm_blendv_epi8(resx, resy, *(__m128i *)HighbdBaseMask[base_min_diff]);
+    _mm_storel_epi64((__m128i *)(dst), resxy);
+    dst += stride;
+  }
+}
+
+static void highbd_dr_prediction_z2_Nx4_avx2(
+    int N, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
+    const uint16_t *left, int upsample_above, int upsample_left, int dx,
+    int dy) {
+  const int min_base_x = -(1 << upsample_above);
+  const int min_base_y = -(1 << upsample_left);
+  const int frac_bits_x = 6 - upsample_above;
+  const int frac_bits_y = 6 - upsample_left;
+
+  assert(dx > 0);
+  // pre-filter above pixels
+  // store in temp buffers:
+  //   above[x] * 32 + 16
+  //   above[x+1] - above[x]
+  // final pixels will be calculated as:
+  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
+  __m256i a0_x, a1_x, a32, a16;
+  __m256i diff;
+  __m128i c3f, min_base_y128;
+
+  a16 = _mm256_set1_epi16(16);
+  c3f = _mm_set1_epi16(0x3f);
+  min_base_y128 = _mm_set1_epi16(min_base_y);
+
+  for (int r = 0; r < N; r++) {
+    __m256i b, res, shift;
+    __m128i resx, resy, resxy;
+    __m128i a0_x128, a1_x128;
+    int y = r + 1;
+    int base_x = (-y * dx) >> frac_bits_x;
+    int base_shift = 0;
+    if (base_x < (min_base_x - 1)) {
+      base_shift = (min_base_x - base_x - 1) >> upsample_above;
+    }
+    int base_min_diff =
+        (min_base_x - base_x + upsample_above) >> upsample_above;
+    if (base_min_diff > 4) {
+      base_min_diff = 4;
+    } else {
+      if (base_min_diff < 0) base_min_diff = 0;
+    }
+
+    if (base_shift > 3) {
+      a0_x = _mm256_setzero_si256();
+      a1_x = _mm256_setzero_si256();
+      shift = _mm256_setzero_si256();
+    } else {
+      a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
+      if (upsample_above) {
+        a0_x128 = _mm_shuffle_epi8(a0_x128,
+                                   *(__m128i *)HighbdEvenOddMaskx4[base_shift]);
+        a1_x128 = _mm_srli_si128(a0_x128, 8);
+
+        shift = _mm256_castsi128_si256(_mm_srli_epi16(
+            _mm_and_si128(
+                _mm_slli_epi16(_mm_setr_epi16(-y * dx, (1 << 6) - y * dx,
+                                              (2 << 6) - y * dx,
+                                              (3 << 6) - y * dx, 0, 0, 0, 0),
+                               upsample_above),
+                c3f),
+            1));
+      } else {
+        a0_x128 =
+            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
+        a1_x128 = _mm_srli_si128(a0_x128, 2);
+
+        shift = _mm256_castsi128_si256(_mm_srli_epi16(
+            _mm_and_si128(
+                _mm_setr_epi16(-y * dx, (1 << 6) - y * dx, (2 << 6) - y * dx,
+                               (3 << 6) - y * dx, 0, 0, 0, 0),
+                c3f),
+            1));
+      }
+      a0_x = _mm256_castsi128_si256(a0_x128);
+      a1_x = _mm256_castsi128_si256(a1_x128);
+    }
+    // y calc
+    __m128i a0_y, a1_y, shifty;
+    if (base_x < min_base_x) {
+      __m128i r6, c1234, dy128, y_c128, base_y_c128, mask128;
+      DECLARE_ALIGNED(32, int16_t, base_y_c[8]);
+      r6 = _mm_set1_epi16(r << 6);
+      dy128 = _mm_set1_epi16(dy);
+      c1234 = _mm_setr_epi16(1, 2, 3, 4, 0, 0, 0, 0);
+      y_c128 = _mm_sub_epi16(r6, _mm_mullo_epi16(c1234, dy128));
+      base_y_c128 = _mm_srai_epi16(y_c128, frac_bits_y);
+      mask128 = _mm_cmpgt_epi16(min_base_y128, base_y_c128);
+      base_y_c128 = _mm_andnot_si128(mask128, base_y_c128);
+      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
+
+      a0_y = _mm_setr_epi16(left[base_y_c[0]], left[base_y_c[1]],
+                            left[base_y_c[2]], left[base_y_c[3]], 0, 0, 0, 0);
+      a1_y = _mm_setr_epi16(left[base_y_c[0] + 1], left[base_y_c[1] + 1],
+                            left[base_y_c[2] + 1], left[base_y_c[3] + 1], 0, 0,
+                            0, 0);
+
+      if (upsample_left) {
+        shifty = _mm_srli_epi16(
+            _mm_and_si128(_mm_slli_epi16(y_c128, upsample_left), c3f), 1);
+      } else {
+        shifty = _mm_srli_epi16(_mm_and_si128(y_c128, c3f), 1);
+      }
+      a0_x = _mm256_inserti128_si256(a0_x, a0_y, 1);
+      a1_x = _mm256_inserti128_si256(a1_x, a1_y, 1);
+      shift = _mm256_inserti128_si256(shift, shifty, 1);
+    }
+
+    diff = _mm256_sub_epi16(a1_x, a0_x);  // a[x+1] - a[x]
+    a32 = _mm256_slli_epi16(a0_x, 5);     // a[x] * 32
+    a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
+
+    b = _mm256_mullo_epi16(diff, shift);
+    res = _mm256_add_epi16(a32, b);
+    res = _mm256_srli_epi16(res, 5);
+
+    resx = _mm256_castsi256_si128(res);
+    resy = _mm256_extracti128_si256(res, 1);
+    resxy =
+        _mm_blendv_epi8(resx, resy, *(__m128i *)HighbdBaseMask[base_min_diff]);
+    _mm_storel_epi64((__m128i *)(dst), resxy);
+    dst += stride;
+  }
+}
+
+static void highbd_dr_prediction_32bit_z2_Nx8_avx2(
+    int N, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
+    const uint16_t *left, int upsample_above, int upsample_left, int dx,
+    int dy) {
+  const int min_base_x = -(1 << upsample_above);
+  const int min_base_y = -(1 << upsample_left);
+  const int frac_bits_x = 6 - upsample_above;
+  const int frac_bits_y = 6 - upsample_left;
+
+  // pre-filter above pixels
+  // store in temp buffers:
+  //   above[x] * 32 + 16
+  //   above[x+1] - above[x]
+  // final pixels will be calculated as:
+  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
+  __m256i a0_x, a1_x, a0_y, a1_y, a32, a16, c3f, min_base_y256;
+  __m256i diff;
+  __m128i a0_x128, a1_x128;
+
+  a16 = _mm256_set1_epi32(16);
+  c3f = _mm256_set1_epi32(0x3f);
+  min_base_y256 = _mm256_set1_epi32(min_base_y);
+
+  for (int r = 0; r < N; r++) {
+    __m256i b, res, shift;
+    __m128i resx, resy, resxy;
+    int y = r + 1;
+    int base_x = (-y * dx) >> frac_bits_x;
+    int base_shift = 0;
+    if (base_x < (min_base_x - 1)) {
+      base_shift = (min_base_x - base_x - 1) >> upsample_above;
+    }
+    int base_min_diff =
+        (min_base_x - base_x + upsample_above) >> upsample_above;
+    if (base_min_diff > 8) {
+      base_min_diff = 8;
+    } else {
+      if (base_min_diff < 0) base_min_diff = 0;
+    }
+
+    if (base_shift > 7) {
+      resx = _mm_setzero_si128();
+    } else {
+      a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
+      if (upsample_above) {
+        __m128i mask, atmp0, atmp1, atmp2, atmp3;
+        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + 8 + base_shift));
+        atmp0 = _mm_shuffle_epi8(a0_x128,
+                                 *(__m128i *)HighbdEvenOddMaskx[base_shift]);
+        atmp1 = _mm_shuffle_epi8(a1_x128,
+                                 *(__m128i *)HighbdEvenOddMaskx[base_shift]);
+        atmp2 = _mm_shuffle_epi8(
+            a0_x128, *(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16));
+        atmp3 = _mm_shuffle_epi8(
+            a1_x128, *(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16));
+        mask = _mm_cmpgt_epi8(*(__m128i *)HighbdEvenOddMaskx[base_shift],
+                              _mm_set1_epi8(15));
+        a0_x128 = _mm_blendv_epi8(atmp0, atmp1, mask);
+        mask = _mm_cmpgt_epi8(*(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16),
+                              _mm_set1_epi8(15));
+        a1_x128 = _mm_blendv_epi8(atmp2, atmp3, mask);
+        shift = _mm256_srli_epi32(
+            _mm256_and_si256(
+                _mm256_slli_epi32(
+                    _mm256_setr_epi32(-y * dx, (1 << 6) - y * dx,
+                                      (2 << 6) - y * dx, (3 << 6) - y * dx,
+                                      (4 << 6) - y * dx, (5 << 6) - y * dx,
+                                      (6 << 6) - y * dx, (7 << 6) - y * dx),
+                    upsample_above),
+                c3f),
+            1);
+      } else {
+        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + 1 + base_shift));
+        a0_x128 =
+            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
+        a1_x128 =
+            _mm_shuffle_epi8(a1_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
+
+        shift = _mm256_srli_epi32(
+            _mm256_and_si256(
+                _mm256_setr_epi32(-y * dx, (1 << 6) - y * dx, (2 << 6) - y * dx,
+                                  (3 << 6) - y * dx, (4 << 6) - y * dx,
+                                  (5 << 6) - y * dx, (6 << 6) - y * dx,
+                                  (7 << 6) - y * dx),
+                c3f),
+            1);
+      }
+      a0_x = _mm256_cvtepu16_epi32(a0_x128);
+      a1_x = _mm256_cvtepu16_epi32(a1_x128);
+
+      diff = _mm256_sub_epi32(a1_x, a0_x);  // a[x+1] - a[x]
+      a32 = _mm256_slli_epi32(a0_x, 5);     // a[x] * 32
+      a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
+
+      b = _mm256_mullo_epi32(diff, shift);
+      res = _mm256_add_epi32(a32, b);
+      res = _mm256_srli_epi32(res, 5);
+
+      resx = _mm256_castsi256_si128(_mm256_packus_epi32(
+          res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1))));
+    }
+    // y calc
+    if (base_x < min_base_x) {
+      DECLARE_ALIGNED(32, int, base_y_c[8]);
+      __m256i r6, c256, dy256, y_c256, base_y_c256, mask256;
+      r6 = _mm256_set1_epi32(r << 6);
+      dy256 = _mm256_set1_epi32(dy);
+      c256 = _mm256_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8);
+      y_c256 = _mm256_sub_epi32(r6, _mm256_mullo_epi32(c256, dy256));
+      base_y_c256 = _mm256_srai_epi32(y_c256, frac_bits_y);
+      mask256 = _mm256_cmpgt_epi32(min_base_y256, base_y_c256);
+      base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
+      _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
+
+      a0_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
+          left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
+          left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
+          left[base_y_c[6]], left[base_y_c[7]]));
+      a1_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
+          left[base_y_c[0] + 1], left[base_y_c[1] + 1], left[base_y_c[2] + 1],
+          left[base_y_c[3] + 1], left[base_y_c[4] + 1], left[base_y_c[5] + 1],
+          left[base_y_c[6] + 1], left[base_y_c[7] + 1]));
+
+      if (upsample_left) {
+        shift = _mm256_srli_epi32(
+            _mm256_and_si256(_mm256_slli_epi32((y_c256), upsample_left), c3f),
+            1);
+      } else {
+        shift = _mm256_srli_epi32(_mm256_and_si256(y_c256, c3f), 1);
+      }
+      diff = _mm256_sub_epi32(a1_y, a0_y);  // a[x+1] - a[x]
+      a32 = _mm256_slli_epi32(a0_y, 5);     // a[x] * 32
+      a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
+
+      b = _mm256_mullo_epi32(diff, shift);
+      res = _mm256_add_epi32(a32, b);
+      res = _mm256_srli_epi32(res, 5);
+
+      resy = _mm256_castsi256_si128(_mm256_packus_epi32(
+          res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1))));
+    } else {
+      resy = resx;
+    }
+    resxy =
+        _mm_blendv_epi8(resx, resy, *(__m128i *)HighbdBaseMask[base_min_diff]);
+    _mm_storeu_si128((__m128i *)(dst), resxy);
+    dst += stride;
+  }
+}
+
+static void highbd_dr_prediction_z2_Nx8_avx2(
+    int N, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
+    const uint16_t *left, int upsample_above, int upsample_left, int dx,
+    int dy) {
+  const int min_base_x = -(1 << upsample_above);
+  const int min_base_y = -(1 << upsample_left);
+  const int frac_bits_x = 6 - upsample_above;
+  const int frac_bits_y = 6 - upsample_left;
+
+  // pre-filter above pixels
+  // store in temp buffers:
+  //   above[x] * 32 + 16
+  //   above[x+1] - above[x]
+  // final pixels will be calculated as:
+  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
+  __m128i c3f, min_base_y128;
+  __m256i a0_x, a1_x, diff, a32, a16;
+  __m128i a0_x128, a1_x128;
+
+  a16 = _mm256_set1_epi16(16);
+  c3f = _mm_set1_epi16(0x3f);
+  min_base_y128 = _mm_set1_epi16(min_base_y);
+
+  for (int r = 0; r < N; r++) {
+    __m256i b, res, shift;
+    __m128i resx, resy, resxy;
+    int y = r + 1;
+    int base_x = (-y * dx) >> frac_bits_x;
+    int base_shift = 0;
+    if (base_x < (min_base_x - 1)) {
+      base_shift = (min_base_x - base_x - 1) >> upsample_above;
+    }
+    int base_min_diff =
+        (min_base_x - base_x + upsample_above) >> upsample_above;
+    if (base_min_diff > 8) {
+      base_min_diff = 8;
+    } else {
+      if (base_min_diff < 0) base_min_diff = 0;
+    }
+
+    if (base_shift > 7) {
+      a0_x = _mm256_setzero_si256();
+      a1_x = _mm256_setzero_si256();
+      shift = _mm256_setzero_si256();
+    } else {
+      a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
+      if (upsample_above) {
+        __m128i mask, atmp0, atmp1, atmp2, atmp3;
+        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + 8 + base_shift));
+        atmp0 = _mm_shuffle_epi8(a0_x128,
+                                 *(__m128i *)HighbdEvenOddMaskx[base_shift]);
+        atmp1 = _mm_shuffle_epi8(a1_x128,
+                                 *(__m128i *)HighbdEvenOddMaskx[base_shift]);
+        atmp2 = _mm_shuffle_epi8(
+            a0_x128, *(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16));
+        atmp3 = _mm_shuffle_epi8(
+            a1_x128, *(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16));
+        mask = _mm_cmpgt_epi8(*(__m128i *)HighbdEvenOddMaskx[base_shift],
+                              _mm_set1_epi8(15));
+        a0_x128 = _mm_blendv_epi8(atmp0, atmp1, mask);
+        mask = _mm_cmpgt_epi8(*(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16),
+                              _mm_set1_epi8(15));
+        a1_x128 = _mm_blendv_epi8(atmp2, atmp3, mask);
+
+        shift = _mm256_castsi128_si256(_mm_srli_epi16(
+            _mm_and_si128(
+                _mm_slli_epi16(
+                    _mm_setr_epi16(-y * dx, (1 << 6) - y * dx,
+                                   (2 << 6) - y * dx, (3 << 6) - y * dx,
+                                   (4 << 6) - y * dx, (5 << 6) - y * dx,
+                                   (6 << 6) - y * dx, (7 << 6) - y * dx),
+                    upsample_above),
+                c3f),
+            1));
+      } else {
+        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + 1 + base_shift));
+        a0_x128 =
+            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
+        a1_x128 =
+            _mm_shuffle_epi8(a1_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
+
+        shift = _mm256_castsi128_si256(_mm_srli_epi16(
+            _mm_and_si128(_mm_setr_epi16(-y * dx, (1 << 6) - y * dx,
+                                         (2 << 6) - y * dx, (3 << 6) - y * dx,
+                                         (4 << 6) - y * dx, (5 << 6) - y * dx,
+                                         (6 << 6) - y * dx, (7 << 6) - y * dx),
+                          c3f),
+            1));
+      }
+      a0_x = _mm256_castsi128_si256(a0_x128);
+      a1_x = _mm256_castsi128_si256(a1_x128);
+    }
+
+    // y calc
+    __m128i a0_y, a1_y, shifty;
+    if (base_x < min_base_x) {
+      DECLARE_ALIGNED(32, int16_t, base_y_c[8]);
+      __m128i r6, c1234, dy128, y_c128, base_y_c128, mask128;
+      r6 = _mm_set1_epi16(r << 6);
+      dy128 = _mm_set1_epi16(dy);
+      c1234 = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8);
+      y_c128 = _mm_sub_epi16(r6, _mm_mullo_epi16(c1234, dy128));
+      base_y_c128 = _mm_srai_epi16(y_c128, frac_bits_y);
+      mask128 = _mm_cmpgt_epi16(min_base_y128, base_y_c128);
+      base_y_c128 = _mm_andnot_si128(mask128, base_y_c128);
+      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
+
+      a0_y = _mm_setr_epi16(left[base_y_c[0]], left[base_y_c[1]],
+                            left[base_y_c[2]], left[base_y_c[3]],
+                            left[base_y_c[4]], left[base_y_c[5]],
+                            left[base_y_c[6]], left[base_y_c[7]]);
+      a1_y = _mm_setr_epi16(left[base_y_c[0] + 1], left[base_y_c[1] + 1],
+                            left[base_y_c[2] + 1], left[base_y_c[3] + 1],
+                            left[base_y_c[4] + 1], left[base_y_c[5] + 1],
+                            left[base_y_c[6] + 1], left[base_y_c[7] + 1]);
+
+      if (upsample_left) {
+        shifty = _mm_srli_epi16(
+            _mm_and_si128(_mm_slli_epi16((y_c128), upsample_left), c3f), 1);
+      } else {
+        shifty = _mm_srli_epi16(_mm_and_si128(y_c128, c3f), 1);
+      }
+      a0_x = _mm256_inserti128_si256(a0_x, a0_y, 1);
+      a1_x = _mm256_inserti128_si256(a1_x, a1_y, 1);
+      shift = _mm256_inserti128_si256(shift, shifty, 1);
+    }
+
+    diff = _mm256_sub_epi16(a1_x, a0_x);  // a[x+1] - a[x]
+    a32 = _mm256_slli_epi16(a0_x, 5);     // a[x] * 32
+    a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
+
+    b = _mm256_mullo_epi16(diff, shift);
+    res = _mm256_add_epi16(a32, b);
+    res = _mm256_srli_epi16(res, 5);
+
+    resx = _mm256_castsi256_si128(res);
+    resy = _mm256_extracti128_si256(res, 1);
+
+    resxy =
+        _mm_blendv_epi8(resx, resy, *(__m128i *)HighbdBaseMask[base_min_diff]);
+    _mm_storeu_si128((__m128i *)(dst), resxy);
+    dst += stride;
+  }
+}
+
+static void highbd_dr_prediction_32bit_z2_HxW_avx2(
+    int H, int W, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
+    const uint16_t *left, int upsample_above, int upsample_left, int dx,
+    int dy) {
+  // here upsample_above and upsample_left are 0 by design of
+  // av1_use_intra_edge_upsample
+  const int min_base_x = -1;
+  const int min_base_y = -1;
+  (void)upsample_above;
+  (void)upsample_left;
+  const int frac_bits_x = 6;
+  const int frac_bits_y = 6;
+
+  // pre-filter above pixels
+  // store in temp buffers:
+  //   above[x] * 32 + 16
+  //   above[x+1] - above[x]
+  // final pixels will be calculated as:
+  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
+  __m256i a0_x, a1_x, a0_y, a1_y, a32, a0_1_x, a1_1_x, a16, c1;
+  __m256i diff, min_base_y256, c3f, dy256, c1234, c0123, c8;
+  __m128i a0_x128, a1_x128, a0_1_x128, a1_1_x128;
+  DECLARE_ALIGNED(32, int, base_y_c[16]);
+
+  a16 = _mm256_set1_epi32(16);
+  c1 = _mm256_srli_epi32(a16, 4);
+  c8 = _mm256_srli_epi32(a16, 1);
+  min_base_y256 = _mm256_set1_epi16(min_base_y);
+  c3f = _mm256_set1_epi32(0x3f);
+  dy256 = _mm256_set1_epi32(dy);
+  c0123 = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7);
+  c1234 = _mm256_add_epi32(c0123, c1);
+
+  for (int r = 0; r < H; r++) {
+    __m256i b, res, shift, ydx;
+    __m256i resx[2], resy[2];
+    __m256i resxy, j256, r6;
+    for (int j = 0; j < W; j += 16) {
+      j256 = _mm256_set1_epi32(j);
+      int y = r + 1;
+      ydx = _mm256_set1_epi32(y * dx);
+
+      int base_x = ((j << 6) - y * dx) >> frac_bits_x;
+      int base_shift = 0;
+      if ((base_x) < (min_base_x - 1)) {
+        base_shift = (min_base_x - base_x - 1);
+      }
+      int base_min_diff = (min_base_x - base_x);
+      if (base_min_diff > 16) {
+        base_min_diff = 16;
+      } else {
+        if (base_min_diff < 0) base_min_diff = 0;
+      }
+
+      if (base_shift > 7) {
+        resx[0] = _mm256_setzero_si256();
+      } else {
+        a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
+        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift + 1));
+        a0_x128 =
+            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
+        a1_x128 =
+            _mm_shuffle_epi8(a1_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
+
+        a0_x = _mm256_cvtepu16_epi32(a0_x128);
+        a1_x = _mm256_cvtepu16_epi32(a1_x128);
+
+        r6 = _mm256_slli_epi32(_mm256_add_epi32(c0123, j256), 6);
+        shift = _mm256_srli_epi32(
+            _mm256_and_si256(_mm256_sub_epi32(r6, ydx), c3f), 1);
+
+        diff = _mm256_sub_epi32(a1_x, a0_x);  // a[x+1] - a[x]
+        a32 = _mm256_slli_epi32(a0_x, 5);     // a[x] * 32
+        a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
+
+        b = _mm256_mullo_epi32(diff, shift);
+        res = _mm256_add_epi32(a32, b);
+        res = _mm256_srli_epi32(res, 5);
+
+        resx[0] = _mm256_packus_epi32(
+            res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1)));
+      }
+      int base_shift8 = 0;
+      if ((base_x + 8) < (min_base_x - 1)) {
+        base_shift8 = (min_base_x - (base_x + 8) - 1);
+      }
+      if (base_shift8 > 7) {
+        resx[1] = _mm256_setzero_si256();
+      } else {
+        a0_1_x128 =
+            _mm_loadu_si128((__m128i *)(above + base_x + base_shift8 + 8));
+        a1_1_x128 =
+            _mm_loadu_si128((__m128i *)(above + base_x + base_shift8 + 9));
+        a0_1_x128 = _mm_shuffle_epi8(a0_1_x128,
+                                     *(__m128i *)HighbdLoadMaskx[base_shift8]);
+        a1_1_x128 = _mm_shuffle_epi8(a1_1_x128,
+                                     *(__m128i *)HighbdLoadMaskx[base_shift8]);
+
+        a0_1_x = _mm256_cvtepu16_epi32(a0_1_x128);
+        a1_1_x = _mm256_cvtepu16_epi32(a1_1_x128);
+
+        r6 = _mm256_slli_epi32(
+            _mm256_add_epi32(c0123, _mm256_add_epi32(j256, c8)), 6);
+        shift = _mm256_srli_epi32(
+            _mm256_and_si256(_mm256_sub_epi32(r6, ydx), c3f), 1);
+
+        diff = _mm256_sub_epi32(a1_1_x, a0_1_x);  // a[x+1] - a[x]
+        a32 = _mm256_slli_epi32(a0_1_x, 5);       // a[x] * 32
+        a32 = _mm256_add_epi32(a32, a16);         // a[x] * 32 + 16
+        b = _mm256_mullo_epi32(diff, shift);
+
+        resx[1] = _mm256_add_epi32(a32, b);
+        resx[1] = _mm256_srli_epi32(resx[1], 5);
+        resx[1] = _mm256_packus_epi32(
+            resx[1],
+            _mm256_castsi128_si256(_mm256_extracti128_si256(resx[1], 1)));
+      }
+      resx[0] =
+          _mm256_inserti128_si256(resx[0], _mm256_castsi256_si128(resx[1]),
+                                  1);  // 16 16bit values
+
+      // y calc
+      resy[0] = _mm256_setzero_si256();
+      if ((base_x < min_base_x)) {
+        __m256i c256, y_c256, y_c_1_256, base_y_c256, mask256;
+        r6 = _mm256_set1_epi32(r << 6);
+        c256 = _mm256_add_epi32(j256, c1234);
+        y_c256 = _mm256_sub_epi32(r6, _mm256_mullo_epi32(c256, dy256));
+        base_y_c256 = _mm256_srai_epi32(y_c256, frac_bits_y);
+        mask256 = _mm256_cmpgt_epi32(min_base_y256, base_y_c256);
+        base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
+        _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
+        c256 = _mm256_add_epi32(c256, c8);
+        y_c_1_256 = _mm256_sub_epi32(r6, _mm256_mullo_epi32(c256, dy256));
+        base_y_c256 = _mm256_srai_epi32(y_c_1_256, frac_bits_y);
+        mask256 = _mm256_cmpgt_epi32(min_base_y256, base_y_c256);
+        base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
+        _mm256_store_si256((__m256i *)(base_y_c + 8), base_y_c256);
+
+        a0_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
+            left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
+            left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
+            left[base_y_c[6]], left[base_y_c[7]]));
+        a1_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
+            left[base_y_c[0] + 1], left[base_y_c[1] + 1], left[base_y_c[2] + 1],
+            left[base_y_c[3] + 1], left[base_y_c[4] + 1], left[base_y_c[5] + 1],
+            left[base_y_c[6] + 1], left[base_y_c[7] + 1]));
+
+        shift = _mm256_srli_epi32(_mm256_and_si256(y_c256, c3f), 1);
+
+        diff = _mm256_sub_epi32(a1_y, a0_y);  // a[x+1] - a[x]
+        a32 = _mm256_slli_epi32(a0_y, 5);     // a[x] * 32
+        a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
+
+        b = _mm256_mullo_epi32(diff, shift);
+        res = _mm256_add_epi32(a32, b);
+        res = _mm256_srli_epi32(res, 5);
+
+        resy[0] = _mm256_packus_epi32(
+            res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1)));
+
+        a0_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
+            left[base_y_c[8]], left[base_y_c[9]], left[base_y_c[10]],
+            left[base_y_c[11]], left[base_y_c[12]], left[base_y_c[13]],
+            left[base_y_c[14]], left[base_y_c[15]]));
+        a1_y = _mm256_cvtepu16_epi32(
+            _mm_setr_epi16(left[base_y_c[8] + 1], left[base_y_c[9] + 1],
+                           left[base_y_c[10] + 1], left[base_y_c[11] + 1],
+                           left[base_y_c[12] + 1], left[base_y_c[13] + 1],
+                           left[base_y_c[14] + 1], left[base_y_c[15] + 1]));
+        shift = _mm256_srli_epi32(_mm256_and_si256(y_c_1_256, c3f), 1);
+
+        diff = _mm256_sub_epi32(a1_y, a0_y);  // a[x+1] - a[x]
+        a32 = _mm256_slli_epi32(a0_y, 5);     // a[x] * 32
+        a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
+
+        b = _mm256_mullo_epi32(diff, shift);
+        res = _mm256_add_epi32(a32, b);
+        res = _mm256_srli_epi32(res, 5);
+
+        resy[1] = _mm256_packus_epi32(
+            res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1)));
+
+        resy[0] =
+            _mm256_inserti128_si256(resy[0], _mm256_castsi256_si128(resy[1]),
+                                    1);  // 16 16bit values
+      }
+
+      resxy = _mm256_blendv_epi8(resx[0], resy[0],
+                                 *(__m256i *)HighbdBaseMask[base_min_diff]);
+      _mm256_storeu_si256((__m256i *)(dst + j), resxy);
+    }  // for j
+    dst += stride;
+  }
+}
+
+static void highbd_dr_prediction_z2_HxW_avx2(
+    int H, int W, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
+    const uint16_t *left, int upsample_above, int upsample_left, int dx,
+    int dy) {
+  // here upsample_above and upsample_left are 0 by design of
+  // av1_use_intra_edge_upsample
+  const int min_base_x = -1;
+  const int min_base_y = -1;
+  (void)upsample_above;
+  (void)upsample_left;
+  const int frac_bits_x = 6;
+  const int frac_bits_y = 6;
+
+  // pre-filter above pixels
+  // store in temp buffers:
+  //   above[x] * 32 + 16
+  //   above[x+1] - above[x]
+  // final pixels will be calculated as:
+  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
+  __m256i a0_x, a1_x, a32, a16, c3f, c1;
+  __m256i diff, min_base_y256, dy256, c1234, c0123;
+  DECLARE_ALIGNED(32, int16_t, base_y_c[16]);
+
+  a16 = _mm256_set1_epi16(16);
+  c1 = _mm256_srli_epi16(a16, 4);
+  min_base_y256 = _mm256_set1_epi16(min_base_y);
+  c3f = _mm256_set1_epi16(0x3f);
+  dy256 = _mm256_set1_epi16(dy);
+  c0123 =
+      _mm256_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+  c1234 = _mm256_add_epi16(c0123, c1);
+
+  for (int r = 0; r < H; r++) {
+    __m256i b, res, shift;
+    __m256i resx, resy, ydx;
+    __m256i resxy, j256, r6;
+    __m128i a0_x128, a1_x128, a0_1_x128, a1_1_x128;
+    int y = r + 1;
+    ydx = _mm256_set1_epi16((short)(y * dx));
+
+    for (int j = 0; j < W; j += 16) {
+      j256 = _mm256_set1_epi16(j);
+      int base_x = ((j << 6) - y * dx) >> frac_bits_x;
+      int base_shift = 0;
+      if ((base_x) < (min_base_x - 1)) {
+        base_shift = (min_base_x - (base_x)-1);
+      }
+      int base_min_diff = (min_base_x - base_x);
+      if (base_min_diff > 16) {
+        base_min_diff = 16;
+      } else {
+        if (base_min_diff < 0) base_min_diff = 0;
+      }
+
+      if (base_shift < 8) {
+        a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
+        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift + 1));
+        a0_x128 =
+            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
+        a1_x128 =
+            _mm_shuffle_epi8(a1_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
+
+        a0_x = _mm256_castsi128_si256(a0_x128);
+        a1_x = _mm256_castsi128_si256(a1_x128);
+      } else {
+        a0_x = _mm256_setzero_si256();
+        a1_x = _mm256_setzero_si256();
+      }
+
+      int base_shift1 = 0;
+      if (base_shift > 8) {
+        base_shift1 = base_shift - 8;
+      }
+      if (base_shift1 < 8) {
+        a0_1_x128 =
+            _mm_loadu_si128((__m128i *)(above + base_x + base_shift1 + 8));
+        a1_1_x128 =
+            _mm_loadu_si128((__m128i *)(above + base_x + base_shift1 + 9));
+        a0_1_x128 = _mm_shuffle_epi8(a0_1_x128,
+                                     *(__m128i *)HighbdLoadMaskx[base_shift1]);
+        a1_1_x128 = _mm_shuffle_epi8(a1_1_x128,
+                                     *(__m128i *)HighbdLoadMaskx[base_shift1]);
+
+        a0_x = _mm256_inserti128_si256(a0_x, a0_1_x128, 1);
+        a1_x = _mm256_inserti128_si256(a1_x, a1_1_x128, 1);
+      }
+      r6 = _mm256_slli_epi16(_mm256_add_epi16(c0123, j256), 6);
+      shift = _mm256_srli_epi16(
+          _mm256_and_si256(_mm256_sub_epi16(r6, ydx), c3f), 1);
+
+      diff = _mm256_sub_epi16(a1_x, a0_x);  // a[x+1] - a[x]
+      a32 = _mm256_slli_epi16(a0_x, 5);     // a[x] * 32
+      a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
+
+      b = _mm256_mullo_epi16(diff, shift);
+      res = _mm256_add_epi16(a32, b);
+      resx = _mm256_srli_epi16(res, 5);  // 16 16-bit values
+
+      // y calc
+      resy = _mm256_setzero_si256();
+      __m256i a0_y, a1_y, shifty;
+      if ((base_x < min_base_x)) {
+        __m256i c256, y_c256, base_y_c256, mask256, mul16;
+        r6 = _mm256_set1_epi16(r << 6);
+        c256 = _mm256_add_epi16(j256, c1234);
+        mul16 = _mm256_min_epu16(_mm256_mullo_epi16(c256, dy256),
+                                 _mm256_srli_epi16(min_base_y256, 1));
+        y_c256 = _mm256_sub_epi16(r6, mul16);
+        base_y_c256 = _mm256_srai_epi16(y_c256, frac_bits_y);
+        mask256 = _mm256_cmpgt_epi16(min_base_y256, base_y_c256);
+        base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
+        _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
+
+        a0_y = _mm256_setr_epi16(
+            left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
+            left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
+            left[base_y_c[6]], left[base_y_c[7]], left[base_y_c[8]],
+            left[base_y_c[9]], left[base_y_c[10]], left[base_y_c[11]],
+            left[base_y_c[12]], left[base_y_c[13]], left[base_y_c[14]],
+            left[base_y_c[15]]);
+        base_y_c256 = _mm256_add_epi16(base_y_c256, c1);
+        _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
+
+        a1_y = _mm256_setr_epi16(
+            left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
+            left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
+            left[base_y_c[6]], left[base_y_c[7]], left[base_y_c[8]],
+            left[base_y_c[9]], left[base_y_c[10]], left[base_y_c[11]],
+            left[base_y_c[12]], left[base_y_c[13]], left[base_y_c[14]],
+            left[base_y_c[15]]);
+
+        shifty = _mm256_srli_epi16(_mm256_and_si256(y_c256, c3f), 1);
+
+        diff = _mm256_sub_epi16(a1_y, a0_y);  // a[x+1] - a[x]
+        a32 = _mm256_slli_epi16(a0_y, 5);     // a[x] * 32
+        a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
+
+        b = _mm256_mullo_epi16(diff, shifty);
+        res = _mm256_add_epi16(a32, b);
+        resy = _mm256_srli_epi16(res, 5);
+      }
+
+      resxy = _mm256_blendv_epi8(resx, resy,
+                                 *(__m256i *)HighbdBaseMask[base_min_diff]);
+      _mm256_storeu_si256((__m256i *)(dst + j), resxy);
+    }  // for j
+    dst += stride;
+  }
+}
+
 // Directional prediction, zone 2: 90 < angle < 180
 void av1_highbd_dr_prediction_z2_avx2(uint16_t *dst, ptrdiff_t stride, int bw,
                                       int bh, const uint16_t *above,
                                       const uint16_t *left, int upsample_above,
                                       int upsample_left, int dx, int dy,
                                       int bd) {
-  const int min_base_x = -(1 << upsample_above);
-  const int min_base_y = -(1 << upsample_left);
-  const int frac_bits_x = 6 - upsample_above;
-  const int frac_bits_y = 6 - upsample_left;
-  (void)min_base_y;
   (void)bd;
   assert(dx > 0);
   assert(dy > 0);
-
-  if (bd < 12) {
-    __m256i c, r, cinc, rinc, r1;
-    int16_t base_x[16];
-    int16_t inc = 1;
-
-    __m256i minbasex = _mm256_set1_epi16((int16_t)min_base_x);
-    __m128i fracx = _mm_insert_epi16(_mm_set1_epi16(0), (int)frac_bits_x, 0);
-    __m128i fracy = _mm_insert_epi16(_mm_set1_epi16(0), (int)frac_bits_y, 0);
-    r1 = _mm256_set1_epi16(1);
-    __m256i vdx = _mm256_set1_epi16(dx);
-    __m256i vdy = _mm256_set1_epi16(dy);
-    __m256i add16 = _mm256_set1_epi16(16);
-    __m256i and3f = _mm256_set1_epi16(0x3f);
-    __m256i maskinv = _mm256_set1_epi16(-1);
-    __m256i addup = _mm256_set1_epi16((int16_t)-min_base_x);
-    __m256i addleft = _mm256_set1_epi16((int16_t)-min_base_y);
-    __m128i a16zeros = _mm_set1_epi16(0);
-    __m256i azeros = _mm256_set1_epi16(0);
-    switch (bw) {
-      case 4:
-        c = _mm256_set_epi16(3, 2, 1, 0, 3, 2, 1, 0, 3, 2, 1, 0, 3, 2, 1, 0);
-        r = _mm256_set_epi16(-4, -4, -4, -4, -3, -3, -3, -3, -2, -2, -2, -2, -1,
-                             -1, -1, -1);
-        rinc = _mm256_set1_epi16(4);
-        for (int i = 0; i < bh; i += 4) {
-          r = _mm256_add_epi16(r, rinc);
-
-          __m256i y = _mm256_add_epi16(r, r1);
-          __m256i basex = _mm256_slli_epi16(c, 6);
-          __m256i x = _mm256_sub_epi16(basex, _mm256_mullo_epi16(y, vdx));
-          basex = _mm256_sra_epi16(x, fracx);
-          __m256i keq = _mm256_cmpeq_epi16(basex, minbasex);
-          __m256i kgt = _mm256_cmpgt_epi16(basex, minbasex);
-          __m256i kmask = _mm256_or_si256(keq, kgt);
-          basex = _mm256_and_si256(basex, kmask);
-          _mm256_storeu_si256((__m256i *)base_x, basex);
-
-          __m256i basexm = _mm256_and_si256(
-              kmask,
-              _mm256_setr_epi16(
-                  (uint16_t)above[base_x[0]], (uint16_t)above[base_x[1]],
-                  (uint16_t)above[base_x[2]], (uint16_t)above[base_x[3]],
-                  (uint16_t)above[base_x[4]], (uint16_t)above[base_x[5]],
-                  (uint16_t)above[base_x[6]], (uint16_t)above[base_x[7]],
-                  (uint16_t)above[base_x[8]], (uint16_t)above[base_x[9]],
-                  (uint16_t)above[base_x[10]], (uint16_t)above[base_x[11]],
-                  (uint16_t)above[base_x[12]], (uint16_t)above[base_x[13]],
-                  (uint16_t)above[base_x[14]], (uint16_t)above[base_x[15]]));
-          __m256i basex1m = _mm256_and_si256(
-              kmask, _mm256_setr_epi16((uint16_t)above[base_x[0] + 1],
-                                       (uint16_t)above[base_x[1] + 1],
-                                       (uint16_t)above[base_x[2] + 1],
-                                       (uint16_t)above[base_x[3] + 1],
-                                       (uint16_t)above[base_x[4] + 1],
-                                       (uint16_t)above[base_x[5] + 1],
-                                       (uint16_t)above[base_x[6] + 1],
-                                       (uint16_t)above[base_x[7] + 1],
-                                       (uint16_t)above[base_x[8] + 1],
-                                       (uint16_t)above[base_x[9] + 1],
-                                       (uint16_t)above[base_x[10] + 1],
-                                       (uint16_t)above[base_x[11] + 1],
-                                       (uint16_t)above[base_x[12] + 1],
-                                       (uint16_t)above[base_x[13] + 1],
-                                       (uint16_t)above[base_x[14] + 1],
-                                       (uint16_t)above[base_x[15] + 1]));
-
-          __m256i shiftx = _mm256_and_si256(
-              kmask,
-              _mm256_srai_epi16(
-                  _mm256_and_si256(_mm256_mullo_epi16(x, addup), and3f), 1));
-          __m256i shiftxc = _mm256_and_si256(
-              kmask, _mm256_sub_epi16(_mm256_set1_epi16(32), shiftx));
-
-          __m256i kmaskinv = _mm256_xor_si256(kmask, maskinv);
-
-          x = _mm256_add_epi16(c, r1);
-          __m256i basey = _mm256_slli_epi16(r, 6);
-          y = _mm256_sub_epi16(basey, _mm256_mullo_epi16(x, vdy));
-          basey = _mm256_sra_epi16(y, fracy);
-          basey = _mm256_and_si256(basey, kmaskinv);
-          _mm256_storeu_si256((__m256i *)base_x, basey);
-
-          __m256i baseym = _mm256_and_si256(
-              kmaskinv,
-              _mm256_setr_epi16(
-                  (uint16_t)left[base_x[0]], (uint16_t)left[base_x[1]],
-                  (uint16_t)left[base_x[2]], (uint16_t)left[base_x[3]],
-                  (uint16_t)left[base_x[4]], (uint16_t)left[base_x[5]],
-                  (uint16_t)left[base_x[6]], (uint16_t)left[base_x[7]],
-                  (uint16_t)left[base_x[8]], (uint16_t)left[base_x[9]],
-                  (uint16_t)left[base_x[10]], (uint16_t)left[base_x[11]],
-                  (uint16_t)left[base_x[12]], (uint16_t)left[base_x[13]],
-                  (uint16_t)left[base_x[14]], (uint16_t)left[base_x[15]]));
-          __m256i basey1m = _mm256_and_si256(
-              kmaskinv,
-              _mm256_setr_epi16(
-                  (uint16_t)left[base_x[0] + 1], (uint16_t)left[base_x[1] + 1],
-                  (uint16_t)left[base_x[2] + 1], (uint16_t)left[base_x[3] + 1],
-                  (uint16_t)left[base_x[4] + 1], (uint16_t)left[base_x[5] + 1],
-                  (uint16_t)left[base_x[6] + 1], (uint16_t)left[base_x[7] + 1],
-                  (uint16_t)left[base_x[8] + 1], (uint16_t)left[base_x[9] + 1],
-                  (uint16_t)left[base_x[10] + 1],
-                  (uint16_t)left[base_x[11] + 1],
-                  (uint16_t)left[base_x[12] + 1],
-                  (uint16_t)left[base_x[13] + 1],
-                  (uint16_t)left[base_x[14] + 1],
-                  (uint16_t)left[base_x[15] + 1]));
-
-          __m256i shifty = _mm256_and_si256(
-              kmaskinv,
-              _mm256_srai_epi16(
-                  _mm256_and_si256(_mm256_mullo_epi16(y, addleft), and3f), 1));
-          __m256i shiftyc = _mm256_and_si256(
-              kmaskinv, _mm256_sub_epi16(_mm256_set1_epi16(32), shifty));
-
-          __m256i basexy = _mm256_or_si256(basexm, baseym);
-          __m256i basexy1 = _mm256_or_si256(basex1m, basey1m);
-
-          __m256i shiftxy = _mm256_or_si256(shiftx, shifty);
-          __m256i shiftxyc = _mm256_or_si256(shiftxc, shiftyc);
-
-          __m256i val1 = _mm256_mullo_epi16(basexy, shiftxyc);
-          __m256i val2 = _mm256_mullo_epi16(basexy1, shiftxy);
-
-          __m256i valsum = _mm256_add_epi16(val1, val2);
-
-          __m256i val3 = _mm256_srai_epi16(_mm256_add_epi16(valsum, add16), 5);
-
-          __m128i lowval = _mm256_extracti128_si256(val3, 0);
-          __m128i highval = _mm256_extracti128_si256(val3, 1);
-
-          __m128i lowvalh = _mm_unpackhi_epi64(lowval, a16zeros);
-          __m128i highvalh = _mm_unpackhi_epi64(highval, a16zeros);
-
-          _mm_storel_epi64((__m128i *)dst, highvalh);
-          dst += stride;
-          _mm_storel_epi64((__m128i *)dst, highval);
-          dst += stride;
-          _mm_storel_epi64((__m128i *)dst, lowvalh);
-          dst += stride;
-          _mm_storel_epi64((__m128i *)dst, lowval);
-          dst += stride;
-        }
-
-        break;
-      case 8:
-        c = _mm256_set_epi16(7, 6, 5, 4, 3, 2, 1, 0, 7, 6, 5, 4, 3, 2, 1, 0);
-        rinc = _mm256_set1_epi16(2);
-        r = _mm256_set_epi16(-2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1,
-                             -1, -1, -1);
-        for (int i = 0; i < bh; i += 2) {
-          r = _mm256_add_epi16(r, rinc);
-
-          __m256i y = _mm256_add_epi16(r, r1);
-          __m256i basex = _mm256_slli_epi16(c, 6);
-          __m256i x = _mm256_sub_epi16(basex, _mm256_mullo_epi16(y, vdx));
-          basex = _mm256_sra_epi16(x, fracx);
-          __m256i keq = _mm256_cmpeq_epi16(basex, minbasex);
-          __m256i kgt = _mm256_cmpgt_epi16(basex, minbasex);
-          __m256i kmask = _mm256_or_si256(keq, kgt);
-          basex = _mm256_and_si256(basex, kmask);
-          _mm256_storeu_si256((__m256i *)base_x, basex);
-
-          __m256i basexm = _mm256_and_si256(
-              kmask,
-              _mm256_setr_epi16(
-                  (uint16_t)above[base_x[0]], (uint16_t)above[base_x[1]],
-                  (uint16_t)above[base_x[2]], (uint16_t)above[base_x[3]],
-                  (uint16_t)above[base_x[4]], (uint16_t)above[base_x[5]],
-                  (uint16_t)above[base_x[6]], (uint16_t)above[base_x[7]],
-                  (uint16_t)above[base_x[8]], (uint16_t)above[base_x[9]],
-                  (uint16_t)above[base_x[10]], (uint16_t)above[base_x[11]],
-                  (uint16_t)above[base_x[12]], (uint16_t)above[base_x[13]],
-                  (uint16_t)above[base_x[14]], (uint16_t)above[base_x[15]]));
-          __m256i basex1m = _mm256_and_si256(
-              kmask, _mm256_setr_epi16((uint16_t)above[base_x[0] + 1],
-                                       (uint16_t)above[base_x[1] + 1],
-                                       (uint16_t)above[base_x[2] + 1],
-                                       (uint16_t)above[base_x[3] + 1],
-                                       (uint16_t)above[base_x[4] + 1],
-                                       (uint16_t)above[base_x[5] + 1],
-                                       (uint16_t)above[base_x[6] + 1],
-                                       (uint16_t)above[base_x[7] + 1],
-                                       (uint16_t)above[base_x[8] + 1],
-                                       (uint16_t)above[base_x[9] + 1],
-                                       (uint16_t)above[base_x[10] + 1],
-                                       (uint16_t)above[base_x[11] + 1],
-                                       (uint16_t)above[base_x[12] + 1],
-                                       (uint16_t)above[base_x[13] + 1],
-                                       (uint16_t)above[base_x[14] + 1],
-                                       (uint16_t)above[base_x[15] + 1]));
-
-          __m256i shiftx = _mm256_and_si256(
-              kmask,
-              _mm256_srai_epi16(
-                  _mm256_and_si256(_mm256_mullo_epi16(x, addup), and3f), 1));
-          __m256i shiftxc = _mm256_and_si256(
-              kmask, _mm256_sub_epi16(_mm256_set1_epi16(32), shiftx));
-
-          __m256i kmaskinv = _mm256_xor_si256(kmask, maskinv);
-
-          x = _mm256_add_epi16(c, r1);
-          __m256i basey = _mm256_slli_epi16(r, 6);
-          y = _mm256_sub_epi16(basey, _mm256_mullo_epi16(x, vdy));
-          basey = _mm256_sra_epi16(y, fracy);
-          basey = _mm256_and_si256(basey, kmaskinv);
-          _mm256_storeu_si256((__m256i *)base_x, basey);
-
-          __m256i baseym = _mm256_and_si256(
-              kmaskinv,
-              _mm256_setr_epi16(
-                  (uint16_t)left[base_x[0]], (uint16_t)left[base_x[1]],
-                  (uint16_t)left[base_x[2]], (uint16_t)left[base_x[3]],
-                  (uint16_t)left[base_x[4]], (uint16_t)left[base_x[5]],
-                  (uint16_t)left[base_x[6]], (uint16_t)left[base_x[7]],
-                  (uint16_t)left[base_x[8]], (uint16_t)left[base_x[9]],
-                  (uint16_t)left[base_x[10]], (uint16_t)left[base_x[11]],
-                  (uint16_t)left[base_x[12]], (uint16_t)left[base_x[13]],
-                  (uint16_t)left[base_x[14]], (uint16_t)left[base_x[15]]));
-          __m256i basey1m = _mm256_and_si256(
-              kmaskinv,
-              _mm256_setr_epi16(
-                  (uint16_t)left[base_x[0] + 1], (uint16_t)left[base_x[1] + 1],
-                  (uint16_t)left[base_x[2] + 1], (uint16_t)left[base_x[3] + 1],
-                  (uint16_t)left[base_x[4] + 1], (uint16_t)left[base_x[5] + 1],
-                  (uint16_t)left[base_x[6] + 1], (uint16_t)left[base_x[7] + 1],
-                  (uint16_t)left[base_x[8] + 1], (uint16_t)left[base_x[9] + 1],
-                  (uint16_t)left[base_x[10] + 1],
-                  (uint16_t)left[base_x[11] + 1],
-                  (uint16_t)left[base_x[12] + 1],
-                  (uint16_t)left[base_x[13] + 1],
-                  (uint16_t)left[base_x[14] + 1],
-                  (uint16_t)left[base_x[15] + 1]));
-
-          __m256i shifty = _mm256_and_si256(
-              kmaskinv,
-              _mm256_srai_epi16(
-                  _mm256_and_si256(_mm256_mullo_epi16(y, addleft), and3f), 1));
-          __m256i shiftyc = _mm256_and_si256(
-              kmaskinv, _mm256_sub_epi16(_mm256_set1_epi16(32), shifty));
-
-          __m256i basexy = _mm256_or_si256(basexm, baseym);
-          __m256i basexy1 = _mm256_or_si256(basex1m, basey1m);
-
-          __m256i shiftxy = _mm256_or_si256(shiftx, shifty);
-          __m256i shiftxyc = _mm256_or_si256(shiftxc, shiftyc);
-
-          __m256i val1 = _mm256_mullo_epi16(basexy, shiftxyc);
-          __m256i val2 = _mm256_mullo_epi16(basexy1, shiftxy);
-
-          __m256i valsum = _mm256_add_epi16(val1, val2);
-
-          __m256i val3 = _mm256_srai_epi16(_mm256_add_epi16(valsum, add16), 5);
-
-          __m128i lowval = _mm256_extracti128_si256(val3, 0);
-          __m128i highval = _mm256_extracti128_si256(val3, 1);
-
-          _mm_storeu_si128((__m128i *)dst, highval);
-          dst += stride;
-          _mm_storeu_si128((__m128i *)dst, lowval);
-          dst += stride;
-        }
-
-        break;
-      default:
-        inc = bw / 16;
-        c = _mm256_set_epi16(-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12,
-                             -13, -14, -15, -16);
-        rinc = _mm256_set1_epi16(1);
-        cinc = _mm256_set1_epi16(16);
-        dst -= 16;
-        if ((bw * dy <= 32767) && (bh * dx <= 32767)) {
-          for (int j = 0; j < inc; j++) {
-            c = _mm256_add_epi16(c, cinc);
-            r = _mm256_set1_epi16(-1);
-            dst += 16;
-            for (int i = 0; i < bh; i++) {
-              r = _mm256_add_epi16(r, rinc);
-
-              __m256i y = _mm256_add_epi16(r, r1);
-              __m256i basex = _mm256_slli_epi16(c, 6);
-              __m256i x = _mm256_sub_epi16(basex, _mm256_mullo_epi16(y, vdx));
-              basex = _mm256_sra_epi16(x, fracx);
-              __m256i keq = _mm256_cmpeq_epi16(basex, minbasex);
-              __m256i kgt = _mm256_cmpgt_epi16(basex, minbasex);
-              __m256i kmask = _mm256_or_si256(keq, kgt);
-              basex = _mm256_and_si256(basex, kmask);
-              _mm256_storeu_si256((__m256i *)base_x, basex);
-
-              __m256i basexm = _mm256_and_si256(
-                  kmask,
-                  _mm256_setr_epi16(
-                      (uint16_t)above[base_x[0]], (uint16_t)above[base_x[1]],
-                      (uint16_t)above[base_x[2]], (uint16_t)above[base_x[3]],
-                      (uint16_t)above[base_x[4]], (uint16_t)above[base_x[5]],
-                      (uint16_t)above[base_x[6]], (uint16_t)above[base_x[7]],
-                      (uint16_t)above[base_x[8]], (uint16_t)above[base_x[9]],
-                      (uint16_t)above[base_x[10]], (uint16_t)above[base_x[11]],
-                      (uint16_t)above[base_x[12]], (uint16_t)above[base_x[13]],
-                      (uint16_t)above[base_x[14]],
-                      (uint16_t)above[base_x[15]]));
-              __m256i basex1m = _mm256_and_si256(
-                  kmask, _mm256_setr_epi16((uint16_t)above[base_x[0] + 1],
-                                           (uint16_t)above[base_x[1] + 1],
-                                           (uint16_t)above[base_x[2] + 1],
-                                           (uint16_t)above[base_x[3] + 1],
-                                           (uint16_t)above[base_x[4] + 1],
-                                           (uint16_t)above[base_x[5] + 1],
-                                           (uint16_t)above[base_x[6] + 1],
-                                           (uint16_t)above[base_x[7] + 1],
-                                           (uint16_t)above[base_x[8] + 1],
-                                           (uint16_t)above[base_x[9] + 1],
-                                           (uint16_t)above[base_x[10] + 1],
-                                           (uint16_t)above[base_x[11] + 1],
-                                           (uint16_t)above[base_x[12] + 1],
-                                           (uint16_t)above[base_x[13] + 1],
-                                           (uint16_t)above[base_x[14] + 1],
-                                           (uint16_t)above[base_x[15] + 1]));
-
-              __m256i shiftx = _mm256_and_si256(
-                  kmask, _mm256_srai_epi16(_mm256_and_si256(x, and3f), 1));
-              __m256i shiftxc = _mm256_and_si256(
-                  kmask, _mm256_sub_epi16(_mm256_set1_epi16(32), shiftx));
-
-              __m256i kmaskinv = _mm256_xor_si256(kmask, maskinv);
-
-              x = _mm256_add_epi16(c, r1);
-              __m256i basey = _mm256_slli_epi16(r, 6);
-              y = _mm256_sub_epi16(basey, _mm256_mullo_epi16(x, vdy));
-              basey = _mm256_sra_epi16(y, fracy);
-              basey = _mm256_and_si256(basey, kmaskinv);
-              _mm256_storeu_si256((__m256i *)base_x, basey);
-
-              __m256i baseym = _mm256_and_si256(
-                  kmaskinv,
-                  _mm256_setr_epi16(
-                      (uint16_t)left[base_x[0]], (uint16_t)left[base_x[1]],
-                      (uint16_t)left[base_x[2]], (uint16_t)left[base_x[3]],
-                      (uint16_t)left[base_x[4]], (uint16_t)left[base_x[5]],
-                      (uint16_t)left[base_x[6]], (uint16_t)left[base_x[7]],
-                      (uint16_t)left[base_x[8]], (uint16_t)left[base_x[9]],
-                      (uint16_t)left[base_x[10]], (uint16_t)left[base_x[11]],
-                      (uint16_t)left[base_x[12]], (uint16_t)left[base_x[13]],
-                      (uint16_t)left[base_x[14]], (uint16_t)left[base_x[15]]));
-              __m256i basey1m = _mm256_and_si256(
-                  kmaskinv, _mm256_setr_epi16((uint16_t)left[base_x[0] + 1],
-                                              (uint16_t)left[base_x[1] + 1],
-                                              (uint16_t)left[base_x[2] + 1],
-                                              (uint16_t)left[base_x[3] + 1],
-                                              (uint16_t)left[base_x[4] + 1],
-                                              (uint16_t)left[base_x[5] + 1],
-                                              (uint16_t)left[base_x[6] + 1],
-                                              (uint16_t)left[base_x[7] + 1],
-                                              (uint16_t)left[base_x[8] + 1],
-                                              (uint16_t)left[base_x[9] + 1],
-                                              (uint16_t)left[base_x[10] + 1],
-                                              (uint16_t)left[base_x[11] + 1],
-                                              (uint16_t)left[base_x[12] + 1],
-                                              (uint16_t)left[base_x[13] + 1],
-                                              (uint16_t)left[base_x[14] + 1],
-                                              (uint16_t)left[base_x[15] + 1]));
-
-              __m256i shifty = _mm256_and_si256(
-                  kmaskinv, _mm256_srai_epi16(_mm256_and_si256(y, and3f), 1));
-              __m256i shiftyc = _mm256_and_si256(
-                  kmaskinv, _mm256_sub_epi16(_mm256_set1_epi16(32), shifty));
-
-              __m256i basexy = _mm256_or_si256(basexm, baseym);
-              __m256i basexy1 = _mm256_or_si256(basex1m, basey1m);
-
-              __m256i shiftxy = _mm256_or_si256(shiftx, shifty);
-              __m256i shiftxyc = _mm256_or_si256(shiftxc, shiftyc);
-
-              __m256i val1 = _mm256_mullo_epi16(basexy, shiftxyc);
-              __m256i val2 = _mm256_mullo_epi16(basexy1, shiftxy);
-
-              __m256i valsum = _mm256_add_epi16(val1, val2);
-
-              __m256i val3 =
-                  _mm256_srai_epi16(_mm256_add_epi16(valsum, add16), 5);
-
-              _mm256_storeu_si256((__m256i *)dst, val3);
-              dst += stride;
-            }
-            dst -= bh * stride;
-          }
-        } else {
-          for (int j = 0; j < inc; j++) {
-            c = _mm256_add_epi16(c, cinc);
-            r = _mm256_set1_epi16(-1);
-            dst += 16;
-            for (int i = 0; i < bh; i++) {
-              r = _mm256_add_epi16(r, rinc);
-
-              __m256i y = _mm256_add_epi16(r, r1);
-              __m256i basex = _mm256_slli_epi16(c, 6);
-              __m256i basexlo = _mm256_unpacklo_epi16(basex, azeros);
-              __m256i basexhi = _mm256_unpackhi_epi16(basex, azeros);
-              __m256i ydx = _mm256_mullo_epi16(y, vdx);
-              __m256i ydxlo = _mm256_unpacklo_epi16(ydx, azeros);
-              __m256i ydxhi = _mm256_unpackhi_epi16(ydx, azeros);
-              __m256i xlo = _mm256_sub_epi32(basexlo, ydxlo);
-              __m256i xhi = _mm256_sub_epi32(basexhi, ydxhi);
-              __m256i xmlo = _mm256_packs_epi32(xlo, azeros);
-              __m256i xmhi = _mm256_packs_epi32(xhi, azeros);
-              __m256i x = _mm256_unpacklo_epi64(xmlo, xmhi);
-              basex = _mm256_sra_epi16(x, fracx);
-              __m256i keq = _mm256_cmpeq_epi16(basex, minbasex);
-              __m256i kgt = _mm256_cmpgt_epi16(basex, minbasex);
-              __m256i kmask = _mm256_or_si256(keq, kgt);
-              basex = _mm256_and_si256(basex, kmask);
-              _mm256_storeu_si256((__m256i *)base_x, basex);
-
-              __m256i basexm = _mm256_and_si256(
-                  kmask,
-                  _mm256_setr_epi16(
-                      (uint16_t)above[base_x[0]], (uint16_t)above[base_x[1]],
-                      (uint16_t)above[base_x[2]], (uint16_t)above[base_x[3]],
-                      (uint16_t)above[base_x[4]], (uint16_t)above[base_x[5]],
-                      (uint16_t)above[base_x[6]], (uint16_t)above[base_x[7]],
-                      (uint16_t)above[base_x[8]], (uint16_t)above[base_x[9]],
-                      (uint16_t)above[base_x[10]], (uint16_t)above[base_x[11]],
-                      (uint16_t)above[base_x[12]], (uint16_t)above[base_x[13]],
-                      (uint16_t)above[base_x[14]],
-                      (uint16_t)above[base_x[15]]));
-              __m256i basex1m = _mm256_and_si256(
-                  kmask, _mm256_setr_epi16((uint16_t)above[base_x[0] + 1],
-                                           (uint16_t)above[base_x[1] + 1],
-                                           (uint16_t)above[base_x[2] + 1],
-                                           (uint16_t)above[base_x[3] + 1],
-                                           (uint16_t)above[base_x[4] + 1],
-                                           (uint16_t)above[base_x[5] + 1],
-                                           (uint16_t)above[base_x[6] + 1],
-                                           (uint16_t)above[base_x[7] + 1],
-                                           (uint16_t)above[base_x[8] + 1],
-                                           (uint16_t)above[base_x[9] + 1],
-                                           (uint16_t)above[base_x[10] + 1],
-                                           (uint16_t)above[base_x[11] + 1],
-                                           (uint16_t)above[base_x[12] + 1],
-                                           (uint16_t)above[base_x[13] + 1],
-                                           (uint16_t)above[base_x[14] + 1],
-                                           (uint16_t)above[base_x[15] + 1]));
-
-              __m256i shiftx = _mm256_and_si256(
-                  kmask, _mm256_srai_epi16(_mm256_and_si256(x, and3f), 1));
-              __m256i shiftxc = _mm256_and_si256(
-                  kmask, _mm256_sub_epi16(_mm256_set1_epi16(32), shiftx));
-
-              __m256i kmaskinv = _mm256_xor_si256(kmask, maskinv);
-
-              x = _mm256_add_epi16(c, r1);
-              __m256i basey = _mm256_slli_epi16(r, 6);
-              __m256i baseylo = _mm256_unpacklo_epi16(basey, azeros);
-              __m256i baseyhi = _mm256_unpackhi_epi16(basey, azeros);
-              __m256i xdy = _mm256_mullo_epi16(x, vdy);
-              __m256i xdylo = _mm256_unpacklo_epi16(xdy, azeros);
-              __m256i xdyhi = _mm256_unpackhi_epi16(xdy, azeros);
-              __m256i ylo = _mm256_sub_epi32(baseylo, xdylo);
-              __m256i yhi = _mm256_sub_epi32(baseyhi, xdyhi);
-              __m256i ymlo = _mm256_packs_epi32(ylo, azeros);
-              __m256i ymhi = _mm256_packs_epi32(yhi, azeros);
-              y = _mm256_unpacklo_epi64(ymlo, ymhi);
-              basey = _mm256_sra_epi16(y, fracy);
-              basey = _mm256_and_si256(basey, kmaskinv);
-              _mm256_storeu_si256((__m256i *)base_x, basey);
-
-              __m256i baseym = _mm256_and_si256(
-                  kmaskinv,
-                  _mm256_setr_epi16(
-                      (uint16_t)left[base_x[0]], (uint16_t)left[base_x[1]],
-                      (uint16_t)left[base_x[2]], (uint16_t)left[base_x[3]],
-                      (uint16_t)left[base_x[4]], (uint16_t)left[base_x[5]],
-                      (uint16_t)left[base_x[6]], (uint16_t)left[base_x[7]],
-                      (uint16_t)left[base_x[8]], (uint16_t)left[base_x[9]],
-                      (uint16_t)left[base_x[10]], (uint16_t)left[base_x[11]],
-                      (uint16_t)left[base_x[12]], (uint16_t)left[base_x[13]],
-                      (uint16_t)left[base_x[14]], (uint16_t)left[base_x[15]]));
-              __m256i basey1m = _mm256_and_si256(
-                  kmaskinv, _mm256_setr_epi16((uint16_t)left[base_x[0] + 1],
-                                              (uint16_t)left[base_x[1] + 1],
-                                              (uint16_t)left[base_x[2] + 1],
-                                              (uint16_t)left[base_x[3] + 1],
-                                              (uint16_t)left[base_x[4] + 1],
-                                              (uint16_t)left[base_x[5] + 1],
-                                              (uint16_t)left[base_x[6] + 1],
-                                              (uint16_t)left[base_x[7] + 1],
-                                              (uint16_t)left[base_x[8] + 1],
-                                              (uint16_t)left[base_x[9] + 1],
-                                              (uint16_t)left[base_x[10] + 1],
-                                              (uint16_t)left[base_x[11] + 1],
-                                              (uint16_t)left[base_x[12] + 1],
-                                              (uint16_t)left[base_x[13] + 1],
-                                              (uint16_t)left[base_x[14] + 1],
-                                              (uint16_t)left[base_x[15] + 1]));
-
-              __m256i shifty = _mm256_and_si256(
-                  kmaskinv, _mm256_srai_epi16(_mm256_and_si256(y, and3f), 1));
-              __m256i shiftyc = _mm256_and_si256(
-                  kmaskinv, _mm256_sub_epi16(_mm256_set1_epi16(32), shifty));
-
-              __m256i basexy = _mm256_or_si256(basexm, baseym);
-              __m256i basexy1 = _mm256_or_si256(basex1m, basey1m);
-
-              __m256i shiftxy = _mm256_or_si256(shiftx, shifty);
-              __m256i shiftxyc = _mm256_or_si256(shiftxc, shiftyc);
-
-              __m256i val1 = _mm256_mullo_epi16(basexy, shiftxyc);
-              __m256i val2 = _mm256_mullo_epi16(basexy1, shiftxy);
-
-              __m256i valsum = _mm256_add_epi16(val1, val2);
-
-              __m256i val3 =
-                  _mm256_srai_epi16(_mm256_add_epi16(valsum, add16), 5);
-
-              _mm256_storeu_si256((__m256i *)dst, val3);
-              dst += stride;
-            }
-            dst -= bh * stride;
-          }
-        }
-    }
-  } else {
-    __m256i c, r, cinc, rinc, r1;
-    int32_t base_x[8];
-    int16_t inc = 1;
-
-    __m256i minbasex = _mm256_set1_epi32((int32_t)min_base_x);
-    __m128i fracx = _mm_insert_epi16(_mm_set1_epi16(0), (int)frac_bits_x, 0);
-    __m128i fracy = _mm_insert_epi16(_mm_set1_epi16(0), (int)frac_bits_y, 0);
-    r1 = _mm256_set1_epi32(1);
-    __m256i vdx = _mm256_set1_epi32(dx);
-    __m256i vdy = _mm256_set1_epi32(dy);
-    __m256i add16 = _mm256_set1_epi32(16);
-    __m256i and3f = _mm256_set1_epi32(0x3f);
-    __m256i maskinv = _mm256_set1_epi32(-1);
-    __m256i addup = _mm256_set1_epi32((int32_t)-min_base_x);
-    __m256i addleft = _mm256_set1_epi32((int32_t)-min_base_y);
-    __m256i azeros = _mm256_set1_epi32(0);
-
-    switch (bw) {
-      case 4:
-        c = _mm256_set_epi32(3, 2, 1, 0, 3, 2, 1, 0);
-        r = _mm256_set_epi32(-2, -2, -2, -2, -1, -1, -1, -1);
-        rinc = _mm256_set1_epi32(2);
-        for (int i = 0; i < bh; i += 2) {
-          r = _mm256_add_epi32(r, rinc);
-
-          __m256i y = _mm256_add_epi32(r, r1);
-          __m256i basex = _mm256_slli_epi32(c, 6);
-          __m256i x = _mm256_sub_epi32(basex, _mm256_mullo_epi32(y, vdx));
-          basex = _mm256_sra_epi32(x, fracx);
-          __m256i keq = _mm256_cmpeq_epi32(basex, minbasex);
-          __m256i kgt = _mm256_cmpgt_epi32(basex, minbasex);
-          __m256i kmask = _mm256_or_si256(keq, kgt);
-          basex = _mm256_and_si256(basex, kmask);
-          _mm256_storeu_si256((__m256i *)base_x, basex);
-
-          __m256i basexm = _mm256_and_si256(
-              kmask,
-              _mm256_setr_epi32(
-                  (uint32_t)above[base_x[0]], (uint32_t)above[base_x[1]],
-                  (uint32_t)above[base_x[2]], (uint32_t)above[base_x[3]],
-                  (uint32_t)above[base_x[4]], (uint32_t)above[base_x[5]],
-                  (uint32_t)above[base_x[6]], (uint32_t)above[base_x[7]]));
-          __m256i basex1m = _mm256_and_si256(
-              kmask, _mm256_setr_epi32((uint32_t)above[base_x[0] + 1],
-                                       (uint32_t)above[base_x[1] + 1],
-                                       (uint32_t)above[base_x[2] + 1],
-                                       (uint32_t)above[base_x[3] + 1],
-                                       (uint32_t)above[base_x[4] + 1],
-                                       (uint32_t)above[base_x[5] + 1],
-                                       (uint32_t)above[base_x[6] + 1],
-                                       (uint32_t)above[base_x[7] + 1]));
-
-          __m256i shiftx = _mm256_and_si256(
-              kmask,
-              _mm256_srai_epi32(
-                  _mm256_and_si256(_mm256_mullo_epi32(x, addup), and3f), 1));
-          __m256i shiftxc = _mm256_and_si256(
-              kmask, _mm256_sub_epi32(_mm256_set1_epi32(32), shiftx));
-
-          __m256i kmaskinv = _mm256_xor_si256(kmask, maskinv);
-
-          x = _mm256_add_epi32(c, r1);
-          __m256i basey = _mm256_slli_epi32(r, 6);
-          y = _mm256_sub_epi32(basey, _mm256_mullo_epi32(x, vdy));
-          basey = _mm256_sra_epi32(y, fracy);
-          basey = _mm256_and_si256(basey, kmaskinv);
-          _mm256_storeu_si256((__m256i *)base_x, basey);
-
-          __m256i baseym = _mm256_and_si256(
-              kmaskinv,
-              _mm256_setr_epi32(
-                  (uint32_t)left[base_x[0]], (uint32_t)left[base_x[1]],
-                  (uint32_t)left[base_x[2]], (uint32_t)left[base_x[3]],
-                  (uint32_t)left[base_x[4]], (uint32_t)left[base_x[5]],
-                  (uint32_t)left[base_x[6]], (uint32_t)left[base_x[7]]));
-          __m256i basey1m = _mm256_and_si256(
-              kmaskinv,
-              _mm256_setr_epi32(
-                  (uint32_t)left[base_x[0] + 1], (uint32_t)left[base_x[1] + 1],
-                  (uint32_t)left[base_x[2] + 1], (uint32_t)left[base_x[3] + 1],
-                  (uint32_t)left[base_x[4] + 1], (uint32_t)left[base_x[5] + 1],
-                  (uint32_t)left[base_x[6] + 1],
-                  (uint32_t)left[base_x[7] + 1]));
-
-          __m256i shifty = _mm256_and_si256(
-              kmaskinv,
-              _mm256_srai_epi32(
-                  _mm256_and_si256(_mm256_mullo_epi32(y, addleft), and3f), 1));
-          __m256i shiftyc = _mm256_and_si256(
-              kmaskinv, _mm256_sub_epi32(_mm256_set1_epi32(32), shifty));
-
-          __m256i basexy = _mm256_or_si256(basexm, baseym);
-          __m256i basexy1 = _mm256_or_si256(basex1m, basey1m);
-
-          __m256i shiftxy = _mm256_or_si256(shiftx, shifty);
-          __m256i shiftxyc = _mm256_or_si256(shiftxc, shiftyc);
-
-          __m256i val1 = _mm256_mullo_epi32(basexy, shiftxyc);
-          __m256i val2 = _mm256_mullo_epi32(basexy1, shiftxy);
-
-          __m256i valsum = _mm256_add_epi32(val1, val2);
-
-          __m256i val3 = _mm256_srai_epi32(_mm256_add_epi32(valsum, add16), 5);
-          __m256i val4 = _mm256_packus_epi32(val3, azeros);
-
-          __m128i lowval = _mm256_extracti128_si256(val4, 0);
-          __m128i highval = _mm256_extracti128_si256(val4, 1);
-
-          _mm_storel_epi64((__m128i *)dst, highval);
-          dst += stride;
-          _mm_storel_epi64((__m128i *)dst, lowval);
-          dst += stride;
-        }
-
-        break;
-      case 8:
-        c = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
-        rinc = _mm256_set1_epi32(1);
-        r = _mm256_set1_epi32(-1);
-        for (int i = 0; i < bh; i++) {
-          r = _mm256_add_epi32(r, rinc);
-
-          __m256i y = _mm256_add_epi32(r, r1);
-          __m256i basex = _mm256_slli_epi32(c, 6);
-          __m256i x = _mm256_sub_epi32(basex, _mm256_mullo_epi32(y, vdx));
-          basex = _mm256_sra_epi32(x, fracx);
-          __m256i keq = _mm256_cmpeq_epi32(basex, minbasex);
-          __m256i kgt = _mm256_cmpgt_epi32(basex, minbasex);
-          __m256i kmask = _mm256_or_si256(keq, kgt);
-          basex = _mm256_and_si256(basex, kmask);
-          _mm256_storeu_si256((__m256i *)base_x, basex);
-
-          __m256i basexm = _mm256_and_si256(
-              kmask,
-              _mm256_setr_epi32(
-                  (uint32_t)above[base_x[0]], (uint32_t)above[base_x[1]],
-                  (uint32_t)above[base_x[2]], (uint32_t)above[base_x[3]],
-                  (uint32_t)above[base_x[4]], (uint32_t)above[base_x[5]],
-                  (uint32_t)above[base_x[6]], (uint32_t)above[base_x[7]]));
-          __m256i basex1m = _mm256_and_si256(
-              kmask, _mm256_setr_epi32((uint32_t)above[base_x[0] + 1],
-                                       (uint32_t)above[base_x[1] + 1],
-                                       (uint32_t)above[base_x[2] + 1],
-                                       (uint32_t)above[base_x[3] + 1],
-                                       (uint32_t)above[base_x[4] + 1],
-                                       (uint32_t)above[base_x[5] + 1],
-                                       (uint32_t)above[base_x[6] + 1],
-                                       (uint32_t)above[base_x[7] + 1]));
-
-          __m256i shiftx = _mm256_and_si256(
-              kmask,
-              _mm256_srai_epi32(
-                  _mm256_and_si256(_mm256_mullo_epi32(x, addup), and3f), 1));
-          __m256i shiftxc = _mm256_and_si256(
-              kmask, _mm256_sub_epi32(_mm256_set1_epi32(32), shiftx));
-
-          __m256i kmaskinv = _mm256_xor_si256(kmask, maskinv);
-
-          x = _mm256_add_epi32(c, r1);
-          __m256i basey = _mm256_slli_epi32(r, 6);
-          y = _mm256_sub_epi32(basey, _mm256_mullo_epi32(x, vdy));
-          basey = _mm256_sra_epi32(y, fracy);
-          basey = _mm256_and_si256(basey, kmaskinv);
-          _mm256_storeu_si256((__m256i *)base_x, basey);
-
-          __m256i baseym = _mm256_and_si256(
-              kmaskinv,
-              _mm256_setr_epi32(
-                  (uint32_t)left[base_x[0]], (uint32_t)left[base_x[1]],
-                  (uint32_t)left[base_x[2]], (uint32_t)left[base_x[3]],
-                  (uint32_t)left[base_x[4]], (uint32_t)left[base_x[5]],
-                  (uint32_t)left[base_x[6]], (uint32_t)left[base_x[7]]));
-          __m256i basey1m = _mm256_and_si256(
-              kmaskinv,
-              _mm256_setr_epi32(
-                  (uint32_t)left[base_x[0] + 1], (uint32_t)left[base_x[1] + 1],
-                  (uint32_t)left[base_x[2] + 1], (uint32_t)left[base_x[3] + 1],
-                  (uint32_t)left[base_x[4] + 1], (uint32_t)left[base_x[5] + 1],
-                  (uint32_t)left[base_x[6] + 1],
-                  (uint32_t)left[base_x[7] + 1]));
-
-          __m256i shifty = _mm256_and_si256(
-              kmaskinv,
-              _mm256_srai_epi32(
-                  _mm256_and_si256(_mm256_mullo_epi32(y, addleft), and3f), 1));
-          __m256i shiftyc = _mm256_and_si256(
-              kmaskinv, _mm256_sub_epi32(_mm256_set1_epi32(32), shifty));
-
-          __m256i basexy = _mm256_or_si256(basexm, baseym);
-          __m256i basexy1 = _mm256_or_si256(basex1m, basey1m);
-
-          __m256i shiftxy = _mm256_or_si256(shiftx, shifty);
-          __m256i shiftxyc = _mm256_or_si256(shiftxc, shiftyc);
-
-          __m256i val1 = _mm256_mullo_epi32(basexy, shiftxyc);
-          __m256i val2 = _mm256_mullo_epi32(basexy1, shiftxy);
-
-          __m256i valsum = _mm256_add_epi32(val1, val2);
-
-          __m256i val3 = _mm256_srai_epi32(_mm256_add_epi32(valsum, add16), 5);
-          __m256i val4 = _mm256_packus_epi32(val3, azeros);
-
-          __m128i lowval = _mm256_extracti128_si256(val4, 0);
-          __m128i highval = _mm256_extracti128_si256(val4, 1);
-
-          _mm_storel_epi64((__m128i *)dst, lowval);
-          dst += 4;
-          _mm_storel_epi64((__m128i *)dst, highval);
-          dst += stride - 4;
-        }
-
-        break;
-      default:
-        inc = bw / 8;
-        c = _mm256_set_epi32(-1, -2, -3, -4, -5, -6, -7, -8);
-        rinc = _mm256_set1_epi32(1);
-        cinc = _mm256_set1_epi32(8);
-        dst -= 8;
-        for (int j = 0; j < inc; j++) {
-          c = _mm256_add_epi32(c, cinc);
-          r = _mm256_set1_epi32(-1);
-          dst += 8;
-          for (int i = 0; i < bh; i++) {
-            r = _mm256_add_epi32(r, rinc);
-
-            __m256i y = _mm256_add_epi32(r, r1);
-            __m256i basex = _mm256_slli_epi32(c, 6);
-            __m256i x = _mm256_sub_epi32(basex, _mm256_mullo_epi32(y, vdx));
-            basex = _mm256_sra_epi32(x, fracx);
-            __m256i keq = _mm256_cmpeq_epi32(basex, minbasex);
-            __m256i kgt = _mm256_cmpgt_epi32(basex, minbasex);
-            __m256i kmask = _mm256_or_si256(keq, kgt);
-            basex = _mm256_and_si256(basex, kmask);
-            _mm256_storeu_si256((__m256i *)base_x, basex);
-
-            __m256i basexm = _mm256_and_si256(
-                kmask,
-                _mm256_setr_epi32(
-                    (uint32_t)above[base_x[0]], (uint32_t)above[base_x[1]],
-                    (uint32_t)above[base_x[2]], (uint32_t)above[base_x[3]],
-                    (uint32_t)above[base_x[4]], (uint32_t)above[base_x[5]],
-                    (uint32_t)above[base_x[6]], (uint32_t)above[base_x[7]]));
-            __m256i basex1m = _mm256_and_si256(
-                kmask, _mm256_setr_epi32((uint32_t)above[base_x[0] + 1],
-                                         (uint32_t)above[base_x[1] + 1],
-                                         (uint32_t)above[base_x[2] + 1],
-                                         (uint32_t)above[base_x[3] + 1],
-                                         (uint32_t)above[base_x[4] + 1],
-                                         (uint32_t)above[base_x[5] + 1],
-                                         (uint32_t)above[base_x[6] + 1],
-                                         (uint32_t)above[base_x[7] + 1]));
-
-            __m256i shiftx = _mm256_and_si256(
-                kmask, _mm256_srai_epi32(_mm256_and_si256(x, and3f), 1));
-            __m256i shiftxc = _mm256_and_si256(
-                kmask, _mm256_sub_epi32(_mm256_set1_epi32(32), shiftx));
-
-            __m256i kmaskinv = _mm256_xor_si256(kmask, maskinv);
-
-            x = _mm256_add_epi32(c, r1);
-            __m256i basey = _mm256_slli_epi32(r, 6);
-            y = _mm256_sub_epi32(basey, _mm256_mullo_epi32(x, vdy));
-            basey = _mm256_sra_epi32(y, fracy);
-            basey = _mm256_and_si256(basey, kmaskinv);
-            _mm256_storeu_si256((__m256i *)base_x, basey);
-
-            __m256i baseym = _mm256_and_si256(
-                kmaskinv,
-                _mm256_setr_epi32(
-                    (uint32_t)left[base_x[0]], (uint32_t)left[base_x[1]],
-                    (uint32_t)left[base_x[2]], (uint32_t)left[base_x[3]],
-                    (uint32_t)left[base_x[4]], (uint32_t)left[base_x[5]],
-                    (uint32_t)left[base_x[6]], (uint32_t)left[base_x[7]]));
-            __m256i basey1m = _mm256_and_si256(
-                kmaskinv, _mm256_setr_epi32((uint32_t)left[base_x[0] + 1],
-                                            (uint32_t)left[base_x[1] + 1],
-                                            (uint32_t)left[base_x[2] + 1],
-                                            (uint32_t)left[base_x[3] + 1],
-                                            (uint32_t)left[base_x[4] + 1],
-                                            (uint32_t)left[base_x[5] + 1],
-                                            (uint32_t)left[base_x[6] + 1],
-                                            (uint32_t)left[base_x[7] + 1]));
-
-            __m256i shifty = _mm256_and_si256(
-                kmaskinv, _mm256_srai_epi32(_mm256_and_si256(y, and3f), 1));
-            __m256i shiftyc = _mm256_and_si256(
-                kmaskinv, _mm256_sub_epi32(_mm256_set1_epi32(32), shifty));
-
-            __m256i basexy = _mm256_or_si256(basexm, baseym);
-            __m256i basexy1 = _mm256_or_si256(basex1m, basey1m);
-
-            __m256i shiftxy = _mm256_or_si256(shiftx, shifty);
-            __m256i shiftxyc = _mm256_or_si256(shiftxc, shiftyc);
-
-            __m256i val1 = _mm256_mullo_epi32(basexy, shiftxyc);
-            __m256i val2 = _mm256_mullo_epi32(basexy1, shiftxy);
-
-            __m256i valsum = _mm256_add_epi32(val1, val2);
-
-            __m256i val3 =
-                _mm256_srai_epi32(_mm256_add_epi32(valsum, add16), 5);
-            __m256i val4 = _mm256_packus_epi32(val3, azeros);
-
-            __m128i lowval = _mm256_extracti128_si256(val4, 0);
-            __m128i highval = _mm256_extracti128_si256(val4, 1);
-
-            _mm_storel_epi64((__m128i *)dst, lowval);
-            dst += 4;
-            _mm_storel_epi64((__m128i *)dst, highval);
-            dst += stride - 4;
-          }
-          dst -= bh * stride;
-        }
-    }
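+  // Note: bd < 12 can stay in the 16-bit arithmetic kernels; 12-bit input
+  // falls back to the 32-bit variants so the intermediate products cannot
+  // overflow 16 bits.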
+  switch (bw) {
+    case 4:
+      if (bd < 12) {
+        highbd_dr_prediction_z2_Nx4_avx2(bh, dst, stride, above, left,
+                                         upsample_above, upsample_left, dx, dy);
+      } else {
+        highbd_dr_prediction_32bit_z2_Nx4_avx2(bh, dst, stride, above, left,
+                                               upsample_above, upsample_left,
+                                               dx, dy);
+      }
+      break;
+    case 8:
+      if (bd < 12) {
+        highbd_dr_prediction_z2_Nx8_avx2(bh, dst, stride, above, left,
+                                         upsample_above, upsample_left, dx, dy);
+      } else {
+        highbd_dr_prediction_32bit_z2_Nx8_avx2(bh, dst, stride, above, left,
+                                               upsample_above, upsample_left,
+                                               dx, dy);
+      }
+      break;
+    default:
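+      // Remaining widths (16 and wider) take the general HxW path, which
+      // also needs the block width as a parameter.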
+      if (bd < 12) {
+        highbd_dr_prediction_z2_HxW_avx2(bh, bw, dst, stride, above, left,
+                                         upsample_above, upsample_left, dx, dy);
+      } else {
+        highbd_dr_prediction_32bit_z2_HxW_avx2(bh, bw, dst, stride, above, left,
+                                               upsample_above, upsample_left,
+                                               dx, dy);
+      }
+      break;
   }
 }
 
diff --git a/av1/common/reconintra.c b/av1/common/reconintra.c
index efb3794..66bf5b7 100644
--- a/av1/common/reconintra.c
+++ b/av1/common/reconintra.c
@@ -1200,7 +1200,7 @@
     int need_bottom = extend_modes[mode] & NEED_BOTTOMLEFT;
     if (use_filter_intra) need_bottom = 0;
     if (is_dr_mode) need_bottom = p_angle > 180;
-    const int num_left_pixels_needed = txhpx + (need_bottom ? txwpx : 0);
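+    // Request 3 extra left pixels when the bottom-left is not needed, so the
+    // wider SIMD reads in the z2 predictor stay within initialized memory.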
+    const int num_left_pixels_needed = txhpx + (need_bottom ? txwpx : 3);
     i = 0;
     if (n_left_px > 0) {
       for (; i < n_left_px; i++) left_col[i] = left_ref[i * ref_stride];