Optimize highbd fwd_txfm module

Added an AVX2 variant for the 64x64 transform block size.

When tested across multiple test cases, a 1%
average reduction in encoder time was observed
for the speed=1 preset.

Module-level gains improved by a factor of ~2
on average w.r.t. the SSE4_1 module.

Change-Id: I855c628982e84ed6cb2950fcb695c42555e14609
diff --git a/av1/av1.cmake b/av1/av1.cmake
index 19af0ff..19f639d 100644
--- a/av1/av1.cmake
+++ b/av1/av1.cmake
@@ -286,6 +286,7 @@
             "${AOM_ROOT}/av1/encoder/x86/error_intrin_avx2.c"
             "${AOM_ROOT}/av1/encoder/x86/av1_fwd_txfm_avx2.h"
             "${AOM_ROOT}/av1/encoder/x86/av1_fwd_txfm2d_avx2.c"
+            "${AOM_ROOT}/av1/encoder/x86/highbd_fwd_txfm_avx2.c"
             "${AOM_ROOT}/av1/encoder/x86/wedge_utils_avx2.c"
             "${AOM_ROOT}/av1/encoder/x86/encodetxb_avx2.c"
             "${AOM_ROOT}/av1/encoder/x86/rdopt_avx2.c"
diff --git a/av1/common/av1_rtcd_defs.pl b/av1/common/av1_rtcd_defs.pl
index 1f12588..5d06d32 100755
--- a/av1/common/av1_rtcd_defs.pl
+++ b/av1/common/av1_rtcd_defs.pl
@@ -237,7 +237,7 @@
   specialize qw/av1_fwd_txfm2d_32x32 sse4_1/;
 
   add_proto qw/void av1_fwd_txfm2d_64x64/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
-  specialize qw/av1_fwd_txfm2d_64x64 sse4_1/;
+  specialize qw/av1_fwd_txfm2d_64x64 sse4_1 avx2/;
   add_proto qw/void av1_fwd_txfm2d_32x64/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
   specialize qw/av1_fwd_txfm2d_32x64 sse4_1/;
   add_proto qw/void av1_fwd_txfm2d_64x32/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
diff --git a/av1/encoder/x86/highbd_fwd_txfm_avx2.c b/av1/encoder/x86/highbd_fwd_txfm_avx2.c
new file mode 100644
index 0000000..1bcf472
--- /dev/null
+++ b/av1/encoder/x86/highbd_fwd_txfm_avx2.c
@@ -0,0 +1,1184 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+#include <assert.h>
+#include <immintrin.h> /*AVX2*/
+
+#include "config/aom_config.h"
+#include "config/av1_rtcd.h"
+#include "av1/common/av1_txfm.h"
+#include "av1/encoder/av1_fwd_txfm1d_cfg.h"
+#include "aom_dsp/txfm_common.h"
+#include "aom_ports/mem.h"
+
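+// Load a width-16 block of int16 input: one unaligned 256-bit load per row,
+// then sign-extend each 128-bit half into a __m256i of eight int32 lanes
+// (two output registers per input row).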
+static INLINE void av1_load_buffer_16xn_avx2(const int16_t *input, __m256i *out,
+                                             int stride, int height,
+                                             int outstride) {
+  __m256i out1[64];
+  for (int i = 0; i < height; i++) {
+    out1[i] = _mm256_loadu_si256((const __m256i *)(input + i * stride));
+  }
+
+  for (int i = 0; i < height; i++) {
+    out[i * outstride] = _mm256_cvtepi16_epi32(_mm256_castsi256_si128(out1[i]));
+    out[i * outstride + 1] =
+        _mm256_cvtepi16_epi32(_mm256_extractf128_si256(out1[i], 1));
+  }
+}
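+// Transpose an 8x8 tile of int32 elements held in eight __m256i registers,
+// built from 32-bit and 64-bit unpacks followed by 128-bit lane permutes.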
+static void av1_fwd_txfm_transpose_8x8_avx2(const __m256i *in, __m256i *out,
+                                            const int instride,
+                                            const int outstride) {
+  __m256i u0, u1, u2, u3, u4, u5, u6, u7;
+  __m256i x0, x1;
+
+  u0 = _mm256_unpacklo_epi32(in[0 * instride], in[1 * instride]);
+  u1 = _mm256_unpackhi_epi32(in[0 * instride], in[1 * instride]);
+
+  u2 = _mm256_unpacklo_epi32(in[2 * instride], in[3 * instride]);
+  u3 = _mm256_unpackhi_epi32(in[2 * instride], in[3 * instride]);
+
+  u4 = _mm256_unpacklo_epi32(in[4 * instride], in[5 * instride]);
+  u5 = _mm256_unpackhi_epi32(in[4 * instride], in[5 * instride]);
+
+  u6 = _mm256_unpacklo_epi32(in[6 * instride], in[7 * instride]);
+  u7 = _mm256_unpackhi_epi32(in[6 * instride], in[7 * instride]);
+
+  x0 = _mm256_unpacklo_epi64(u0, u2);
+  x1 = _mm256_unpacklo_epi64(u4, u6);
+  out[0 * outstride] = _mm256_permute2f128_si256(x0, x1, 0x20);
+  out[4 * outstride] = _mm256_permute2f128_si256(x0, x1, 0x31);
+
+  x0 = _mm256_unpackhi_epi64(u0, u2);
+  x1 = _mm256_unpackhi_epi64(u4, u6);
+  out[1 * outstride] = _mm256_permute2f128_si256(x0, x1, 0x20);
+  out[5 * outstride] = _mm256_permute2f128_si256(x0, x1, 0x31);
+
+  x0 = _mm256_unpacklo_epi64(u1, u3);
+  x1 = _mm256_unpacklo_epi64(u5, u7);
+  out[2 * outstride] = _mm256_permute2f128_si256(x0, x1, 0x20);
+  out[6 * outstride] = _mm256_permute2f128_si256(x0, x1, 0x31);
+
+  x0 = _mm256_unpackhi_epi64(u1, u3);
+  x1 = _mm256_unpackhi_epi64(u5, u7);
+  out[3 * outstride] = _mm256_permute2f128_si256(x0, x1, 0x20);
+  out[7 * outstride] = _mm256_permute2f128_si256(x0, x1, 0x31);
+}
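+// Apply a per-stage shift: a negative bit is a rounding right shift (add
+// 1 << (|bit| - 1), then arithmetic shift right by |bit|); a positive bit
+// is a left shift.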
+static INLINE void av1_round_shift_32_8xn_avx2(__m256i *in, int size, int bit,
+                                               int stride) {
+  if (bit < 0) {
+    bit = -bit;
+    __m256i round = _mm256_set1_epi32(1 << (bit - 1));
+    for (int i = 0; i < size; ++i) {
+      in[stride * i] = _mm256_add_epi32(in[stride * i], round);
+      in[stride * i] = _mm256_srai_epi32(in[stride * i], bit);
+    }
+  } else if (bit > 0) {
+    for (int i = 0; i < size; ++i) {
+      in[stride * i] = _mm256_slli_epi32(in[stride * i], bit);
+    }
+  }
+}
+static INLINE void av1_store_buffer_avx2(const __m256i *const in, int32_t *out,
+                                         const int stride, const int out_size) {
+  for (int i = 0; i < out_size; ++i) {
+    _mm256_store_si256((__m256i *)(out), in[i]);
+    out += stride;
+  }
+}
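+// Butterfly on int32 lanes with rounding:
+//   out0 = (in0 * ww0 + in1 * ww1 + r) >> bit
+//   out1 = (in0 * ww1 - in1 * ww0 + r) >> bit
+// where r holds the rounding offset 1 << (bit - 1).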
+#define btf_32_type0_avx2_new(ww0, ww1, in0, in1, out0, out1, r, bit) \
+  do {                                                                \
+    const __m256i in0_w0 = _mm256_mullo_epi32(in0, ww0);              \
+    const __m256i in1_w1 = _mm256_mullo_epi32(in1, ww1);              \
+    out0 = _mm256_add_epi32(in0_w0, in1_w1);                          \
+    out0 = _mm256_add_epi32(out0, r);                                 \
+    out0 = _mm256_srai_epi32(out0, bit);                              \
+    const __m256i in0_w1 = _mm256_mullo_epi32(in0, ww1);              \
+    const __m256i in1_w0 = _mm256_mullo_epi32(in1, ww0);              \
+    out1 = _mm256_sub_epi32(in0_w1, in1_w0);                          \
+    out1 = _mm256_add_epi32(out1, r);                                 \
+    out1 = _mm256_srai_epi32(out1, bit);                              \
+  } while (0)
+
+typedef void (*transform_1d_avx2)(__m256i *in, __m256i *out,
+                                  const int8_t cos_bit,
+                                  const int8_t *stage_range, int instride,
+                                  int outstride);
+
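+// Stages 2-10 of the 64-point forward DCT. Each __m256i holds eight int32
+// lanes, so every helper applies one stage of the same 1-D transform to
+// eight columns at a time.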
+static INLINE void av1_fdct64_stage2_avx2(__m256i *x1, __m256i *x2,
+                                          __m256i *cospi_m32,
+                                          __m256i *cospi_p32,
+                                          const __m256i *__rounding,
+                                          int8_t cos_bit) {
+  x2[0] = _mm256_add_epi32(x1[0], x1[31]);
+  x2[31] = _mm256_sub_epi32(x1[0], x1[31]);
+  x2[1] = _mm256_add_epi32(x1[1], x1[30]);
+  x2[30] = _mm256_sub_epi32(x1[1], x1[30]);
+  x2[2] = _mm256_add_epi32(x1[2], x1[29]);
+  x2[29] = _mm256_sub_epi32(x1[2], x1[29]);
+  x2[3] = _mm256_add_epi32(x1[3], x1[28]);
+  x2[28] = _mm256_sub_epi32(x1[3], x1[28]);
+  x2[4] = _mm256_add_epi32(x1[4], x1[27]);
+  x2[27] = _mm256_sub_epi32(x1[4], x1[27]);
+  x2[5] = _mm256_add_epi32(x1[5], x1[26]);
+  x2[26] = _mm256_sub_epi32(x1[5], x1[26]);
+  x2[6] = _mm256_add_epi32(x1[6], x1[25]);
+  x2[25] = _mm256_sub_epi32(x1[6], x1[25]);
+  x2[7] = _mm256_add_epi32(x1[7], x1[24]);
+  x2[24] = _mm256_sub_epi32(x1[7], x1[24]);
+  x2[8] = _mm256_add_epi32(x1[8], x1[23]);
+  x2[23] = _mm256_sub_epi32(x1[8], x1[23]);
+  x2[9] = _mm256_add_epi32(x1[9], x1[22]);
+  x2[22] = _mm256_sub_epi32(x1[9], x1[22]);
+  x2[10] = _mm256_add_epi32(x1[10], x1[21]);
+  x2[21] = _mm256_sub_epi32(x1[10], x1[21]);
+  x2[11] = _mm256_add_epi32(x1[11], x1[20]);
+  x2[20] = _mm256_sub_epi32(x1[11], x1[20]);
+  x2[12] = _mm256_add_epi32(x1[12], x1[19]);
+  x2[19] = _mm256_sub_epi32(x1[12], x1[19]);
+  x2[13] = _mm256_add_epi32(x1[13], x1[18]);
+  x2[18] = _mm256_sub_epi32(x1[13], x1[18]);
+  x2[14] = _mm256_add_epi32(x1[14], x1[17]);
+  x2[17] = _mm256_sub_epi32(x1[14], x1[17]);
+  x2[15] = _mm256_add_epi32(x1[15], x1[16]);
+  x2[16] = _mm256_sub_epi32(x1[15], x1[16]);
+  x2[32] = x1[32];
+  x2[33] = x1[33];
+  x2[34] = x1[34];
+  x2[35] = x1[35];
+  x2[36] = x1[36];
+  x2[37] = x1[37];
+  x2[38] = x1[38];
+  x2[39] = x1[39];
+  btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x1[40], x1[55], x2[40], x2[55],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x1[41], x1[54], x2[41], x2[54],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x1[42], x1[53], x2[42], x2[53],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x1[43], x1[52], x2[43], x2[52],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x1[44], x1[51], x2[44], x2[51],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x1[45], x1[50], x2[45], x2[50],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x1[46], x1[49], x2[46], x2[49],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x1[47], x1[48], x2[47], x2[48],
+                        *__rounding, cos_bit);
+  x2[56] = x1[56];
+  x2[57] = x1[57];
+  x2[58] = x1[58];
+  x2[59] = x1[59];
+  x2[60] = x1[60];
+  x2[61] = x1[61];
+  x2[62] = x1[62];
+  x2[63] = x1[63];
+}
+static INLINE void av1_fdct64_stage3_avx2(__m256i *x2, __m256i *x3,
+                                          __m256i *cospi_m32,
+                                          __m256i *cospi_p32,
+                                          const __m256i *__rounding,
+                                          int8_t cos_bit) {
+  x3[0] = _mm256_add_epi32(x2[0], x2[15]);
+  x3[15] = _mm256_sub_epi32(x2[0], x2[15]);
+  x3[1] = _mm256_add_epi32(x2[1], x2[14]);
+  x3[14] = _mm256_sub_epi32(x2[1], x2[14]);
+  x3[2] = _mm256_add_epi32(x2[2], x2[13]);
+  x3[13] = _mm256_sub_epi32(x2[2], x2[13]);
+  x3[3] = _mm256_add_epi32(x2[3], x2[12]);
+  x3[12] = _mm256_sub_epi32(x2[3], x2[12]);
+  x3[4] = _mm256_add_epi32(x2[4], x2[11]);
+  x3[11] = _mm256_sub_epi32(x2[4], x2[11]);
+  x3[5] = _mm256_add_epi32(x2[5], x2[10]);
+  x3[10] = _mm256_sub_epi32(x2[5], x2[10]);
+  x3[6] = _mm256_add_epi32(x2[6], x2[9]);
+  x3[9] = _mm256_sub_epi32(x2[6], x2[9]);
+  x3[7] = _mm256_add_epi32(x2[7], x2[8]);
+  x3[8] = _mm256_sub_epi32(x2[7], x2[8]);
+  x3[16] = x2[16];
+  x3[17] = x2[17];
+  x3[18] = x2[18];
+  x3[19] = x2[19];
+  btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x2[20], x2[27], x3[20], x3[27],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x2[21], x2[26], x3[21], x3[26],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x2[22], x2[25], x3[22], x3[25],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x2[23], x2[24], x3[23], x3[24],
+                        *__rounding, cos_bit);
+  x3[28] = x2[28];
+  x3[29] = x2[29];
+  x3[30] = x2[30];
+  x3[31] = x2[31];
+  x3[32] = _mm256_add_epi32(x2[32], x2[47]);
+  x3[47] = _mm256_sub_epi32(x2[32], x2[47]);
+  x3[33] = _mm256_add_epi32(x2[33], x2[46]);
+  x3[46] = _mm256_sub_epi32(x2[33], x2[46]);
+  x3[34] = _mm256_add_epi32(x2[34], x2[45]);
+  x3[45] = _mm256_sub_epi32(x2[34], x2[45]);
+  x3[35] = _mm256_add_epi32(x2[35], x2[44]);
+  x3[44] = _mm256_sub_epi32(x2[35], x2[44]);
+  x3[36] = _mm256_add_epi32(x2[36], x2[43]);
+  x3[43] = _mm256_sub_epi32(x2[36], x2[43]);
+  x3[37] = _mm256_add_epi32(x2[37], x2[42]);
+  x3[42] = _mm256_sub_epi32(x2[37], x2[42]);
+  x3[38] = _mm256_add_epi32(x2[38], x2[41]);
+  x3[41] = _mm256_sub_epi32(x2[38], x2[41]);
+  x3[39] = _mm256_add_epi32(x2[39], x2[40]);
+  x3[40] = _mm256_sub_epi32(x2[39], x2[40]);
+  x3[48] = _mm256_sub_epi32(x2[63], x2[48]);
+  x3[63] = _mm256_add_epi32(x2[63], x2[48]);
+  x3[49] = _mm256_sub_epi32(x2[62], x2[49]);
+  x3[62] = _mm256_add_epi32(x2[62], x2[49]);
+  x3[50] = _mm256_sub_epi32(x2[61], x2[50]);
+  x3[61] = _mm256_add_epi32(x2[61], x2[50]);
+  x3[51] = _mm256_sub_epi32(x2[60], x2[51]);
+  x3[60] = _mm256_add_epi32(x2[60], x2[51]);
+  x3[52] = _mm256_sub_epi32(x2[59], x2[52]);
+  x3[59] = _mm256_add_epi32(x2[59], x2[52]);
+  x3[53] = _mm256_sub_epi32(x2[58], x2[53]);
+  x3[58] = _mm256_add_epi32(x2[58], x2[53]);
+  x3[54] = _mm256_sub_epi32(x2[57], x2[54]);
+  x3[57] = _mm256_add_epi32(x2[57], x2[54]);
+  x3[55] = _mm256_sub_epi32(x2[56], x2[55]);
+  x3[56] = _mm256_add_epi32(x2[56], x2[55]);
+}
+static INLINE void av1_fdct64_stage4_avx2(
+    __m256i *x3, __m256i *x4, __m256i *cospi_m32, __m256i *cospi_p32,
+    __m256i *cospi_m16, __m256i *cospi_p48, __m256i *cospi_m48,
+    const __m256i *__rounding, int8_t cos_bit) {
+  x4[0] = _mm256_add_epi32(x3[0], x3[7]);
+  x4[7] = _mm256_sub_epi32(x3[0], x3[7]);
+  x4[1] = _mm256_add_epi32(x3[1], x3[6]);
+  x4[6] = _mm256_sub_epi32(x3[1], x3[6]);
+  x4[2] = _mm256_add_epi32(x3[2], x3[5]);
+  x4[5] = _mm256_sub_epi32(x3[2], x3[5]);
+  x4[3] = _mm256_add_epi32(x3[3], x3[4]);
+  x4[4] = _mm256_sub_epi32(x3[3], x3[4]);
+  x4[8] = x3[8];
+  x4[9] = x3[9];
+  btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x3[10], x3[13], x4[10], x4[13],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x3[11], x3[12], x4[11], x4[12],
+                        *__rounding, cos_bit);
+  x4[14] = x3[14];
+  x4[15] = x3[15];
+  x4[16] = _mm256_add_epi32(x3[16], x3[23]);
+  x4[23] = _mm256_sub_epi32(x3[16], x3[23]);
+  x4[17] = _mm256_add_epi32(x3[17], x3[22]);
+  x4[22] = _mm256_sub_epi32(x3[17], x3[22]);
+  x4[18] = _mm256_add_epi32(x3[18], x3[21]);
+  x4[21] = _mm256_sub_epi32(x3[18], x3[21]);
+  x4[19] = _mm256_add_epi32(x3[19], x3[20]);
+  x4[20] = _mm256_sub_epi32(x3[19], x3[20]);
+  x4[24] = _mm256_sub_epi32(x3[31], x3[24]);
+  x4[31] = _mm256_add_epi32(x3[31], x3[24]);
+  x4[25] = _mm256_sub_epi32(x3[30], x3[25]);
+  x4[30] = _mm256_add_epi32(x3[30], x3[25]);
+  x4[26] = _mm256_sub_epi32(x3[29], x3[26]);
+  x4[29] = _mm256_add_epi32(x3[29], x3[26]);
+  x4[27] = _mm256_sub_epi32(x3[28], x3[27]);
+  x4[28] = _mm256_add_epi32(x3[28], x3[27]);
+  x4[32] = x3[32];
+  x4[33] = x3[33];
+  x4[34] = x3[34];
+  x4[35] = x3[35];
+  btf_32_type0_avx2_new(*cospi_m16, *cospi_p48, x3[36], x3[59], x4[36], x4[59],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m16, *cospi_p48, x3[37], x3[58], x4[37], x4[58],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m16, *cospi_p48, x3[38], x3[57], x4[38], x4[57],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m16, *cospi_p48, x3[39], x3[56], x4[39], x4[56],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m48, *cospi_m16, x3[40], x3[55], x4[40], x4[55],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m48, *cospi_m16, x3[41], x3[54], x4[41], x4[54],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m48, *cospi_m16, x3[42], x3[53], x4[42], x4[53],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m48, *cospi_m16, x3[43], x3[52], x4[43], x4[52],
+                        *__rounding, cos_bit);
+  x4[44] = x3[44];
+  x4[45] = x3[45];
+  x4[46] = x3[46];
+  x4[47] = x3[47];
+  x4[48] = x3[48];
+  x4[49] = x3[49];
+  x4[50] = x3[50];
+  x4[51] = x3[51];
+  x4[60] = x3[60];
+  x4[61] = x3[61];
+  x4[62] = x3[62];
+  x4[63] = x3[63];
+}
+static INLINE void av1_fdct64_stage5_avx2(
+    __m256i *x4, __m256i *x5, __m256i *cospi_m32, __m256i *cospi_p32,
+    __m256i *cospi_m16, __m256i *cospi_p48, __m256i *cospi_m48,
+    const __m256i *__rounding, int8_t cos_bit) {
+  x5[0] = _mm256_add_epi32(x4[0], x4[3]);
+  x5[3] = _mm256_sub_epi32(x4[0], x4[3]);
+  x5[1] = _mm256_add_epi32(x4[1], x4[2]);
+  x5[2] = _mm256_sub_epi32(x4[1], x4[2]);
+  x5[4] = x4[4];
+  btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x4[5], x4[6], x5[5], x5[6],
+                        *__rounding, cos_bit);
+  x5[7] = x4[7];
+  x5[8] = _mm256_add_epi32(x4[8], x4[11]);
+  x5[11] = _mm256_sub_epi32(x4[8], x4[11]);
+  x5[9] = _mm256_add_epi32(x4[9], x4[10]);
+  x5[10] = _mm256_sub_epi32(x4[9], x4[10]);
+  x5[12] = _mm256_sub_epi32(x4[15], x4[12]);
+  x5[15] = _mm256_add_epi32(x4[15], x4[12]);
+  x5[13] = _mm256_sub_epi32(x4[14], x4[13]);
+  x5[14] = _mm256_add_epi32(x4[14], x4[13]);
+  x5[16] = x4[16];
+  x5[17] = x4[17];
+  btf_32_type0_avx2_new(*cospi_m16, *cospi_p48, x4[18], x4[29], x5[18], x5[29],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m16, *cospi_p48, x4[19], x4[28], x5[19], x5[28],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m48, *cospi_m16, x4[20], x4[27], x5[20], x5[27],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m48, *cospi_m16, x4[21], x4[26], x5[21], x5[26],
+                        *__rounding, cos_bit);
+  x5[22] = x4[22];
+  x5[23] = x4[23];
+  x5[24] = x4[24];
+  x5[25] = x4[25];
+  x5[30] = x4[30];
+  x5[31] = x4[31];
+  x5[32] = _mm256_add_epi32(x4[32], x4[39]);
+  x5[39] = _mm256_sub_epi32(x4[32], x4[39]);
+  x5[33] = _mm256_add_epi32(x4[33], x4[38]);
+  x5[38] = _mm256_sub_epi32(x4[33], x4[38]);
+  x5[34] = _mm256_add_epi32(x4[34], x4[37]);
+  x5[37] = _mm256_sub_epi32(x4[34], x4[37]);
+  x5[35] = _mm256_add_epi32(x4[35], x4[36]);
+  x5[36] = _mm256_sub_epi32(x4[35], x4[36]);
+  x5[40] = _mm256_sub_epi32(x4[47], x4[40]);
+  x5[47] = _mm256_add_epi32(x4[47], x4[40]);
+  x5[41] = _mm256_sub_epi32(x4[46], x4[41]);
+  x5[46] = _mm256_add_epi32(x4[46], x4[41]);
+  x5[42] = _mm256_sub_epi32(x4[45], x4[42]);
+  x5[45] = _mm256_add_epi32(x4[45], x4[42]);
+  x5[43] = _mm256_sub_epi32(x4[44], x4[43]);
+  x5[44] = _mm256_add_epi32(x4[44], x4[43]);
+  x5[48] = _mm256_add_epi32(x4[48], x4[55]);
+  x5[55] = _mm256_sub_epi32(x4[48], x4[55]);
+  x5[49] = _mm256_add_epi32(x4[49], x4[54]);
+  x5[54] = _mm256_sub_epi32(x4[49], x4[54]);
+  x5[50] = _mm256_add_epi32(x4[50], x4[53]);
+  x5[53] = _mm256_sub_epi32(x4[50], x4[53]);
+  x5[51] = _mm256_add_epi32(x4[51], x4[52]);
+  x5[52] = _mm256_sub_epi32(x4[51], x4[52]);
+  x5[56] = _mm256_sub_epi32(x4[63], x4[56]);
+  x5[63] = _mm256_add_epi32(x4[63], x4[56]);
+  x5[57] = _mm256_sub_epi32(x4[62], x4[57]);
+  x5[62] = _mm256_add_epi32(x4[62], x4[57]);
+  x5[58] = _mm256_sub_epi32(x4[61], x4[58]);
+  x5[61] = _mm256_add_epi32(x4[61], x4[58]);
+  x5[59] = _mm256_sub_epi32(x4[60], x4[59]);
+  x5[60] = _mm256_add_epi32(x4[60], x4[59]);
+}
+static INLINE void av1_fdct64_stage6_avx2(
+    __m256i *x5, __m256i *x6, __m256i *cospi_p16, __m256i *cospi_p32,
+    __m256i *cospi_m16, __m256i *cospi_p48, __m256i *cospi_m48,
+    __m256i *cospi_m08, __m256i *cospi_p56, __m256i *cospi_m56,
+    __m256i *cospi_m40, __m256i *cospi_p24, __m256i *cospi_m24,
+    const __m256i *__rounding, int8_t cos_bit) {
+  btf_32_type0_avx2_new(*cospi_p32, *cospi_p32, x5[0], x5[1], x6[0], x6[1],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_p16, *cospi_p48, x5[3], x5[2], x6[2], x6[3],
+                        *__rounding, cos_bit);
+  x6[4] = _mm256_add_epi32(x5[4], x5[5]);
+  x6[5] = _mm256_sub_epi32(x5[4], x5[5]);
+  x6[6] = _mm256_sub_epi32(x5[7], x5[6]);
+  x6[7] = _mm256_add_epi32(x5[7], x5[6]);
+  x6[8] = x5[8];
+  btf_32_type0_avx2_new(*cospi_m16, *cospi_p48, x5[9], x5[14], x6[9], x6[14],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m48, *cospi_m16, x5[10], x5[13], x6[10], x6[13],
+                        *__rounding, cos_bit);
+  x6[11] = x5[11];
+  x6[12] = x5[12];
+  x6[15] = x5[15];
+  x6[16] = _mm256_add_epi32(x5[16], x5[19]);
+  x6[19] = _mm256_sub_epi32(x5[16], x5[19]);
+  x6[17] = _mm256_add_epi32(x5[17], x5[18]);
+  x6[18] = _mm256_sub_epi32(x5[17], x5[18]);
+  x6[20] = _mm256_sub_epi32(x5[23], x5[20]);
+  x6[23] = _mm256_add_epi32(x5[23], x5[20]);
+  x6[21] = _mm256_sub_epi32(x5[22], x5[21]);
+  x6[22] = _mm256_add_epi32(x5[22], x5[21]);
+  x6[24] = _mm256_add_epi32(x5[24], x5[27]);
+  x6[27] = _mm256_sub_epi32(x5[24], x5[27]);
+  x6[25] = _mm256_add_epi32(x5[25], x5[26]);
+  x6[26] = _mm256_sub_epi32(x5[25], x5[26]);
+  x6[28] = _mm256_sub_epi32(x5[31], x5[28]);
+  x6[31] = _mm256_add_epi32(x5[31], x5[28]);
+  x6[29] = _mm256_sub_epi32(x5[30], x5[29]);
+  x6[30] = _mm256_add_epi32(x5[30], x5[29]);
+  x6[32] = x5[32];
+  x6[33] = x5[33];
+  btf_32_type0_avx2_new(*cospi_m08, *cospi_p56, x5[34], x5[61], x6[34], x6[61],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m08, *cospi_p56, x5[35], x5[60], x6[35], x6[60],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m56, *cospi_m08, x5[36], x5[59], x6[36], x6[59],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m56, *cospi_m08, x5[37], x5[58], x6[37], x6[58],
+                        *__rounding, cos_bit);
+  x6[38] = x5[38];
+  x6[39] = x5[39];
+  x6[40] = x5[40];
+  x6[41] = x5[41];
+  btf_32_type0_avx2_new(*cospi_m40, *cospi_p24, x5[42], x5[53], x6[42], x6[53],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m40, *cospi_p24, x5[43], x5[52], x6[43], x6[52],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m24, *cospi_m40, x5[44], x5[51], x6[44], x6[51],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m24, *cospi_m40, x5[45], x5[50], x6[45], x6[50],
+                        *__rounding, cos_bit);
+  x6[46] = x5[46];
+  x6[47] = x5[47];
+  x6[48] = x5[48];
+  x6[49] = x5[49];
+  x6[54] = x5[54];
+  x6[55] = x5[55];
+  x6[56] = x5[56];
+  x6[57] = x5[57];
+  x6[62] = x5[62];
+  x6[63] = x5[63];
+}
+static INLINE void av1_fdct64_stage7_avx2(
+    __m256i *x6, __m256i *x7, __m256i *cospi_p08, __m256i *cospi_p56,
+    __m256i *cospi_p40, __m256i *cospi_p24, __m256i *cospi_m08,
+    __m256i *cospi_m56, __m256i *cospi_m40, __m256i *cospi_m24,
+    const __m256i *__rounding, int8_t cos_bit) {
+  x7[0] = x6[0];
+  x7[1] = x6[1];
+  x7[2] = x6[2];
+  x7[3] = x6[3];
+  btf_32_type0_avx2_new(*cospi_p08, *cospi_p56, x6[7], x6[4], x7[4], x7[7],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_p40, *cospi_p24, x6[6], x6[5], x7[5], x7[6],
+                        *__rounding, cos_bit);
+  x7[8] = _mm256_add_epi32(x6[8], x6[9]);
+  x7[9] = _mm256_sub_epi32(x6[8], x6[9]);
+  x7[10] = _mm256_sub_epi32(x6[11], x6[10]);
+  x7[11] = _mm256_add_epi32(x6[11], x6[10]);
+  x7[12] = _mm256_add_epi32(x6[12], x6[13]);
+  x7[13] = _mm256_sub_epi32(x6[12], x6[13]);
+  x7[14] = _mm256_sub_epi32(x6[15], x6[14]);
+  x7[15] = _mm256_add_epi32(x6[15], x6[14]);
+  x7[16] = x6[16];
+  btf_32_type0_avx2_new(*cospi_m08, *cospi_p56, x6[17], x6[30], x7[17], x7[30],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m56, *cospi_m08, x6[18], x6[29], x7[18], x7[29],
+                        *__rounding, cos_bit);
+  x7[19] = x6[19];
+  x7[20] = x6[20];
+  btf_32_type0_avx2_new(*cospi_m40, *cospi_p24, x6[21], x6[26], x7[21], x7[26],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m24, *cospi_m40, x6[22], x6[25], x7[22], x7[25],
+                        *__rounding, cos_bit);
+  x7[23] = x6[23];
+  x7[24] = x6[24];
+  x7[27] = x6[27];
+  x7[28] = x6[28];
+  x7[31] = x6[31];
+  x7[32] = _mm256_add_epi32(x6[32], x6[35]);
+  x7[35] = _mm256_sub_epi32(x6[32], x6[35]);
+  x7[33] = _mm256_add_epi32(x6[33], x6[34]);
+  x7[34] = _mm256_sub_epi32(x6[33], x6[34]);
+  x7[36] = _mm256_sub_epi32(x6[39], x6[36]);
+  x7[39] = _mm256_add_epi32(x6[39], x6[36]);
+  x7[37] = _mm256_sub_epi32(x6[38], x6[37]);
+  x7[38] = _mm256_add_epi32(x6[38], x6[37]);
+  x7[40] = _mm256_add_epi32(x6[40], x6[43]);
+  x7[43] = _mm256_sub_epi32(x6[40], x6[43]);
+  x7[41] = _mm256_add_epi32(x6[41], x6[42]);
+  x7[42] = _mm256_sub_epi32(x6[41], x6[42]);
+  x7[44] = _mm256_sub_epi32(x6[47], x6[44]);
+  x7[47] = _mm256_add_epi32(x6[47], x6[44]);
+  x7[45] = _mm256_sub_epi32(x6[46], x6[45]);
+  x7[46] = _mm256_add_epi32(x6[46], x6[45]);
+  x7[48] = _mm256_add_epi32(x6[48], x6[51]);
+  x7[51] = _mm256_sub_epi32(x6[48], x6[51]);
+  x7[49] = _mm256_add_epi32(x6[49], x6[50]);
+  x7[50] = _mm256_sub_epi32(x6[49], x6[50]);
+  x7[52] = _mm256_sub_epi32(x6[55], x6[52]);
+  x7[55] = _mm256_add_epi32(x6[55], x6[52]);
+  x7[53] = _mm256_sub_epi32(x6[54], x6[53]);
+  x7[54] = _mm256_add_epi32(x6[54], x6[53]);
+  x7[56] = _mm256_add_epi32(x6[56], x6[59]);
+  x7[59] = _mm256_sub_epi32(x6[56], x6[59]);
+  x7[57] = _mm256_add_epi32(x6[57], x6[58]);
+  x7[58] = _mm256_sub_epi32(x6[57], x6[58]);
+  x7[60] = _mm256_sub_epi32(x6[63], x6[60]);
+  x7[63] = _mm256_add_epi32(x6[63], x6[60]);
+  x7[61] = _mm256_sub_epi32(x6[62], x6[61]);
+  x7[62] = _mm256_add_epi32(x6[62], x6[61]);
+}
+static INLINE void av1_fdct64_stage8_avx2(__m256i *x7, __m256i *x8,
+                                          const int32_t *cospi,
+                                          const __m256i *__rounding,
+                                          int8_t cos_bit) {
+  __m256i cospi_p60 = _mm256_set1_epi32(cospi[60]);
+  __m256i cospi_p04 = _mm256_set1_epi32(cospi[4]);
+  __m256i cospi_p28 = _mm256_set1_epi32(cospi[28]);
+  __m256i cospi_p36 = _mm256_set1_epi32(cospi[36]);
+  __m256i cospi_p44 = _mm256_set1_epi32(cospi[44]);
+  __m256i cospi_p20 = _mm256_set1_epi32(cospi[20]);
+  __m256i cospi_p12 = _mm256_set1_epi32(cospi[12]);
+  __m256i cospi_p52 = _mm256_set1_epi32(cospi[52]);
+  __m256i cospi_m04 = _mm256_set1_epi32(-cospi[4]);
+  __m256i cospi_m60 = _mm256_set1_epi32(-cospi[60]);
+  __m256i cospi_m36 = _mm256_set1_epi32(-cospi[36]);
+  __m256i cospi_m28 = _mm256_set1_epi32(-cospi[28]);
+  __m256i cospi_m20 = _mm256_set1_epi32(-cospi[20]);
+  __m256i cospi_m44 = _mm256_set1_epi32(-cospi[44]);
+  __m256i cospi_m52 = _mm256_set1_epi32(-cospi[52]);
+  __m256i cospi_m12 = _mm256_set1_epi32(-cospi[12]);
+
+  x8[0] = x7[0];
+  x8[1] = x7[1];
+  x8[2] = x7[2];
+  x8[3] = x7[3];
+  x8[4] = x7[4];
+  x8[5] = x7[5];
+  x8[6] = x7[6];
+  x8[7] = x7[7];
+
+  btf_32_type0_avx2_new(cospi_p04, cospi_p60, x7[15], x7[8], x8[8], x8[15],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(cospi_p36, cospi_p28, x7[14], x7[9], x8[9], x8[14],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(cospi_p20, cospi_p44, x7[13], x7[10], x8[10], x8[13],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(cospi_p52, cospi_p12, x7[12], x7[11], x8[11], x8[12],
+                        *__rounding, cos_bit);
+  x8[16] = _mm256_add_epi32(x7[16], x7[17]);
+  x8[17] = _mm256_sub_epi32(x7[16], x7[17]);
+  x8[18] = _mm256_sub_epi32(x7[19], x7[18]);
+  x8[19] = _mm256_add_epi32(x7[19], x7[18]);
+  x8[20] = _mm256_add_epi32(x7[20], x7[21]);
+  x8[21] = _mm256_sub_epi32(x7[20], x7[21]);
+  x8[22] = _mm256_sub_epi32(x7[23], x7[22]);
+  x8[23] = _mm256_add_epi32(x7[23], x7[22]);
+  x8[24] = _mm256_add_epi32(x7[24], x7[25]);
+  x8[25] = _mm256_sub_epi32(x7[24], x7[25]);
+  x8[26] = _mm256_sub_epi32(x7[27], x7[26]);
+  x8[27] = _mm256_add_epi32(x7[27], x7[26]);
+  x8[28] = _mm256_add_epi32(x7[28], x7[29]);
+  x8[29] = _mm256_sub_epi32(x7[28], x7[29]);
+  x8[30] = _mm256_sub_epi32(x7[31], x7[30]);
+  x8[31] = _mm256_add_epi32(x7[31], x7[30]);
+  x8[32] = x7[32];
+  btf_32_type0_avx2_new(cospi_m04, cospi_p60, x7[33], x7[62], x8[33], x8[62],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(cospi_m60, cospi_m04, x7[34], x7[61], x8[34], x8[61],
+                        *__rounding, cos_bit);
+  x8[35] = x7[35];
+  x8[36] = x7[36];
+  btf_32_type0_avx2_new(cospi_m36, cospi_p28, x7[37], x7[58], x8[37], x8[58],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(cospi_m28, cospi_m36, x7[38], x7[57], x8[38], x8[57],
+                        *__rounding, cos_bit);
+  x8[39] = x7[39];
+  x8[40] = x7[40];
+  btf_32_type0_avx2_new(cospi_m20, cospi_p44, x7[41], x7[54], x8[41], x8[54],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(cospi_m44, cospi_m20, x7[42], x7[53], x8[42], x8[53],
+                        *__rounding, cos_bit);
+  x8[43] = x7[43];
+  x8[44] = x7[44];
+  btf_32_type0_avx2_new(cospi_m52, cospi_p12, x7[45], x7[50], x8[45], x8[50],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(cospi_m12, cospi_m52, x7[46], x7[49], x8[46], x8[49],
+                        *__rounding, cos_bit);
+  x8[47] = x7[47];
+  x8[48] = x7[48];
+  x8[51] = x7[51];
+  x8[52] = x7[52];
+  x8[55] = x7[55];
+  x8[56] = x7[56];
+  x8[59] = x7[59];
+  x8[60] = x7[60];
+  x8[63] = x7[63];
+}
+static INLINE void av1_fdct64_stage9_avx2(__m256i *x8, __m256i *x9,
+                                          const int32_t *cospi,
+                                          const __m256i *__rounding,
+                                          int8_t cos_bit) {
+  __m256i cospi_p62 = _mm256_set1_epi32(cospi[62]);
+  __m256i cospi_p02 = _mm256_set1_epi32(cospi[2]);
+  __m256i cospi_p30 = _mm256_set1_epi32(cospi[30]);
+  __m256i cospi_p34 = _mm256_set1_epi32(cospi[34]);
+  __m256i cospi_p46 = _mm256_set1_epi32(cospi[46]);
+  __m256i cospi_p18 = _mm256_set1_epi32(cospi[18]);
+  __m256i cospi_p14 = _mm256_set1_epi32(cospi[14]);
+  __m256i cospi_p50 = _mm256_set1_epi32(cospi[50]);
+  __m256i cospi_p54 = _mm256_set1_epi32(cospi[54]);
+  __m256i cospi_p10 = _mm256_set1_epi32(cospi[10]);
+  __m256i cospi_p22 = _mm256_set1_epi32(cospi[22]);
+  __m256i cospi_p42 = _mm256_set1_epi32(cospi[42]);
+  __m256i cospi_p38 = _mm256_set1_epi32(cospi[38]);
+  __m256i cospi_p26 = _mm256_set1_epi32(cospi[26]);
+  __m256i cospi_p06 = _mm256_set1_epi32(cospi[6]);
+  __m256i cospi_p58 = _mm256_set1_epi32(cospi[58]);
+
+  x9[0] = x8[0];
+  x9[1] = x8[1];
+  x9[2] = x8[2];
+  x9[3] = x8[3];
+  x9[4] = x8[4];
+  x9[5] = x8[5];
+  x9[6] = x8[6];
+  x9[7] = x8[7];
+  x9[8] = x8[8];
+  x9[9] = x8[9];
+  x9[10] = x8[10];
+  x9[11] = x8[11];
+  x9[12] = x8[12];
+  x9[13] = x8[13];
+  x9[14] = x8[14];
+  x9[15] = x8[15];
+  btf_32_type0_avx2_new(cospi_p02, cospi_p62, x8[31], x8[16], x9[16], x9[31],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(cospi_p34, cospi_p30, x8[30], x8[17], x9[17], x9[30],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(cospi_p18, cospi_p46, x8[29], x8[18], x9[18], x9[29],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(cospi_p50, cospi_p14, x8[28], x8[19], x9[19], x9[28],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(cospi_p10, cospi_p54, x8[27], x8[20], x9[20], x9[27],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(cospi_p42, cospi_p22, x8[26], x8[21], x9[21], x9[26],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(cospi_p26, cospi_p38, x8[25], x8[22], x9[22], x9[25],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(cospi_p58, cospi_p06, x8[24], x8[23], x9[23], x9[24],
+                        *__rounding, cos_bit);
+  x9[32] = _mm256_add_epi32(x8[32], x8[33]);
+  x9[33] = _mm256_sub_epi32(x8[32], x8[33]);
+  x9[34] = _mm256_sub_epi32(x8[35], x8[34]);
+  x9[35] = _mm256_add_epi32(x8[35], x8[34]);
+  x9[36] = _mm256_add_epi32(x8[36], x8[37]);
+  x9[37] = _mm256_sub_epi32(x8[36], x8[37]);
+  x9[38] = _mm256_sub_epi32(x8[39], x8[38]);
+  x9[39] = _mm256_add_epi32(x8[39], x8[38]);
+  x9[40] = _mm256_add_epi32(x8[40], x8[41]);
+  x9[41] = _mm256_sub_epi32(x8[40], x8[41]);
+  x9[42] = _mm256_sub_epi32(x8[43], x8[42]);
+  x9[43] = _mm256_add_epi32(x8[43], x8[42]);
+  x9[44] = _mm256_add_epi32(x8[44], x8[45]);
+  x9[45] = _mm256_sub_epi32(x8[44], x8[45]);
+  x9[46] = _mm256_sub_epi32(x8[47], x8[46]);
+  x9[47] = _mm256_add_epi32(x8[47], x8[46]);
+  x9[48] = _mm256_add_epi32(x8[48], x8[49]);
+  x9[49] = _mm256_sub_epi32(x8[48], x8[49]);
+  x9[50] = _mm256_sub_epi32(x8[51], x8[50]);
+  x9[51] = _mm256_add_epi32(x8[51], x8[50]);
+  x9[52] = _mm256_add_epi32(x8[52], x8[53]);
+  x9[53] = _mm256_sub_epi32(x8[52], x8[53]);
+  x9[54] = _mm256_sub_epi32(x8[55], x8[54]);
+  x9[55] = _mm256_add_epi32(x8[55], x8[54]);
+  x9[56] = _mm256_add_epi32(x8[56], x8[57]);
+  x9[57] = _mm256_sub_epi32(x8[56], x8[57]);
+  x9[58] = _mm256_sub_epi32(x8[59], x8[58]);
+  x9[59] = _mm256_add_epi32(x8[59], x8[58]);
+  x9[60] = _mm256_add_epi32(x8[60], x8[61]);
+  x9[61] = _mm256_sub_epi32(x8[60], x8[61]);
+  x9[62] = _mm256_sub_epi32(x8[63], x8[62]);
+  x9[63] = _mm256_add_epi32(x8[63], x8[62]);
+}
+static INLINE void av1_fdct64_stage10_avx2(__m256i *x9, __m256i *x10,
+                                           const int32_t *cospi,
+                                           const __m256i *__rounding,
+                                           int8_t cos_bit) {
+  __m256i cospi_p63 = _mm256_set1_epi32(cospi[63]);
+  __m256i cospi_p01 = _mm256_set1_epi32(cospi[1]);
+  __m256i cospi_p31 = _mm256_set1_epi32(cospi[31]);
+  __m256i cospi_p33 = _mm256_set1_epi32(cospi[33]);
+  __m256i cospi_p47 = _mm256_set1_epi32(cospi[47]);
+  __m256i cospi_p17 = _mm256_set1_epi32(cospi[17]);
+  __m256i cospi_p15 = _mm256_set1_epi32(cospi[15]);
+  __m256i cospi_p49 = _mm256_set1_epi32(cospi[49]);
+  __m256i cospi_p55 = _mm256_set1_epi32(cospi[55]);
+  __m256i cospi_p09 = _mm256_set1_epi32(cospi[9]);
+  __m256i cospi_p23 = _mm256_set1_epi32(cospi[23]);
+  __m256i cospi_p41 = _mm256_set1_epi32(cospi[41]);
+  __m256i cospi_p39 = _mm256_set1_epi32(cospi[39]);
+  __m256i cospi_p25 = _mm256_set1_epi32(cospi[25]);
+  __m256i cospi_p07 = _mm256_set1_epi32(cospi[7]);
+  __m256i cospi_p57 = _mm256_set1_epi32(cospi[57]);
+  __m256i cospi_p59 = _mm256_set1_epi32(cospi[59]);
+  __m256i cospi_p05 = _mm256_set1_epi32(cospi[5]);
+  __m256i cospi_p27 = _mm256_set1_epi32(cospi[27]);
+  __m256i cospi_p37 = _mm256_set1_epi32(cospi[37]);
+  __m256i cospi_p43 = _mm256_set1_epi32(cospi[43]);
+  __m256i cospi_p21 = _mm256_set1_epi32(cospi[21]);
+  __m256i cospi_p11 = _mm256_set1_epi32(cospi[11]);
+  __m256i cospi_p53 = _mm256_set1_epi32(cospi[53]);
+  __m256i cospi_p51 = _mm256_set1_epi32(cospi[51]);
+  __m256i cospi_p13 = _mm256_set1_epi32(cospi[13]);
+  __m256i cospi_p19 = _mm256_set1_epi32(cospi[19]);
+  __m256i cospi_p45 = _mm256_set1_epi32(cospi[45]);
+  __m256i cospi_p35 = _mm256_set1_epi32(cospi[35]);
+  __m256i cospi_p29 = _mm256_set1_epi32(cospi[29]);
+  __m256i cospi_p03 = _mm256_set1_epi32(cospi[3]);
+  __m256i cospi_p61 = _mm256_set1_epi32(cospi[61]);
+
+  x10[0] = x9[0];
+  x10[1] = x9[1];
+  x10[2] = x9[2];
+  x10[3] = x9[3];
+  x10[4] = x9[4];
+  x10[5] = x9[5];
+  x10[6] = x9[6];
+  x10[7] = x9[7];
+  x10[8] = x9[8];
+  x10[9] = x9[9];
+  x10[10] = x9[10];
+  x10[11] = x9[11];
+  x10[12] = x9[12];
+  x10[13] = x9[13];
+  x10[14] = x9[14];
+  x10[15] = x9[15];
+  x10[16] = x9[16];
+  x10[17] = x9[17];
+  x10[18] = x9[18];
+  x10[19] = x9[19];
+  x10[20] = x9[20];
+  x10[21] = x9[21];
+  x10[22] = x9[22];
+  x10[23] = x9[23];
+  x10[24] = x9[24];
+  x10[25] = x9[25];
+  x10[26] = x9[26];
+  x10[27] = x9[27];
+  x10[28] = x9[28];
+  x10[29] = x9[29];
+  x10[30] = x9[30];
+  x10[31] = x9[31];
+  btf_32_type0_avx2_new(cospi_p01, cospi_p63, x9[63], x9[32], x10[32], x10[63],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(cospi_p33, cospi_p31, x9[62], x9[33], x10[33], x10[62],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(cospi_p17, cospi_p47, x9[61], x9[34], x10[34], x10[61],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(cospi_p49, cospi_p15, x9[60], x9[35], x10[35], x10[60],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(cospi_p09, cospi_p55, x9[59], x9[36], x10[36], x10[59],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(cospi_p41, cospi_p23, x9[58], x9[37], x10[37], x10[58],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(cospi_p25, cospi_p39, x9[57], x9[38], x10[38], x10[57],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(cospi_p57, cospi_p07, x9[56], x9[39], x10[39], x10[56],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(cospi_p05, cospi_p59, x9[55], x9[40], x10[40], x10[55],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(cospi_p37, cospi_p27, x9[54], x9[41], x10[41], x10[54],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(cospi_p21, cospi_p43, x9[53], x9[42], x10[42], x10[53],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(cospi_p53, cospi_p11, x9[52], x9[43], x10[43], x10[52],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(cospi_p13, cospi_p51, x9[51], x9[44], x10[44], x10[51],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(cospi_p45, cospi_p19, x9[50], x9[45], x10[45], x10[50],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(cospi_p29, cospi_p35, x9[49], x9[46], x10[46], x10[49],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(cospi_p61, cospi_p03, x9[48], x9[47], x10[47], x10[48],
+                        *__rounding, cos_bit);
+}
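+// 1-D 64-point forward DCT on eight int32 lanes per register. Stage 1 (the
+// initial add/sub butterflies) is done inline below, stages 2-10 ping-pong
+// between x1[] and x2[] via the helpers above, and stage 11 reorders the
+// results into output[].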
+static void av1_fdct64_avx2(__m256i *input, __m256i *output, int8_t cos_bit,
+                            const int8_t *stage_range, const int instride,
+                            const int outstride) {
+  (void)stage_range;
+  const int32_t *cospi = cospi_arr(cos_bit);
+  const __m256i __rounding = _mm256_set1_epi32(1 << (cos_bit - 1));
+  __m256i cospi_m32 = _mm256_set1_epi32(-cospi[32]);
+  __m256i cospi_p32 = _mm256_set1_epi32(cospi[32]);
+  __m256i cospi_m16 = _mm256_set1_epi32(-cospi[16]);
+  __m256i cospi_p48 = _mm256_set1_epi32(cospi[48]);
+  __m256i cospi_m48 = _mm256_set1_epi32(-cospi[48]);
+  __m256i cospi_p16 = _mm256_set1_epi32(cospi[16]);
+  __m256i cospi_m08 = _mm256_set1_epi32(-cospi[8]);
+  __m256i cospi_p56 = _mm256_set1_epi32(cospi[56]);
+  __m256i cospi_m56 = _mm256_set1_epi32(-cospi[56]);
+  __m256i cospi_m40 = _mm256_set1_epi32(-cospi[40]);
+  __m256i cospi_p24 = _mm256_set1_epi32(cospi[24]);
+  __m256i cospi_m24 = _mm256_set1_epi32(-cospi[24]);
+  __m256i cospi_p08 = _mm256_set1_epi32(cospi[8]);
+  __m256i cospi_p40 = _mm256_set1_epi32(cospi[40]);
+
+  int startidx = 0 * instride;
+  int endidx = 63 * instride;
+  // stage 1
+  __m256i x1[64];
+  x1[0] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[63] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[1] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[62] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[2] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[61] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[3] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[60] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[4] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[59] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[5] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[58] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[6] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[57] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[7] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[56] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[8] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[55] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[9] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[54] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[10] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[53] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[11] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[52] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[12] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[51] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[13] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[50] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[14] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[49] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[15] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[48] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[16] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[47] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[17] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[46] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[18] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[45] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[19] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[44] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[20] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[43] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[21] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[42] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[22] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[41] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[23] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[40] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[24] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[39] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[25] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[38] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[26] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[37] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[27] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[36] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[28] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[35] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[29] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[34] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[30] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[33] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  x1[31] = _mm256_add_epi32(input[startidx], input[endidx]);
+  x1[32] = _mm256_sub_epi32(input[startidx], input[endidx]);
+
+  // stage 2
+  __m256i x2[64];
+  av1_fdct64_stage2_avx2(x1, x2, &cospi_m32, &cospi_p32, &__rounding, cos_bit);
+  // stage 3
+  av1_fdct64_stage3_avx2(x2, x1, &cospi_m32, &cospi_p32, &__rounding, cos_bit);
+  // stage 4
+  av1_fdct64_stage4_avx2(x1, x2, &cospi_m32, &cospi_p32, &cospi_m16, &cospi_p48,
+                         &cospi_m48, &__rounding, cos_bit);
+  // stage 5
+  av1_fdct64_stage5_avx2(x2, x1, &cospi_m32, &cospi_p32, &cospi_m16, &cospi_p48,
+                         &cospi_m48, &__rounding, cos_bit);
+  // stage 6
+  av1_fdct64_stage6_avx2(x1, x2, &cospi_p16, &cospi_p32, &cospi_m16, &cospi_p48,
+                         &cospi_m48, &cospi_m08, &cospi_p56, &cospi_m56,
+                         &cospi_m40, &cospi_p24, &cospi_m24, &__rounding,
+                         cos_bit);
+  // stage 7
+  av1_fdct64_stage7_avx2(x2, x1, &cospi_p08, &cospi_p56, &cospi_p40, &cospi_p24,
+                         &cospi_m08, &cospi_m56, &cospi_m40, &cospi_m24,
+                         &__rounding, cos_bit);
+  // stage 8
+  av1_fdct64_stage8_avx2(x1, x2, cospi, &__rounding, cos_bit);
+  // stage 9
+  av1_fdct64_stage9_avx2(x2, x1, cospi, &__rounding, cos_bit);
+  // stage 10
+  av1_fdct64_stage10_avx2(x1, x2, cospi, &__rounding, cos_bit);
+
+  startidx = 0 * outstride;
+  endidx = 63 * outstride;
+
+  // stage 11
+  output[startidx] = x2[0];
+  output[endidx] = x2[63];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[32];
+  output[endidx] = x2[31];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[16];
+  output[endidx] = x2[47];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[48];
+  output[endidx] = x2[15];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[8];
+  output[endidx] = x2[55];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[40];
+  output[endidx] = x2[23];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[24];
+  output[endidx] = x2[39];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[56];
+  output[endidx] = x2[7];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[4];
+  output[endidx] = x2[59];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[36];
+  output[endidx] = x2[27];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[20];
+  output[endidx] = x2[43];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[52];
+  output[endidx] = x2[11];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[12];
+  output[endidx] = x2[51];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[44];
+  output[endidx] = x2[19];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[28];
+  output[endidx] = x2[35];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[60];
+  output[endidx] = x2[3];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[2];
+  output[endidx] = x2[61];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[34];
+  output[endidx] = x2[29];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[18];
+  output[endidx] = x2[45];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[50];
+  output[endidx] = x2[13];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[10];
+  output[endidx] = x2[53];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[42];
+  output[endidx] = x2[21];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[26];
+  output[endidx] = x2[37];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[58];
+  output[endidx] = x2[5];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[6];
+  output[endidx] = x2[57];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[38];
+  output[endidx] = x2[25];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[22];
+  output[endidx] = x2[41];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[54];
+  output[endidx] = x2[9];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[14];
+  output[endidx] = x2[49];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[46];
+  output[endidx] = x2[17];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[30];
+  output[endidx] = x2[33];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = x2[62];
+  output[endidx] = x2[1];
+}
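+// 2-D 64x64 high-bitdepth forward transform (DCT_DCT only): load and shift
+// the input, run the column transform, transpose, run the row transform on
+// the half that survives, and store the top-left 32x32 coefficients, which
+// are the only ones AV1 retains for a 64x64 transform.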
+void av1_fwd_txfm2d_64x64_avx2(const int16_t *input, int32_t *output,
+                               int stride, TX_TYPE tx_type, int bd) {
+  (void)bd;
+  (void)tx_type;
+  assert(tx_type == DCT_DCT);
+  TXFM_2D_FLIP_CFG cfg;
+  av1_get_fwd_txfm_cfg(tx_type, TX_64X64, &cfg);
+  TXFM_2D_FLIP_CFG *cfg1 = &cfg;
+  const TX_SIZE tx_size = TX_64X64;
+  __m256i buf0[512], buf1[512];
+  const int8_t *shift = fwd_txfm_shift_ls[tx_size];
+  const int txw_idx = get_txw_idx(tx_size);
+  const int txh_idx = get_txh_idx(tx_size);
+  const int cos_bit_col = fwd_cos_bit_col[txw_idx][txh_idx];
+  const int cos_bit_row = fwd_cos_bit_row[txw_idx][txh_idx];
+  const int width = tx_size_wide[tx_size];
+  const int height = tx_size_high[tx_size];
+  const int8_t *stage_range_col = cfg1->stage_range_col;
+  const int8_t *stage_range_row = cfg1->stage_range_row;
+  const transform_1d_avx2 col_txfm = av1_fdct64_avx2;
+  const transform_1d_avx2 row_txfm = av1_fdct64_avx2;
+  const int width_div16 = (width >> 4);
+  const int width_div8 = (width >> 3);
+  int r, c;
+  for (int i = 0; i < width_div16; i++) {
+    av1_load_buffer_16xn_avx2(input + (i << 4), &buf0[i << 1], stride, height,
+                              width_div8);
+    av1_round_shift_32_8xn_avx2(&buf0[i << 1], height, shift[0], width_div8);
+    av1_round_shift_32_8xn_avx2(&buf0[(i << 1) + 1], height, shift[0],
+                                width_div8);
+    col_txfm(&buf0[i << 1], &buf0[i << 1], cos_bit_col, stage_range_col,
+             width_div8, width_div8);
+    col_txfm(&buf0[(i << 1) + 1], &buf0[(i << 1) + 1], cos_bit_col,
+             stage_range_col, width_div8, width_div8);
+    av1_round_shift_32_8xn_avx2(&buf0[i << 1], height, shift[1], width_div8);
+    av1_round_shift_32_8xn_avx2(&buf0[(i << 1) + 1], height, shift[1],
+                                width_div8);
+  }
+
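+  // Transpose the column-transform output into row order, one 8x8 tile of
+  // int32 at a time, so the row transform also operates on contiguous lanes.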
+  for (r = 0; r < height; r += 8) {
+    for (c = 0; c < width_div8; c++) {
+      av1_fwd_txfm_transpose_8x8_avx2(&buf0[r * width_div8 + c],
+                                      &buf1[c * 8 * width_div8 + (r >> 3)],
+                                      width_div8, width_div8);
+    }
+  }
+
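+  // Row transform, applied only to the first 32 rows (lowest vertical
+  // frequencies) of the column-transform output, i.e. four of the eight
+  // 8-lane register columns: the bottom half is dropped because only the
+  // top-left 32x32 coefficients are kept.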
+  for (int i = 0; i < 2; i++) {
+    row_txfm(&buf1[i << 1], &buf0[i << 1], cos_bit_row, stage_range_row,
+             width_div8, width_div16);
+    row_txfm(&buf1[(i << 1) + 1], &buf0[(i << 1) + 1], cos_bit_row,
+             stage_range_row, width_div8, width_div16);
+    av1_round_shift_32_8xn_avx2(&buf0[i << 1], (height >> 1), shift[2],
+                                width_div16);
+    av1_round_shift_32_8xn_avx2(&buf0[(i << 1) + 1], (height >> 1), shift[2],
+                                width_div16);
+  }
+
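+  // Transpose the retained 32x32 result back and store it packed: 128
+  // registers of eight int32 lanes = 1024 coefficients.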
+  for (r = 0; r < (height >> 1); r += 8) {
+    for (c = 0; c < width_div16; c++) {
+      av1_fwd_txfm_transpose_8x8_avx2(&buf0[r * width_div16 + c],
+                                      &buf1[c * 8 * width_div16 + (r >> 3)],
+                                      width_div16, width_div16);
+    }
+  }
+  av1_store_buffer_avx2(buf1, output, 8, 128);
+}
diff --git a/test/av1_fwd_txfm2d_test.cc b/test/av1_fwd_txfm2d_test.cc
index c70eba7..ba75b02 100644
--- a/test/av1_fwd_txfm2d_test.cc
+++ b/test/av1_fwd_txfm2d_test.cc
@@ -570,5 +570,11 @@
                         Combine(ValuesIn(Highbd_fwd_txfm_for_sse4_1),
                                 Values(av1_highbd_fwd_txfm)));
 #endif  // HAVE_SSE4_1
+#if HAVE_AVX2
+static TX_SIZE Highbd_fwd_txfm_for_avx2[] = { TX_64X64 };
 
+INSTANTIATE_TEST_CASE_P(AVX2, AV1HighbdFwdTxfm2dTest,
+                        Combine(ValuesIn(Highbd_fwd_txfm_for_avx2),
+                                Values(av1_highbd_fwd_txfm)));
+#endif  // HAVE_AVX2
 }  // namespace