Revert "Avoid use of deprecated high-bitdepth functions"

This reverts commit f9d77bd5a2541387dc435e052704b2e734307c20.

Reason for revert: 8x8 transform failures

BUG=https://bugs.chromium.org/p/aomedia/issues/detail?id=502

Change-Id: I2f6c10bc576a966bd5a878b7ee8389074bf45014
diff --git a/aom_dsp/inv_txfm.c b/aom_dsp/inv_txfm.c
index bb99585..6e7d8c9 100644
--- a/aom_dsp/inv_txfm.c
+++ b/aom_dsp/inv_txfm.c
@@ -1442,4 +1442,868 @@
   }
 }
 
+// 8-point 1-D inverse DCT, high-bitdepth C reference.
+// input/output: 8 coefficients each (may alias via step1 reuse below).
+// bd: bit depth, forwarded to HIGHBD_WRAPLOW which range-limits each
+// intermediate result. Rounding of the fixed-point products is done by
+// dct_const_round_shift; do not reorder the butterfly stages.
+void aom_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd) {
+  tran_low_t step1[8], step2[8];
+  tran_high_t temp1, temp2;
+  // stage 1
+  step1[0] = input[0];
+  step1[2] = input[4];
+  step1[1] = input[2];
+  step1[3] = input[6];
+  temp1 = input[1] * cospi_28_64 - input[7] * cospi_4_64;
+  temp2 = input[1] * cospi_4_64 + input[7] * cospi_28_64;
+  step1[4] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step1[7] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  temp1 = input[5] * cospi_12_64 - input[3] * cospi_20_64;
+  temp2 = input[5] * cospi_20_64 + input[3] * cospi_12_64;
+  step1[5] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step1[6] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+  // stage 2 & stage 3 - even half
+  // The even coefficients (step1[0..3]) are exactly a 4-point idct;
+  // reuse it in place.
+  aom_highbd_idct4_c(step1, step1, bd);
+
+  // stage 2 - odd half
+  step2[4] = HIGHBD_WRAPLOW(step1[4] + step1[5], bd);
+  step2[5] = HIGHBD_WRAPLOW(step1[4] - step1[5], bd);
+  step2[6] = HIGHBD_WRAPLOW(-step1[6] + step1[7], bd);
+  step2[7] = HIGHBD_WRAPLOW(step1[6] + step1[7], bd);
+
+  // stage 3 - odd half
+  step1[4] = step2[4];
+  temp1 = (step2[6] - step2[5]) * cospi_16_64;
+  temp2 = (step2[5] + step2[6]) * cospi_16_64;
+  step1[5] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step1[6] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  step1[7] = step2[7];
+
+  // stage 4
+  // Final butterfly: combine even half (step1[0..3]) with odd half
+  // (step1[4..7]), mirrored for the second half of the output.
+  output[0] = HIGHBD_WRAPLOW(step1[0] + step1[7], bd);
+  output[1] = HIGHBD_WRAPLOW(step1[1] + step1[6], bd);
+  output[2] = HIGHBD_WRAPLOW(step1[2] + step1[5], bd);
+  output[3] = HIGHBD_WRAPLOW(step1[3] + step1[4], bd);
+  output[4] = HIGHBD_WRAPLOW(step1[3] - step1[4], bd);
+  output[5] = HIGHBD_WRAPLOW(step1[2] - step1[5], bd);
+  output[6] = HIGHBD_WRAPLOW(step1[1] - step1[6], bd);
+  output[7] = HIGHBD_WRAPLOW(step1[0] - step1[7], bd);
+}
+
+// 4-point 1-D inverse ADST, high-bitdepth C reference.
+// input/output: 4 coefficients each. bd is consumed only through
+// HIGHBD_WRAPLOW; the explicit (void)bd below silences the unused-parameter
+// warning in configurations where that macro ignores bd.
+void aom_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd) {
+  tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;
+
+  tran_low_t x0 = input[0];
+  tran_low_t x1 = input[1];
+  tran_low_t x2 = input[2];
+  tran_low_t x3 = input[3];
+  (void)bd;
+
+  // All-zero input produces all-zero output; short-circuit the math.
+  if (!(x0 | x1 | x2 | x3)) {
+    memset(output, 0, 4 * sizeof(*output));
+    return;
+  }
+
+  s0 = sinpi_1_9 * x0;
+  s1 = sinpi_2_9 * x0;
+  s2 = sinpi_3_9 * x1;
+  s3 = sinpi_4_9 * x2;
+  s4 = sinpi_1_9 * x2;
+  s5 = sinpi_2_9 * x3;
+  s6 = sinpi_4_9 * x3;
+  s7 = (tran_high_t)HIGHBD_WRAPLOW(x0 - x2 + x3, bd);
+
+  s0 = s0 + s3 + s5;
+  s1 = s1 - s4 - s6;
+  s3 = s2;
+  s2 = sinpi_3_9 * s7;
+
+  // 1-D transform scaling factor is sqrt(2).
+  // The overall dynamic range is 14b (input) + 14b (multiplication scaling)
+  // + 1b (addition) = 29b.
+  // Hence the output bit depth is 15b.
+  output[0] = HIGHBD_WRAPLOW(dct_const_round_shift(s0 + s3), bd);
+  output[1] = HIGHBD_WRAPLOW(dct_const_round_shift(s1 + s3), bd);
+  output[2] = HIGHBD_WRAPLOW(dct_const_round_shift(s2), bd);
+  output[3] = HIGHBD_WRAPLOW(dct_const_round_shift(s0 + s1 - s3), bd);
+}
+
+// 8-point 1-D inverse ADST, high-bitdepth C reference.
+// The x0..x7 loads below apply the ADST input permutation; the final
+// output[] stores apply the matching output permutation and sign flips.
+// bd feeds HIGHBD_WRAPLOW only; (void)bd covers builds where the macro
+// ignores it.
+void aom_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) {
+  tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;
+
+  tran_low_t x0 = input[7];
+  tran_low_t x1 = input[0];
+  tran_low_t x2 = input[5];
+  tran_low_t x3 = input[2];
+  tran_low_t x4 = input[3];
+  tran_low_t x5 = input[4];
+  tran_low_t x6 = input[1];
+  tran_low_t x7 = input[6];
+  (void)bd;
+
+  // All-zero input produces all-zero output; short-circuit the math.
+  if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7)) {
+    memset(output, 0, 8 * sizeof(*output));
+    return;
+  }
+
+  // stage 1
+  s0 = cospi_2_64 * x0 + cospi_30_64 * x1;
+  s1 = cospi_30_64 * x0 - cospi_2_64 * x1;
+  s2 = cospi_10_64 * x2 + cospi_22_64 * x3;
+  s3 = cospi_22_64 * x2 - cospi_10_64 * x3;
+  s4 = cospi_18_64 * x4 + cospi_14_64 * x5;
+  s5 = cospi_14_64 * x4 - cospi_18_64 * x5;
+  s6 = cospi_26_64 * x6 + cospi_6_64 * x7;
+  s7 = cospi_6_64 * x6 - cospi_26_64 * x7;
+
+  x0 = HIGHBD_WRAPLOW(dct_const_round_shift(s0 + s4), bd);
+  x1 = HIGHBD_WRAPLOW(dct_const_round_shift(s1 + s5), bd);
+  x2 = HIGHBD_WRAPLOW(dct_const_round_shift(s2 + s6), bd);
+  x3 = HIGHBD_WRAPLOW(dct_const_round_shift(s3 + s7), bd);
+  x4 = HIGHBD_WRAPLOW(dct_const_round_shift(s0 - s4), bd);
+  x5 = HIGHBD_WRAPLOW(dct_const_round_shift(s1 - s5), bd);
+  x6 = HIGHBD_WRAPLOW(dct_const_round_shift(s2 - s6), bd);
+  x7 = HIGHBD_WRAPLOW(dct_const_round_shift(s3 - s7), bd);
+
+  // stage 2
+  s0 = x0;
+  s1 = x1;
+  s2 = x2;
+  s3 = x3;
+  s4 = cospi_8_64 * x4 + cospi_24_64 * x5;
+  s5 = cospi_24_64 * x4 - cospi_8_64 * x5;
+  s6 = -cospi_24_64 * x6 + cospi_8_64 * x7;
+  s7 = cospi_8_64 * x6 + cospi_24_64 * x7;
+
+  // Note: only the rotated pairs (s4..s7) carry a multiplication and
+  // therefore need the round-shift; the pass-through pairs do not.
+  x0 = HIGHBD_WRAPLOW(s0 + s2, bd);
+  x1 = HIGHBD_WRAPLOW(s1 + s3, bd);
+  x2 = HIGHBD_WRAPLOW(s0 - s2, bd);
+  x3 = HIGHBD_WRAPLOW(s1 - s3, bd);
+  x4 = HIGHBD_WRAPLOW(dct_const_round_shift(s4 + s6), bd);
+  x5 = HIGHBD_WRAPLOW(dct_const_round_shift(s5 + s7), bd);
+  x6 = HIGHBD_WRAPLOW(dct_const_round_shift(s4 - s6), bd);
+  x7 = HIGHBD_WRAPLOW(dct_const_round_shift(s5 - s7), bd);
+
+  // stage 3
+  s2 = cospi_16_64 * (x2 + x3);
+  s3 = cospi_16_64 * (x2 - x3);
+  s6 = cospi_16_64 * (x6 + x7);
+  s7 = cospi_16_64 * (x6 - x7);
+
+  x2 = HIGHBD_WRAPLOW(dct_const_round_shift(s2), bd);
+  x3 = HIGHBD_WRAPLOW(dct_const_round_shift(s3), bd);
+  x6 = HIGHBD_WRAPLOW(dct_const_round_shift(s6), bd);
+  x7 = HIGHBD_WRAPLOW(dct_const_round_shift(s7), bd);
+
+  output[0] = HIGHBD_WRAPLOW(x0, bd);
+  output[1] = HIGHBD_WRAPLOW(-x4, bd);
+  output[2] = HIGHBD_WRAPLOW(x6, bd);
+  output[3] = HIGHBD_WRAPLOW(-x2, bd);
+  output[4] = HIGHBD_WRAPLOW(x3, bd);
+  output[5] = HIGHBD_WRAPLOW(-x7, bd);
+  output[6] = HIGHBD_WRAPLOW(x5, bd);
+  output[7] = HIGHBD_WRAPLOW(-x1, bd);
+}
+
+// 16-point 1-D inverse DCT, high-bitdepth C reference.
+// Seven butterfly stages alternate between step1[] and step2[] as
+// scratch; every cross-multiplied pair goes through
+// dct_const_round_shift before HIGHBD_WRAPLOW range-limits it with bd.
+// (void)bd covers builds where HIGHBD_WRAPLOW ignores bd.
+void aom_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) {
+  tran_low_t step1[16], step2[16];
+  tran_high_t temp1, temp2;
+  (void)bd;
+
+  // stage 1
+  // Indices are written as N/2 to mirror the 32-point coefficient
+  // ordering; the compiler folds them to constants.
+  step1[0] = input[0 / 2];
+  step1[1] = input[16 / 2];
+  step1[2] = input[8 / 2];
+  step1[3] = input[24 / 2];
+  step1[4] = input[4 / 2];
+  step1[5] = input[20 / 2];
+  step1[6] = input[12 / 2];
+  step1[7] = input[28 / 2];
+  step1[8] = input[2 / 2];
+  step1[9] = input[18 / 2];
+  step1[10] = input[10 / 2];
+  step1[11] = input[26 / 2];
+  step1[12] = input[6 / 2];
+  step1[13] = input[22 / 2];
+  step1[14] = input[14 / 2];
+  step1[15] = input[30 / 2];
+
+  // stage 2
+  step2[0] = step1[0];
+  step2[1] = step1[1];
+  step2[2] = step1[2];
+  step2[3] = step1[3];
+  step2[4] = step1[4];
+  step2[5] = step1[5];
+  step2[6] = step1[6];
+  step2[7] = step1[7];
+
+  temp1 = step1[8] * cospi_30_64 - step1[15] * cospi_2_64;
+  temp2 = step1[8] * cospi_2_64 + step1[15] * cospi_30_64;
+  step2[8] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step2[15] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+  temp1 = step1[9] * cospi_14_64 - step1[14] * cospi_18_64;
+  temp2 = step1[9] * cospi_18_64 + step1[14] * cospi_14_64;
+  step2[9] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step2[14] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+  temp1 = step1[10] * cospi_22_64 - step1[13] * cospi_10_64;
+  temp2 = step1[10] * cospi_10_64 + step1[13] * cospi_22_64;
+  step2[10] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step2[13] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+  temp1 = step1[11] * cospi_6_64 - step1[12] * cospi_26_64;
+  temp2 = step1[11] * cospi_26_64 + step1[12] * cospi_6_64;
+  step2[11] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step2[12] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+  // stage 3
+  step1[0] = step2[0];
+  step1[1] = step2[1];
+  step1[2] = step2[2];
+  step1[3] = step2[3];
+
+  temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64;
+  temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64;
+  step1[4] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step1[7] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  temp1 = step2[5] * cospi_12_64 - step2[6] * cospi_20_64;
+  temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64;
+  step1[5] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step1[6] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+  step1[8] = HIGHBD_WRAPLOW(step2[8] + step2[9], bd);
+  step1[9] = HIGHBD_WRAPLOW(step2[8] - step2[9], bd);
+  step1[10] = HIGHBD_WRAPLOW(-step2[10] + step2[11], bd);
+  step1[11] = HIGHBD_WRAPLOW(step2[10] + step2[11], bd);
+  step1[12] = HIGHBD_WRAPLOW(step2[12] + step2[13], bd);
+  step1[13] = HIGHBD_WRAPLOW(step2[12] - step2[13], bd);
+  step1[14] = HIGHBD_WRAPLOW(-step2[14] + step2[15], bd);
+  step1[15] = HIGHBD_WRAPLOW(step2[14] + step2[15], bd);
+
+  // stage 4
+  temp1 = (step1[0] + step1[1]) * cospi_16_64;
+  temp2 = (step1[0] - step1[1]) * cospi_16_64;
+  step2[0] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step2[1] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  temp1 = step1[2] * cospi_24_64 - step1[3] * cospi_8_64;
+  temp2 = step1[2] * cospi_8_64 + step1[3] * cospi_24_64;
+  step2[2] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step2[3] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  step2[4] = HIGHBD_WRAPLOW(step1[4] + step1[5], bd);
+  step2[5] = HIGHBD_WRAPLOW(step1[4] - step1[5], bd);
+  step2[6] = HIGHBD_WRAPLOW(-step1[6] + step1[7], bd);
+  step2[7] = HIGHBD_WRAPLOW(step1[6] + step1[7], bd);
+
+  step2[8] = step1[8];
+  step2[15] = step1[15];
+  temp1 = -step1[9] * cospi_8_64 + step1[14] * cospi_24_64;
+  temp2 = step1[9] * cospi_24_64 + step1[14] * cospi_8_64;
+  step2[9] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step2[14] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  temp1 = -step1[10] * cospi_24_64 - step1[13] * cospi_8_64;
+  temp2 = -step1[10] * cospi_8_64 + step1[13] * cospi_24_64;
+  step2[10] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step2[13] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  step2[11] = step1[11];
+  step2[12] = step1[12];
+
+  // stage 5
+  step1[0] = HIGHBD_WRAPLOW(step2[0] + step2[3], bd);
+  step1[1] = HIGHBD_WRAPLOW(step2[1] + step2[2], bd);
+  step1[2] = HIGHBD_WRAPLOW(step2[1] - step2[2], bd);
+  step1[3] = HIGHBD_WRAPLOW(step2[0] - step2[3], bd);
+  step1[4] = step2[4];
+  temp1 = (step2[6] - step2[5]) * cospi_16_64;
+  temp2 = (step2[5] + step2[6]) * cospi_16_64;
+  step1[5] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step1[6] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  step1[7] = step2[7];
+
+  step1[8] = HIGHBD_WRAPLOW(step2[8] + step2[11], bd);
+  step1[9] = HIGHBD_WRAPLOW(step2[9] + step2[10], bd);
+  step1[10] = HIGHBD_WRAPLOW(step2[9] - step2[10], bd);
+  step1[11] = HIGHBD_WRAPLOW(step2[8] - step2[11], bd);
+  step1[12] = HIGHBD_WRAPLOW(-step2[12] + step2[15], bd);
+  step1[13] = HIGHBD_WRAPLOW(-step2[13] + step2[14], bd);
+  step1[14] = HIGHBD_WRAPLOW(step2[13] + step2[14], bd);
+  step1[15] = HIGHBD_WRAPLOW(step2[12] + step2[15], bd);
+
+  // stage 6
+  step2[0] = HIGHBD_WRAPLOW(step1[0] + step1[7], bd);
+  step2[1] = HIGHBD_WRAPLOW(step1[1] + step1[6], bd);
+  step2[2] = HIGHBD_WRAPLOW(step1[2] + step1[5], bd);
+  step2[3] = HIGHBD_WRAPLOW(step1[3] + step1[4], bd);
+  step2[4] = HIGHBD_WRAPLOW(step1[3] - step1[4], bd);
+  step2[5] = HIGHBD_WRAPLOW(step1[2] - step1[5], bd);
+  step2[6] = HIGHBD_WRAPLOW(step1[1] - step1[6], bd);
+  step2[7] = HIGHBD_WRAPLOW(step1[0] - step1[7], bd);
+  step2[8] = step1[8];
+  step2[9] = step1[9];
+  temp1 = (-step1[10] + step1[13]) * cospi_16_64;
+  temp2 = (step1[10] + step1[13]) * cospi_16_64;
+  step2[10] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step2[13] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  temp1 = (-step1[11] + step1[12]) * cospi_16_64;
+  temp2 = (step1[11] + step1[12]) * cospi_16_64;
+  step2[11] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step2[12] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  step2[14] = step1[14];
+  step2[15] = step1[15];
+
+  // stage 7
+  // Final butterfly: first half adds the mirrored odd terms, second
+  // half subtracts them.
+  output[0] = HIGHBD_WRAPLOW(step2[0] + step2[15], bd);
+  output[1] = HIGHBD_WRAPLOW(step2[1] + step2[14], bd);
+  output[2] = HIGHBD_WRAPLOW(step2[2] + step2[13], bd);
+  output[3] = HIGHBD_WRAPLOW(step2[3] + step2[12], bd);
+  output[4] = HIGHBD_WRAPLOW(step2[4] + step2[11], bd);
+  output[5] = HIGHBD_WRAPLOW(step2[5] + step2[10], bd);
+  output[6] = HIGHBD_WRAPLOW(step2[6] + step2[9], bd);
+  output[7] = HIGHBD_WRAPLOW(step2[7] + step2[8], bd);
+  output[8] = HIGHBD_WRAPLOW(step2[7] - step2[8], bd);
+  output[9] = HIGHBD_WRAPLOW(step2[6] - step2[9], bd);
+  output[10] = HIGHBD_WRAPLOW(step2[5] - step2[10], bd);
+  output[11] = HIGHBD_WRAPLOW(step2[4] - step2[11], bd);
+  output[12] = HIGHBD_WRAPLOW(step2[3] - step2[12], bd);
+  output[13] = HIGHBD_WRAPLOW(step2[2] - step2[13], bd);
+  output[14] = HIGHBD_WRAPLOW(step2[1] - step2[14], bd);
+  output[15] = HIGHBD_WRAPLOW(step2[0] - step2[15], bd);
+}
+
+// 16-point 1-D inverse ADST, high-bitdepth C reference.
+// x0..x15 load the ADST input permutation; four butterfly stages follow,
+// then the permuted/sign-flipped stores. As in the other transforms,
+// only multiplied terms pass through dct_const_round_shift, and
+// HIGHBD_WRAPLOW range-limits every intermediate with bd.
+void aom_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd) {
+  tran_high_t s0, s1, s2, s3, s4, s5, s6, s7, s8;
+  tran_high_t s9, s10, s11, s12, s13, s14, s15;
+
+  tran_low_t x0 = input[15];
+  tran_low_t x1 = input[0];
+  tran_low_t x2 = input[13];
+  tran_low_t x3 = input[2];
+  tran_low_t x4 = input[11];
+  tran_low_t x5 = input[4];
+  tran_low_t x6 = input[9];
+  tran_low_t x7 = input[6];
+  tran_low_t x8 = input[7];
+  tran_low_t x9 = input[8];
+  tran_low_t x10 = input[5];
+  tran_low_t x11 = input[10];
+  tran_low_t x12 = input[3];
+  tran_low_t x13 = input[12];
+  tran_low_t x14 = input[1];
+  tran_low_t x15 = input[14];
+  (void)bd;
+
+  // All-zero input produces all-zero output; short-circuit the math.
+  if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8 | x9 | x10 | x11 | x12 |
+        x13 | x14 | x15)) {
+    memset(output, 0, 16 * sizeof(*output));
+    return;
+  }
+
+  // stage 1
+  s0 = x0 * cospi_1_64 + x1 * cospi_31_64;
+  s1 = x0 * cospi_31_64 - x1 * cospi_1_64;
+  s2 = x2 * cospi_5_64 + x3 * cospi_27_64;
+  s3 = x2 * cospi_27_64 - x3 * cospi_5_64;
+  s4 = x4 * cospi_9_64 + x5 * cospi_23_64;
+  s5 = x4 * cospi_23_64 - x5 * cospi_9_64;
+  s6 = x6 * cospi_13_64 + x7 * cospi_19_64;
+  s7 = x6 * cospi_19_64 - x7 * cospi_13_64;
+  s8 = x8 * cospi_17_64 + x9 * cospi_15_64;
+  s9 = x8 * cospi_15_64 - x9 * cospi_17_64;
+  s10 = x10 * cospi_21_64 + x11 * cospi_11_64;
+  s11 = x10 * cospi_11_64 - x11 * cospi_21_64;
+  s12 = x12 * cospi_25_64 + x13 * cospi_7_64;
+  s13 = x12 * cospi_7_64 - x13 * cospi_25_64;
+  s14 = x14 * cospi_29_64 + x15 * cospi_3_64;
+  s15 = x14 * cospi_3_64 - x15 * cospi_29_64;
+
+  x0 = HIGHBD_WRAPLOW(dct_const_round_shift(s0 + s8), bd);
+  x1 = HIGHBD_WRAPLOW(dct_const_round_shift(s1 + s9), bd);
+  x2 = HIGHBD_WRAPLOW(dct_const_round_shift(s2 + s10), bd);
+  x3 = HIGHBD_WRAPLOW(dct_const_round_shift(s3 + s11), bd);
+  x4 = HIGHBD_WRAPLOW(dct_const_round_shift(s4 + s12), bd);
+  x5 = HIGHBD_WRAPLOW(dct_const_round_shift(s5 + s13), bd);
+  x6 = HIGHBD_WRAPLOW(dct_const_round_shift(s6 + s14), bd);
+  x7 = HIGHBD_WRAPLOW(dct_const_round_shift(s7 + s15), bd);
+  x8 = HIGHBD_WRAPLOW(dct_const_round_shift(s0 - s8), bd);
+  x9 = HIGHBD_WRAPLOW(dct_const_round_shift(s1 - s9), bd);
+  x10 = HIGHBD_WRAPLOW(dct_const_round_shift(s2 - s10), bd);
+  x11 = HIGHBD_WRAPLOW(dct_const_round_shift(s3 - s11), bd);
+  x12 = HIGHBD_WRAPLOW(dct_const_round_shift(s4 - s12), bd);
+  x13 = HIGHBD_WRAPLOW(dct_const_round_shift(s5 - s13), bd);
+  x14 = HIGHBD_WRAPLOW(dct_const_round_shift(s6 - s14), bd);
+  x15 = HIGHBD_WRAPLOW(dct_const_round_shift(s7 - s15), bd);
+
+  // stage 2
+  s0 = x0;
+  s1 = x1;
+  s2 = x2;
+  s3 = x3;
+  s4 = x4;
+  s5 = x5;
+  s6 = x6;
+  s7 = x7;
+  s8 = x8 * cospi_4_64 + x9 * cospi_28_64;
+  s9 = x8 * cospi_28_64 - x9 * cospi_4_64;
+  s10 = x10 * cospi_20_64 + x11 * cospi_12_64;
+  s11 = x10 * cospi_12_64 - x11 * cospi_20_64;
+  s12 = -x12 * cospi_28_64 + x13 * cospi_4_64;
+  s13 = x12 * cospi_4_64 + x13 * cospi_28_64;
+  s14 = -x14 * cospi_12_64 + x15 * cospi_20_64;
+  s15 = x14 * cospi_20_64 + x15 * cospi_12_64;
+
+  x0 = HIGHBD_WRAPLOW(s0 + s4, bd);
+  x1 = HIGHBD_WRAPLOW(s1 + s5, bd);
+  x2 = HIGHBD_WRAPLOW(s2 + s6, bd);
+  x3 = HIGHBD_WRAPLOW(s3 + s7, bd);
+  x4 = HIGHBD_WRAPLOW(s0 - s4, bd);
+  x5 = HIGHBD_WRAPLOW(s1 - s5, bd);
+  x6 = HIGHBD_WRAPLOW(s2 - s6, bd);
+  x7 = HIGHBD_WRAPLOW(s3 - s7, bd);
+  x8 = HIGHBD_WRAPLOW(dct_const_round_shift(s8 + s12), bd);
+  x9 = HIGHBD_WRAPLOW(dct_const_round_shift(s9 + s13), bd);
+  x10 = HIGHBD_WRAPLOW(dct_const_round_shift(s10 + s14), bd);
+  x11 = HIGHBD_WRAPLOW(dct_const_round_shift(s11 + s15), bd);
+  x12 = HIGHBD_WRAPLOW(dct_const_round_shift(s8 - s12), bd);
+  x13 = HIGHBD_WRAPLOW(dct_const_round_shift(s9 - s13), bd);
+  x14 = HIGHBD_WRAPLOW(dct_const_round_shift(s10 - s14), bd);
+  x15 = HIGHBD_WRAPLOW(dct_const_round_shift(s11 - s15), bd);
+
+  // stage 3
+  s0 = x0;
+  s1 = x1;
+  s2 = x2;
+  s3 = x3;
+  s4 = x4 * cospi_8_64 + x5 * cospi_24_64;
+  s5 = x4 * cospi_24_64 - x5 * cospi_8_64;
+  s6 = -x6 * cospi_24_64 + x7 * cospi_8_64;
+  s7 = x6 * cospi_8_64 + x7 * cospi_24_64;
+  s8 = x8;
+  s9 = x9;
+  s10 = x10;
+  s11 = x11;
+  s12 = x12 * cospi_8_64 + x13 * cospi_24_64;
+  s13 = x12 * cospi_24_64 - x13 * cospi_8_64;
+  s14 = -x14 * cospi_24_64 + x15 * cospi_8_64;
+  s15 = x14 * cospi_8_64 + x15 * cospi_24_64;
+
+  x0 = HIGHBD_WRAPLOW(s0 + s2, bd);
+  x1 = HIGHBD_WRAPLOW(s1 + s3, bd);
+  x2 = HIGHBD_WRAPLOW(s0 - s2, bd);
+  x3 = HIGHBD_WRAPLOW(s1 - s3, bd);
+  x4 = HIGHBD_WRAPLOW(dct_const_round_shift(s4 + s6), bd);
+  x5 = HIGHBD_WRAPLOW(dct_const_round_shift(s5 + s7), bd);
+  x6 = HIGHBD_WRAPLOW(dct_const_round_shift(s4 - s6), bd);
+  x7 = HIGHBD_WRAPLOW(dct_const_round_shift(s5 - s7), bd);
+  x8 = HIGHBD_WRAPLOW(s8 + s10, bd);
+  x9 = HIGHBD_WRAPLOW(s9 + s11, bd);
+  x10 = HIGHBD_WRAPLOW(s8 - s10, bd);
+  x11 = HIGHBD_WRAPLOW(s9 - s11, bd);
+  x12 = HIGHBD_WRAPLOW(dct_const_round_shift(s12 + s14), bd);
+  x13 = HIGHBD_WRAPLOW(dct_const_round_shift(s13 + s15), bd);
+  x14 = HIGHBD_WRAPLOW(dct_const_round_shift(s12 - s14), bd);
+  x15 = HIGHBD_WRAPLOW(dct_const_round_shift(s13 - s15), bd);
+
+  // stage 4
+  s2 = (-cospi_16_64) * (x2 + x3);
+  s3 = cospi_16_64 * (x2 - x3);
+  s6 = cospi_16_64 * (x6 + x7);
+  s7 = cospi_16_64 * (-x6 + x7);
+  s10 = cospi_16_64 * (x10 + x11);
+  s11 = cospi_16_64 * (-x10 + x11);
+  s14 = (-cospi_16_64) * (x14 + x15);
+  s15 = cospi_16_64 * (x14 - x15);
+
+  x2 = HIGHBD_WRAPLOW(dct_const_round_shift(s2), bd);
+  x3 = HIGHBD_WRAPLOW(dct_const_round_shift(s3), bd);
+  x6 = HIGHBD_WRAPLOW(dct_const_round_shift(s6), bd);
+  x7 = HIGHBD_WRAPLOW(dct_const_round_shift(s7), bd);
+  x10 = HIGHBD_WRAPLOW(dct_const_round_shift(s10), bd);
+  x11 = HIGHBD_WRAPLOW(dct_const_round_shift(s11), bd);
+  x14 = HIGHBD_WRAPLOW(dct_const_round_shift(s14), bd);
+  x15 = HIGHBD_WRAPLOW(dct_const_round_shift(s15), bd);
+
+  // Output permutation with sign flips on selected terms.
+  output[0] = HIGHBD_WRAPLOW(x0, bd);
+  output[1] = HIGHBD_WRAPLOW(-x8, bd);
+  output[2] = HIGHBD_WRAPLOW(x12, bd);
+  output[3] = HIGHBD_WRAPLOW(-x4, bd);
+  output[4] = HIGHBD_WRAPLOW(x6, bd);
+  output[5] = HIGHBD_WRAPLOW(x14, bd);
+  output[6] = HIGHBD_WRAPLOW(x10, bd);
+  output[7] = HIGHBD_WRAPLOW(x2, bd);
+  output[8] = HIGHBD_WRAPLOW(x3, bd);
+  output[9] = HIGHBD_WRAPLOW(x11, bd);
+  output[10] = HIGHBD_WRAPLOW(x15, bd);
+  output[11] = HIGHBD_WRAPLOW(x7, bd);
+  output[12] = HIGHBD_WRAPLOW(x5, bd);
+  output[13] = HIGHBD_WRAPLOW(-x13, bd);
+  output[14] = HIGHBD_WRAPLOW(x9, bd);
+  output[15] = HIGHBD_WRAPLOW(-x1, bd);
+}
+
+void aom_highbd_idct32_c(const tran_low_t *input, tran_low_t *output, int bd) {
+  tran_low_t step1[32], step2[32];
+  tran_high_t temp1, temp2;
+  (void)bd;
+
+  // stage 1
+  step1[0] = input[0];
+  step1[1] = input[16];
+  step1[2] = input[8];
+  step1[3] = input[24];
+  step1[4] = input[4];
+  step1[5] = input[20];
+  step1[6] = input[12];
+  step1[7] = input[28];
+  step1[8] = input[2];
+  step1[9] = input[18];
+  step1[10] = input[10];
+  step1[11] = input[26];
+  step1[12] = input[6];
+  step1[13] = input[22];
+  step1[14] = input[14];
+  step1[15] = input[30];
+
+  temp1 = input[1] * cospi_31_64 - input[31] * cospi_1_64;
+  temp2 = input[1] * cospi_1_64 + input[31] * cospi_31_64;
+  step1[16] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step1[31] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+  temp1 = input[17] * cospi_15_64 - input[15] * cospi_17_64;
+  temp2 = input[17] * cospi_17_64 + input[15] * cospi_15_64;
+  step1[17] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step1[30] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+  temp1 = input[9] * cospi_23_64 - input[23] * cospi_9_64;
+  temp2 = input[9] * cospi_9_64 + input[23] * cospi_23_64;
+  step1[18] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step1[29] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+  temp1 = input[25] * cospi_7_64 - input[7] * cospi_25_64;
+  temp2 = input[25] * cospi_25_64 + input[7] * cospi_7_64;
+  step1[19] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step1[28] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+  temp1 = input[5] * cospi_27_64 - input[27] * cospi_5_64;
+  temp2 = input[5] * cospi_5_64 + input[27] * cospi_27_64;
+  step1[20] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step1[27] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+  temp1 = input[21] * cospi_11_64 - input[11] * cospi_21_64;
+  temp2 = input[21] * cospi_21_64 + input[11] * cospi_11_64;
+  step1[21] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step1[26] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+  temp1 = input[13] * cospi_19_64 - input[19] * cospi_13_64;
+  temp2 = input[13] * cospi_13_64 + input[19] * cospi_19_64;
+  step1[22] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step1[25] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+  temp1 = input[29] * cospi_3_64 - input[3] * cospi_29_64;
+  temp2 = input[29] * cospi_29_64 + input[3] * cospi_3_64;
+  step1[23] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step1[24] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+  // stage 2
+  step2[0] = step1[0];
+  step2[1] = step1[1];
+  step2[2] = step1[2];
+  step2[3] = step1[3];
+  step2[4] = step1[4];
+  step2[5] = step1[5];
+  step2[6] = step1[6];
+  step2[7] = step1[7];
+
+  temp1 = step1[8] * cospi_30_64 - step1[15] * cospi_2_64;
+  temp2 = step1[8] * cospi_2_64 + step1[15] * cospi_30_64;
+  step2[8] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step2[15] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+  temp1 = step1[9] * cospi_14_64 - step1[14] * cospi_18_64;
+  temp2 = step1[9] * cospi_18_64 + step1[14] * cospi_14_64;
+  step2[9] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step2[14] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+  temp1 = step1[10] * cospi_22_64 - step1[13] * cospi_10_64;
+  temp2 = step1[10] * cospi_10_64 + step1[13] * cospi_22_64;
+  step2[10] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step2[13] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+  temp1 = step1[11] * cospi_6_64 - step1[12] * cospi_26_64;
+  temp2 = step1[11] * cospi_26_64 + step1[12] * cospi_6_64;
+  step2[11] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step2[12] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+  step2[16] = HIGHBD_WRAPLOW(step1[16] + step1[17], bd);
+  step2[17] = HIGHBD_WRAPLOW(step1[16] - step1[17], bd);
+  step2[18] = HIGHBD_WRAPLOW(-step1[18] + step1[19], bd);
+  step2[19] = HIGHBD_WRAPLOW(step1[18] + step1[19], bd);
+  step2[20] = HIGHBD_WRAPLOW(step1[20] + step1[21], bd);
+  step2[21] = HIGHBD_WRAPLOW(step1[20] - step1[21], bd);
+  step2[22] = HIGHBD_WRAPLOW(-step1[22] + step1[23], bd);
+  step2[23] = HIGHBD_WRAPLOW(step1[22] + step1[23], bd);
+  step2[24] = HIGHBD_WRAPLOW(step1[24] + step1[25], bd);
+  step2[25] = HIGHBD_WRAPLOW(step1[24] - step1[25], bd);
+  step2[26] = HIGHBD_WRAPLOW(-step1[26] + step1[27], bd);
+  step2[27] = HIGHBD_WRAPLOW(step1[26] + step1[27], bd);
+  step2[28] = HIGHBD_WRAPLOW(step1[28] + step1[29], bd);
+  step2[29] = HIGHBD_WRAPLOW(step1[28] - step1[29], bd);
+  step2[30] = HIGHBD_WRAPLOW(-step1[30] + step1[31], bd);
+  step2[31] = HIGHBD_WRAPLOW(step1[30] + step1[31], bd);
+
+  // stage 3
+  step1[0] = step2[0];
+  step1[1] = step2[1];
+  step1[2] = step2[2];
+  step1[3] = step2[3];
+
+  temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64;
+  temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64;
+  step1[4] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step1[7] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  temp1 = step2[5] * cospi_12_64 - step2[6] * cospi_20_64;
+  temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64;
+  step1[5] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step1[6] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+
+  step1[8] = HIGHBD_WRAPLOW(step2[8] + step2[9], bd);
+  step1[9] = HIGHBD_WRAPLOW(step2[8] - step2[9], bd);
+  step1[10] = HIGHBD_WRAPLOW(-step2[10] + step2[11], bd);
+  step1[11] = HIGHBD_WRAPLOW(step2[10] + step2[11], bd);
+  step1[12] = HIGHBD_WRAPLOW(step2[12] + step2[13], bd);
+  step1[13] = HIGHBD_WRAPLOW(step2[12] - step2[13], bd);
+  step1[14] = HIGHBD_WRAPLOW(-step2[14] + step2[15], bd);
+  step1[15] = HIGHBD_WRAPLOW(step2[14] + step2[15], bd);
+
+  step1[16] = step2[16];
+  step1[31] = step2[31];
+  temp1 = -step2[17] * cospi_4_64 + step2[30] * cospi_28_64;
+  temp2 = step2[17] * cospi_28_64 + step2[30] * cospi_4_64;
+  step1[17] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step1[30] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  temp1 = -step2[18] * cospi_28_64 - step2[29] * cospi_4_64;
+  temp2 = -step2[18] * cospi_4_64 + step2[29] * cospi_28_64;
+  step1[18] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step1[29] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  step1[19] = step2[19];
+  step1[20] = step2[20];
+  temp1 = -step2[21] * cospi_20_64 + step2[26] * cospi_12_64;
+  temp2 = step2[21] * cospi_12_64 + step2[26] * cospi_20_64;
+  step1[21] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step1[26] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  temp1 = -step2[22] * cospi_12_64 - step2[25] * cospi_20_64;
+  temp2 = -step2[22] * cospi_20_64 + step2[25] * cospi_12_64;
+  step1[22] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step1[25] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  step1[23] = step2[23];
+  step1[24] = step2[24];
+  step1[27] = step2[27];
+  step1[28] = step2[28];
+
+  // stage 4
+  temp1 = (step1[0] + step1[1]) * cospi_16_64;
+  temp2 = (step1[0] - step1[1]) * cospi_16_64;
+  step2[0] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step2[1] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  temp1 = step1[2] * cospi_24_64 - step1[3] * cospi_8_64;
+  temp2 = step1[2] * cospi_8_64 + step1[3] * cospi_24_64;
+  step2[2] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step2[3] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  step2[4] = HIGHBD_WRAPLOW(step1[4] + step1[5], bd);
+  step2[5] = HIGHBD_WRAPLOW(step1[4] - step1[5], bd);
+  step2[6] = HIGHBD_WRAPLOW(-step1[6] + step1[7], bd);
+  step2[7] = HIGHBD_WRAPLOW(step1[6] + step1[7], bd);
+
+  step2[8] = step1[8];
+  step2[15] = step1[15];
+  temp1 = -step1[9] * cospi_8_64 + step1[14] * cospi_24_64;
+  temp2 = step1[9] * cospi_24_64 + step1[14] * cospi_8_64;
+  step2[9] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step2[14] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  temp1 = -step1[10] * cospi_24_64 - step1[13] * cospi_8_64;
+  temp2 = -step1[10] * cospi_8_64 + step1[13] * cospi_24_64;
+  step2[10] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step2[13] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  step2[11] = step1[11];
+  step2[12] = step1[12];
+
+  step2[16] = HIGHBD_WRAPLOW(step1[16] + step1[19], bd);
+  step2[17] = HIGHBD_WRAPLOW(step1[17] + step1[18], bd);
+  step2[18] = HIGHBD_WRAPLOW(step1[17] - step1[18], bd);
+  step2[19] = HIGHBD_WRAPLOW(step1[16] - step1[19], bd);
+  step2[20] = HIGHBD_WRAPLOW(-step1[20] + step1[23], bd);
+  step2[21] = HIGHBD_WRAPLOW(-step1[21] + step1[22], bd);
+  step2[22] = HIGHBD_WRAPLOW(step1[21] + step1[22], bd);
+  step2[23] = HIGHBD_WRAPLOW(step1[20] + step1[23], bd);
+
+  step2[24] = HIGHBD_WRAPLOW(step1[24] + step1[27], bd);
+  step2[25] = HIGHBD_WRAPLOW(step1[25] + step1[26], bd);
+  step2[26] = HIGHBD_WRAPLOW(step1[25] - step1[26], bd);
+  step2[27] = HIGHBD_WRAPLOW(step1[24] - step1[27], bd);
+  step2[28] = HIGHBD_WRAPLOW(-step1[28] + step1[31], bd);
+  step2[29] = HIGHBD_WRAPLOW(-step1[29] + step1[30], bd);
+  step2[30] = HIGHBD_WRAPLOW(step1[29] + step1[30], bd);
+  step2[31] = HIGHBD_WRAPLOW(step1[28] + step1[31], bd);
+
+  // stage 5
+  step1[0] = HIGHBD_WRAPLOW(step2[0] + step2[3], bd);
+  step1[1] = HIGHBD_WRAPLOW(step2[1] + step2[2], bd);
+  step1[2] = HIGHBD_WRAPLOW(step2[1] - step2[2], bd);
+  step1[3] = HIGHBD_WRAPLOW(step2[0] - step2[3], bd);
+  step1[4] = step2[4];
+  temp1 = (step2[6] - step2[5]) * cospi_16_64;
+  temp2 = (step2[5] + step2[6]) * cospi_16_64;
+  step1[5] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step1[6] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  step1[7] = step2[7];
+
+  step1[8] = HIGHBD_WRAPLOW(step2[8] + step2[11], bd);
+  step1[9] = HIGHBD_WRAPLOW(step2[9] + step2[10], bd);
+  step1[10] = HIGHBD_WRAPLOW(step2[9] - step2[10], bd);
+  step1[11] = HIGHBD_WRAPLOW(step2[8] - step2[11], bd);
+  step1[12] = HIGHBD_WRAPLOW(-step2[12] + step2[15], bd);
+  step1[13] = HIGHBD_WRAPLOW(-step2[13] + step2[14], bd);
+  step1[14] = HIGHBD_WRAPLOW(step2[13] + step2[14], bd);
+  step1[15] = HIGHBD_WRAPLOW(step2[12] + step2[15], bd);
+
+  step1[16] = step2[16];
+  step1[17] = step2[17];
+  temp1 = -step2[18] * cospi_8_64 + step2[29] * cospi_24_64;
+  temp2 = step2[18] * cospi_24_64 + step2[29] * cospi_8_64;
+  step1[18] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step1[29] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  temp1 = -step2[19] * cospi_8_64 + step2[28] * cospi_24_64;
+  temp2 = step2[19] * cospi_24_64 + step2[28] * cospi_8_64;
+  step1[19] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step1[28] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  temp1 = -step2[20] * cospi_24_64 - step2[27] * cospi_8_64;
+  temp2 = -step2[20] * cospi_8_64 + step2[27] * cospi_24_64;
+  step1[20] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step1[27] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  temp1 = -step2[21] * cospi_24_64 - step2[26] * cospi_8_64;
+  temp2 = -step2[21] * cospi_8_64 + step2[26] * cospi_24_64;
+  step1[21] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step1[26] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  step1[22] = step2[22];
+  step1[23] = step2[23];
+  step1[24] = step2[24];
+  step1[25] = step2[25];
+  step1[30] = step2[30];
+  step1[31] = step2[31];
+
+  // stage 6
+  step2[0] = HIGHBD_WRAPLOW(step1[0] + step1[7], bd);
+  step2[1] = HIGHBD_WRAPLOW(step1[1] + step1[6], bd);
+  step2[2] = HIGHBD_WRAPLOW(step1[2] + step1[5], bd);
+  step2[3] = HIGHBD_WRAPLOW(step1[3] + step1[4], bd);
+  step2[4] = HIGHBD_WRAPLOW(step1[3] - step1[4], bd);
+  step2[5] = HIGHBD_WRAPLOW(step1[2] - step1[5], bd);
+  step2[6] = HIGHBD_WRAPLOW(step1[1] - step1[6], bd);
+  step2[7] = HIGHBD_WRAPLOW(step1[0] - step1[7], bd);
+  step2[8] = step1[8];
+  step2[9] = step1[9];
+  temp1 = (-step1[10] + step1[13]) * cospi_16_64;
+  temp2 = (step1[10] + step1[13]) * cospi_16_64;
+  step2[10] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step2[13] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  temp1 = (-step1[11] + step1[12]) * cospi_16_64;
+  temp2 = (step1[11] + step1[12]) * cospi_16_64;
+  step2[11] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step2[12] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  step2[14] = step1[14];
+  step2[15] = step1[15];
+
+  step2[16] = HIGHBD_WRAPLOW(step1[16] + step1[23], bd);
+  step2[17] = HIGHBD_WRAPLOW(step1[17] + step1[22], bd);
+  step2[18] = HIGHBD_WRAPLOW(step1[18] + step1[21], bd);
+  step2[19] = HIGHBD_WRAPLOW(step1[19] + step1[20], bd);
+  step2[20] = HIGHBD_WRAPLOW(step1[19] - step1[20], bd);
+  step2[21] = HIGHBD_WRAPLOW(step1[18] - step1[21], bd);
+  step2[22] = HIGHBD_WRAPLOW(step1[17] - step1[22], bd);
+  step2[23] = HIGHBD_WRAPLOW(step1[16] - step1[23], bd);
+
+  step2[24] = HIGHBD_WRAPLOW(-step1[24] + step1[31], bd);
+  step2[25] = HIGHBD_WRAPLOW(-step1[25] + step1[30], bd);
+  step2[26] = HIGHBD_WRAPLOW(-step1[26] + step1[29], bd);
+  step2[27] = HIGHBD_WRAPLOW(-step1[27] + step1[28], bd);
+  step2[28] = HIGHBD_WRAPLOW(step1[27] + step1[28], bd);
+  step2[29] = HIGHBD_WRAPLOW(step1[26] + step1[29], bd);
+  step2[30] = HIGHBD_WRAPLOW(step1[25] + step1[30], bd);
+  step2[31] = HIGHBD_WRAPLOW(step1[24] + step1[31], bd);
+
+  // stage 7
+  step1[0] = HIGHBD_WRAPLOW(step2[0] + step2[15], bd);
+  step1[1] = HIGHBD_WRAPLOW(step2[1] + step2[14], bd);
+  step1[2] = HIGHBD_WRAPLOW(step2[2] + step2[13], bd);
+  step1[3] = HIGHBD_WRAPLOW(step2[3] + step2[12], bd);
+  step1[4] = HIGHBD_WRAPLOW(step2[4] + step2[11], bd);
+  step1[5] = HIGHBD_WRAPLOW(step2[5] + step2[10], bd);
+  step1[6] = HIGHBD_WRAPLOW(step2[6] + step2[9], bd);
+  step1[7] = HIGHBD_WRAPLOW(step2[7] + step2[8], bd);
+  step1[8] = HIGHBD_WRAPLOW(step2[7] - step2[8], bd);
+  step1[9] = HIGHBD_WRAPLOW(step2[6] - step2[9], bd);
+  step1[10] = HIGHBD_WRAPLOW(step2[5] - step2[10], bd);
+  step1[11] = HIGHBD_WRAPLOW(step2[4] - step2[11], bd);
+  step1[12] = HIGHBD_WRAPLOW(step2[3] - step2[12], bd);
+  step1[13] = HIGHBD_WRAPLOW(step2[2] - step2[13], bd);
+  step1[14] = HIGHBD_WRAPLOW(step2[1] - step2[14], bd);
+  step1[15] = HIGHBD_WRAPLOW(step2[0] - step2[15], bd);
+
+  step1[16] = step2[16];
+  step1[17] = step2[17];
+  step1[18] = step2[18];
+  step1[19] = step2[19];
+  temp1 = (-step2[20] + step2[27]) * cospi_16_64;
+  temp2 = (step2[20] + step2[27]) * cospi_16_64;
+  step1[20] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step1[27] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  temp1 = (-step2[21] + step2[26]) * cospi_16_64;
+  temp2 = (step2[21] + step2[26]) * cospi_16_64;
+  step1[21] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step1[26] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  temp1 = (-step2[22] + step2[25]) * cospi_16_64;
+  temp2 = (step2[22] + step2[25]) * cospi_16_64;
+  step1[22] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step1[25] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  temp1 = (-step2[23] + step2[24]) * cospi_16_64;
+  temp2 = (step2[23] + step2[24]) * cospi_16_64;
+  step1[23] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
+  step1[24] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
+  step1[28] = step2[28];
+  step1[29] = step2[29];
+  step1[30] = step2[30];
+  step1[31] = step2[31];
+
+  // final stage
+  output[0] = HIGHBD_WRAPLOW(step1[0] + step1[31], bd);
+  output[1] = HIGHBD_WRAPLOW(step1[1] + step1[30], bd);
+  output[2] = HIGHBD_WRAPLOW(step1[2] + step1[29], bd);
+  output[3] = HIGHBD_WRAPLOW(step1[3] + step1[28], bd);
+  output[4] = HIGHBD_WRAPLOW(step1[4] + step1[27], bd);
+  output[5] = HIGHBD_WRAPLOW(step1[5] + step1[26], bd);
+  output[6] = HIGHBD_WRAPLOW(step1[6] + step1[25], bd);
+  output[7] = HIGHBD_WRAPLOW(step1[7] + step1[24], bd);
+  output[8] = HIGHBD_WRAPLOW(step1[8] + step1[23], bd);
+  output[9] = HIGHBD_WRAPLOW(step1[9] + step1[22], bd);
+  output[10] = HIGHBD_WRAPLOW(step1[10] + step1[21], bd);
+  output[11] = HIGHBD_WRAPLOW(step1[11] + step1[20], bd);
+  output[12] = HIGHBD_WRAPLOW(step1[12] + step1[19], bd);
+  output[13] = HIGHBD_WRAPLOW(step1[13] + step1[18], bd);
+  output[14] = HIGHBD_WRAPLOW(step1[14] + step1[17], bd);
+  output[15] = HIGHBD_WRAPLOW(step1[15] + step1[16], bd);
+  output[16] = HIGHBD_WRAPLOW(step1[15] - step1[16], bd);
+  output[17] = HIGHBD_WRAPLOW(step1[14] - step1[17], bd);
+  output[18] = HIGHBD_WRAPLOW(step1[13] - step1[18], bd);
+  output[19] = HIGHBD_WRAPLOW(step1[12] - step1[19], bd);
+  output[20] = HIGHBD_WRAPLOW(step1[11] - step1[20], bd);
+  output[21] = HIGHBD_WRAPLOW(step1[10] - step1[21], bd);
+  output[22] = HIGHBD_WRAPLOW(step1[9] - step1[22], bd);
+  output[23] = HIGHBD_WRAPLOW(step1[8] - step1[23], bd);
+  output[24] = HIGHBD_WRAPLOW(step1[7] - step1[24], bd);
+  output[25] = HIGHBD_WRAPLOW(step1[6] - step1[25], bd);
+  output[26] = HIGHBD_WRAPLOW(step1[5] - step1[26], bd);
+  output[27] = HIGHBD_WRAPLOW(step1[4] - step1[27], bd);
+  output[28] = HIGHBD_WRAPLOW(step1[3] - step1[28], bd);
+  output[29] = HIGHBD_WRAPLOW(step1[2] - step1[29], bd);
+  output[30] = HIGHBD_WRAPLOW(step1[1] - step1[30], bd);
+  output[31] = HIGHBD_WRAPLOW(step1[0] - step1[31], bd);
+}
+
 #endif  // CONFIG_HIGHBITDEPTH
diff --git a/aom_dsp/x86/inv_txfm_sse2.c b/aom_dsp/x86/inv_txfm_sse2.c
index 5795a18..be200df 100644
--- a/aom_dsp/x86/inv_txfm_sse2.c
+++ b/aom_dsp/x86/inv_txfm_sse2.c
@@ -3628,4 +3628,107 @@
   }
 }
 
+void aom_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
+                                    int stride, int bd) {
+  tran_low_t out[8 * 8] = { 0 };
+  tran_low_t *outptr = out;
+  int i, j, test;
+  __m128i inptr[8];
+  __m128i min_input, max_input, temp1, temp2, sign_bits;
+  uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
+  const __m128i zero = _mm_set1_epi16(0);
+  const __m128i sixteen = _mm_set1_epi16(16);
+  const __m128i max = _mm_set1_epi16(6201);
+  const __m128i min = _mm_set1_epi16(-6201);
+  int optimised_cols = 0;
+
+  // Load input into __m128i & pack to 16 bits
+  for (i = 0; i < 8; i++) {
+    temp1 = _mm_loadu_si128((const __m128i *)(input + 8 * i));
+    temp2 = _mm_loadu_si128((const __m128i *)(input + 8 * i + 4));
+    inptr[i] = _mm_packs_epi32(temp1, temp2);
+  }
+
+  // Find the min & max for the row transform
+  // only the first 4 rows have non-zero coefs
+  max_input = _mm_max_epi16(inptr[0], inptr[1]);
+  min_input = _mm_min_epi16(inptr[0], inptr[1]);
+  for (i = 2; i < 4; i++) {
+    max_input = _mm_max_epi16(max_input, inptr[i]);
+    min_input = _mm_min_epi16(min_input, inptr[i]);
+  }
+  max_input = _mm_cmpgt_epi16(max_input, max);
+  min_input = _mm_cmplt_epi16(min_input, min);
+  temp1 = _mm_or_si128(max_input, min_input);
+  test = _mm_movemask_epi8(temp1);
+
+  if (!test) {
+    // Do the row transform
+    aom_idct8_sse2(inptr);
+
+    // Find the min & max for the column transform
+    // N.B. Only first 4 cols contain non-zero coeffs
+    max_input = _mm_max_epi16(inptr[0], inptr[1]);
+    min_input = _mm_min_epi16(inptr[0], inptr[1]);
+    for (i = 2; i < 8; i++) {
+      max_input = _mm_max_epi16(max_input, inptr[i]);
+      min_input = _mm_min_epi16(min_input, inptr[i]);
+    }
+    max_input = _mm_cmpgt_epi16(max_input, max);
+    min_input = _mm_cmplt_epi16(min_input, min);
+    temp1 = _mm_or_si128(max_input, min_input);
+    test = _mm_movemask_epi8(temp1);
+
+    if (test) {
+      // Use fact only first 4 rows contain non-zero coeffs
+      array_transpose_4X8(inptr, inptr);
+      for (i = 0; i < 4; i++) {
+        sign_bits = _mm_cmplt_epi16(inptr[i], zero);
+        temp1 = _mm_unpackhi_epi16(inptr[i], sign_bits);
+        temp2 = _mm_unpacklo_epi16(inptr[i], sign_bits);
+        _mm_storeu_si128((__m128i *)(outptr + 4 * (2 * i + 1)), temp1);
+        _mm_storeu_si128((__m128i *)(outptr + 4 * (2 * i)), temp2);
+      }
+    } else {
+      // Set to use the optimised transform for the column
+      optimised_cols = 1;
+    }
+  } else {
+    // Run the un-optimised row transform
+    for (i = 0; i < 4; ++i) {
+      aom_highbd_idct8_c(input, outptr, bd);
+      input += 8;
+      outptr += 8;
+    }
+  }
+
+  if (optimised_cols) {
+    aom_idct8_sse2(inptr);
+
+    // Final round & shift and Reconstruction and Store
+    {
+      __m128i d[8];
+      for (i = 0; i < 8; i++) {
+        inptr[i] = _mm_add_epi16(inptr[i], sixteen);
+        d[i] = _mm_loadu_si128((const __m128i *)(dest + stride * i));
+        inptr[i] = _mm_srai_epi16(inptr[i], 5);
+        d[i] = clamp_high_sse2(_mm_adds_epi16(d[i], inptr[i]), bd);
+        // Store
+        _mm_storeu_si128((__m128i *)(dest + stride * i), d[i]);
+      }
+    }
+  } else {
+    // Run the un-optimised column transform
+    tran_low_t temp_in[8], temp_out[8];
+    for (i = 0; i < 8; ++i) {
+      for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
+      aom_highbd_idct8_c(temp_in, temp_out, bd);
+      for (j = 0; j < 8; ++j) {
+        dest[j * stride + i] = highbd_clip_pixel_add(
+            dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
+      }
+    }
+  }
+}
+
 #endif  // CONFIG_HIGHBITDEPTH
diff --git a/av1/common/av1_inv_txfm2d.c b/av1/common/av1_inv_txfm2d.c
index 688cfb4..d847ab1 100644
--- a/av1/common/av1_inv_txfm2d.c
+++ b/av1/common/av1_inv_txfm2d.c
@@ -30,115 +30,64 @@
 }
 
 #if CONFIG_EXT_TX
-const TXFM_2D_CFG *inv_txfm_cfg_ls[TX_TYPES][TX_SIZES] = {
-  // DCT_DCT
+static const TXFM_2D_CFG *inv_txfm_cfg_ls[FLIPADST_ADST + 1][TX_SIZES] = {
   {
 #if CONFIG_CB4X4
       NULL,
 #endif
       &inv_txfm_2d_cfg_dct_dct_4, &inv_txfm_2d_cfg_dct_dct_8,
       &inv_txfm_2d_cfg_dct_dct_16, &inv_txfm_2d_cfg_dct_dct_32 },
-  // ADST_DCT
   {
 #if CONFIG_CB4X4
       NULL,
 #endif
       &inv_txfm_2d_cfg_adst_dct_4, &inv_txfm_2d_cfg_adst_dct_8,
       &inv_txfm_2d_cfg_adst_dct_16, &inv_txfm_2d_cfg_adst_dct_32 },
-  // DCT_ADST
   {
 #if CONFIG_CB4X4
       NULL,
 #endif
       &inv_txfm_2d_cfg_dct_adst_4, &inv_txfm_2d_cfg_dct_adst_8,
       &inv_txfm_2d_cfg_dct_adst_16, &inv_txfm_2d_cfg_dct_adst_32 },
-  // ADST_ADST
   {
 #if CONFIG_CB4X4
       NULL,
 #endif
       &inv_txfm_2d_cfg_adst_adst_4, &inv_txfm_2d_cfg_adst_adst_8,
       &inv_txfm_2d_cfg_adst_adst_16, &inv_txfm_2d_cfg_adst_adst_32 },
-  // FLIPADST_DCT
   {
 #if CONFIG_CB4X4
       NULL,
 #endif
       &inv_txfm_2d_cfg_adst_dct_4, &inv_txfm_2d_cfg_adst_dct_8,
       &inv_txfm_2d_cfg_adst_dct_16, &inv_txfm_2d_cfg_adst_dct_32 },
-  // DCT_FLIPADST
   {
 #if CONFIG_CB4X4
       NULL,
 #endif
       &inv_txfm_2d_cfg_dct_adst_4, &inv_txfm_2d_cfg_dct_adst_8,
       &inv_txfm_2d_cfg_dct_adst_16, &inv_txfm_2d_cfg_dct_adst_32 },
-  // FLIPADST_FLIPADST
   {
 #if CONFIG_CB4X4
       NULL,
 #endif
       &inv_txfm_2d_cfg_adst_adst_4, &inv_txfm_2d_cfg_adst_adst_8,
       &inv_txfm_2d_cfg_adst_adst_16, &inv_txfm_2d_cfg_adst_adst_32 },
-  // ADST_FLIPADST
   {
 #if CONFIG_CB4X4
       NULL,
 #endif
       &inv_txfm_2d_cfg_adst_adst_4, &inv_txfm_2d_cfg_adst_adst_8,
       &inv_txfm_2d_cfg_adst_adst_16, &inv_txfm_2d_cfg_adst_adst_32 },
-  // FLIPADST_ADST
   {
 #if CONFIG_CB4X4
       NULL,
 #endif
       &inv_txfm_2d_cfg_adst_adst_4, &inv_txfm_2d_cfg_adst_adst_8,
       &inv_txfm_2d_cfg_adst_adst_16, &inv_txfm_2d_cfg_adst_adst_32 },
-  { // IDTX
-#if CONFIG_CB4X4
-    NULL,
-#endif
-    &inv_txfm_2d_cfg_adst_adst_4, &inv_txfm_2d_cfg_adst_adst_8,
-    &inv_txfm_2d_cfg_adst_adst_16, &inv_txfm_2d_cfg_adst_adst_32 },
-  { // V_DCT
-#if CONFIG_CB4X4
-    NULL,
-#endif
-    &inv_txfm_2d_cfg_dct_adst_4, &inv_txfm_2d_cfg_dct_adst_8,
-    &inv_txfm_2d_cfg_dct_adst_16, &inv_txfm_2d_cfg_dct_adst_32 },
-  { // H_DCT
-#if CONFIG_CB4X4
-    NULL,
-#endif
-    &inv_txfm_2d_cfg_adst_dct_4, &inv_txfm_2d_cfg_adst_dct_8,
-    &inv_txfm_2d_cfg_adst_dct_16, &inv_txfm_2d_cfg_adst_dct_32 },
-  { // V_ADST
-#if CONFIG_CB4X4
-    NULL,
-#endif
-    &inv_txfm_2d_cfg_adst_adst_4, &inv_txfm_2d_cfg_adst_adst_8,
-    &inv_txfm_2d_cfg_adst_adst_16, &inv_txfm_2d_cfg_adst_adst_32 },
-  { // H_ADST
-#if CONFIG_CB4X4
-    NULL,
-#endif
-    &inv_txfm_2d_cfg_adst_adst_4, &inv_txfm_2d_cfg_adst_adst_8,
-    &inv_txfm_2d_cfg_adst_adst_16, &inv_txfm_2d_cfg_adst_adst_32 },
-  { // V_FLIP_ADST
-#if CONFIG_CB4X4
-    NULL,
-#endif
-    &inv_txfm_2d_cfg_adst_adst_4, &inv_txfm_2d_cfg_adst_adst_8,
-    &inv_txfm_2d_cfg_adst_adst_16, &inv_txfm_2d_cfg_adst_adst_32 },
-  { // H_FLIP_ADST
-#if CONFIG_CB4X4
-    NULL,
-#endif
-    &inv_txfm_2d_cfg_adst_adst_4, &inv_txfm_2d_cfg_adst_adst_8,
-    &inv_txfm_2d_cfg_adst_adst_16, &inv_txfm_2d_cfg_adst_adst_32 },
 };
 #else
-const TXFM_2D_CFG *inv_txfm_cfg_ls[TX_TYPES][TX_SIZES] = {
+static const TXFM_2D_CFG *inv_txfm_cfg_ls[TX_TYPES][TX_SIZES] = {
   {
 #if CONFIG_CB4X4
       NULL,
diff --git a/av1/common/av1_inv_txfm2d_cfg.h b/av1/common/av1_inv_txfm2d_cfg.h
index 9eabc2e..badafd5 100644
--- a/av1/common/av1_inv_txfm2d_cfg.h
+++ b/av1/common/av1_inv_txfm2d_cfg.h
@@ -442,6 +442,4 @@
   TXFM_TYPE_DCT32
 };  // .txfm_type_row
 
-extern const TXFM_2D_CFG *inv_txfm_cfg_ls[TX_TYPES][TX_SIZES];
-
 #endif  // AV1_INV_TXFM2D_CFG_H_
diff --git a/av1/common/idct.c b/av1/common/idct.c
index 0ea58bf..2c960cd 100644
--- a/av1/common/idct.c
+++ b/av1/common/idct.c
@@ -116,100 +116,38 @@
 #endif  // CONFIG_TX64X64
 
 #if CONFIG_HIGHBITDEPTH
-static void highbd_idct4(const tran_low_t *input, tran_low_t *output,
-                         const int8_t *cos_bit, const int8_t *stage_range,
-                         int bd) {
-  (void)bd;
-  av1_idct4_new(input, output, cos_bit, stage_range);
-}
-
-static void highbd_idct8(const tran_low_t *input, tran_low_t *output,
-                         const int8_t *cos_bit, const int8_t *stage_range,
-                         int bd) {
-  (void)bd;
-  av1_idct8_new(input, output, cos_bit, stage_range);
-}
-
-static void highbd_idct16(const tran_low_t *input, tran_low_t *output,
-                          const int8_t *cos_bit, const int8_t *stage_range,
-                          int bd) {
-  (void)bd;
-  av1_idct16_new(input, output, cos_bit, stage_range);
-}
-
-static void highbd_idct32(const tran_low_t *input, tran_low_t *output,
-                          const int8_t *cos_bit, const int8_t *stage_range,
-                          int bd) {
-  (void)bd;
-  av1_idct32_new(input, output, cos_bit, stage_range);
-}
-
-static void highbd_iadst4(const tran_low_t *input, tran_low_t *output,
-                          const int8_t *cos_bit, const int8_t *stage_range,
-                          int bd) {
-  (void)bd;
-  av1_iadst4_new(input, output, cos_bit, stage_range);
-}
-
-static void highbd_iadst8(const tran_low_t *input, tran_low_t *output,
-                          const int8_t *cos_bit, const int8_t *stage_range,
-                          int bd) {
-  (void)bd;
-  av1_iadst8_new(input, output, cos_bit, stage_range);
-}
-
-static void highbd_iadst16(const tran_low_t *input, tran_low_t *output,
-                           const int8_t *cos_bit, const int8_t *stage_range,
-                           int bd) {
-  (void)bd;
-  av1_iadst16_new(input, output, cos_bit, stage_range);
-}
-
 #if CONFIG_EXT_TX
 static void highbd_iidtx4_c(const tran_low_t *input, tran_low_t *output,
-                            const int8_t *cos_bit, const int8_t *stage_range,
                             int bd) {
   int i;
-  (void)cos_bit;
-  (void)stage_range;
   for (i = 0; i < 4; ++i)
     output[i] = HIGHBD_WRAPLOW(dct_const_round_shift(input[i] * Sqrt2), bd);
 }
 
 static void highbd_iidtx8_c(const tran_low_t *input, tran_low_t *output,
-                            const int8_t *cos_bit, const int8_t *stage_range,
                             int bd) {
   int i;
   (void)bd;
-  (void)cos_bit;
-  (void)stage_range;
   for (i = 0; i < 8; ++i) output[i] = input[i] * 2;
 }
 
 static void highbd_iidtx16_c(const tran_low_t *input, tran_low_t *output,
-                             const int8_t *cos_bit, const int8_t *stage_range,
                              int bd) {
   int i;
-  (void)cos_bit;
-  (void)stage_range;
   for (i = 0; i < 16; ++i)
     output[i] = HIGHBD_WRAPLOW(dct_const_round_shift(input[i] * 2 * Sqrt2), bd);
 }
 
 static void highbd_iidtx32_c(const tran_low_t *input, tran_low_t *output,
-                             const int8_t *cos_bit, const int8_t *stage_range,
                              int bd) {
   int i;
   (void)bd;
-  (void)cos_bit;
-  (void)stage_range;
   for (i = 0; i < 32; ++i) output[i] = input[i] * 4;
 }
 #endif  // CONFIG_EXT_TX
 
 static void highbd_ihalfright32_c(const tran_low_t *input, tran_low_t *output,
-                                  const int8_t *cos_bit,
-                                  const int8_t *stage_range, int bd) {
+                                  int bd) {
   int i;
   tran_low_t inputhalf[16];
   // Multiply input by sqrt(2)
@@ -219,17 +157,14 @@
   for (i = 0; i < 16; ++i) {
     output[i] = input[16 + i] * 4;
   }
-  highbd_idct16(inputhalf, output + 16, cos_bit, stage_range, bd);
+  aom_highbd_idct16_c(inputhalf, output + 16, bd);
   // Note overall scaling factor is 4 times orthogonal
 }
 
 #if CONFIG_EXT_TX
 #if CONFIG_TX64X64
 static void highbd_iidtx64_c(const tran_low_t *input, tran_low_t *output,
-                             const int8_t *cos_bit, const int8_t *stage_range,
                              int bd) {
-  (void)cos_bit;
-  (void)stage_range;
   int i;
   for (i = 0; i < 64; ++i)
     output[i] = HIGHBD_WRAPLOW(dct_const_round_shift(input[i] * 4 * Sqrt2), bd);
@@ -240,8 +175,7 @@
 #if CONFIG_TX64X64
 // For use in lieu of ADST
 static void highbd_ihalfright64_c(const tran_low_t *input, tran_low_t *output,
-                                  const int8_t *cos_bit,
-                                  const int8_t *stage_range, int bd) {
+                                  int bd) {
   int i;
   tran_low_t inputhalf[32];
   // Multiply input by sqrt(2)
@@ -252,17 +186,14 @@
     output[i] =
         HIGHBD_WRAPLOW(dct_const_round_shift(input[32 + i] * 4 * Sqrt2), bd);
   }
-  highbd_idct32(inputhalf, output + 32, cos_bit, stage_range, bd);
+  aom_highbd_idct32_c(inputhalf, output + 32, bd);
   // Note overall scaling factor is 4 * sqrt(2)  times orthogonal
 }
 
 static void highbd_idct64_col_c(const tran_low_t *input, tran_low_t *output,
-                                const int8_t *cos_bit,
-                                const int8_t *stage_range, int bd) {
+                                int bd) {
   int32_t in[64], out[64];
   int i;
-  (void)cos_bit;
-  (void)stage_range;
   (void)bd;
   for (i = 0; i < 64; ++i) in[i] = (int32_t)input[i];
   av1_idct64_new(in, out, inv_cos_bit_col_dct_dct_64,
@@ -271,12 +202,9 @@
 }
 
 static void highbd_idct64_row_c(const tran_low_t *input, tran_low_t *output,
-                                const int8_t *cos_bit,
-                                const int8_t *stage_range, int bd) {
+                                int bd) {
   int32_t in[64], out[64];
   int i;
-  (void)cos_bit;
-  (void)stage_range;
   (void)bd;
   for (i = 0; i < 64; ++i) in[i] = (int32_t)input[i];
   av1_idct64_new(in, out, inv_cos_bit_row_dct_dct_64,
@@ -1588,55 +1516,27 @@
 #endif  // CONFIG_TX64X64
 
 #if CONFIG_HIGHBITDEPTH
-
-const TXFM_2D_CFG *inv_txfm_cfg_ls[TX_TYPES][TX_SIZES];
-
-typedef struct {
-  const int8_t *cos_bit;
-  const int8_t *stage_range;
-} tx_1d_cfg;
-
-typedef struct {
-  tx_1d_cfg row;
-  tx_1d_cfg col;
-} tx_2d_cfg;
-
-tx_2d_cfg inv_tx_cfg(int tx_type, int tx_size_row, int tx_size_col) {
-  const TXFM_2D_CFG *cfg_row = inv_txfm_cfg_ls[tx_type][tx_size_row];
-  const int8_t *stage_range_row = cfg_row->stage_range_row;
-  const int8_t *cos_bit_row = cfg_row->cos_bit_row;
-
-  const TXFM_2D_CFG *cfg_col = inv_txfm_cfg_ls[tx_type][tx_size_col];
-  const int8_t *stage_range_col = cfg_col->stage_range_col;
-  const int8_t *cos_bit_col = cfg_col->cos_bit_col;
-
-  tx_2d_cfg cfg = {
-    { cos_bit_row, stage_range_row }, { cos_bit_col, stage_range_col },
-  };
-  return cfg;
-}
-
 void av1_highbd_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
                                 int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_4[] = {
-    { highbd_idct4, highbd_idct4 },    // DCT_DCT
-    { highbd_iadst4, highbd_idct4 },   // ADST_DCT
-    { highbd_idct4, highbd_iadst4 },   // DCT_ADST
-    { highbd_iadst4, highbd_iadst4 },  // ADST_ADST
+    { aom_highbd_idct4_c, aom_highbd_idct4_c },    // DCT_DCT
+    { aom_highbd_iadst4_c, aom_highbd_idct4_c },   // ADST_DCT
+    { aom_highbd_idct4_c, aom_highbd_iadst4_c },   // DCT_ADST
+    { aom_highbd_iadst4_c, aom_highbd_iadst4_c },  // ADST_ADST
 #if CONFIG_EXT_TX
-    { highbd_iadst4, highbd_idct4 },       // FLIPADST_DCT
-    { highbd_idct4, highbd_iadst4 },       // DCT_FLIPADST
-    { highbd_iadst4, highbd_iadst4 },      // FLIPADST_FLIPADST
-    { highbd_iadst4, highbd_iadst4 },      // ADST_FLIPADST
-    { highbd_iadst4, highbd_iadst4 },      // FLIPADST_ADST
-    { highbd_iidtx4_c, highbd_iidtx4_c },  // IDTX
-    { highbd_idct4, highbd_iidtx4_c },     // V_DCT
-    { highbd_iidtx4_c, highbd_idct4 },     // H_DCT
-    { highbd_iadst4, highbd_iidtx4_c },    // V_ADST
-    { highbd_iidtx4_c, highbd_iadst4 },    // H_ADST
-    { highbd_iadst4, highbd_iidtx4_c },    // V_FLIPADST
-    { highbd_iidtx4_c, highbd_iadst4 },    // H_FLIPADST
-#endif                                     // CONFIG_EXT_TX
+    { aom_highbd_iadst4_c, aom_highbd_idct4_c },   // FLIPADST_DCT
+    { aom_highbd_idct4_c, aom_highbd_iadst4_c },   // DCT_FLIPADST
+    { aom_highbd_iadst4_c, aom_highbd_iadst4_c },  // FLIPADST_FLIPADST
+    { aom_highbd_iadst4_c, aom_highbd_iadst4_c },  // ADST_FLIPADST
+    { aom_highbd_iadst4_c, aom_highbd_iadst4_c },  // FLIPADST_ADST
+    { highbd_iidtx4_c, highbd_iidtx4_c },          // IDTX
+    { aom_highbd_idct4_c, highbd_iidtx4_c },       // V_DCT
+    { highbd_iidtx4_c, aom_highbd_idct4_c },       // H_DCT
+    { aom_highbd_iadst4_c, highbd_iidtx4_c },      // V_ADST
+    { highbd_iidtx4_c, aom_highbd_iadst4_c },      // H_ADST
+    { aom_highbd_iadst4_c, highbd_iidtx4_c },      // V_FLIPADST
+    { highbd_iidtx4_c, aom_highbd_iadst4_c },      // H_FLIPADST
+#endif                                             // CONFIG_EXT_TX
   };
 
   uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
@@ -1647,12 +1547,9 @@
   tran_low_t *outp = &out[0][0];
   int outstride = 4;
 
-  tx_2d_cfg cfg = inv_tx_cfg(tx_type, TX_4X4, TX_4X4);
-
   // inverse transform row vectors
   for (i = 0; i < 4; ++i) {
-    HIGH_IHT_4[tx_type].rows(input, out[i], cfg.row.cos_bit,
-                             cfg.row.stage_range, bd);
+    HIGH_IHT_4[tx_type].rows(input, out[i], bd);
     input += 4;
   }
 
@@ -1667,8 +1564,7 @@
 
   // inverse transform column vectors
   for (i = 0; i < 4; ++i) {
-    HIGH_IHT_4[tx_type].cols(out[i], out[i], cfg.col.cos_bit,
-                             cfg.col.stage_range, bd);
+    HIGH_IHT_4[tx_type].cols(out[i], out[i], bd);
   }
 
 #if CONFIG_EXT_TX
@@ -1689,24 +1585,24 @@
 void av1_highbd_iht4x8_32_add_c(const tran_low_t *input, uint8_t *dest8,
                                 int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_4x8[] = {
-    { highbd_idct8, highbd_idct4 },    // DCT_DCT
-    { highbd_iadst8, highbd_idct4 },   // ADST_DCT
-    { highbd_idct8, highbd_iadst4 },   // DCT_ADST
-    { highbd_iadst8, highbd_iadst4 },  // ADST_ADST
+    { aom_highbd_idct8_c, aom_highbd_idct4_c },    // DCT_DCT
+    { aom_highbd_iadst8_c, aom_highbd_idct4_c },   // ADST_DCT
+    { aom_highbd_idct8_c, aom_highbd_iadst4_c },   // DCT_ADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst4_c },  // ADST_ADST
 #if CONFIG_EXT_TX
-    { highbd_iadst8, highbd_idct4 },       // FLIPADST_DCT
-    { highbd_idct8, highbd_iadst4 },       // DCT_FLIPADST
-    { highbd_iadst8, highbd_iadst4 },      // FLIPADST_FLIPADST
-    { highbd_iadst8, highbd_iadst4 },      // ADST_FLIPADST
-    { highbd_iadst8, highbd_iadst4 },      // FLIPADST_ADST
-    { highbd_iidtx8_c, highbd_iidtx4_c },  // IDTX
-    { highbd_idct8, highbd_iidtx4_c },     // V_DCT
-    { highbd_iidtx8_c, highbd_idct4 },     // H_DCT
-    { highbd_iadst8, highbd_iidtx4_c },    // V_ADST
-    { highbd_iidtx8_c, highbd_iadst4 },    // H_ADST
-    { highbd_iadst8, highbd_iidtx4_c },    // V_FLIPADST
-    { highbd_iidtx8_c, highbd_iadst4 },    // H_FLIPADST
-#endif                                     // CONFIG_EXT_TX
+    { aom_highbd_iadst8_c, aom_highbd_idct4_c },   // FLIPADST_DCT
+    { aom_highbd_idct8_c, aom_highbd_iadst4_c },   // DCT_FLIPADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst4_c },  // FLIPADST_FLIPADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst4_c },  // ADST_FLIPADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst4_c },  // FLIPADST_ADST
+    { highbd_iidtx8_c, highbd_iidtx4_c },          // IDTX
+    { aom_highbd_idct8_c, highbd_iidtx4_c },       // V_DCT
+    { highbd_iidtx8_c, aom_highbd_idct4_c },       // H_DCT
+    { aom_highbd_iadst8_c, highbd_iidtx4_c },      // V_ADST
+    { highbd_iidtx8_c, aom_highbd_iadst4_c },      // H_ADST
+    { aom_highbd_iadst8_c, highbd_iidtx4_c },      // V_FLIPADST
+    { highbd_iidtx8_c, aom_highbd_iadst4_c },      // H_FLIPADST
+#endif                                             // CONFIG_EXT_TX
   };
   const int n = 4;
   const int n2 = 8;
@@ -1718,12 +1614,9 @@
   tran_low_t *outp = &out[0][0];
   int outstride = n2;
 
-  tx_2d_cfg cfg = inv_tx_cfg(tx_type, TX_4X4, TX_8X8);
-
   // inverse transform row vectors, and transpose
   for (i = 0; i < n2; ++i) {
-    HIGH_IHT_4x8[tx_type].rows(input, outtmp, cfg.row.cos_bit,
-                               cfg.row.stage_range, bd);
+    HIGH_IHT_4x8[tx_type].rows(input, outtmp, bd);
     for (j = 0; j < n; ++j) {
       out[j][i] = HIGHBD_WRAPLOW(dct_const_round_shift(outtmp[j] * Sqrt2), bd);
     }
@@ -1732,8 +1625,7 @@
 
   // inverse transform column vectors
   for (i = 0; i < n; ++i) {
-    HIGH_IHT_4x8[tx_type].cols(out[i], out[i], cfg.col.cos_bit,
-                               cfg.col.stage_range, bd);
+    HIGH_IHT_4x8[tx_type].cols(out[i], out[i], bd);
   }
 
 #if CONFIG_EXT_TX
@@ -1754,24 +1646,24 @@
 void av1_highbd_iht8x4_32_add_c(const tran_low_t *input, uint8_t *dest8,
                                 int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_8x4[] = {
-    { highbd_idct4, highbd_idct8 },    // DCT_DCT
-    { highbd_iadst4, highbd_idct8 },   // ADST_DCT
-    { highbd_idct4, highbd_iadst8 },   // DCT_ADST
-    { highbd_iadst4, highbd_iadst8 },  // ADST_ADST
+    { aom_highbd_idct4_c, aom_highbd_idct8_c },    // DCT_DCT
+    { aom_highbd_iadst4_c, aom_highbd_idct8_c },   // ADST_DCT
+    { aom_highbd_idct4_c, aom_highbd_iadst8_c },   // DCT_ADST
+    { aom_highbd_iadst4_c, aom_highbd_iadst8_c },  // ADST_ADST
 #if CONFIG_EXT_TX
-    { highbd_iadst4, highbd_idct8 },       // FLIPADST_DCT
-    { highbd_idct4, highbd_iadst8 },       // DCT_FLIPADST
-    { highbd_iadst4, highbd_iadst8 },      // FLIPADST_FLIPADST
-    { highbd_iadst4, highbd_iadst8 },      // ADST_FLIPADST
-    { highbd_iadst4, highbd_iadst8 },      // FLIPADST_ADST
-    { highbd_iidtx4_c, highbd_iidtx8_c },  // IDTX
-    { highbd_idct4, highbd_iidtx8_c },     // V_DCT
-    { highbd_iidtx4_c, highbd_idct8 },     // H_DCT
-    { highbd_iadst4, highbd_iidtx8_c },    // V_ADST
-    { highbd_iidtx4_c, highbd_iadst8 },    // H_ADST
-    { highbd_iadst4, highbd_iidtx8_c },    // V_FLIPADST
-    { highbd_iidtx4_c, highbd_iadst8 },    // H_FLIPADST
-#endif                                     // CONFIG_EXT_TX
+    { aom_highbd_iadst4_c, aom_highbd_idct8_c },   // FLIPADST_DCT
+    { aom_highbd_idct4_c, aom_highbd_iadst8_c },   // DCT_FLIPADST
+    { aom_highbd_iadst4_c, aom_highbd_iadst8_c },  // FLIPADST_FLIPADST
+    { aom_highbd_iadst4_c, aom_highbd_iadst8_c },  // ADST_FLIPADST
+    { aom_highbd_iadst4_c, aom_highbd_iadst8_c },  // FLIPADST_ADST
+    { highbd_iidtx4_c, highbd_iidtx8_c },          // IDTX
+    { aom_highbd_idct4_c, highbd_iidtx8_c },       // V_DCT
+    { highbd_iidtx4_c, aom_highbd_idct8_c },       // H_DCT
+    { aom_highbd_iadst4_c, highbd_iidtx8_c },      // V_ADST
+    { highbd_iidtx4_c, aom_highbd_iadst8_c },      // H_ADST
+    { aom_highbd_iadst4_c, highbd_iidtx8_c },      // V_FLIPADST
+    { highbd_iidtx4_c, aom_highbd_iadst8_c },      // H_FLIPADST
+#endif                                             // CONFIG_EXT_TX
   };
   const int n = 4;
   const int n2 = 8;
@@ -1783,12 +1675,9 @@
   tran_low_t *outp = &out[0][0];
   int outstride = n;
 
-  tx_2d_cfg cfg = inv_tx_cfg(tx_type, TX_8X8, TX_4X4);
-
   // inverse transform row vectors, and transpose
   for (i = 0; i < n; ++i) {
-    HIGH_IHT_8x4[tx_type].rows(input, outtmp, cfg.row.cos_bit,
-                               cfg.row.stage_range, bd);
+    HIGH_IHT_8x4[tx_type].rows(input, outtmp, bd);
     for (j = 0; j < n2; ++j) {
       out[j][i] = HIGHBD_WRAPLOW(dct_const_round_shift(outtmp[j] * Sqrt2), bd);
     }
@@ -1797,8 +1686,7 @@
 
   // inverse transform column vectors
   for (i = 0; i < n2; ++i) {
-    HIGH_IHT_8x4[tx_type].cols(out[i], out[i], cfg.col.cos_bit,
-                               cfg.col.stage_range, bd);
+    HIGH_IHT_8x4[tx_type].cols(out[i], out[i], bd);
   }
 
 #if CONFIG_EXT_TX
@@ -1819,24 +1707,24 @@
 void av1_highbd_iht4x16_64_add_c(const tran_low_t *input, uint8_t *dest8,
                                  int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_4x16[] = {
-    { highbd_idct16, highbd_idct4 },    // DCT_DCT
-    { highbd_iadst16, highbd_idct4 },   // ADST_DCT
-    { highbd_idct16, highbd_iadst4 },   // DCT_ADST
-    { highbd_iadst16, highbd_iadst4 },  // ADST_ADST
+    { aom_highbd_idct16_c, aom_highbd_idct4_c },    // DCT_DCT
+    { aom_highbd_iadst16_c, aom_highbd_idct4_c },   // ADST_DCT
+    { aom_highbd_idct16_c, aom_highbd_iadst4_c },   // DCT_ADST
+    { aom_highbd_iadst16_c, aom_highbd_iadst4_c },  // ADST_ADST
 #if CONFIG_EXT_TX
-    { highbd_iadst16, highbd_idct4 },       // FLIPADST_DCT
-    { highbd_idct16, highbd_iadst4 },       // DCT_FLIPADST
-    { highbd_iadst16, highbd_iadst4 },      // FLIPADST_FLIPADST
-    { highbd_iadst16, highbd_iadst4 },      // ADST_FLIPADST
-    { highbd_iadst16, highbd_iadst4 },      // FLIPADST_ADST
-    { highbd_iidtx16_c, highbd_iidtx4_c },  // IDTX
-    { highbd_idct16, highbd_iidtx4_c },     // V_DCT
-    { highbd_iidtx16_c, highbd_idct4 },     // H_DCT
-    { highbd_iadst16, highbd_iidtx4_c },    // V_ADST
-    { highbd_iidtx16_c, highbd_iadst4 },    // H_ADST
-    { highbd_iadst16, highbd_iidtx4_c },    // V_FLIPADST
-    { highbd_iidtx16_c, highbd_iadst4 },    // H_FLIPADST
-#endif                                      // CONFIG_EXT_TX
+    { aom_highbd_iadst16_c, aom_highbd_idct4_c },   // FLIPADST_DCT
+    { aom_highbd_idct16_c, aom_highbd_iadst4_c },   // DCT_FLIPADST
+    { aom_highbd_iadst16_c, aom_highbd_iadst4_c },  // FLIPADST_FLIPADST
+    { aom_highbd_iadst16_c, aom_highbd_iadst4_c },  // ADST_FLIPADST
+    { aom_highbd_iadst16_c, aom_highbd_iadst4_c },  // FLIPADST_ADST
+    { highbd_iidtx16_c, highbd_iidtx4_c },          // IDTX
+    { aom_highbd_idct16_c, highbd_iidtx4_c },       // V_DCT
+    { highbd_iidtx16_c, aom_highbd_idct4_c },       // H_DCT
+    { aom_highbd_iadst16_c, highbd_iidtx4_c },      // V_ADST
+    { highbd_iidtx16_c, aom_highbd_iadst4_c },      // H_ADST
+    { aom_highbd_iadst16_c, highbd_iidtx4_c },      // V_FLIPADST
+    { highbd_iidtx16_c, aom_highbd_iadst4_c },      // H_FLIPADST
+#endif                                              // CONFIG_EXT_TX
   };
   const int n = 4;
   const int n4 = 16;
@@ -1848,20 +1736,15 @@
   tran_low_t *outp = &out[0][0];
   int outstride = n4;
 
-  tx_2d_cfg cfg = inv_tx_cfg(tx_type, TX_4X4, TX_16X16);
-
   // inverse transform row vectors, and transpose
   for (i = 0; i < n4; ++i) {
-    HIGH_IHT_4x16[tx_type].rows(input, outtmp, cfg.row.cos_bit,
-                                cfg.row.stage_range, bd);
+    HIGH_IHT_4x16[tx_type].rows(input, outtmp, bd);
     for (j = 0; j < n; ++j) out[j][i] = outtmp[j];
     input += n;
   }
 
   // inverse transform column vectors
-  for (i = 0; i < n; ++i)
-    HIGH_IHT_4x16[tx_type].cols(out[i], out[i], cfg.col.cos_bit,
-                                cfg.col.stage_range, bd);
+  for (i = 0; i < n; ++i) HIGH_IHT_4x16[tx_type].cols(out[i], out[i], bd);
 
 #if CONFIG_EXT_TX
   maybe_flip_strides16(&dest, &stride, &outp, &outstride, tx_type, n4, n);
@@ -1881,24 +1764,24 @@
 void av1_highbd_iht16x4_64_add_c(const tran_low_t *input, uint8_t *dest8,
                                  int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_16x4[] = {
-    { highbd_idct4, highbd_idct16 },    // DCT_DCT
-    { highbd_iadst4, highbd_idct16 },   // ADST_DCT
-    { highbd_idct4, highbd_iadst16 },   // DCT_ADST
-    { highbd_iadst4, highbd_iadst16 },  // ADST_ADST
+    { aom_highbd_idct4_c, aom_highbd_idct16_c },    // DCT_DCT
+    { aom_highbd_iadst4_c, aom_highbd_idct16_c },   // ADST_DCT
+    { aom_highbd_idct4_c, aom_highbd_iadst16_c },   // DCT_ADST
+    { aom_highbd_iadst4_c, aom_highbd_iadst16_c },  // ADST_ADST
 #if CONFIG_EXT_TX
-    { highbd_iadst4, highbd_idct16 },       // FLIPADST_DCT
-    { highbd_idct4, highbd_iadst16 },       // DCT_FLIPADST
-    { highbd_iadst4, highbd_iadst16 },      // FLIPADST_FLIPADST
-    { highbd_iadst4, highbd_iadst16 },      // ADST_FLIPADST
-    { highbd_iadst4, highbd_iadst16 },      // FLIPADST_ADST
-    { highbd_iidtx4_c, highbd_iidtx16_c },  // IDTX
-    { highbd_idct4, highbd_iidtx16_c },     // V_DCT
-    { highbd_iidtx4_c, highbd_idct16 },     // H_DCT
-    { highbd_iadst4, highbd_iidtx16_c },    // V_ADST
-    { highbd_iidtx4_c, highbd_iadst16 },    // H_ADST
-    { highbd_iadst4, highbd_iidtx16_c },    // V_FLIPADST
-    { highbd_iidtx4_c, highbd_iadst16 },    // H_FLIPADST
-#endif                                      // CONFIG_EXT_TX
+    { aom_highbd_iadst4_c, aom_highbd_idct16_c },   // FLIPADST_DCT
+    { aom_highbd_idct4_c, aom_highbd_iadst16_c },   // DCT_FLIPADST
+    { aom_highbd_iadst4_c, aom_highbd_iadst16_c },  // FLIPADST_FLIPADST
+    { aom_highbd_iadst4_c, aom_highbd_iadst16_c },  // ADST_FLIPADST
+    { aom_highbd_iadst4_c, aom_highbd_iadst16_c },  // FLIPADST_ADST
+    { highbd_iidtx4_c, highbd_iidtx16_c },          // IDTX
+    { aom_highbd_idct4_c, highbd_iidtx16_c },       // V_DCT
+    { highbd_iidtx4_c, aom_highbd_idct16_c },       // H_DCT
+    { aom_highbd_iadst4_c, highbd_iidtx16_c },      // V_ADST
+    { highbd_iidtx4_c, aom_highbd_iadst16_c },      // H_ADST
+    { aom_highbd_iadst4_c, highbd_iidtx16_c },      // V_FLIPADST
+    { highbd_iidtx4_c, aom_highbd_iadst16_c },      // H_FLIPADST
+#endif                                              // CONFIG_EXT_TX
   };
   const int n = 4;
   const int n4 = 16;
@@ -1910,20 +1793,16 @@
   tran_low_t *outp = &out[0][0];
   int outstride = n;
 
-  tx_2d_cfg cfg = inv_tx_cfg(tx_type, TX_16X16, TX_4X4);
-
   // inverse transform row vectors, and transpose
   for (i = 0; i < n; ++i) {
-    HIGH_IHT_16x4[tx_type].rows(input, outtmp, cfg.row.cos_bit,
-                                cfg.row.stage_range, bd);
+    HIGH_IHT_16x4[tx_type].rows(input, outtmp, bd);
     for (j = 0; j < n4; ++j) out[j][i] = outtmp[j];
     input += n4;
   }
 
   // inverse transform column vectors
   for (i = 0; i < n4; ++i) {
-    HIGH_IHT_16x4[tx_type].cols(out[i], out[i], cfg.col.cos_bit,
-                                cfg.col.stage_range, bd);
+    HIGH_IHT_16x4[tx_type].cols(out[i], out[i], bd);
   }
 
 #if CONFIG_EXT_TX
@@ -1944,24 +1823,24 @@
 void av1_highbd_iht8x16_128_add_c(const tran_low_t *input, uint8_t *dest8,
                                   int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_8x16[] = {
-    { highbd_idct16, highbd_idct8 },    // DCT_DCT
-    { highbd_iadst16, highbd_idct8 },   // ADST_DCT
-    { highbd_idct16, highbd_iadst8 },   // DCT_ADST
-    { highbd_iadst16, highbd_iadst8 },  // ADST_ADST
+    { aom_highbd_idct16_c, aom_highbd_idct8_c },    // DCT_DCT
+    { aom_highbd_iadst16_c, aom_highbd_idct8_c },   // ADST_DCT
+    { aom_highbd_idct16_c, aom_highbd_iadst8_c },   // DCT_ADST
+    { aom_highbd_iadst16_c, aom_highbd_iadst8_c },  // ADST_ADST
 #if CONFIG_EXT_TX
-    { highbd_iadst16, highbd_idct8 },       // FLIPADST_DCT
-    { highbd_idct16, highbd_iadst8 },       // DCT_FLIPADST
-    { highbd_iadst16, highbd_iadst8 },      // FLIPADST_FLIPADST
-    { highbd_iadst16, highbd_iadst8 },      // ADST_FLIPADST
-    { highbd_iadst16, highbd_iadst8 },      // FLIPADST_ADST
-    { highbd_iidtx16_c, highbd_iidtx8_c },  // IDTX
-    { highbd_idct16, highbd_iidtx8_c },     // V_DCT
-    { highbd_iidtx16_c, highbd_idct8 },     // H_DCT
-    { highbd_iadst16, highbd_iidtx8_c },    // V_ADST
-    { highbd_iidtx16_c, highbd_iadst8 },    // H_ADST
-    { highbd_iadst16, highbd_iidtx8_c },    // V_FLIPADST
-    { highbd_iidtx16_c, highbd_iadst8 },    // H_FLIPADST
-#endif                                      // CONFIG_EXT_TX
+    { aom_highbd_iadst16_c, aom_highbd_idct8_c },   // FLIPADST_DCT
+    { aom_highbd_idct16_c, aom_highbd_iadst8_c },   // DCT_FLIPADST
+    { aom_highbd_iadst16_c, aom_highbd_iadst8_c },  // FLIPADST_FLIPADST
+    { aom_highbd_iadst16_c, aom_highbd_iadst8_c },  // ADST_FLIPADST
+    { aom_highbd_iadst16_c, aom_highbd_iadst8_c },  // FLIPADST_ADST
+    { highbd_iidtx16_c, highbd_iidtx8_c },          // IDTX
+    { aom_highbd_idct16_c, highbd_iidtx8_c },       // V_DCT
+    { highbd_iidtx16_c, aom_highbd_idct8_c },       // H_DCT
+    { aom_highbd_iadst16_c, highbd_iidtx8_c },      // V_ADST
+    { highbd_iidtx16_c, aom_highbd_iadst8_c },      // H_ADST
+    { aom_highbd_iadst16_c, highbd_iidtx8_c },      // V_FLIPADST
+    { highbd_iidtx16_c, aom_highbd_iadst8_c },      // H_FLIPADST
+#endif                                              // CONFIG_EXT_TX
   };
   const int n = 8;
   const int n2 = 16;
@@ -1973,12 +1852,9 @@
   tran_low_t *outp = &out[0][0];
   int outstride = n2;
 
-  tx_2d_cfg cfg = inv_tx_cfg(tx_type, TX_8X8, TX_16X16);
-
   // inverse transform row vectors, and transpose
   for (i = 0; i < n2; ++i) {
-    HIGH_IHT_8x16[tx_type].rows(input, outtmp, cfg.row.cos_bit,
-                                cfg.row.stage_range, bd);
+    HIGH_IHT_8x16[tx_type].rows(input, outtmp, bd);
     for (j = 0; j < n; ++j)
       out[j][i] = HIGHBD_WRAPLOW(dct_const_round_shift(outtmp[j] * Sqrt2), bd);
     input += n;
@@ -1986,8 +1862,7 @@
 
   // inverse transform column vectors
   for (i = 0; i < n; ++i) {
-    HIGH_IHT_8x16[tx_type].cols(out[i], out[i], cfg.col.cos_bit,
-                                cfg.col.stage_range, bd);
+    HIGH_IHT_8x16[tx_type].cols(out[i], out[i], bd);
   }
 
 #if CONFIG_EXT_TX
@@ -2008,24 +1883,24 @@
 void av1_highbd_iht16x8_128_add_c(const tran_low_t *input, uint8_t *dest8,
                                   int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_16x8[] = {
-    { highbd_idct8, highbd_idct16 },    // DCT_DCT
-    { highbd_iadst8, highbd_idct16 },   // ADST_DCT
-    { highbd_idct8, highbd_iadst16 },   // DCT_ADST
-    { highbd_iadst8, highbd_iadst16 },  // ADST_ADST
+    { aom_highbd_idct8_c, aom_highbd_idct16_c },    // DCT_DCT
+    { aom_highbd_iadst8_c, aom_highbd_idct16_c },   // ADST_DCT
+    { aom_highbd_idct8_c, aom_highbd_iadst16_c },   // DCT_ADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst16_c },  // ADST_ADST
 #if CONFIG_EXT_TX
-    { highbd_iadst8, highbd_idct16 },       // FLIPADST_DCT
-    { highbd_idct8, highbd_iadst16 },       // DCT_FLIPADST
-    { highbd_iadst8, highbd_iadst16 },      // FLIPADST_FLIPADST
-    { highbd_iadst8, highbd_iadst16 },      // ADST_FLIPADST
-    { highbd_iadst8, highbd_iadst16 },      // FLIPADST_ADST
-    { highbd_iidtx8_c, highbd_iidtx16_c },  // IDTX
-    { highbd_idct8, highbd_iidtx16_c },     // V_DCT
-    { highbd_iidtx8_c, highbd_idct16 },     // H_DCT
-    { highbd_iadst8, highbd_iidtx16_c },    // V_ADST
-    { highbd_iidtx8_c, highbd_iadst16 },    // H_ADST
-    { highbd_iadst8, highbd_iidtx16_c },    // V_FLIPADST
-    { highbd_iidtx8_c, highbd_iadst16 },    // H_FLIPADST
-#endif                                      // CONFIG_EXT_TX
+    { aom_highbd_iadst8_c, aom_highbd_idct16_c },   // FLIPADST_DCT
+    { aom_highbd_idct8_c, aom_highbd_iadst16_c },   // DCT_FLIPADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst16_c },  // FLIPADST_FLIPADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst16_c },  // ADST_FLIPADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst16_c },  // FLIPADST_ADST
+    { highbd_iidtx8_c, highbd_iidtx16_c },          // IDTX
+    { aom_highbd_idct8_c, highbd_iidtx16_c },       // V_DCT
+    { highbd_iidtx8_c, aom_highbd_idct16_c },       // H_DCT
+    { aom_highbd_iadst8_c, highbd_iidtx16_c },      // V_ADST
+    { highbd_iidtx8_c, aom_highbd_iadst16_c },      // H_ADST
+    { aom_highbd_iadst8_c, highbd_iidtx16_c },      // V_FLIPADST
+    { highbd_iidtx8_c, aom_highbd_iadst16_c },      // H_FLIPADST
+#endif                                              // CONFIG_EXT_TX
   };
   const int n = 8;
   const int n2 = 16;
@@ -2037,12 +1912,9 @@
   tran_low_t *outp = &out[0][0];
   int outstride = n;
 
-  tx_2d_cfg cfg = inv_tx_cfg(tx_type, TX_16X16, TX_8X8);
-
   // inverse transform row vectors, and transpose
   for (i = 0; i < n; ++i) {
-    HIGH_IHT_16x8[tx_type].rows(input, outtmp, cfg.row.cos_bit,
-                                cfg.row.stage_range, bd);
+    HIGH_IHT_16x8[tx_type].rows(input, outtmp, bd);
     for (j = 0; j < n2; ++j)
       out[j][i] = HIGHBD_WRAPLOW(dct_const_round_shift(outtmp[j] * Sqrt2), bd);
     input += n2;
@@ -2050,8 +1922,7 @@
 
   // inverse transform column vectors
   for (i = 0; i < n2; ++i) {
-    HIGH_IHT_16x8[tx_type].cols(out[i], out[i], cfg.col.cos_bit,
-                                cfg.col.stage_range, bd);
+    HIGH_IHT_16x8[tx_type].cols(out[i], out[i], bd);
   }
 
 #if CONFIG_EXT_TX
@@ -2072,24 +1943,24 @@
 void av1_highbd_iht8x32_256_add_c(const tran_low_t *input, uint8_t *dest8,
                                   int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_8x32[] = {
-    { highbd_idct32, highbd_idct8 },           // DCT_DCT
-    { highbd_ihalfright32_c, highbd_idct8 },   // ADST_DCT
-    { highbd_idct32, highbd_iadst8 },          // DCT_ADST
-    { highbd_ihalfright32_c, highbd_iadst8 },  // ADST_ADST
+    { aom_highbd_idct32_c, aom_highbd_idct8_c },     // DCT_DCT
+    { highbd_ihalfright32_c, aom_highbd_idct8_c },   // ADST_DCT
+    { aom_highbd_idct32_c, aom_highbd_iadst8_c },    // DCT_ADST
+    { highbd_ihalfright32_c, aom_highbd_iadst8_c },  // ADST_ADST
 #if CONFIG_EXT_TX
-    { highbd_ihalfright32_c, highbd_idct8 },     // FLIPADST_DCT
-    { highbd_idct32, highbd_iadst8 },            // DCT_FLIPADST
-    { highbd_ihalfright32_c, highbd_iadst8 },    // FLIPADST_FLIPADST
-    { highbd_ihalfright32_c, highbd_iadst8 },    // ADST_FLIPADST
-    { highbd_ihalfright32_c, highbd_iadst8 },    // FLIPADST_ADST
-    { highbd_iidtx32_c, highbd_iidtx8_c },       // IDTX
-    { highbd_idct32, highbd_iidtx8_c },          // V_DCT
-    { highbd_iidtx32_c, highbd_idct8 },          // H_DCT
-    { highbd_ihalfright32_c, highbd_iidtx8_c },  // V_ADST
-    { highbd_iidtx32_c, highbd_iadst8 },         // H_ADST
-    { highbd_ihalfright32_c, highbd_iidtx8_c },  // V_FLIPADST
-    { highbd_iidtx32_c, highbd_iadst8 },         // H_FLIPADST
-#endif                                           // CONFIG_EXT_TX
+    { highbd_ihalfright32_c, aom_highbd_idct8_c },   // FLIPADST_DCT
+    { aom_highbd_idct32_c, aom_highbd_iadst8_c },    // DCT_FLIPADST
+    { highbd_ihalfright32_c, aom_highbd_iadst8_c },  // FLIPADST_FLIPADST
+    { highbd_ihalfright32_c, aom_highbd_iadst8_c },  // ADST_FLIPADST
+    { highbd_ihalfright32_c, aom_highbd_iadst8_c },  // FLIPADST_ADST
+    { highbd_iidtx32_c, highbd_iidtx8_c },           // IDTX
+    { aom_highbd_idct32_c, highbd_iidtx8_c },        // V_DCT
+    { highbd_iidtx32_c, aom_highbd_idct8_c },        // H_DCT
+    { highbd_ihalfright32_c, highbd_iidtx8_c },      // V_ADST
+    { highbd_iidtx32_c, aom_highbd_iadst8_c },       // H_ADST
+    { highbd_ihalfright32_c, highbd_iidtx8_c },      // V_FLIPADST
+    { highbd_iidtx32_c, aom_highbd_iadst8_c },       // H_FLIPADST
+#endif                                               // CONFIG_EXT_TX
   };
   const int n = 8;
   const int n4 = 32;
@@ -2101,20 +1972,15 @@
   tran_low_t *outp = &out[0][0];
   int outstride = n4;
 
-  tx_2d_cfg cfg = inv_tx_cfg(tx_type, TX_8X8, TX_32X32);
-
   // inverse transform row vectors, and transpose
   for (i = 0; i < n4; ++i) {
-    HIGH_IHT_8x32[tx_type].rows(input, outtmp, cfg.row.cos_bit,
-                                cfg.row.stage_range, bd);
+    HIGH_IHT_8x32[tx_type].rows(input, outtmp, bd);
     for (j = 0; j < n; ++j) out[j][i] = outtmp[j];
     input += n;
   }
 
   // inverse transform column vectors
-  for (i = 0; i < n; ++i)
-    HIGH_IHT_8x32[tx_type].cols(out[i], out[i], cfg.col.cos_bit,
-                                cfg.col.stage_range, bd);
+  for (i = 0; i < n; ++i) HIGH_IHT_8x32[tx_type].cols(out[i], out[i], bd);
 
 #if CONFIG_EXT_TX
   maybe_flip_strides16(&dest, &stride, &outp, &outstride, tx_type, n4, n);
@@ -2134,24 +2000,24 @@
 void av1_highbd_iht32x8_256_add_c(const tran_low_t *input, uint8_t *dest8,
                                   int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_32x8[] = {
-    { highbd_idct8, highbd_idct32 },           // DCT_DCT
-    { highbd_iadst8, highbd_idct32 },          // ADST_DCT
-    { highbd_idct8, highbd_ihalfright32_c },   // DCT_ADST
-    { highbd_iadst8, highbd_ihalfright32_c },  // ADST_ADST
+    { aom_highbd_idct8_c, aom_highbd_idct32_c },     // DCT_DCT
+    { aom_highbd_iadst8_c, aom_highbd_idct32_c },    // ADST_DCT
+    { aom_highbd_idct8_c, highbd_ihalfright32_c },   // DCT_ADST
+    { aom_highbd_iadst8_c, highbd_ihalfright32_c },  // ADST_ADST
 #if CONFIG_EXT_TX
-    { highbd_iadst8, highbd_idct32 },            // FLIPADST_DCT
-    { highbd_idct8, highbd_ihalfright32_c },     // DCT_FLIPADST
-    { highbd_iadst8, highbd_ihalfright32_c },    // FLIPADST_FLIPADST
-    { highbd_iadst8, highbd_ihalfright32_c },    // ADST_FLIPADST
-    { highbd_iadst8, highbd_ihalfright32_c },    // FLIPADST_ADST
-    { highbd_iidtx8_c, highbd_iidtx32_c },       // IDTX
-    { highbd_idct8, highbd_iidtx32_c },          // V_DCT
-    { highbd_iidtx8_c, highbd_idct32 },          // H_DCT
-    { highbd_iadst8, highbd_iidtx32_c },         // V_ADST
-    { highbd_iidtx8_c, highbd_ihalfright32_c },  // H_ADST
-    { highbd_iadst8, highbd_iidtx32_c },         // V_FLIPADST
-    { highbd_iidtx8_c, highbd_ihalfright32_c },  // H_FLIPADST
-#endif                                           // CONFIG_EXT_TX
+    { aom_highbd_iadst8_c, aom_highbd_idct32_c },    // FLIPADST_DCT
+    { aom_highbd_idct8_c, highbd_ihalfright32_c },   // DCT_FLIPADST
+    { aom_highbd_iadst8_c, highbd_ihalfright32_c },  // FLIPADST_FLIPADST
+    { aom_highbd_iadst8_c, highbd_ihalfright32_c },  // ADST_FLIPADST
+    { aom_highbd_iadst8_c, highbd_ihalfright32_c },  // FLIPADST_ADST
+    { highbd_iidtx8_c, highbd_iidtx32_c },           // IDTX
+    { aom_highbd_idct8_c, highbd_iidtx32_c },        // V_DCT
+    { highbd_iidtx8_c, aom_highbd_idct32_c },        // H_DCT
+    { aom_highbd_iadst8_c, highbd_iidtx32_c },       // V_ADST
+    { highbd_iidtx8_c, highbd_ihalfright32_c },      // H_ADST
+    { aom_highbd_iadst8_c, highbd_iidtx32_c },       // V_FLIPADST
+    { highbd_iidtx8_c, highbd_ihalfright32_c },      // H_FLIPADST
+#endif                                               // CONFIG_EXT_TX
   };
   const int n = 8;
   const int n4 = 32;
@@ -2163,20 +2029,15 @@
   tran_low_t *outp = &out[0][0];
   int outstride = n;
 
-  tx_2d_cfg cfg = inv_tx_cfg(tx_type, TX_32X32, TX_8X8);
-
   // inverse transform row vectors, and transpose
   for (i = 0; i < n; ++i) {
-    HIGH_IHT_32x8[tx_type].rows(input, outtmp, cfg.row.cos_bit,
-                                cfg.row.stage_range, bd);
+    HIGH_IHT_32x8[tx_type].rows(input, outtmp, bd);
     for (j = 0; j < n4; ++j) out[j][i] = outtmp[j];
     input += n4;
   }
 
   // inverse transform column vectors
-  for (i = 0; i < n4; ++i)
-    HIGH_IHT_32x8[tx_type].cols(out[i], out[i], cfg.col.cos_bit,
-                                cfg.col.stage_range, bd);
+  for (i = 0; i < n4; ++i) HIGH_IHT_32x8[tx_type].cols(out[i], out[i], bd);
 
 #if CONFIG_EXT_TX
   maybe_flip_strides16(&dest, &stride, &outp, &outstride, tx_type, n, n4);
@@ -2196,24 +2057,24 @@
 void av1_highbd_iht16x32_512_add_c(const tran_low_t *input, uint8_t *dest8,
                                    int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_16x32[] = {
-    { highbd_idct32, highbd_idct16 },           // DCT_DCT
-    { highbd_ihalfright32_c, highbd_idct16 },   // ADST_DCT
-    { highbd_idct32, highbd_iadst16 },          // DCT_ADST
-    { highbd_ihalfright32_c, highbd_iadst16 },  // ADST_ADST
+    { aom_highbd_idct32_c, aom_highbd_idct16_c },     // DCT_DCT
+    { highbd_ihalfright32_c, aom_highbd_idct16_c },   // ADST_DCT
+    { aom_highbd_idct32_c, aom_highbd_iadst16_c },    // DCT_ADST
+    { highbd_ihalfright32_c, aom_highbd_iadst16_c },  // ADST_ADST
 #if CONFIG_EXT_TX
-    { highbd_ihalfright32_c, highbd_idct16 },     // FLIPADST_DCT
-    { highbd_idct32, highbd_iadst16 },            // DCT_FLIPADST
-    { highbd_ihalfright32_c, highbd_iadst16 },    // FLIPADST_FLIPADST
-    { highbd_ihalfright32_c, highbd_iadst16 },    // ADST_FLIPADST
-    { highbd_ihalfright32_c, highbd_iadst16 },    // FLIPADST_ADST
-    { highbd_iidtx32_c, highbd_iidtx16_c },       // IDTX
-    { highbd_idct32, highbd_iidtx16_c },          // V_DCT
-    { highbd_iidtx32_c, highbd_idct16 },          // H_DCT
-    { highbd_ihalfright32_c, highbd_iidtx16_c },  // V_ADST
-    { highbd_iidtx32_c, highbd_iadst16 },         // H_ADST
-    { highbd_ihalfright32_c, highbd_iidtx16_c },  // V_FLIPADST
-    { highbd_iidtx32_c, highbd_iadst16 },         // H_FLIPADST
-#endif                                            // CONFIG_EXT_TX
+    { highbd_ihalfright32_c, aom_highbd_idct16_c },   // FLIPADST_DCT
+    { aom_highbd_idct32_c, aom_highbd_iadst16_c },    // DCT_FLIPADST
+    { highbd_ihalfright32_c, aom_highbd_iadst16_c },  // FLIPADST_FLIPADST
+    { highbd_ihalfright32_c, aom_highbd_iadst16_c },  // ADST_FLIPADST
+    { highbd_ihalfright32_c, aom_highbd_iadst16_c },  // FLIPADST_ADST
+    { highbd_iidtx32_c, highbd_iidtx16_c },           // IDTX
+    { aom_highbd_idct32_c, highbd_iidtx16_c },        // V_DCT
+    { highbd_iidtx32_c, aom_highbd_idct16_c },        // H_DCT
+    { highbd_ihalfright32_c, highbd_iidtx16_c },      // V_ADST
+    { highbd_iidtx32_c, aom_highbd_iadst16_c },       // H_ADST
+    { highbd_ihalfright32_c, highbd_iidtx16_c },      // V_FLIPADST
+    { highbd_iidtx32_c, aom_highbd_iadst16_c },       // H_FLIPADST
+#endif                                                // CONFIG_EXT_TX
   };
   const int n = 16;
   const int n2 = 32;
@@ -2225,12 +2086,9 @@
   tran_low_t *outp = &out[0][0];
   int outstride = n2;
 
-  tx_2d_cfg cfg = inv_tx_cfg(tx_type, TX_16X16, TX_32X32);
-
   // inverse transform row vectors, and transpose
   for (i = 0; i < n2; ++i) {
-    HIGH_IHT_16x32[tx_type].rows(input, outtmp, cfg.row.cos_bit,
-                                 cfg.row.stage_range, bd);
+    HIGH_IHT_16x32[tx_type].rows(input, outtmp, bd);
     for (j = 0; j < n; ++j)
       out[j][i] = HIGHBD_WRAPLOW(dct_const_round_shift(outtmp[j] * Sqrt2), bd);
     input += n;
@@ -2238,8 +2096,7 @@
 
   // inverse transform column vectors
   for (i = 0; i < n; ++i) {
-    HIGH_IHT_16x32[tx_type].cols(out[i], out[i], cfg.col.cos_bit,
-                                 cfg.col.stage_range, bd);
+    HIGH_IHT_16x32[tx_type].cols(out[i], out[i], bd);
   }
 
 #if CONFIG_EXT_TX
@@ -2260,24 +2117,24 @@
 void av1_highbd_iht32x16_512_add_c(const tran_low_t *input, uint8_t *dest8,
                                    int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_32x16[] = {
-    { highbd_idct16, highbd_idct32 },           // DCT_DCT
-    { highbd_iadst16, highbd_idct32 },          // ADST_DCT
-    { highbd_idct16, highbd_ihalfright32_c },   // DCT_ADST
-    { highbd_iadst16, highbd_ihalfright32_c },  // ADST_ADST
+    { aom_highbd_idct16_c, aom_highbd_idct32_c },     // DCT_DCT
+    { aom_highbd_iadst16_c, aom_highbd_idct32_c },    // ADST_DCT
+    { aom_highbd_idct16_c, highbd_ihalfright32_c },   // DCT_ADST
+    { aom_highbd_iadst16_c, highbd_ihalfright32_c },  // ADST_ADST
 #if CONFIG_EXT_TX
-    { highbd_iadst16, highbd_idct32 },            // FLIPADST_DCT
-    { highbd_idct16, highbd_ihalfright32_c },     // DCT_FLIPADST
-    { highbd_iadst16, highbd_ihalfright32_c },    // FLIPADST_FLIPADST
-    { highbd_iadst16, highbd_ihalfright32_c },    // ADST_FLIPADST
-    { highbd_iadst16, highbd_ihalfright32_c },    // FLIPADST_ADST
-    { highbd_iidtx16_c, highbd_iidtx32_c },       // IDTX
-    { highbd_idct16, highbd_iidtx32_c },          // V_DCT
-    { highbd_iidtx16_c, highbd_idct32 },          // H_DCT
-    { highbd_iadst16, highbd_iidtx32_c },         // V_ADST
-    { highbd_iidtx16_c, highbd_ihalfright32_c },  // H_ADST
-    { highbd_iadst16, highbd_iidtx32_c },         // V_FLIPADST
-    { highbd_iidtx16_c, highbd_ihalfright32_c },  // H_FLIPADST
-#endif                                            // CONFIG_EXT_TX
+    { aom_highbd_iadst16_c, aom_highbd_idct32_c },    // FLIPADST_DCT
+    { aom_highbd_idct16_c, highbd_ihalfright32_c },   // DCT_FLIPADST
+    { aom_highbd_iadst16_c, highbd_ihalfright32_c },  // FLIPADST_FLIPADST
+    { aom_highbd_iadst16_c, highbd_ihalfright32_c },  // ADST_FLIPADST
+    { aom_highbd_iadst16_c, highbd_ihalfright32_c },  // FLIPADST_ADST
+    { highbd_iidtx16_c, highbd_iidtx32_c },           // IDTX
+    { aom_highbd_idct16_c, highbd_iidtx32_c },        // V_DCT
+    { highbd_iidtx16_c, aom_highbd_idct32_c },        // H_DCT
+    { aom_highbd_iadst16_c, highbd_iidtx32_c },       // V_ADST
+    { highbd_iidtx16_c, highbd_ihalfright32_c },      // H_ADST
+    { aom_highbd_iadst16_c, highbd_iidtx32_c },       // V_FLIPADST
+    { highbd_iidtx16_c, highbd_ihalfright32_c },      // H_FLIPADST
+#endif                                                // CONFIG_EXT_TX
   };
   const int n = 16;
   const int n2 = 32;
@@ -2289,12 +2146,9 @@
   tran_low_t *outp = &out[0][0];
   int outstride = n;
 
-  tx_2d_cfg cfg = inv_tx_cfg(tx_type, TX_32X32, TX_16X16);
-
   // inverse transform row vectors, and transpose
   for (i = 0; i < n; ++i) {
-    HIGH_IHT_32x16[tx_type].rows(input, outtmp, cfg.row.cos_bit,
-                                 cfg.row.stage_range, bd);
+    HIGH_IHT_32x16[tx_type].rows(input, outtmp, bd);
     for (j = 0; j < n2; ++j)
       out[j][i] = HIGHBD_WRAPLOW(dct_const_round_shift(outtmp[j] * Sqrt2), bd);
     input += n2;
@@ -2302,8 +2156,7 @@
 
   // inverse transform column vectors
   for (i = 0; i < n2; ++i) {
-    HIGH_IHT_32x16[tx_type].cols(out[i], out[i], cfg.col.cos_bit,
-                                 cfg.col.stage_range, bd);
+    HIGH_IHT_32x16[tx_type].cols(out[i], out[i], bd);
   }
 
 #if CONFIG_EXT_TX
@@ -2324,24 +2177,24 @@
 void av1_highbd_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
                                 int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_8[] = {
-    { highbd_idct8, highbd_idct8 },    // DCT_DCT
-    { highbd_iadst8, highbd_idct8 },   // ADST_DCT
-    { highbd_idct8, highbd_iadst8 },   // DCT_ADST
-    { highbd_iadst8, highbd_iadst8 },  // ADST_ADST
+    { aom_highbd_idct8_c, aom_highbd_idct8_c },    // DCT_DCT
+    { aom_highbd_iadst8_c, aom_highbd_idct8_c },   // ADST_DCT
+    { aom_highbd_idct8_c, aom_highbd_iadst8_c },   // DCT_ADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst8_c },  // ADST_ADST
 #if CONFIG_EXT_TX
-    { highbd_iadst8, highbd_idct8 },       // FLIPADST_DCT
-    { highbd_idct8, highbd_iadst8 },       // DCT_FLIPADST
-    { highbd_iadst8, highbd_iadst8 },      // FLIPADST_FLIPADST
-    { highbd_iadst8, highbd_iadst8 },      // ADST_FLIPADST
-    { highbd_iadst8, highbd_iadst8 },      // FLIPADST_ADST
-    { highbd_iidtx8_c, highbd_iidtx8_c },  // IDTX
-    { highbd_idct8, highbd_iidtx8_c },     // V_DCT
-    { highbd_iidtx8_c, highbd_idct8 },     // H_DCT
-    { highbd_iadst8, highbd_iidtx8_c },    // V_ADST
-    { highbd_iidtx8_c, highbd_iadst8 },    // H_ADST
-    { highbd_iadst8, highbd_iidtx8_c },    // V_FLIPADST
-    { highbd_iidtx8_c, highbd_iadst8 },    // H_FLIPADST
-#endif                                     // CONFIG_EXT_TX
+    { aom_highbd_iadst8_c, aom_highbd_idct8_c },   // FLIPADST_DCT
+    { aom_highbd_idct8_c, aom_highbd_iadst8_c },   // DCT_FLIPADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst8_c },  // FLIPADST_FLIPADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst8_c },  // ADST_FLIPADST
+    { aom_highbd_iadst8_c, aom_highbd_iadst8_c },  // FLIPADST_ADST
+    { highbd_iidtx8_c, highbd_iidtx8_c },          // IDTX
+    { aom_highbd_idct8_c, highbd_iidtx8_c },       // V_DCT
+    { highbd_iidtx8_c, aom_highbd_idct8_c },       // H_DCT
+    { aom_highbd_iadst8_c, highbd_iidtx8_c },      // V_ADST
+    { highbd_iidtx8_c, aom_highbd_iadst8_c },      // H_ADST
+    { aom_highbd_iadst8_c, highbd_iidtx8_c },      // V_FLIPADST
+    { highbd_iidtx8_c, aom_highbd_iadst8_c },      // H_FLIPADST
+#endif                                             // CONFIG_EXT_TX
   };
 
   uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
@@ -2352,12 +2205,9 @@
   tran_low_t *outp = &out[0][0];
   int outstride = 8;
 
-  tx_2d_cfg cfg = inv_tx_cfg(tx_type, TX_8X8, TX_8X8);
-
   // inverse transform row vectors
   for (i = 0; i < 8; ++i) {
-    HIGH_IHT_8[tx_type].rows(input, out[i], cfg.row.cos_bit,
-                             cfg.row.stage_range, bd);
+    HIGH_IHT_8[tx_type].rows(input, out[i], bd);
     input += 8;
   }
 
@@ -2372,8 +2222,7 @@
 
   // inverse transform column vectors
   for (i = 0; i < 8; ++i) {
-    HIGH_IHT_8[tx_type].cols(out[i], out[i], cfg.col.cos_bit,
-                             cfg.col.stage_range, bd);
+    HIGH_IHT_8[tx_type].cols(out[i], out[i], bd);
   }
 
 #if CONFIG_EXT_TX
@@ -2394,24 +2243,24 @@
 void av1_highbd_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
                                    int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_16[] = {
-    { highbd_idct16, highbd_idct16 },    // DCT_DCT
-    { highbd_iadst16, highbd_idct16 },   // ADST_DCT
-    { highbd_idct16, highbd_iadst16 },   // DCT_ADST
-    { highbd_iadst16, highbd_iadst16 },  // ADST_ADST
+    { aom_highbd_idct16_c, aom_highbd_idct16_c },    // DCT_DCT
+    { aom_highbd_iadst16_c, aom_highbd_idct16_c },   // ADST_DCT
+    { aom_highbd_idct16_c, aom_highbd_iadst16_c },   // DCT_ADST
+    { aom_highbd_iadst16_c, aom_highbd_iadst16_c },  // ADST_ADST
 #if CONFIG_EXT_TX
-    { highbd_iadst16, highbd_idct16 },       // FLIPADST_DCT
-    { highbd_idct16, highbd_iadst16 },       // DCT_FLIPADST
-    { highbd_iadst16, highbd_iadst16 },      // FLIPADST_FLIPADST
-    { highbd_iadst16, highbd_iadst16 },      // ADST_FLIPADST
-    { highbd_iadst16, highbd_iadst16 },      // FLIPADST_ADST
-    { highbd_iidtx16_c, highbd_iidtx16_c },  // IDTX
-    { highbd_idct16, highbd_iidtx16_c },     // V_DCT
-    { highbd_iidtx16_c, highbd_idct16 },     // H_DCT
-    { highbd_iadst16, highbd_iidtx16_c },    // V_ADST
-    { highbd_iidtx16_c, highbd_iadst16 },    // H_ADST
-    { highbd_iadst16, highbd_iidtx16_c },    // V_FLIPADST
-    { highbd_iidtx16_c, highbd_iadst16 },    // H_FLIPADST
-#endif                                       // CONFIG_EXT_TX
+    { aom_highbd_iadst16_c, aom_highbd_idct16_c },   // FLIPADST_DCT
+    { aom_highbd_idct16_c, aom_highbd_iadst16_c },   // DCT_FLIPADST
+    { aom_highbd_iadst16_c, aom_highbd_iadst16_c },  // FLIPADST_FLIPADST
+    { aom_highbd_iadst16_c, aom_highbd_iadst16_c },  // ADST_FLIPADST
+    { aom_highbd_iadst16_c, aom_highbd_iadst16_c },  // FLIPADST_ADST
+    { highbd_iidtx16_c, highbd_iidtx16_c },          // IDTX
+    { aom_highbd_idct16_c, highbd_iidtx16_c },       // V_DCT
+    { highbd_iidtx16_c, aom_highbd_idct16_c },       // H_DCT
+    { aom_highbd_iadst16_c, highbd_iidtx16_c },      // V_ADST
+    { highbd_iidtx16_c, aom_highbd_iadst16_c },      // H_ADST
+    { aom_highbd_iadst16_c, highbd_iidtx16_c },      // V_FLIPADST
+    { highbd_iidtx16_c, aom_highbd_iadst16_c },      // H_FLIPADST
+#endif                                               // CONFIG_EXT_TX
   };
 
   uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
@@ -2422,12 +2271,9 @@
   tran_low_t *outp = &out[0][0];
   int outstride = 16;
 
-  tx_2d_cfg cfg = inv_tx_cfg(tx_type, TX_16X16, TX_16X16);
-
   // inverse transform row vectors
   for (i = 0; i < 16; ++i) {
-    HIGH_IHT_16[tx_type].rows(input, out[i], cfg.row.cos_bit,
-                              cfg.row.stage_range, bd);
+    HIGH_IHT_16[tx_type].rows(input, out[i], bd);
     input += 16;
   }
 
@@ -2442,8 +2288,7 @@
 
   // inverse transform column vectors
   for (i = 0; i < 16; ++i) {
-    HIGH_IHT_16[tx_type].cols(out[i], out[i], cfg.col.cos_bit,
-                              cfg.col.stage_range, bd);
+    HIGH_IHT_16[tx_type].cols(out[i], out[i], bd);
   }
 
 #if CONFIG_EXT_TX
@@ -2465,18 +2310,18 @@
 static void highbd_iht32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
                                        int stride, int tx_type, int bd) {
   static const highbd_transform_2d HIGH_IHT_32[] = {
-    { highbd_idct32, highbd_idct32 },                  // DCT_DCT
-    { highbd_ihalfright32_c, highbd_idct32 },          // ADST_DCT
-    { highbd_idct32, highbd_ihalfright32_c },          // DCT_ADST
+    { aom_highbd_idct32_c, aom_highbd_idct32_c },      // DCT_DCT
+    { highbd_ihalfright32_c, aom_highbd_idct32_c },    // ADST_DCT
+    { aom_highbd_idct32_c, highbd_ihalfright32_c },    // DCT_ADST
     { highbd_ihalfright32_c, highbd_ihalfright32_c },  // ADST_ADST
-    { highbd_ihalfright32_c, highbd_idct32 },          // FLIPADST_DCT
-    { highbd_idct32, highbd_ihalfright32_c },          // DCT_FLIPADST
+    { highbd_ihalfright32_c, aom_highbd_idct32_c },    // FLIPADST_DCT
+    { aom_highbd_idct32_c, highbd_ihalfright32_c },    // DCT_FLIPADST
     { highbd_ihalfright32_c, highbd_ihalfright32_c },  // FLIPADST_FLIPADST
     { highbd_ihalfright32_c, highbd_ihalfright32_c },  // ADST_FLIPADST
     { highbd_ihalfright32_c, highbd_ihalfright32_c },  // FLIPADST_ADST
     { highbd_iidtx32_c, highbd_iidtx32_c },            // IDTX
-    { highbd_idct32, highbd_iidtx32_c },               // V_DCT
-    { highbd_iidtx32_c, highbd_idct32 },               // H_DCT
+    { aom_highbd_idct32_c, highbd_iidtx32_c },         // V_DCT
+    { highbd_iidtx32_c, aom_highbd_idct32_c },         // H_DCT
     { highbd_ihalfright32_c, highbd_iidtx32_c },       // V_ADST
     { highbd_iidtx32_c, highbd_ihalfright32_c },       // H_ADST
     { highbd_ihalfright32_c, highbd_iidtx32_c },       // V_FLIPADST
@@ -2491,12 +2336,9 @@
   tran_low_t *outp = &out[0][0];
   int outstride = 32;
 
-  tx_2d_cfg cfg = inv_tx_cfg(tx_type, TX_32X32, TX_32X32);
-
   // inverse transform row vectors
   for (i = 0; i < 32; ++i) {
-    HIGH_IHT_32[tx_type].rows(input, out[i], cfg.row.cos_bit,
-                              cfg.row.stage_range, bd);
+    HIGH_IHT_32[tx_type].rows(input, out[i], bd);
     input += 32;
   }
 
@@ -2511,8 +2353,7 @@
 
   // inverse transform column vectors
   for (i = 0; i < 32; ++i) {
-    HIGH_IHT_32[tx_type].cols(out[i], out[i], cfg.col.cos_bit,
-                              cfg.col.stage_range, bd);
+    HIGH_IHT_32[tx_type].cols(out[i], out[i], bd);
   }
 
   maybe_flip_strides16(&dest, &stride, &outp, &outstride, tx_type, 32, 32);
@@ -2561,12 +2402,9 @@
   tran_low_t *outp = &out[0][0];
   int outstride = 64;
 
-  tx_2d_cfg cfg = inv_tx_cfg(tx_type, TX_64X64, TX_64X64);
-
   // inverse transform row vectors
   for (i = 0; i < 64; ++i) {
-    HIGH_IHT_64[tx_type].rows(input, out[i], cfg.row.cos_bit,
-                              cfg.row.stage_range, bd);
+    HIGH_IHT_64[tx_type].rows(input, out[i], bd);
     for (j = 0; j < 64; ++j) out[i][j] = ROUND_POWER_OF_TWO(out[i][j], 1);
     input += 64;
   }
@@ -2582,8 +2420,7 @@
 
   // inverse transform column vectors
   for (i = 0; i < 64; ++i) {
-    HIGH_IHT_64[tx_type].cols(out[i], out[i], cfg.col.cos_bit_col,
-                              cfg.col.stage_range, bd);
+    HIGH_IHT_64[tx_type].cols(out[i], out[i], bd);
   }
 
 #if CONFIG_EXT_TX
diff --git a/av1/common/idct.h b/av1/common/idct.h
index e3a1921..d9c1507 100644
--- a/av1/common/idct.h
+++ b/av1/common/idct.h
@@ -46,9 +46,7 @@
 } transform_2d;
 
 #if CONFIG_HIGHBITDEPTH
-typedef void (*highbd_transform_1d)(const tran_low_t *, tran_low_t *,
-                                    const int8_t *cos_bit,
-                                    const int8_t *stage_range, int bd);
+typedef void (*highbd_transform_1d)(const tran_low_t *, tran_low_t *, int bd);
 
 typedef struct {
   highbd_transform_1d cols, rows;  // vertical and horizontal