Remove LGT experiment

This experiment has been abandoned for AV1. Remove the LGT (line graph
transform) kernels and basis matrices, the CONFIG_LGT code paths in the
forward and inverse transforms, the build system flags, and the test
guards.

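For reference, the removed inverse LGT was a dense matrix multiply
against a small precomputed basis (lgt4_170 for inter blocks, lgt4_140
for intra), followed by the usual DCT rounding shift. A minimal
standalone sketch of the deleted 4-point path follows; the matrix and
the inner loops mirror the deleted lgt4_170 table and ilgt4() below,
while the typedefs, the simplified dct_const_round_shift(), and the
main() driver are illustrative stand-ins (the WRAPLOW clamp applied by
the real code is omitted):

    #include <stdint.h>
    #include <stdio.h>

    typedef int32_t tran_low_t;
    typedef int64_t tran_high_t;

    /* Same rounding as aom_dsp's dct_const_round_shift(). */
    #define DCT_CONST_BITS 14
    static tran_high_t dct_const_round_shift(tran_high_t x) {
      return (x + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS;
    }

    /* lgt4_170 basis, copied from the table deleted below. */
    static const tran_high_t lgt4_170[4][4] = {
      { 3636, 9287, 13584, 15902 },
      { 10255, 15563, 2470, -13543 },
      { 14786, 711, -15249, 9231 },
      { 14138, -14420, 10663, -3920 },
    };

    /* Inverse 4-point LGT: s = M^T * input, then round. */
    static void ilgt4(const tran_low_t *input, tran_low_t *output,
                      const tran_high_t *lgtmtx) {
      tran_high_t s[4] = { 0 };
      for (int i = 0; i < 4; ++i)
        for (int j = 0; j < 4; ++j) s[j] += lgtmtx[i * 4 + j] * input[i];
      for (int i = 0; i < 4; ++i)
        output[i] = (tran_low_t)dct_const_round_shift(s[i]);
    }

    int main(void) {
      /* A DC-only column reconstructs the first basis function,
         scaled by 64 / 2^14: prints "14 36 53 62". */
      const tran_low_t in[4] = { 64, 0, 0, 0 };
      tran_low_t out[4];
      ilgt4(in, out, &lgt4_170[0][0]);
      for (int i = 0; i < 4; ++i) printf("%d ", (int)out[i]);
      printf("\n");
      return 0;
    }

The deleted forward kernel (flgt4) applied the same matrix without the
transpose, i.e. s[j] += lgtmtx[j * 4 + i] * input[i].
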
Change-Id: If560a67d00b8ae3daa377a59293d5125a8cb7902
diff --git a/aom_dsp/txfm_common.h b/aom_dsp/txfm_common.h
index 86c6ecc..253aefc 100644
--- a/aom_dsp/txfm_common.h
+++ b/aom_dsp/txfm_common.h
@@ -29,9 +29,9 @@
   int lossless;
   int bd;
   TxSetType tx_set_type;
-#if CONFIG_MRC_TX || CONFIG_LGT
+#if CONFIG_MRC_TX
   int is_inter;
-#endif  // CONFIG_MRC_TX || CONFIG_LGT
+#endif  // CONFIG_MRC_TX
 #if CONFIG_MRC_TX
   int stride;
   uint8_t *dst;
@@ -99,53 +99,4 @@
   return rv;
 }
 
-#if CONFIG_LGT
-// LGT4 name: lgt4_170
-// Self loops: 1.700, 0.000, 0.000, 0.000
-// Edges: 1.000, 1.000, 1.000
-static const tran_high_t lgt4_170[4][4] = {
-  { 3636, 9287, 13584, 15902 },
-  { 10255, 15563, 2470, -13543 },
-  { 14786, 711, -15249, 9231 },
-  { 14138, -14420, 10663, -3920 },
-};
-
-// LGT4 name: lgt4_140
-// Self loops: 1.400, 0.000, 0.000, 0.000
-// Edges: 1.000, 1.000, 1.000
-static const tran_high_t lgt4_140[4][4] = {
-  { 4206, 9518, 13524, 15674 },
-  { 11552, 14833, 1560, -13453 },
-  { 15391, -1906, -14393, 9445 },
-  { 12201, -14921, 12016, -4581 },
-};
-
-// LGT8 name: lgt8_170
-// Self loops: 1.700, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000
-// Edges: 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000
-static const tran_high_t lgt8_170[8][8] = {
-  { 1858, 4947, 7850, 10458, 12672, 14411, 15607, 16217 },
-  { 5494, 13022, 16256, 14129, 7343, -1864, -10456, -15601 },
-  { 8887, 16266, 9500, -5529, -15749, -12273, 1876, 14394 },
-  { 11870, 13351, -6199, -15984, -590, 15733, 7273, -12644 },
-  { 14248, 5137, -15991, 291, 15893, -5685, -13963, 10425 },
-  { 15716, -5450, -10010, 15929, -6665, -8952, 16036, -7835 },
-  { 15533, -13869, 6559, 3421, -12009, 15707, -13011, 5018 },
-  { 11357, -13726, 14841, -14600, 13025, -10259, 6556, -2254 },
-};
-
-// LGT8 name: lgt8_150
-// Self loops: 1.500, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000
-// Edges: 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000
-static const tran_high_t lgt8_150[8][8] = {
-  { 2075, 5110, 7958, 10511, 12677, 14376, 15544, 16140 },
-  { 6114, 13307, 16196, 13845, 7015, -2084, -10509, -15534 },
-  { 9816, 16163, 8717, -6168, -15790, -11936, 2104, 14348 },
-  { 12928, 12326, -7340, -15653, 242, 15763, 6905, -12632 },
-  { 15124, 3038, -16033, 1758, 15507, -6397, -13593, 10463 },
-  { 15895, -7947, -7947, 15895, -7947, -7947, 15895, -7947 },
-  { 14325, -15057, 9030, 1050, -10659, 15483, -13358, 5236 },
-  { 9054, -12580, 14714, -15220, 14043, -11312, 7330, -2537 },
-};
-#endif  // CONFIG_LGT
 #endif  // AOM_DSP_TXFM_COMMON_H_
diff --git a/av1/common/av1_rtcd_defs.pl b/av1/common/av1_rtcd_defs.pl
index 93642d9..eb97956 100755
--- a/av1/common/av1_rtcd_defs.pl
+++ b/av1/common/av1_rtcd_defs.pl
@@ -70,7 +70,7 @@
 # Inverse dct
 #
 add_proto qw/void av1_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, const struct txfm_param *param";
-if (aom_config("CONFIG_DAALA_TX4") ne "yes" && aom_config("CONFIG_LGT") ne "yes") {
+if (aom_config("CONFIG_DAALA_TX4") ne "yes") {
   if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
     specialize qw/av1_iht4x4_16_add sse2/;
   } else {
@@ -105,7 +105,7 @@
 add_proto qw/void av1_iht32x8_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, const struct txfm_param *param";
 
 add_proto qw/void av1_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, const struct txfm_param *param";
-if (aom_config("CONFIG_DAALA_TX8") ne "yes" && aom_config("CONFIG_LGT") ne "yes") {
+if (aom_config("CONFIG_DAALA_TX8") ne "yes") {
   if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
     specialize qw/av1_iht8x8_64_add sse2/;
   } else {
@@ -115,7 +115,7 @@
 
 add_proto qw/void av1_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, const struct txfm_param *param";
 
-if (aom_config("CONFIG_DAALA_TX16") ne "yes" && aom_config("CONFIG_LGT") ne "yes") {
+if (aom_config("CONFIG_DAALA_TX16") ne "yes") {
   if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
     specialize qw/av1_iht16x16_256_add sse2 avx2/;
   } else {
diff --git a/av1/common/idct.c b/av1/common/idct.c
index dd4eada..4bef625 100644
--- a/av1/common/idct.c
+++ b/av1/common/idct.c
@@ -199,68 +199,6 @@
 #endif  // CONFIG_TX64X64
 #endif  // CONFIG_HIGHBITDEPTH
 
-#if CONFIG_LGT
-void ilgt4(const tran_low_t *input, tran_low_t *output,
-           const tran_high_t *lgtmtx) {
-  if (!lgtmtx) assert(0);
-
-  // evaluate s[j] = sum of all lgtmtx[j]*input[i] over i=1,...,4
-  tran_high_t s[4] = { 0 };
-  for (int i = 0; i < 4; ++i)
-    for (int j = 0; j < 4; ++j) s[j] += lgtmtx[i * 4 + j] * input[i];
-
-  for (int i = 0; i < 4; ++i) output[i] = WRAPLOW(dct_const_round_shift(s[i]));
-}
-
-void ilgt8(const tran_low_t *input, tran_low_t *output,
-           const tran_high_t *lgtmtx) {
-  if (!lgtmtx) assert(0);
-
-  // evaluate s[j] = sum of all lgtmtx[j]*input[i] over i=1,...,8
-  tran_high_t s[8] = { 0 };
-  for (int i = 0; i < 8; ++i)
-    for (int j = 0; j < 8; ++j) s[j] += lgtmtx[i * 8 + j] * input[i];
-
-  for (int i = 0; i < 8; ++i) output[i] = WRAPLOW(dct_const_round_shift(s[i]));
-}
-#endif  // CONFIG_LGT
-
-#if CONFIG_LGT
-// get_lgt4 and get_lgt8 return 1 and pick a lgt matrix if LGT is chosen to
-// apply. Otherwise they return 0
-int get_lgt4(const TxfmParam *txfm_param, int is_col,
-             const tran_high_t **lgtmtx) {
-  assert(av1_ext_tx_used[txfm_param->tx_set_type][txfm_param->tx_type]);
-  if (is_col && (vtx_tab[txfm_param->tx_type] == ADST_1D ||
-                 vtx_tab[txfm_param->tx_type] == FLIPADST_1D)) {
-    lgtmtx[0] = txfm_param->is_inter ? &lgt4_170[0][0] : &lgt4_140[0][0];
-    return 1;
-  } else if (!is_col && (htx_tab[txfm_param->tx_type] == ADST_1D ||
-                         htx_tab[txfm_param->tx_type] == FLIPADST_1D)) {
-    lgtmtx[0] = txfm_param->is_inter ? &lgt4_170[0][0] : &lgt4_140[0][0];
-    return 1;
-  }
-  lgtmtx[0] = NULL;
-  return 0;
-}
-
-int get_lgt8(const TxfmParam *txfm_param, int is_col,
-             const tran_high_t **lgtmtx) {
-  assert(av1_ext_tx_used[txfm_param->tx_set_type][txfm_param->tx_type]);
-  if (is_col && (vtx_tab[txfm_param->tx_type] == ADST_1D ||
-                 vtx_tab[txfm_param->tx_type] == FLIPADST_1D)) {
-    lgtmtx[0] = txfm_param->is_inter ? &lgt8_170[0][0] : &lgt8_150[0][0];
-    return 1;
-  } else if (!is_col && (htx_tab[txfm_param->tx_type] == ADST_1D ||
-                         htx_tab[txfm_param->tx_type] == FLIPADST_1D)) {
-    lgtmtx[0] = txfm_param->is_inter ? &lgt8_170[0][0] : &lgt8_150[0][0];
-    return 1;
-  }
-  lgtmtx[0] = NULL;
-  return 0;
-}
-#endif  // CONFIG_LGT
-
 void av1_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride,
                          const TxfmParam *txfm_param) {
   const TX_TYPE tx_type = txfm_param->tx_type;
@@ -321,13 +259,6 @@
   assert(tx_type == DCT_DCT);
 #endif
 
-#if CONFIG_LGT
-  const tran_high_t *lgtmtx_col[1];
-  const tran_high_t *lgtmtx_row[1];
-  int use_lgt_col = get_lgt4(txfm_param, 1, lgtmtx_col);
-  int use_lgt_row = get_lgt4(txfm_param, 0, lgtmtx_row);
-#endif
-
   // inverse transform row vectors
   for (i = 0; i < 4; ++i) {
 #if CONFIG_DAALA_TX4
@@ -335,12 +266,7 @@
     for (j = 0; j < 4; j++) temp_in[j] = input[j] * 2;
     IHT_4[tx_type].rows(temp_in, out[i]);
 #else
-#if CONFIG_LGT
-    if (use_lgt_row)
-      ilgt4(input, out[i], lgtmtx_row[0]);
-    else
-#endif
-      IHT_4[tx_type].rows(input, out[i]);
+    IHT_4[tx_type].rows(input, out[i]);
 #endif
     input += 4;
   }
@@ -354,12 +280,7 @@
 
   // inverse transform column vectors
   for (i = 0; i < 4; ++i) {
-#if CONFIG_LGT
-    if (use_lgt_col)
-      ilgt4(tmp[i], out[i], lgtmtx_col[0]);
-    else
-#endif
-      IHT_4[tx_type].cols(tmp[i], out[i]);
+    IHT_4[tx_type].cols(tmp[i], out[i]);
   }
 
   maybe_flip_strides(&dest, &stride, &outp, &outstride, tx_type, 4, 4);
@@ -432,13 +353,6 @@
   tran_low_t *outp = &out[0][0];
   int outstride = n2;
 
-#if CONFIG_LGT
-  const tran_high_t *lgtmtx_col[1];
-  const tran_high_t *lgtmtx_row[1];
-  int use_lgt_col = get_lgt8(txfm_param, 1, lgtmtx_col);
-  int use_lgt_row = get_lgt4(txfm_param, 0, lgtmtx_row);
-#endif
-
   // Multi-way scaling matrix (bits):
   // LGT/AV1 row,col     input+0, rowTX+.5, mid+.5, colTX+1, out-5 == -3
   // LGT row, Daala col  input+0, rowTX+.5, mid+.5, colTX+0, out-4 == -3
@@ -447,26 +361,15 @@
 
   // inverse transform row vectors and transpose
   for (i = 0; i < n2; ++i) {
-#if CONFIG_LGT
-    if (use_lgt_row) {
-      // Scaling cases 1 and 2 above
-      // No input scaling
-      // Row transform (LGT; scales up .5 bits)
-      ilgt4(input, outtmp, lgtmtx_row[0]);
-      // Transpose and mid scaling up by .5 bit
-      for (j = 0; j < n; ++j)
-        tmp[j][i] = (tran_low_t)dct_const_round_shift(outtmp[j] * Sqrt2);
-    } else {
-#endif
 #if CONFIG_DAALA_TX4 && CONFIG_DAALA_TX8
-      // Daala row transform; Scaling cases 3 and 4 above
-      tran_low_t temp_in[4];
-      // Input scaling up by 1 bit
-      for (j = 0; j < n; j++) temp_in[j] = input[j] * 2;
-      // Row transform; Daala does not scale
-      IHT_4x8[tx_type].rows(temp_in, outtmp);
-      // Transpose; no mid scaling
-      for (j = 0; j < n; ++j) tmp[j][i] = outtmp[j];
+    // Daala row transform; Scaling cases 3 and 4 above
+    tran_low_t temp_in[4];
+    // Input scaling up by 1 bit
+    for (j = 0; j < n; j++) temp_in[j] = input[j] * 2;
+    // Row transform; Daala does not scale
+    IHT_4x8[tx_type].rows(temp_in, outtmp);
+    // Transpose; no mid scaling
+    for (j = 0; j < n; ++j) tmp[j][i] = outtmp[j];
 #else
     // AV1 row transform; Scaling case 1 only
     // Row transform (AV1 scales up .5 bits)
@@ -475,21 +378,13 @@
     for (j = 0; j < n; ++j)
       tmp[j][i] = (tran_low_t)dct_const_round_shift(outtmp[j] * Sqrt2);
 #endif
-#if CONFIG_LGT
-    }
-#endif
     input += n;
   }
 
   // inverse transform column vectors
   // AV1/LGT column TX scales up by 1 bit, Daala does not scale
   for (i = 0; i < n; ++i) {
-#if CONFIG_LGT
-    if (use_lgt_col)
-      ilgt8(tmp[i], out[i], lgtmtx_col[0]);
-    else
-#endif
-      IHT_4x8[tx_type].cols(tmp[i], out[i]);
+    IHT_4x8[tx_type].cols(tmp[i], out[i]);
   }
 
   maybe_flip_strides(&dest, &stride, &outp, &outstride, tx_type, n2, n);
@@ -500,14 +395,8 @@
       int d = i * stride + j;
       int s = j * outstride + i;
 #if CONFIG_DAALA_TX4 && CONFIG_DAALA_TX8
-#if CONFIG_LGT
-      if (use_lgt_col)
-        // Output Scaling cases 1, 3
-        dest[d] = clip_pixel_add(dest[d], ROUND_POWER_OF_TWO(outp[s], 5));
-      else
-#endif
-        // Output scaling cases 2, 4
-        dest[d] = clip_pixel_add(dest[d], ROUND_POWER_OF_TWO(outp[s], 4));
+      // Output scaling cases 2, 4
+      dest[d] = clip_pixel_add(dest[d], ROUND_POWER_OF_TWO(outp[s], 4));
 #else
       // Output scaling case 1 only
       dest[d] = clip_pixel_add(dest[d], ROUND_POWER_OF_TWO(outp[s], 5));
@@ -571,13 +460,6 @@
   tran_low_t *outp = &out[0][0];
   int outstride = n;
 
-#if CONFIG_LGT
-  const tran_high_t *lgtmtx_col[1];
-  const tran_high_t *lgtmtx_row[1];
-  int use_lgt_col = get_lgt4(txfm_param, 1, lgtmtx_col);
-  int use_lgt_row = get_lgt8(txfm_param, 0, lgtmtx_row);
-#endif
-
   // Multi-way scaling matrix (bits):
   // LGT/AV1 row,col     input+0, rowTX+1, mid+.5, colTX+.5, out-5 == -3
   // LGT row, Daala col  input+0, rowTX+1, mid+.5, colTX+.5, out-4 == -3
@@ -586,26 +468,15 @@
 
   // inverse transform row vectors and transpose
   for (i = 0; i < n; ++i) {
-#if CONFIG_LGT
-    if (use_lgt_row) {
-      // Scaling cases 1 and 2 above
-      // No input scaling
-      // Row transform (LGT; scales up 1 bit)
-      ilgt8(input, outtmp, lgtmtx_row[0]);
-      // Transpose and mid scaling up by .5 bit
-      for (j = 0; j < n2; ++j)
-        tmp[j][i] = (tran_low_t)dct_const_round_shift(outtmp[j] * Sqrt2);
-    } else {
-#endif
 #if CONFIG_DAALA_TX4 && CONFIG_DAALA_TX8
-      // Daala row transform; Scaling cases 3 and 4 above
-      tran_low_t temp_in[8];
-      // Input scaling up by 1 bit
-      for (j = 0; j < n2; j++) temp_in[j] = input[j] * 2;
-      // Row transform; Daala does not scale
-      IHT_8x4[tx_type].rows(temp_in, outtmp);
-      // Transpose; no mid scaling
-      for (j = 0; j < n2; ++j) tmp[j][i] = outtmp[j];
+    // Daala row transform; Scaling cases 3 and 4 above
+    tran_low_t temp_in[8];
+    // Input scaling up by 1 bit
+    for (j = 0; j < n2; j++) temp_in[j] = input[j] * 2;
+    // Row transform; Daala does not scale
+    IHT_8x4[tx_type].rows(temp_in, outtmp);
+    // Transpose; no mid scaling
+    for (j = 0; j < n2; ++j) tmp[j][i] = outtmp[j];
 #else
     // AV1 row transform; Scaling case 1 only
     // Row transform (AV1 scales up 1 bit)
@@ -614,21 +485,13 @@
     for (j = 0; j < n2; ++j)
       tmp[j][i] = (tran_low_t)dct_const_round_shift(outtmp[j] * Sqrt2);
 #endif
-#if CONFIG_LGT
-    }
-#endif
     input += n2;
   }
 
   // inverse transform column vectors
   // AV1 and LGT scale up by .5 bits; Daala does not scale
   for (i = 0; i < n2; ++i) {
-#if CONFIG_LGT
-    if (use_lgt_col)
-      ilgt4(tmp[i], out[i], lgtmtx_col[0]);
-    else
-#endif
-      IHT_8x4[tx_type].cols(tmp[i], out[i]);
+    IHT_8x4[tx_type].cols(tmp[i], out[i]);
   }
 
   maybe_flip_strides(&dest, &stride, &outp, &outstride, tx_type, n, n2);
@@ -639,14 +502,8 @@
       int d = i * stride + j;
       int s = j * outstride + i;
 #if CONFIG_DAALA_TX4 && CONFIG_DAALA_TX8
-#if CONFIG_LGT
-      if (use_lgt_col)
-        // Output scaling cases 1, 3
-        dest[d] = clip_pixel_add(dest[d], ROUND_POWER_OF_TWO(outp[s], 5));
-      else
-#endif
-        // Output scaling cases 2, 4
-        dest[d] = clip_pixel_add(dest[d], ROUND_POWER_OF_TWO(outp[s], 4));
+      // Output scaling cases 2, 4
+      dest[d] = clip_pixel_add(dest[d], ROUND_POWER_OF_TWO(outp[s], 4));
 #else
       // Output scaling case 1
       dest[d] = clip_pixel_add(dest[d], ROUND_POWER_OF_TWO(outp[s], 5));
@@ -690,19 +547,9 @@
   tran_low_t *outp = &out[0][0];
   int outstride = n4;
 
-#if CONFIG_LGT
-  const tran_high_t *lgtmtx_row[1];
-  int use_lgt_row = get_lgt4(txfm_param, 0, lgtmtx_row);
-#endif
-
   // inverse transform row vectors and transpose
   for (i = 0; i < n4; ++i) {
-#if CONFIG_LGT
-    if (use_lgt_row)
-      ilgt4(input, outtmp, lgtmtx_row[0]);
-    else
-#endif
-      IHT_4x16[tx_type].rows(input, outtmp);
+    IHT_4x16[tx_type].rows(input, outtmp);
     for (j = 0; j < n; ++j) tmp[j][i] = outtmp[j];
     input += n;
   }
@@ -760,11 +607,6 @@
   tran_low_t *outp = &out[0][0];
   int outstride = n;
 
-#if CONFIG_LGT
-  const tran_high_t *lgtmtx_col[1];
-  int use_lgt_col = get_lgt4(txfm_param, 1, lgtmtx_col);
-#endif
-
   // inverse transform row vectors and transpose
   for (i = 0; i < n; ++i) {
     IHT_16x4[tx_type].rows(input, outtmp);
@@ -774,12 +616,7 @@
 
   // inverse transform column vectors
   for (i = 0; i < n4; ++i) {
-#if CONFIG_LGT
-    if (use_lgt_col)
-      ilgt4(tmp[i], out[i], lgtmtx_col[0]);
-    else
-#endif
-      IHT_16x4[tx_type].cols(tmp[i], out[i]);
+    IHT_16x4[tx_type].cols(tmp[i], out[i]);
   }
 
   maybe_flip_strides(&dest, &stride, &outp, &outstride, tx_type, n, n4);
@@ -848,11 +685,6 @@
   tran_low_t *outp = &out[0][0];
   int outstride = n2;
 
-#if CONFIG_LGT
-  const tran_high_t *lgtmtx_row[1];
-  int use_lgt_row = get_lgt8(txfm_param, 0, lgtmtx_row);
-#endif
-
   // Multi-way scaling matrix (bits):
   // LGT/AV1 row, AV1 col  input+0, rowTX+1, mid+.5, colTX+1.5, out-6 == -3
   // LGT row, Daala col    input+0, rowTX+1, mid+0,  colTX+0,   out-4 == -3
@@ -861,32 +693,14 @@
 
   // inverse transform row vectors and transpose
   for (i = 0; i < n2; ++i) {
-#if CONFIG_LGT
-    if (use_lgt_row) {
-      // Scaling cases 1 and 2 above
-      // No input scaling
-      // Row transform (LGT; scales up 1 bit)
-      ilgt8(input, outtmp, lgtmtx_row[0]);
-      // Transpose and mid scaling
-      for (j = 0; j < n; ++j) {
 #if CONFIG_DAALA_TX8 && CONFIG_DAALA_TX16
-        // Mid scaling case 2
-        tmp[j][i] = outtmp[j];
-#else
-        // Mid scaling case 1
-        tmp[j][i] = (tran_low_t)dct_const_round_shift(outtmp[j] * Sqrt2);
-#endif
-      }
-    } else {
-#endif
-#if CONFIG_DAALA_TX8 && CONFIG_DAALA_TX16
-      tran_low_t temp_in[8];
-      // Input scaling case 4
-      for (j = 0; j < n; j++) temp_in[j] = input[j] * 2;
-      // Row transform (Daala does not scale)
-      IHT_8x16[tx_type].rows(temp_in, outtmp);
-      // Transpose (no mid scaling)
-      for (j = 0; j < n; ++j) tmp[j][i] = outtmp[j];
+    tran_low_t temp_in[8];
+    // Input scaling case 4
+    for (j = 0; j < n; j++) temp_in[j] = input[j] * 2;
+    // Row transform (Daala does not scale)
+    IHT_8x16[tx_type].rows(temp_in, outtmp);
+    // Transpose (no mid scaling)
+    for (j = 0; j < n; ++j) tmp[j][i] = outtmp[j];
 #else
     // Case 1; no input scaling
     // Row transform (AV1 scales up 1 bit)
@@ -895,9 +709,6 @@
     for (j = 0; j < n; ++j)
       tmp[j][i] = (tran_low_t)dct_const_round_shift(outtmp[j] * Sqrt2);
 #endif
-#if CONFIG_LGT
-    }
-#endif
     input += n;
   }
 
@@ -980,11 +791,6 @@
   tran_low_t *outp = &out[0][0];
   int outstride = n;
 
-#if CONFIG_LGT
-  const tran_high_t *lgtmtx_col[1];
-  int use_lgt_col = get_lgt8(txfm_param, 1, lgtmtx_col);
-#endif
-
   // Multi-way scaling matrix (bits):
   // AV1 row, LGT/AV1 col  input+0, rowTX+1.5, mid+.5, colTX+1, out-6 == -3
   // LGT row, Daala col    N/A (no 16-point LGT)
@@ -999,15 +805,9 @@
     for (j = 0; j < n2; j++) temp_in[j] = input[j] * 2;
     // Daala row TX, no scaling
     IHT_16x8[tx_type].rows(temp_in, outtmp);
-// Transpose and mid scaling
-#if CONFIG_LGT
-    if (use_lgt_col)
-      // Case 3
-      for (j = 0; j < n2; ++j) tmp[j][i] = outtmp[j] * 2;
-    else
-#endif
-      // Case 4
-      for (j = 0; j < n2; ++j) tmp[j][i] = outtmp[j];
+    // Transpose and mid scaling
+    // Case 4
+    for (j = 0; j < n2; ++j) tmp[j][i] = outtmp[j];
 #else
     // Case 1
     // No input scaling
@@ -1023,12 +823,7 @@
   // inverse transform column vectors
   // AV1/LGT scales up by 1 bit, Daala does not scale
   for (i = 0; i < n2; ++i) {
-#if CONFIG_LGT
-    if (use_lgt_col)
-      ilgt8(tmp[i], out[i], lgtmtx_col[0]);
-    else
-#endif
-      IHT_16x8[tx_type].cols(tmp[i], out[i]);
+    IHT_16x8[tx_type].cols(tmp[i], out[i]);
   }
 
   maybe_flip_strides(&dest, &stride, &outp, &outstride, tx_type, n, n2);
@@ -1040,14 +835,8 @@
       int s = j * outstride + i;
 // Output scaling
 #if CONFIG_DAALA_TX8 && CONFIG_DAALA_TX16
-#if CONFIG_LGT
-      if (use_lgt_col)
-        // case 3
-        dest[d] = clip_pixel_add(dest[d], ROUND_POWER_OF_TWO(outp[s], 6));
-      else
-#endif
-        // case 4
-        dest[d] = clip_pixel_add(dest[d], ROUND_POWER_OF_TWO(outp[s], 4));
+      // case 4
+      dest[d] = clip_pixel_add(dest[d], ROUND_POWER_OF_TWO(outp[s], 4));
 #else
       // case 1
       dest[d] = clip_pixel_add(dest[d], ROUND_POWER_OF_TWO(outp[s], 6));
@@ -1091,19 +880,9 @@
   tran_low_t *outp = &out[0][0];
   int outstride = n4;
 
-#if CONFIG_LGT
-  const tran_high_t *lgtmtx_row[1];
-  int use_lgt_row = get_lgt8(txfm_param, 0, lgtmtx_row);
-#endif
-
   // inverse transform row vectors and transpose
   for (i = 0; i < n4; ++i) {
-#if CONFIG_LGT
-    if (use_lgt_row)
-      ilgt8(input, outtmp, lgtmtx_row[0]);
-    else
-#endif
-      IHT_8x32[tx_type].rows(input, outtmp);
+    IHT_8x32[tx_type].rows(input, outtmp);
     for (j = 0; j < n; ++j) tmp[j][i] = outtmp[j];
     input += n;
   }
@@ -1161,11 +940,6 @@
   tran_low_t *outp = &out[0][0];
   int outstride = n;
 
-#if CONFIG_LGT
-  const tran_high_t *lgtmtx_col[1];
-  int use_lgt_col = get_lgt4(txfm_param, 1, lgtmtx_col);
-#endif
-
   // inverse transform row vectors and transpose
   for (i = 0; i < n; ++i) {
     IHT_32x8[tx_type].rows(input, outtmp);
@@ -1175,12 +949,7 @@
 
   // inverse transform column vectors
   for (i = 0; i < n4; ++i) {
-#if CONFIG_LGT
-    if (use_lgt_col)
-      ilgt8(tmp[i], out[i], lgtmtx_col[0]);
-    else
-#endif
-      IHT_32x8[tx_type].cols(tmp[i], out[i]);
+    IHT_32x8[tx_type].cols(tmp[i], out[i]);
   }
 
   maybe_flip_strides(&dest, &stride, &outp, &outstride, tx_type, n, n4);
@@ -1424,13 +1193,6 @@
   tran_low_t *outp = &out[0][0];
   int outstride = 8;
 
-#if CONFIG_LGT
-  const tran_high_t *lgtmtx_col[1];
-  const tran_high_t *lgtmtx_row[1];
-  int use_lgt_col = get_lgt8(txfm_param, 1, lgtmtx_col);
-  int use_lgt_row = get_lgt8(txfm_param, 0, lgtmtx_row);
-#endif
-
   // inverse transform row vectors
   for (i = 0; i < 8; ++i) {
 #if CONFIG_DAALA_TX8
@@ -1438,12 +1200,7 @@
     for (j = 0; j < 8; j++) temp_in[j] = input[j] * 2;
     IHT_8[tx_type].rows(temp_in, out[i]);
 #else
-#if CONFIG_LGT
-    if (use_lgt_row)
-      ilgt8(input, out[i], lgtmtx_row[0]);
-    else
-#endif
-      IHT_8[tx_type].rows(input, out[i]);
+    IHT_8[tx_type].rows(input, out[i]);
 #endif
     input += 8;
   }
@@ -1457,12 +1214,7 @@
 
   // inverse transform column vectors
   for (i = 0; i < 8; ++i) {
-#if CONFIG_LGT
-    if (use_lgt_col)
-      ilgt8(tmp[i], out[i], lgtmtx_col[0]);
-    else
-#endif
-      IHT_8[tx_type].cols(tmp[i], out[i]);
+    IHT_8[tx_type].cols(tmp[i], out[i]);
   }
 
   maybe_flip_strides(&dest, &stride, &outp, &outstride, tx_type, 8, 8);
@@ -2124,7 +1876,7 @@
 
 static void inv_txfm_add_4x8(const tran_low_t *input, uint8_t *dest, int stride,
                              const TxfmParam *txfm_param) {
-#if CONFIG_LGT || (CONFIG_DAALA_TX4 && CONFIG_DAALA_TX8)
+#if (CONFIG_DAALA_TX4 && CONFIG_DAALA_TX8)
   av1_iht4x8_32_add_c(input, dest, stride, txfm_param);
 #else
   av1_iht4x8_32_add(input, dest, stride, txfm_param);
@@ -2133,7 +1885,7 @@
 
 static void inv_txfm_add_8x4(const tran_low_t *input, uint8_t *dest, int stride,
                              const TxfmParam *txfm_param) {
-#if CONFIG_LGT || (CONFIG_DAALA_TX4 && CONFIG_DAALA_TX8)
+#if (CONFIG_DAALA_TX4 && CONFIG_DAALA_TX8)
   av1_iht8x4_32_add_c(input, dest, stride, txfm_param);
 #else
   av1_iht8x4_32_add(input, dest, stride, txfm_param);
@@ -2144,44 +1896,28 @@
 #if CONFIG_RECT_TX_EXT
 static void inv_txfm_add_4x16(const tran_low_t *input, uint8_t *dest,
                               int stride, const TxfmParam *txfm_param) {
-#if CONFIG_LGT
-  av1_iht4x16_64_add_c(input, dest, stride, txfm_param);
-#else
   av1_iht4x16_64_add(input, dest, stride, txfm_param);
-#endif
 }
 
 static void inv_txfm_add_16x4(const tran_low_t *input, uint8_t *dest,
                               int stride, const TxfmParam *txfm_param) {
-#if CONFIG_LGT
-  av1_iht16x4_64_add_c(input, dest, stride, txfm_param);
-#else
   av1_iht16x4_64_add(input, dest, stride, txfm_param);
-#endif
 }
 
 static void inv_txfm_add_8x32(const tran_low_t *input, uint8_t *dest,
                               int stride, const TxfmParam *txfm_param) {
-#if CONFIG_LGT
-  av1_iht8x32_256_add_c(input, dest, stride, txfm_param);
-#else
   av1_iht8x32_256_add(input, dest, stride, txfm_param);
-#endif
 }
 
 static void inv_txfm_add_32x8(const tran_low_t *input, uint8_t *dest,
                               int stride, const TxfmParam *txfm_param) {
-#if CONFIG_LGT
-  av1_iht32x8_256_add_c(input, dest, stride, txfm_param);
-#else
   av1_iht32x8_256_add(input, dest, stride, txfm_param);
-#endif
 }
 #endif
 
 static void inv_txfm_add_8x16(const tran_low_t *input, uint8_t *dest,
                               int stride, const TxfmParam *txfm_param) {
-#if CONFIG_LGT || (CONFIG_DAALA_TX8 && CONFIG_DAALA_TX16)
+#if (CONFIG_DAALA_TX8 && CONFIG_DAALA_TX16)
   av1_iht8x16_128_add_c(input, dest, stride, txfm_param);
 #else
   av1_iht8x16_128_add(input, dest, stride, txfm_param);
@@ -2190,7 +1926,7 @@
 
 static void inv_txfm_add_16x8(const tran_low_t *input, uint8_t *dest,
                               int stride, const TxfmParam *txfm_param) {
-#if CONFIG_LGT || (CONFIG_DAALA_TX8 && CONFIG_DAALA_TX16)
+#if (CONFIG_DAALA_TX8 && CONFIG_DAALA_TX16)
   av1_iht16x8_128_add_c(input, dest, stride, txfm_param);
 #else
   av1_iht16x8_128_add(input, dest, stride, txfm_param);
@@ -2693,9 +2429,6 @@
   // within this function.
   txfm_param->tx_set_type = get_ext_tx_set_type(
       txfm_param->tx_size, plane_bsize, is_inter_block(&xd->mi[0]->mbmi), 0);
-#if CONFIG_LGT
-  txfm_param->is_inter = is_inter_block(&xd->mi[0]->mbmi);
-#endif
 #if CONFIG_ADAPT_SCAN
   txfm_param->eob_threshold =
       (const int16_t *)&xd->eob_threshold_md[tx_size][tx_type][0];
@@ -2721,9 +2454,9 @@
 
   TxfmParam txfm_param;
   init_txfm_param(xd, plane, tx_size, tx_type, eob, &txfm_param);
-#if CONFIG_LGT || CONFIG_MRC_TX
+#if CONFIG_MRC_TX
   txfm_param.is_inter = is_inter_block(&xd->mi[0]->mbmi);
-#endif  // CONFIG_LGT || CONFIG_MRC_TX
+#endif  // CONFIG_MRC_TX
 #if CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
   txfm_param.mask = mrc_mask;
 #endif  // CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
diff --git a/av1/common/idct.h b/av1/common/idct.h
index a5b21fe..58590eb 100644
--- a/av1/common/idct.h
+++ b/av1/common/idct.h
@@ -32,13 +32,6 @@
   transform_1d cols, rows;  // vertical and horizontal
 } transform_2d;
 
-#if CONFIG_LGT
-int get_lgt4(const TxfmParam *txfm_param, int is_col,
-             const tran_high_t **lgtmtx);
-int get_lgt8(const TxfmParam *txfm_param, int is_col,
-             const tran_high_t **lgtmtx);
-#endif  // CONFIG_LGT
-
 #if CONFIG_HIGHBITDEPTH
 typedef void (*highbd_transform_1d)(const tran_low_t *, tran_low_t *, int bd);
 
diff --git a/av1/encoder/dct.c b/av1/encoder/dct.c
index d79d64a..6ae3080 100644
--- a/av1/encoder/dct.c
+++ b/av1/encoder/dct.c
@@ -1070,32 +1070,6 @@
 }
 #endif  // CONFIG_MRC_TX
 
-#if CONFIG_LGT
-static void flgt4(const tran_low_t *input, tran_low_t *output,
-                  const tran_high_t *lgtmtx) {
-  if (!lgtmtx) assert(0);
-
-  // evaluate s[j] = sum of all lgtmtx[j][i]*input[i] over i=1,...,4
-  tran_high_t s[4] = { 0 };
-  for (int i = 0; i < 4; ++i)
-    for (int j = 0; j < 4; ++j) s[j] += lgtmtx[j * 4 + i] * input[i];
-
-  for (int i = 0; i < 4; ++i) output[i] = (tran_low_t)fdct_round_shift(s[i]);
-}
-
-static void flgt8(const tran_low_t *input, tran_low_t *output,
-                  const tran_high_t *lgtmtx) {
-  if (!lgtmtx) assert(0);
-
-  // evaluate s[j] = sum of all lgtmtx[j][i]*input[i] over i=1,...,8
-  tran_high_t s[8] = { 0 };
-  for (int i = 0; i < 8; ++i)
-    for (int j = 0; j < 8; ++j) s[j] += lgtmtx[j * 8 + i] * input[i];
-
-  for (int i = 0; i < 8; ++i) output[i] = (tran_low_t)fdct_round_shift(s[i]);
-}
-#endif  // CONFIG_LGT
-
 // TODO(sarahparker) these functions will be removed once the highbitdepth
 // codepath works properly for rectangular transforms. They have almost
 // identical versions in av1_fwd_txfm1d.c, but those are currently only
@@ -1286,15 +1260,6 @@
     int16_t flipped_input[4 * 4];
     maybe_flip_input(&input, &stride, 4, 4, flipped_input, tx_type);
 
-#if CONFIG_LGT
-    // Choose LGT adaptive to the prediction. We may apply different LGTs for
-    // different rows/columns, indicated by the pointers to 2D arrays
-    const tran_high_t *lgtmtx_col[1];
-    const tran_high_t *lgtmtx_row[1];
-    int use_lgt_col = get_lgt4(txfm_param, 1, lgtmtx_col);
-    int use_lgt_row = get_lgt4(txfm_param, 0, lgtmtx_row);
-#endif
-
     // Columns
     for (i = 0; i < 4; ++i) {
       /* A C99-safe upshift by 4 for both Daala and VPx TX. */
@@ -1302,24 +1267,14 @@
 #if !CONFIG_DAALA_TX4
       if (i == 0 && temp_in[0]) temp_in[0] += 1;
 #endif
-#if CONFIG_LGT
-      if (use_lgt_col)
-        flgt4(temp_in, temp_out, lgtmtx_col[0]);
-      else
-#endif
-        ht.cols(temp_in, temp_out);
+      ht.cols(temp_in, temp_out);
       for (j = 0; j < 4; ++j) out[j * 4 + i] = temp_out[j];
     }
 
     // Rows
     for (i = 0; i < 4; ++i) {
       for (j = 0; j < 4; ++j) temp_in[j] = out[j + i * 4];
-#if CONFIG_LGT
-      if (use_lgt_row)
-        flgt4(temp_in, temp_out, lgtmtx_row[0]);
-      else
-#endif
-        ht.rows(temp_in, temp_out);
+      ht.rows(temp_in, temp_out);
 #if CONFIG_DAALA_TX4
       /* Daala TX has orthonormal scaling; shift down by only 1 to achieve
          the usual VPx coefficient left-shift of 3. */
@@ -1386,13 +1341,6 @@
   int16_t flipped_input[8 * 4];
   maybe_flip_input(&input, &stride, n2, n, flipped_input, tx_type);
 
-#if CONFIG_LGT
-  const tran_high_t *lgtmtx_col[1];
-  const tran_high_t *lgtmtx_row[1];
-  int use_lgt_col = get_lgt8(txfm_param, 1, lgtmtx_col);
-  int use_lgt_row = get_lgt4(txfm_param, 0, lgtmtx_row);
-#endif
-
   // Multi-way scaling matrix (bits):
   // LGT/AV1 row,col     input+2.5, rowTX+.5, mid+0, colTX+1, out-1 == 3
   // LGT row, Daala col  input+3.5, rowTX+.5, mid+0, colTX+0, out-1 == 3
@@ -1404,29 +1352,16 @@
     // Input scaling
     for (j = 0; j < n; ++j) {
 #if CONFIG_DAALA_TX4 && CONFIG_DAALA_TX8
-#if CONFIG_LGT
-      // Input scaling when LGT might be active (1-4 above)
-      temp_in[j] = use_lgt_row ?
-        (tran_low_t)fdct_round_shift(input[i * stride + j] * Sqrt2 *
-                                     (use_lgt_col ? 4 : 8)) :
-        input[i * stride + j] * (use_lgt_col ? 8 : 16));
-#else
       // Input scaling when LGT is not possible, Daala only (4 above)
       temp_in[j] = input[i * stride + j] * 16;
-#endif
 #else
       // Input scaling when Daala is not possible, LGT/AV1 only (1 above)
       temp_in[j] =
           (tran_low_t)fdct_round_shift(input[i * stride + j] * 4 * Sqrt2);
 #endif
     }
-// Row transform (AV1/LGT scale up .5 bit, Daala does not scale)
-#if CONFIG_LGT
-    if (use_lgt_row)
-      flgt4(temp_in, temp_out, lgtmtx_row[0]);
-    else
-#endif
-      ht.rows(temp_in, temp_out);
+    // Row transform (AV1 scales up .5 bit, Daala does not scale)
+    ht.rows(temp_in, temp_out);
     // No mid scaling
     for (j = 0; j < n; ++j) out[j * n2 + i] = temp_out[j];
   }
@@ -1434,13 +1369,8 @@
   // Columns
   for (i = 0; i < n; ++i) {
     for (j = 0; j < n2; ++j) temp_in[j] = out[j + i * n2];
-// Column transform (AV1/LGT scale up 1 bit, Daala does not scale)
-#if CONFIG_LGT
-    if (use_lgt_col)
-      flgt8(temp_in, temp_out, lgtmtx_col[0]);
-    else
-#endif
-      ht.cols(temp_in, temp_out);
+    // Column transform (AV1 scales up 1 bit, Daala does not scale)
+    ht.cols(temp_in, temp_out);
     // Output scaling is always a downshift of 1
     for (j = 0; j < n2; ++j)
       output[i + j * n] = (temp_out[j] + (temp_out[j] < 0)) >> 1;
@@ -1503,13 +1433,6 @@
   int16_t flipped_input[8 * 4];
   maybe_flip_input(&input, &stride, n, n2, flipped_input, tx_type);
 
-#if CONFIG_LGT
-  const tran_high_t *lgtmtx_col[1];
-  const tran_high_t *lgtmtx_row[1];
-  int use_lgt_col = get_lgt4(txfm_param, 1, lgtmtx_col);
-  int use_lgt_row = get_lgt8(txfm_param, 0, lgtmtx_row);
-#endif
-
   // Multi-way scaling matrix (bits):
   // LGT/AV1 row,col     input+2.5, rowTX+1, mid+0, colTX+.5, out-1 == 3
   // LGT row, Daala col  input+3,   rowTX+1, mid+0, colTX+0,  out-1 == 3
@@ -1520,29 +1443,16 @@
   for (i = 0; i < n2; ++i) {
     for (j = 0; j < n; ++j) {
 #if CONFIG_DAALA_TX4 && CONFIG_DAALA_TX8
-#if CONFIG_LGT
-      // Input scaling when LGT might be active (1-4 above)
-      temp_in[j] = use_lgt_col ?
-        (tran_low_t)fdct_round_shift(input[j * stride + i] * Sqrt2 *
-                                     (use_lgt_row ? 4 : 8)) :
-        input[j * stride + i] * (use_lgt_row ? 8 : 16));
-#else
       // Input scaling when LGT is not possible, Daala only (4 above)
       temp_in[j] = input[j * stride + i] * 16;
-#endif
 #else
       // Input scaling when Daala is not possible, AV1/LGT only (1 above)
       temp_in[j] =
           (tran_low_t)fdct_round_shift(input[j * stride + i] * 4 * Sqrt2);
 #endif
     }
-// Column transform (AV1/LGT scale up .5 bit, Daala does not scale)
-#if CONFIG_LGT
-    if (use_lgt_col)
-      flgt4(temp_in, temp_out, lgtmtx_col[0]);
-    else
-#endif
-      ht.cols(temp_in, temp_out);
+    // Column transform (AV1 scales up .5 bit, Daala does not scale)
+    ht.cols(temp_in, temp_out);
     // No scaling between transforms
     for (j = 0; j < n; ++j) out[j * n2 + i] = temp_out[j];
   }
@@ -1550,13 +1460,8 @@
   // Rows
   for (i = 0; i < n; ++i) {
     for (j = 0; j < n2; ++j) temp_in[j] = out[j + i * n2];
-// Row transform (AV1/LGT scale up 1 bit, Daala does not scale)
-#if CONFIG_LGT
-    if (use_lgt_row)
-      flgt8(temp_in, temp_out, lgtmtx_row[0]);
-    else
-#endif
-      ht.rows(temp_in, temp_out);
+    // Row transform (AV1 scales up 1 bit, Daala does not scale)
+    ht.rows(temp_in, temp_out);
     // Output scaling is always a downshift of 1
     for (j = 0; j < n2; ++j)
       output[j + i * n2] = (temp_out[j] + (temp_out[j] < 0)) >> 1;
@@ -1600,20 +1505,10 @@
   int16_t flipped_input[16 * 4];
   maybe_flip_input(&input, &stride, n4, n, flipped_input, tx_type);
 
-#if CONFIG_LGT
-  const tran_high_t *lgtmtx_row[1];
-  int use_lgt_row = get_lgt4(txfm_param, 0, lgtmtx_row);
-#endif
-
   // Rows
   for (i = 0; i < n4; ++i) {
     for (j = 0; j < n; ++j) temp_in[j] = input[i * stride + j] * 4;
-#if CONFIG_LGT
-    if (use_lgt_row)
-      flgt4(temp_in, temp_out, lgtmtx_row[0]);
-    else
-#endif
-      ht.rows(temp_in, temp_out);
+    ht.rows(temp_in, temp_out);
     for (j = 0; j < n; ++j) out[j * n4 + i] = temp_out[j];
   }
 
@@ -1663,20 +1558,10 @@
   int16_t flipped_input[16 * 4];
   maybe_flip_input(&input, &stride, n, n4, flipped_input, tx_type);
 
-#if CONFIG_LGT
-  const tran_high_t *lgtmtx_col[1];
-  int use_lgt_col = get_lgt4(txfm_param, 1, lgtmtx_col);
-#endif
-
   // Columns
   for (i = 0; i < n4; ++i) {
     for (j = 0; j < n; ++j) temp_in[j] = input[j * stride + i] * 4;
-#if CONFIG_LGT
-    if (use_lgt_col)
-      flgt4(temp_in, temp_out, lgtmtx_col[0]);
-    else
-#endif
-      ht.cols(temp_in, temp_out);
+    ht.cols(temp_in, temp_out);
     for (j = 0; j < n; ++j) out[j * n4 + i] = temp_out[j];
   }
 
@@ -1745,11 +1630,6 @@
   int16_t flipped_input[16 * 8];
   maybe_flip_input(&input, &stride, n2, n, flipped_input, tx_type);
 
-#if CONFIG_LGT
-  const tran_high_t *lgtmtx_row[1];
-  int use_lgt_row = get_lgt8(txfm_param, 0, lgtmtx_row);
-#endif
-
   // Multi-way scaling matrix (bits):
   // LGT/AV1 row, AV1 col  input+2.5, rowTX+1, mid-2, colTX+1.5, out+0 == 3
   // LGT row, Daala col    input+3,   rowTX+1, mid+0, colTX+0,   out-1 == 3
@@ -1761,13 +1641,8 @@
     // Input scaling
     for (j = 0; j < n; ++j) {
 #if CONFIG_DAALA_TX8 && CONFIG_DAALA_TX16
-#if CONFIG_LGT
-      // Input scaling when LGT might be active (cases 2, 4 above)
-      temp_in[j] = input[i * stride + j] * (use_lgt_row ? 2 : 4) * 4;
-#else
       // Input scaling when LGT is not possible, Daala only (case 4 above)
       temp_in[j] = input[i * stride + j] * 16;
-#endif
 #else
       // Input scaling when Daala is not possible, LGT/AV1 only (case 1 above)
       temp_in[j] =
@@ -1775,13 +1650,8 @@
 #endif
     }
 
-// Row transform (AV1/LGT scale up 1 bit, Daala does not scale)
-#if CONFIG_LGT
-    if (use_lgt_row)
-      flgt8(temp_in, temp_out, lgtmtx_row[0]);
-    else
-#endif
-      ht.rows(temp_in, temp_out);
+    // Row transform (AV1 scales up 1 bit, Daala does not scale)
+    ht.rows(temp_in, temp_out);
 
     // Mid scaling
     for (j = 0; j < n; ++j) {
@@ -1868,11 +1738,6 @@
   int16_t flipped_input[16 * 8];
   maybe_flip_input(&input, &stride, n, n2, flipped_input, tx_type);
 
-#if CONFIG_LGT
-  const tran_high_t *lgtmtx_col[1];
-  int use_lgt_col = get_lgt8(txfm_param, 1, lgtmtx_col);
-#endif
-
   // Multi-way scaling matrix (bits):
   // LGT/AV1 col, AV1 row  input+2.5, colTX+1, mid-2, rowTX+1.5, out+0 == 3
   // LGT col, Daala row    input+3,   colTX+1, mid+0, rowTX+0,   out-1 == 3
@@ -1884,13 +1749,8 @@
     // Input scaling
     for (j = 0; j < n; ++j) {
 #if CONFIG_DAALA_TX8 && CONFIG_DAALA_TX16
-#if CONFIG_LGT
-      // Input scaling when LGT might be active (1, 2 above)
-      temp_in[j] = input[j * stride + i] * 4 * (use_lgt_col ? 2 : 4);
-#else
       // Input scaling when LGT is not possible, Daala only (4 above)
       temp_in[j] = input[j * stride + i] * 16;
-#endif
 #else
       // Input scaling when Daala is not possible, AV1/LGT only (1 above)
       temp_in[j] =
@@ -1898,13 +1758,8 @@
 #endif
     }
 
-// Column transform (AV1/LGT scale up 1 bit, Daala does not scale)
-#if CONFIG_LGT
-    if (use_lgt_col)
-      flgt8(temp_in, temp_out, lgtmtx_col[0]);
-    else
-#endif
-      ht.cols(temp_in, temp_out);
+    // Column transform (AV1 scales up 1 bit, Daala does not scale)
+    ht.cols(temp_in, temp_out);
 
     // Mid scaling
     for (j = 0; j < n; ++j) {
@@ -1972,20 +1827,10 @@
   int16_t flipped_input[32 * 8];
   maybe_flip_input(&input, &stride, n4, n, flipped_input, tx_type);
 
-#if CONFIG_LGT
-  const tran_high_t *lgtmtx_row[1];
-  int use_lgt_row = get_lgt8(txfm_param, 0, lgtmtx_row);
-#endif
-
   // Rows
   for (i = 0; i < n4; ++i) {
     for (j = 0; j < n; ++j) temp_in[j] = input[i * stride + j] * 4;
-#if CONFIG_LGT
-    if (use_lgt_row)
-      flgt8(temp_in, temp_out, lgtmtx_row[0]);
-    else
-#endif
-      ht.rows(temp_in, temp_out);
+    ht.rows(temp_in, temp_out);
     for (j = 0; j < n; ++j) out[j * n4 + i] = temp_out[j];
   }
 
@@ -2035,20 +1880,10 @@
   int16_t flipped_input[32 * 8];
   maybe_flip_input(&input, &stride, n, n4, flipped_input, tx_type);
 
-#if CONFIG_LGT
-  const tran_high_t *lgtmtx_col[1];
-  int use_lgt_col = get_lgt8(txfm_param, 1, lgtmtx_col);
-#endif
-
   // Columns
   for (i = 0; i < n4; ++i) {
     for (j = 0; j < n; ++j) temp_in[j] = input[j * stride + i] * 4;
-#if CONFIG_LGT
-    if (use_lgt_col)
-      flgt8(temp_in, temp_out, lgtmtx_col[0]);
-    else
-#endif
-      ht.cols(temp_in, temp_out);
+    ht.cols(temp_in, temp_out);
     for (j = 0; j < n; ++j) out[j * n4 + i] = temp_out[j];
   }
 
@@ -2291,13 +2126,6 @@
     int16_t flipped_input[8 * 8];
     maybe_flip_input(&input, &stride, 8, 8, flipped_input, tx_type);
 
-#if CONFIG_LGT
-    const tran_high_t *lgtmtx_col[1];
-    const tran_high_t *lgtmtx_row[1];
-    int use_lgt_col = get_lgt8(txfm_param, 1, lgtmtx_col);
-    int use_lgt_row = get_lgt8(txfm_param, 0, lgtmtx_row);
-#endif
-
     // Columns
     for (i = 0; i < 8; ++i) {
 #if CONFIG_DAALA_TX8
@@ -2305,24 +2133,14 @@
 #else
       for (j = 0; j < 8; ++j) temp_in[j] = input[j * stride + i] * 4;
 #endif
-#if CONFIG_LGT
-      if (use_lgt_col)
-        flgt8(temp_in, temp_out, lgtmtx_col[0]);
-      else
-#endif
-        ht.cols(temp_in, temp_out);
+      ht.cols(temp_in, temp_out);
       for (j = 0; j < 8; ++j) out[j * 8 + i] = temp_out[j];
     }
 
     // Rows
     for (i = 0; i < 8; ++i) {
       for (j = 0; j < 8; ++j) temp_in[j] = out[j + i * 8];
-#if CONFIG_LGT
-      if (use_lgt_row)
-        flgt8(temp_in, temp_out, lgtmtx_row[0]);
-      else
-#endif
-        ht.rows(temp_in, temp_out);
+      ht.rows(temp_in, temp_out);
 #if CONFIG_DAALA_TX8
       for (j = 0; j < 8; ++j)
         output[j + i * 8] = (temp_out[j] + (temp_out[j] < 0)) >> 1;
diff --git a/av1/encoder/encodemb.c b/av1/encoder/encodemb.c
index fa3ae44..fcc56f9 100644
--- a/av1/encoder/encodemb.c
+++ b/av1/encoder/encodemb.c
@@ -542,7 +542,7 @@
   txfm_param.tx_set_type =
       get_ext_tx_set_type(txfm_param.tx_size, plane_bsize, is_inter_block(mbmi),
                           cm->reduced_tx_set_used);
-#if CONFIG_MRC_TX || CONFIG_LGT
+#if CONFIG_MRC_TX
   txfm_param.is_inter = is_inter_block(mbmi);
 #endif
 #if CONFIG_MRC_TX
diff --git a/av1/encoder/hybrid_fwd_txfm.c b/av1/encoder/hybrid_fwd_txfm.c
index fdbf313..74bb439 100644
--- a/av1/encoder/hybrid_fwd_txfm.c
+++ b/av1/encoder/hybrid_fwd_txfm.c
@@ -24,7 +24,7 @@
     return;
   }
 
-#if CONFIG_LGT || CONFIG_DAALA_TX4
-  // only C version has LGTs
+#if CONFIG_DAALA_TX4
+  // only the C version supports Daala TX
   av1_fht4x4_c(src_diff, coeff, diff_stride, txfm_param);
 #else
@@ -34,7 +34,7 @@
 
 static void fwd_txfm_4x8(const int16_t *src_diff, tran_low_t *coeff,
                          int diff_stride, TxfmParam *txfm_param) {
-#if CONFIG_LGT || (CONFIG_DAALA_TX4 && CONFIG_DAALA_TX8)
+#if (CONFIG_DAALA_TX4 && CONFIG_DAALA_TX8)
   av1_fht4x8_c(src_diff, coeff, diff_stride, txfm_param);
 #else
   av1_fht4x8(src_diff, coeff, diff_stride, txfm_param);
@@ -43,7 +43,7 @@
 
 static void fwd_txfm_8x4(const int16_t *src_diff, tran_low_t *coeff,
                          int diff_stride, TxfmParam *txfm_param) {
-#if CONFIG_LGT || (CONFIG_DAALA_TX4 && CONFIG_DAALA_TX8)
+#if (CONFIG_DAALA_TX4 && CONFIG_DAALA_TX8)
   av1_fht8x4_c(src_diff, coeff, diff_stride, txfm_param);
 #else
   av1_fht8x4(src_diff, coeff, diff_stride, txfm_param);
@@ -52,7 +52,7 @@
 
 static void fwd_txfm_8x16(const int16_t *src_diff, tran_low_t *coeff,
                           int diff_stride, TxfmParam *txfm_param) {
-#if CONFIG_LGT || (CONFIG_DAALA_TX8 && CONFIG_DAALA_TX16)
+#if (CONFIG_DAALA_TX8 && CONFIG_DAALA_TX16)
   av1_fht8x16_c(src_diff, coeff, diff_stride, txfm_param);
 #else
   av1_fht8x16(src_diff, coeff, diff_stride, txfm_param);
@@ -61,7 +61,7 @@
 
 static void fwd_txfm_16x8(const int16_t *src_diff, tran_low_t *coeff,
                           int diff_stride, TxfmParam *txfm_param) {
-#if CONFIG_LGT || (CONFIG_DAALA_TX8 && CONFIG_DAALA_TX16)
+#if (CONFIG_DAALA_TX8 && CONFIG_DAALA_TX16)
   av1_fht16x8_c(src_diff, coeff, diff_stride, txfm_param);
 #else
   av1_fht16x8(src_diff, coeff, diff_stride, txfm_param);
@@ -88,7 +88,7 @@
 
 static void fwd_txfm_8x8(const int16_t *src_diff, tran_low_t *coeff,
                          int diff_stride, TxfmParam *txfm_param) {
-#if CONFIG_LGT || CONFIG_DAALA_TX8
+#if CONFIG_DAALA_TX8
   av1_fht8x8_c(src_diff, coeff, diff_stride, txfm_param);
 #else
   av1_fht8x8(src_diff, coeff, diff_stride, txfm_param);
@@ -145,38 +145,22 @@
 #if CONFIG_RECT_TX_EXT
 static void fwd_txfm_16x4(const int16_t *src_diff, tran_low_t *coeff,
                           int diff_stride, TxfmParam *txfm_param) {
-#if CONFIG_LGT
-  av1_fht16x4_c(src_diff, coeff, diff_stride, txfm_param);
-#else
   av1_fht16x4(src_diff, coeff, diff_stride, txfm_param);
-#endif
 }
 
 static void fwd_txfm_4x16(const int16_t *src_diff, tran_low_t *coeff,
                           int diff_stride, TxfmParam *txfm_param) {
-#if CONFIG_LGT
-  av1_fht4x16_c(src_diff, coeff, diff_stride, txfm_param);
-#else
   av1_fht4x16(src_diff, coeff, diff_stride, txfm_param);
-#endif
 }
 
 static void fwd_txfm_32x8(const int16_t *src_diff, tran_low_t *coeff,
                           int diff_stride, TxfmParam *txfm_param) {
-#if CONFIG_LGT
-  av1_fht32x8_c(src_diff, coeff, diff_stride, txfm_param);
-#else
   av1_fht32x8(src_diff, coeff, diff_stride, txfm_param);
-#endif
 }
 
 static void fwd_txfm_8x32(const int16_t *src_diff, tran_low_t *coeff,
                           int diff_stride, TxfmParam *txfm_param) {
-#if CONFIG_LGT
-  av1_fht8x32_c(src_diff, coeff, diff_stride, txfm_param);
-#else
   av1_fht8x32(src_diff, coeff, diff_stride, txfm_param);
-#endif
 }
 #endif
 
diff --git a/build/cmake/aom_config_defaults.cmake b/build/cmake/aom_config_defaults.cmake
index ea7964a..46c9310 100644
--- a/build/cmake/aom_config_defaults.cmake
+++ b/build/cmake/aom_config_defaults.cmake
@@ -156,7 +156,6 @@
 set(CONFIG_INTRA_EDGE 1 CACHE NUMBER "AV1 experiment flag.")
 set(CONFIG_JNT_COMP 0 CACHE NUMBER "AV1 experiment flag.")
 set(CONFIG_KF_CTX 1 CACHE NUMBER "AV1 experiment flag.")
-set(CONFIG_LGT 0 CACHE NUMBER "AV1 experiment flag.")
 set(CONFIG_LOOPFILTERING_ACROSS_TILES 1 CACHE NUMBER "AV1 experiment flag.")
 set(CONFIG_LOOPFILTER_LEVEL 1 CACHE NUMBER "AV1 experiment flag.")
 set(CONFIG_LOOP_RESTORATION 1 CACHE NUMBER "AV1 experiment flag.")
diff --git a/build/cmake/aom_experiment_deps.cmake b/build/cmake/aom_experiment_deps.cmake
index 00fe45d..de1bb81 100644
--- a/build/cmake/aom_experiment_deps.cmake
+++ b/build/cmake/aom_experiment_deps.cmake
@@ -55,9 +55,6 @@
 
   if (CONFIG_DAALA_TX4 OR CONFIG_DAALA_TX8 OR CONFIG_DAALA_TX16 OR
       CONFIG_DAALA_TX32 OR CONFIG_DAALA_TX64)
-    if (CONFIG_LGT)
-      change_config_and_warn(CONFIG_LGT 0 CONFIG_DAALA_TXx)
-    endif ()
     if (NOT CONFIG_LOWBITDEPTH)
       change_config_and_warn(CONFIG_LOWBITDEPTH 1 CONFIG_DAALA_TXx)
     endif ()
diff --git a/configure b/configure
index 17796a3..a19a3e9 100755
--- a/configure
+++ b/configure
@@ -311,7 +311,6 @@
     aom_qm
     ext_comp_refs
     smooth_hv
-    lgt
     bgsprite
     var_tx_no_tx_mode
     simplify_tx_mode
@@ -574,7 +573,6 @@
     if enabled daala_tx4 || enabled daala_tx8 || enabled daala_tx16 ||
         enabled daala_tx32 || enabled daala_tx64; then
       disable_feature txmg
-      disable_feature lgt
       enable_feature lowbitdepth
     fi
     if enabled ext_partition_types; then
diff --git a/test/av1_fht16x16_test.cc b/test/av1_fht16x16_test.cc
index 60d32de..fefdab9 100644
--- a/test/av1_fht16x16_test.cc
+++ b/test/av1_fht16x16_test.cc
@@ -164,7 +164,7 @@
 
 using std::tr1::make_tuple;
 
-#if HAVE_SSE2 && !CONFIG_DAALA_TX16 && !CONFIG_LGT
+#if HAVE_SSE2 && !CONFIG_DAALA_TX16
 const Ht16x16Param kArrayHt16x16Param_sse2[] = {
   make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, DCT_DCT,
              AOM_BITS_8, 256),
@@ -203,7 +203,7 @@
                         ::testing::ValuesIn(kArrayHt16x16Param_sse2));
 #endif  // HAVE_SSE2
 
-#if HAVE_AVX2 && !CONFIG_DAALA_TX16 && !CONFIG_LGT
+#if HAVE_AVX2 && !CONFIG_DAALA_TX16
 const Ht16x16Param kArrayHt16x16Param_avx2[] = {
   make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, DCT_DCT,
              AOM_BITS_8, 256),
diff --git a/test/av1_fht4x4_test.cc b/test/av1_fht4x4_test.cc
index 3e6847d..df7b03c 100644
--- a/test/av1_fht4x4_test.cc
+++ b/test/av1_fht4x4_test.cc
@@ -167,7 +167,7 @@
 
 using std::tr1::make_tuple;
 
-#if HAVE_SSE2 && !CONFIG_DAALA_TX4 && !CONFIG_LGT
+#if HAVE_SSE2 && !CONFIG_DAALA_TX4
 const Ht4x4Param kArrayHt4x4Param_sse2[] = {
   make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, DCT_DCT, AOM_BITS_8,
              16),
diff --git a/test/av1_fht8x8_test.cc b/test/av1_fht8x8_test.cc
index 325b1bb..1bffe4e 100644
--- a/test/av1_fht8x8_test.cc
+++ b/test/av1_fht8x8_test.cc
@@ -167,7 +167,7 @@
 
 using std::tr1::make_tuple;
 
-#if HAVE_SSE2 && !CONFIG_DAALA_TX8 && !CONFIG_LGT
+#if HAVE_SSE2 && !CONFIG_DAALA_TX8
 const Ht8x8Param kArrayHt8x8Param_sse2[] = {
   make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, DCT_DCT, AOM_BITS_8,
              64),