Remove the experimental EXT_TX flag
This experiment has been adopted; we can simplify the code
by dropping the associated preprocessor conditionals.
Change-Id: I02ed47186bbc32400ee9bfadda17659d859c0ef7
diff --git a/av1/encoder/bitstream.c b/av1/encoder/bitstream.c
index 4e1603a..ebb8011 100644
--- a/av1/encoder/bitstream.c
+++ b/av1/encoder/bitstream.c
@@ -328,9 +328,7 @@
: intra_tx_size_cat_lookup[bsize];
const TX_SIZE coded_tx_size = txsize_sqr_up_map[tx_size];
const int depth = tx_size_to_depth(coded_tx_size);
-#if CONFIG_EXT_TX
assert(IMPLIES(is_rect_tx(tx_size), is_rect_tx_allowed(xd, mbmi)));
-#endif // CONFIG_EXT_TX
aom_write_symbol(w, depth, ec_ctx->tx_size_cdf[tx_size_cat][tx_size_ctx],
tx_size_cat + 2);
@@ -1316,7 +1314,6 @@
#endif
if (!FIXED_TX_TYPE) {
-#if CONFIG_EXT_TX
const TX_SIZE square_tx_size = txsize_sqr_map[tx_size];
const BLOCK_SIZE bsize = mbmi->sb_type;
if (get_ext_tx_types(tx_size, bsize, is_inter, cm->reduced_tx_set_used) >
@@ -1395,24 +1392,6 @@
}
#endif // CONFIG_LGT_FROM_PRED
}
-#else // CONFIG_EXT_TX
- if (tx_size < TX_32X32 &&
- ((!cm->seg.enabled && cm->base_qindex > 0) ||
- (cm->seg.enabled && xd->qindex[mbmi->segment_id] > 0)) &&
- !mbmi->skip &&
- !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
- if (is_inter) {
- aom_write_symbol(w, av1_ext_tx_ind[tx_type],
- ec_ctx->inter_ext_tx_cdf[tx_size], TX_TYPES);
- } else {
- aom_write_symbol(
- w, av1_ext_tx_ind[tx_type],
- ec_ctx->intra_ext_tx_cdf[tx_size]
- [intra_mode_to_tx_type_context[mbmi->mode]],
- TX_TYPES);
- }
- }
-#endif // CONFIG_EXT_TX
}
}
@@ -1787,9 +1766,9 @@
}
int_mv dv_ref = mbmi_ext->ref_mvs[INTRA_FRAME][0];
av1_encode_dv(w, &mbmi->mv[0].as_mv, &dv_ref.as_mv, &ec_ctx->ndvc);
-#if CONFIG_EXT_TX && !CONFIG_TXK_SEL
+#if !CONFIG_TXK_SEL
av1_write_tx_type(cm, xd, w);
-#endif // CONFIG_EXT_TX && !CONFIG_TXK_SEL
+#endif // !CONFIG_TXK_SEL
}
}
#endif // CONFIG_INTRABC
@@ -4145,9 +4124,7 @@
}
write_compound_tools(cm, wb);
-#if CONFIG_EXT_TX
aom_wb_write_bit(wb, cm->reduced_tx_set_used);
-#endif // CONFIG_EXT_TX
#if CONFIG_ADAPT_SCAN
aom_wb_write_bit(wb, cm->use_adapt_scan);
@@ -4496,9 +4473,7 @@
}
write_compound_tools(cm, wb);
-#if CONFIG_EXT_TX
aom_wb_write_bit(wb, cm->reduced_tx_set_used);
-#endif // CONFIG_EXT_TX
if (!frame_is_intra_only(cm)) write_global_motion(cpi, wb);
diff --git a/av1/encoder/block.h b/av1/encoder/block.h
index fea97c5..169038c 100644
--- a/av1/encoder/block.h
+++ b/av1/encoder/block.h
@@ -264,7 +264,6 @@
int quarter_tx_size_cost[2];
#endif
int txfm_partition_cost[TXFM_PARTITION_CONTEXTS][2];
-#if CONFIG_EXT_TX
#if CONFIG_LGT_FROM_PRED
int intra_lgt_cost[LGT_SIZES][INTRA_MODES][2];
int inter_lgt_cost[LGT_SIZES][2];
@@ -272,10 +271,6 @@
int inter_tx_type_costs[EXT_TX_SETS_INTER][EXT_TX_SIZES][TX_TYPES];
int intra_tx_type_costs[EXT_TX_SETS_INTRA][EXT_TX_SIZES][INTRA_MODES]
[TX_TYPES];
-#else
- int intra_tx_type_costs[EXT_TX_SIZES][TX_TYPES][TX_TYPES];
- int inter_tx_type_costs[EXT_TX_SIZES][TX_TYPES];
-#endif // CONFIG_EXT_TX
#if CONFIG_EXT_INTRA && CONFIG_EXT_INTRA_MOD
int angle_delta_cost[DIRECTIONAL_MODES][2 * MAX_ANGLE_DELTA + 1];
#endif // CONFIG_EXT_INTRA && CONFIG_EXT_INTRA_MOD
diff --git a/av1/encoder/dct.c b/av1/encoder/dct.c
index 7131cb4..e6bcac4 100644
--- a/av1/encoder/dct.c
+++ b/av1/encoder/dct.c
@@ -1249,7 +1249,6 @@
}
#endif // CONFIG_LGT_FROM_PRED
-#if CONFIG_EXT_TX
// TODO(sarahparker) these functions will be removed once the highbitdepth
// codepath works properly for rectangular transforms. They have almost
// identical versions in av1_fwd_txfm1d.c, but those are currently only
@@ -1378,7 +1377,6 @@
default: assert(0); break;
}
}
-#endif // CONFIG_EXT_TX
void av1_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
@@ -1402,7 +1400,6 @@
{ daala_fdst4, daala_fdct4 }, // ADST_DCT
{ daala_fdct4, daala_fdst4 }, // DCT_ADST
{ daala_fdst4, daala_fdst4 }, // ADST_ADST
-#if CONFIG_EXT_TX
{ daala_fdst4, daala_fdct4 }, // FLIPADST_DCT
{ daala_fdct4, daala_fdst4 }, // DCT_FLIPADST
{ daala_fdst4, daala_fdst4 }, // FLIPADST_FLIPADST
@@ -1415,13 +1412,11 @@
{ daala_idtx4, daala_fdst4 }, // H_ADST
{ daala_fdst4, daala_idtx4 }, // V_FLIPADST
{ daala_idtx4, daala_fdst4 }, // H_FLIPADST
-#endif
#else
{ fdct4, fdct4 }, // DCT_DCT
{ fadst4, fdct4 }, // ADST_DCT
{ fdct4, fadst4 }, // DCT_ADST
{ fadst4, fadst4 }, // ADST_ADST
-#if CONFIG_EXT_TX
{ fadst4, fdct4 }, // FLIPADST_DCT
{ fdct4, fadst4 }, // DCT_FLIPADST
{ fadst4, fadst4 }, // FLIPADST_FLIPADST
@@ -1435,17 +1430,14 @@
{ fadst4, fidtx4 }, // V_FLIPADST
{ fidtx4, fadst4 }, // H_FLIPADST
#endif
-#endif
};
const transform_2d ht = FHT[tx_type];
tran_low_t out[4 * 4];
int i, j;
tran_low_t temp_in[4], temp_out[4];
-#if CONFIG_EXT_TX
int16_t flipped_input[4 * 4];
maybe_flip_input(&input, &stride, 4, 4, flipped_input, tx_type);
-#endif
#if CONFIG_LGT
// Choose LGT adaptive to the prediction. We may apply different LGTs for
@@ -1507,7 +1499,6 @@
{ daala_fdst8, daala_fdct4 }, // ADST_DCT
{ daala_fdct8, daala_fdst4 }, // DCT_ADST
{ daala_fdst8, daala_fdst4 }, // ADST_ADST
-#if CONFIG_EXT_TX
{ daala_fdst8, daala_fdct4 }, // FLIPADST_DCT
{ daala_fdct8, daala_fdst4 }, // DCT_FLIPADST
{ daala_fdst8, daala_fdst4 }, // FLIPADST_FLIPADST
@@ -1520,13 +1511,11 @@
{ daala_idtx8, daala_fdst4 }, // H_ADST
{ daala_fdst8, daala_idtx4 }, // V_FLIPADST
{ daala_idtx8, daala_fdst4 }, // H_FLIPADST
-#endif
#else
{ fdct8, fdct4 }, // DCT_DCT
{ fadst8, fdct4 }, // ADST_DCT
{ fdct8, fadst4 }, // DCT_ADST
{ fadst8, fadst4 }, // ADST_ADST
-#if CONFIG_EXT_TX
{ fadst8, fdct4 }, // FLIPADST_DCT
{ fdct8, fadst4 }, // DCT_FLIPADST
{ fadst8, fadst4 }, // FLIPADST_FLIPADST
@@ -1540,7 +1529,6 @@
{ fadst8, fidtx4 }, // V_FLIPADST
{ fidtx8, fadst4 }, // H_FLIPADST
#endif
-#endif
};
const transform_2d ht = FHT[tx_type];
const int n = 4;
@@ -1548,10 +1536,8 @@
tran_low_t out[8 * 4];
tran_low_t temp_in[8], temp_out[8];
int i, j;
-#if CONFIG_EXT_TX
int16_t flipped_input[8 * 4];
maybe_flip_input(&input, &stride, n2, n, flipped_input, tx_type);
-#endif
#if CONFIG_LGT
const tran_high_t *lgtmtx_col[1];
@@ -1630,7 +1616,6 @@
{ daala_fdst4, daala_fdct8 }, // ADST_DCT
{ daala_fdct4, daala_fdst8 }, // DCT_ADST
{ daala_fdst4, daala_fdst8 }, // ADST_ADST
-#if CONFIG_EXT_TX
{ daala_fdst4, daala_fdct8 }, // FLIPADST_DCT
{ daala_fdct4, daala_fdst8 }, // DCT_FLIPADST
{ daala_fdst4, daala_fdst8 }, // FLIPADST_FLIPADST
@@ -1643,13 +1628,11 @@
{ daala_idtx4, daala_fdst8 }, // H_ADST
{ daala_fdst4, daala_idtx8 }, // V_FLIPADST
{ daala_idtx4, daala_fdst8 }, // H_FLIPADST
-#endif
#else
{ fdct4, fdct8 }, // DCT_DCT
{ fadst4, fdct8 }, // ADST_DCT
{ fdct4, fadst8 }, // DCT_ADST
{ fadst4, fadst8 }, // ADST_ADST
-#if CONFIG_EXT_TX
{ fadst4, fdct8 }, // FLIPADST_DCT
{ fdct4, fadst8 }, // DCT_FLIPADST
{ fadst4, fadst8 }, // FLIPADST_FLIPADST
@@ -1663,7 +1646,6 @@
{ fadst4, fidtx8 }, // V_FLIPADST
{ fidtx4, fadst8 }, // H_FLIPADST
#endif
-#endif
};
const transform_2d ht = FHT[tx_type];
const int n = 4;
@@ -1671,10 +1653,8 @@
tran_low_t out[8 * 4];
tran_low_t temp_in[8], temp_out[8];
int i, j;
-#if CONFIG_EXT_TX
int16_t flipped_input[8 * 4];
maybe_flip_input(&input, &stride, n, n2, flipped_input, tx_type);
-#endif
#if CONFIG_LGT
const tran_high_t *lgtmtx_col[1];
@@ -1751,7 +1731,6 @@
{ fadst16, fdct4 }, // ADST_DCT
{ fdct16, fadst4 }, // DCT_ADST
{ fadst16, fadst4 }, // ADST_ADST
-#if CONFIG_EXT_TX
{ fadst16, fdct4 }, // FLIPADST_DCT
{ fdct16, fadst4 }, // DCT_FLIPADST
{ fadst16, fadst4 }, // FLIPADST_FLIPADST
@@ -1764,7 +1743,6 @@
{ fidtx16, fadst4 }, // H_ADST
{ fadst16, fidtx4 }, // V_FLIPADST
{ fidtx16, fadst4 }, // H_FLIPADST
-#endif
};
const transform_2d ht = FHT[tx_type];
const int n = 4;
@@ -1772,10 +1750,8 @@
tran_low_t out[16 * 4];
tran_low_t temp_in[16], temp_out[16];
int i, j;
-#if CONFIG_EXT_TX
int16_t flipped_input[16 * 4];
maybe_flip_input(&input, &stride, n4, n, flipped_input, tx_type);
-#endif
#if CONFIG_LGT
const tran_high_t *lgtmtx_row[1];
@@ -1818,7 +1794,6 @@
{ fadst4, fdct16 }, // ADST_DCT
{ fdct4, fadst16 }, // DCT_ADST
{ fadst4, fadst16 }, // ADST_ADST
-#if CONFIG_EXT_TX
{ fadst4, fdct16 }, // FLIPADST_DCT
{ fdct4, fadst16 }, // DCT_FLIPADST
{ fadst4, fadst16 }, // FLIPADST_FLIPADST
@@ -1831,7 +1806,6 @@
{ fidtx4, fadst16 }, // H_ADST
{ fadst4, fidtx16 }, // V_FLIPADST
{ fidtx4, fadst16 }, // H_FLIPADST
-#endif
};
const transform_2d ht = FHT[tx_type];
const int n = 4;
@@ -1839,10 +1813,8 @@
tran_low_t out[16 * 4];
tran_low_t temp_in[16], temp_out[16];
int i, j;
-#if CONFIG_EXT_TX
int16_t flipped_input[16 * 4];
maybe_flip_input(&input, &stride, n, n4, flipped_input, tx_type);
-#endif
#if CONFIG_LGT
const tran_high_t *lgtmtx_col[1];
@@ -1886,7 +1858,6 @@
{ daala_fdst16, daala_fdct8 }, // ADST_DCT
{ daala_fdct16, daala_fdst8 }, // DCT_ADST
{ daala_fdst16, daala_fdst8 }, // ADST_ADST
-#if CONFIG_EXT_TX
{ daala_fdst16, daala_fdct8 }, // FLIPADST_DCT
{ daala_fdct16, daala_fdst8 }, // DCT_FLIPADST
{ daala_fdst16, daala_fdst8 }, // FLIPADST_FLIPADST
@@ -1899,13 +1870,11 @@
{ daala_idtx16, daala_fdst8 }, // H_ADST
{ daala_fdst16, daala_idtx8 }, // V_FLIPADST
{ daala_idtx16, daala_fdst8 }, // H_FLIPADST
-#endif
#else
{ fdct16, fdct8 }, // DCT_DCT
{ fadst16, fdct8 }, // ADST_DCT
{ fdct16, fadst8 }, // DCT_ADST
{ fadst16, fadst8 }, // ADST_ADST
-#if CONFIG_EXT_TX
{ fadst16, fdct8 }, // FLIPADST_DCT
{ fdct16, fadst8 }, // DCT_FLIPADST
{ fadst16, fadst8 }, // FLIPADST_FLIPADST
@@ -1919,7 +1888,6 @@
{ fadst16, fidtx8 }, // V_FLIPADST
{ fidtx16, fadst8 }, // H_FLIPADST
#endif
-#endif
};
const transform_2d ht = FHT[tx_type];
const int n = 8;
@@ -1927,10 +1895,8 @@
tran_low_t out[16 * 8];
tran_low_t temp_in[16], temp_out[16];
int i, j;
-#if CONFIG_EXT_TX
int16_t flipped_input[16 * 8];
maybe_flip_input(&input, &stride, n2, n, flipped_input, tx_type);
-#endif
#if CONFIG_LGT
const tran_high_t *lgtmtx_row[1];
@@ -2015,7 +1981,6 @@
{ daala_fdst8, daala_fdct16 }, // ADST_DCT
{ daala_fdct8, daala_fdst16 }, // DCT_ADST
{ daala_fdst8, daala_fdst16 }, // ADST_ADST
-#if CONFIG_EXT_TX
{ daala_fdst8, daala_fdct16 }, // FLIPADST_DCT
{ daala_fdct8, daala_fdst16 }, // DCT_FLIPADST
{ daala_fdst8, daala_fdst16 }, // FLIPADST_FLIPADST
@@ -2028,13 +1993,11 @@
{ daala_idtx8, daala_fdst16 }, // H_ADST
{ daala_fdst8, daala_idtx16 }, // V_FLIPADST
{ daala_idtx8, daala_fdst16 }, // H_FLIPADST
-#endif
#else
{ fdct8, fdct16 }, // DCT_DCT
{ fadst8, fdct16 }, // ADST_DCT
{ fdct8, fadst16 }, // DCT_ADST
{ fadst8, fadst16 }, // ADST_ADST
-#if CONFIG_EXT_TX
{ fadst8, fdct16 }, // FLIPADST_DCT
{ fdct8, fadst16 }, // DCT_FLIPADST
{ fadst8, fadst16 }, // FLIPADST_FLIPADST
@@ -2048,7 +2011,6 @@
{ fadst8, fidtx16 }, // V_FLIPADST
{ fidtx8, fadst16 }, // H_FLIPADST
#endif
-#endif
};
const transform_2d ht = FHT[tx_type];
const int n = 8;
@@ -2056,10 +2018,8 @@
tran_low_t out[16 * 8];
tran_low_t temp_in[16], temp_out[16];
int i, j;
-#if CONFIG_EXT_TX
int16_t flipped_input[16 * 8];
maybe_flip_input(&input, &stride, n, n2, flipped_input, tx_type);
-#endif
#if CONFIG_LGT
const tran_high_t *lgtmtx_col[1];
@@ -2143,7 +2103,6 @@
{ fhalfright32, fdct8 }, // ADST_DCT
{ fdct32, fadst8 }, // DCT_ADST
{ fhalfright32, fadst8 }, // ADST_ADST
-#if CONFIG_EXT_TX
{ fhalfright32, fdct8 }, // FLIPADST_DCT
{ fdct32, fadst8 }, // DCT_FLIPADST
{ fhalfright32, fadst8 }, // FLIPADST_FLIPADST
@@ -2156,7 +2115,6 @@
{ fidtx32, fadst8 }, // H_ADST
{ fhalfright32, fidtx8 }, // V_FLIPADST
{ fidtx32, fadst8 }, // H_FLIPADST
-#endif
};
const transform_2d ht = FHT[tx_type];
const int n = 8;
@@ -2164,10 +2122,8 @@
tran_low_t out[32 * 8];
tran_low_t temp_in[32], temp_out[32];
int i, j;
-#if CONFIG_EXT_TX
int16_t flipped_input[32 * 8];
maybe_flip_input(&input, &stride, n4, n, flipped_input, tx_type);
-#endif
#if CONFIG_LGT
const tran_high_t *lgtmtx_row[1];
@@ -2210,7 +2166,6 @@
{ fadst8, fdct32 }, // ADST_DCT
{ fdct8, fhalfright32 }, // DCT_ADST
{ fadst8, fhalfright32 }, // ADST_ADST
-#if CONFIG_EXT_TX
{ fadst8, fdct32 }, // FLIPADST_DCT
{ fdct8, fhalfright32 }, // DCT_FLIPADST
{ fadst8, fhalfright32 }, // FLIPADST_FLIPADST
@@ -2223,7 +2178,6 @@
{ fidtx8, fhalfright32 }, // H_ADST
{ fadst8, fidtx32 }, // V_FLIPADST
{ fidtx8, fhalfright32 }, // H_FLIPADST
-#endif
};
const transform_2d ht = FHT[tx_type];
const int n = 8;
@@ -2231,10 +2185,8 @@
tran_low_t out[32 * 8];
tran_low_t temp_in[32], temp_out[32];
int i, j;
-#if CONFIG_EXT_TX
int16_t flipped_input[32 * 8];
maybe_flip_input(&input, &stride, n, n4, flipped_input, tx_type);
-#endif
#if CONFIG_LGT
const tran_high_t *lgtmtx_col[1];
@@ -2278,7 +2230,6 @@
{ daala_fdst32, daala_fdct16 }, // ADST_DCT
{ daala_fdct32, daala_fdst16 }, // DCT_ADST
{ daala_fdst32, daala_fdst16 }, // ADST_ADST
-#if CONFIG_EXT_TX
{ daala_fdst32, daala_fdct16 }, // FLIPADST_DCT
{ daala_fdct32, daala_fdst16 }, // DCT_FLIPADST
{ daala_fdst32, daala_fdst16 }, // FLIPADST_FLIPADST
@@ -2291,13 +2242,11 @@
{ daala_idtx32, daala_fdst16 }, // H_ADST
{ daala_fdst32, daala_idtx16 }, // V_FLIPADST
{ daala_idtx32, daala_fdst16 }, // H_FLIPADST
-#endif
#else
{ fdct32, fdct16 }, // DCT_DCT
{ fhalfright32, fdct16 }, // ADST_DCT
{ fdct32, fadst16 }, // DCT_ADST
{ fhalfright32, fadst16 }, // ADST_ADST
-#if CONFIG_EXT_TX
{ fhalfright32, fdct16 }, // FLIPADST_DCT
{ fdct32, fadst16 }, // DCT_FLIPADST
{ fhalfright32, fadst16 }, // FLIPADST_FLIPADST
@@ -2311,7 +2260,6 @@
{ fhalfright32, fidtx16 }, // V_FLIPADST
{ fidtx32, fadst16 }, // H_FLIPADST
#endif
-#endif
};
const transform_2d ht = FHT[tx_type];
const int n = 16;
@@ -2319,10 +2267,8 @@
tran_low_t out[32 * 16];
tran_low_t temp_in[32], temp_out[32];
int i, j;
-#if CONFIG_EXT_TX
int16_t flipped_input[32 * 16];
maybe_flip_input(&input, &stride, n2, n, flipped_input, tx_type);
-#endif
// Rows
for (i = 0; i < n2; ++i) {
@@ -2368,7 +2314,6 @@
{ daala_fdst16, daala_fdct32 }, // ADST_DCT
{ daala_fdct16, daala_fdst32 }, // DCT_ADST
{ daala_fdst16, daala_fdst32 }, // ADST_ADST
-#if CONFIG_EXT_TX
{ daala_fdst16, daala_fdct32 }, // FLIPADST_DCT
{ daala_fdct16, daala_fdst32 }, // DCT_FLIPADST
{ daala_fdst16, daala_fdst32 }, // FLIPADST_FLIPADST
@@ -2381,13 +2326,11 @@
{ daala_idtx16, daala_fdst32 }, // H_ADST
{ daala_fdst16, daala_idtx32 }, // V_FLIPADST
{ daala_idtx16, daala_fdst32 }, // H_FLIPADST
-#endif
#else
{ fdct16, fdct32 }, // DCT_DCT
{ fadst16, fdct32 }, // ADST_DCT
{ fdct16, fhalfright32 }, // DCT_ADST
{ fadst16, fhalfright32 }, // ADST_ADST
-#if CONFIG_EXT_TX
{ fadst16, fdct32 }, // FLIPADST_DCT
{ fdct16, fhalfright32 }, // DCT_FLIPADST
{ fadst16, fhalfright32 }, // FLIPADST_FLIPADST
@@ -2401,7 +2344,6 @@
{ fadst16, fidtx32 }, // V_FLIPADST
{ fidtx16, fhalfright32 }, // H_FLIPADST
#endif
-#endif
};
const transform_2d ht = FHT[tx_type];
const int n = 16;
@@ -2409,10 +2351,8 @@
tran_low_t out[32 * 16];
tran_low_t temp_in[32], temp_out[32];
int i, j;
-#if CONFIG_EXT_TX
int16_t flipped_input[32 * 16];
maybe_flip_input(&input, &stride, n, n2, flipped_input, tx_type);
-#endif
// Columns
for (i = 0; i < n2; ++i) {
@@ -2465,7 +2405,6 @@
{ daala_fdst8, daala_fdct8 }, // ADST_DCT
{ daala_fdct8, daala_fdst8 }, // DCT_ADST
{ daala_fdst8, daala_fdst8 }, // ADST_ADST
-#if CONFIG_EXT_TX
{ daala_fdst8, daala_fdct8 }, // FLIPADST_DCT
{ daala_fdct8, daala_fdst8 }, // DCT_FLIPADST
{ daala_fdst8, daala_fdst8 }, // FLIPADST_FLIPADST
@@ -2478,13 +2417,11 @@
{ daala_idtx8, daala_fdst8 }, // H_ADST
{ daala_fdst8, daala_idtx8 }, // V_FLIPADST
{ daala_idtx8, daala_fdst8 }, // H_FLIPADST
-#endif
#else
{ fdct8, fdct8 }, // DCT_DCT
{ fadst8, fdct8 }, // ADST_DCT
{ fdct8, fadst8 }, // DCT_ADST
{ fadst8, fadst8 }, // ADST_ADST
-#if CONFIG_EXT_TX
{ fadst8, fdct8 }, // FLIPADST_DCT
{ fdct8, fadst8 }, // DCT_FLIPADST
{ fadst8, fadst8 }, // FLIPADST_FLIPADST
@@ -2498,17 +2435,14 @@
{ fadst8, fidtx8 }, // V_FLIPADST
{ fidtx8, fadst8 }, // H_FLIPADST
#endif
-#endif
};
const transform_2d ht = FHT[tx_type];
tran_low_t out[64];
int i, j;
tran_low_t temp_in[8], temp_out[8];
-#if CONFIG_EXT_TX
int16_t flipped_input[8 * 8];
maybe_flip_input(&input, &stride, 8, 8, flipped_input, tx_type);
-#endif
#if CONFIG_LGT
const tran_high_t *lgtmtx_col[1];
@@ -2624,7 +2558,6 @@
{ daala_fdst16, daala_fdct16 }, // ADST_DCT
{ daala_fdct16, daala_fdst16 }, // DCT_ADST
{ daala_fdst16, daala_fdst16 }, // ADST_ADST
-#if CONFIG_EXT_TX
{ daala_fdst16, daala_fdct16 }, // FLIPADST_DCT
{ daala_fdct16, daala_fdst16 }, // DCT_FLIPADST
{ daala_fdst16, daala_fdst16 }, // FLIPADST_FLIPADST
@@ -2637,13 +2570,11 @@
{ daala_idtx16, daala_fdst16 }, // H_ADST
{ daala_fdst16, daala_idtx16 }, // V_FLIPADST
{ daala_idtx16, daala_fdst16 }, // H_FLIPADST
-#endif
#else
{ fdct16, fdct16 }, // DCT_DCT
{ fadst16, fdct16 }, // ADST_DCT
{ fdct16, fadst16 }, // DCT_ADST
{ fadst16, fadst16 }, // ADST_ADST
-#if CONFIG_EXT_TX
{ fadst16, fdct16 }, // FLIPADST_DCT
{ fdct16, fadst16 }, // DCT_FLIPADST
{ fadst16, fadst16 }, // FLIPADST_FLIPADST
@@ -2657,17 +2588,14 @@
{ fadst16, fidtx16 }, // V_FLIPADST
{ fidtx16, fadst16 }, // H_FLIPADST
#endif
-#endif
};
const transform_2d ht = FHT[tx_type];
tran_low_t out[256];
int i, j;
tran_low_t temp_in[16], temp_out[16];
-#if CONFIG_EXT_TX
int16_t flipped_input[16 * 16];
maybe_flip_input(&input, &stride, 16, 16, flipped_input, tx_type);
-#endif
// Columns
for (i = 0; i < 16; ++i) {
@@ -2716,7 +2644,6 @@
static const transform_2d FHT[] = {
#if CONFIG_DAALA_TX32
{ daala_fdct32, daala_fdct32 }, // DCT_DCT
-#if CONFIG_EXT_TX
{ daala_fdst32, daala_fdct32 }, // ADST_DCT
{ daala_fdct32, daala_fdst32 }, // DCT_ADST
{ daala_fdst32, daala_fdst32 }, // ADST_ADST
@@ -2732,10 +2659,8 @@
{ daala_idtx32, daala_fdst32 }, // H_ADST
{ daala_fdst32, daala_idtx32 }, // V_FLIPADST
{ daala_idtx32, daala_fdst32 }, // H_FLIPADST
-#endif
#else
{ fdct32, fdct32 }, // DCT_DCT
-#if CONFIG_EXT_TX
{ fhalfright32, fdct32 }, // ADST_DCT
{ fdct32, fhalfright32 }, // DCT_ADST
{ fhalfright32, fhalfright32 }, // ADST_ADST
@@ -2752,7 +2677,6 @@
{ fhalfright32, fidtx32 }, // V_FLIPADST
{ fidtx32, fhalfright32 }, // H_FLIPADST
#endif
-#endif
#if CONFIG_MRC_TX
{ fdct32, fdct32 }, // MRC_TX
#endif // CONFIG_MRC_TX
@@ -2762,10 +2686,8 @@
int i, j;
tran_low_t temp_in[32], temp_out[32];
-#if CONFIG_EXT_TX
int16_t flipped_input[32 * 32];
maybe_flip_input(&input, &stride, 32, 32, flipped_input, tx_type);
-#endif
#if CONFIG_MRC_TX
if (tx_type == MRC_DCT) {
@@ -2806,7 +2728,6 @@
#if CONFIG_TX64X64
#if !CONFIG_DAALA_TX64
-#if CONFIG_EXT_TX
static void fidtx64(const tran_low_t *input, tran_low_t *output) {
int i;
for (i = 0; i < 64; ++i)
@@ -2827,7 +2748,6 @@
fdct32(inputhalf, output);
// Note overall scaling factor is 2 times unitary
}
-#endif // CONFIG_EXT_TX
static void fdct64_col(const tran_low_t *input, tran_low_t *output) {
int32_t in[64], out[64];
@@ -2858,7 +2778,6 @@
static const transform_2d FHT[] = {
#if CONFIG_DAALA_TX64
{ daala_fdct64, daala_fdct64 }, // DCT_DCT
-#if CONFIG_EXT_TX
{ daala_fdst64, daala_fdct64 }, // ADST_DCT
{ daala_fdct64, daala_fdst64 }, // DCT_ADST
{ daala_fdst64, daala_fdst64 }, // ADST_ADST
@@ -2874,10 +2793,8 @@
{ daala_idtx64, daala_fdst64 }, // H_ADST
{ daala_fdst64, daala_idtx64 }, // V_FLIPADST
{ daala_idtx64, daala_fdst64 }, // H_FLIPADST
-#endif // CONFIG_EXT_TX
#else
{ fdct64_col, fdct64_row }, // DCT_DCT
-#if CONFIG_EXT_TX
{ fhalfright64, fdct64_row }, // ADST_DCT
{ fdct64_col, fhalfright64 }, // DCT_ADST
{ fhalfright64, fhalfright64 }, // ADST_ADST
@@ -2893,17 +2810,14 @@
{ fidtx64, fhalfright64 }, // H_ADST
{ fhalfright64, fidtx64 }, // V_FLIPADST
{ fidtx64, fhalfright64 }, // H_FLIPADST
-#endif // CONFIG_EXT_TX
#endif // CONFIG_DAALA_TX64
};
const transform_2d ht = FHT[tx_type];
tran_low_t out[4096];
int i, j;
tran_low_t temp_in[64], temp_out[64];
-#if CONFIG_EXT_TX
int16_t flipped_input[64 * 64];
maybe_flip_input(&input, &stride, 64, 64, flipped_input, tx_type);
-#endif
// Columns
for (i = 0; i < 64; ++i) {
@@ -2952,8 +2866,7 @@
assert(tx_type == DCT_DCT);
#endif
static const transform_2d FHT[] = {
- { fdct32, fdct64_row }, // DCT_DCT
-#if CONFIG_EXT_TX
+ { fdct32, fdct64_row }, // DCT_DCT
{ fhalfright32, fdct64_row }, // ADST_DCT
{ fdct32, fhalfright64 }, // DCT_ADST
{ fhalfright32, fhalfright64 }, // ADST_ADST
@@ -2969,7 +2882,6 @@
{ fidtx32, fhalfright64 }, // H_ADST
{ fhalfright32, fidtx64 }, // V_FLIPADST
{ fidtx32, fhalfright64 }, // H_FLIPADST
-#endif // CONFIG_EXT_TX
};
const transform_2d ht = FHT[tx_type];
tran_low_t out[2048];
@@ -2977,10 +2889,8 @@
tran_low_t temp_in[64], temp_out[64];
const int n = 32;
const int n2 = 64;
-#if CONFIG_EXT_TX
int16_t flipped_input[32 * 64];
maybe_flip_input(&input, &stride, n, n2, flipped_input, tx_type);
-#endif
// Columns
for (i = 0; i < n2; ++i) {
@@ -3016,8 +2926,7 @@
assert(tx_type == DCT_DCT);
#endif
static const transform_2d FHT[] = {
- { fdct64_row, fdct32 }, // DCT_DCT
-#if CONFIG_EXT_TX
+ { fdct64_row, fdct32 }, // DCT_DCT
{ fhalfright64, fdct32 }, // ADST_DCT
{ fdct64_row, fhalfright32 }, // DCT_ADST
{ fhalfright64, fhalfright32 }, // ADST_ADST
@@ -3033,7 +2942,6 @@
{ fidtx64, fhalfright32 }, // H_ADST
{ fhalfright64, fidtx32 }, // V_FLIPADST
{ fidtx64, fhalfright32 }, // H_FLIPADST
-#endif // CONFIG_EXT_TX
};
const transform_2d ht = FHT[tx_type];
tran_low_t out[32 * 64];
@@ -3041,10 +2949,8 @@
tran_low_t temp_in[64], temp_out[64];
const int n = 32;
const int n2 = 64;
-#if CONFIG_EXT_TX
int16_t flipped_input[32 * 64];
maybe_flip_input(&input, &stride, n2, n, flipped_input, tx_type);
-#endif
// Rows
for (i = 0; i < n2; ++i) {
@@ -3068,7 +2974,6 @@
}
#endif // CONFIG_TX64X64
-#if CONFIG_EXT_TX
// Forward identity transform.
void av1_fwd_idtx_c(const int16_t *src_diff, tran_low_t *coeff, int stride,
int bsx, int bsy, TX_TYPE tx_type) {
@@ -3083,5 +2988,4 @@
}
}
}
-#endif // CONFIG_EXT_TX
#endif // !AV1_DCT_GTEST
diff --git a/av1/encoder/encodeframe.c b/av1/encoder/encodeframe.c
index fcf0125..15d0ac1 100644
--- a/av1/encoder/encodeframe.c
+++ b/av1/encoder/encodeframe.c
@@ -3994,11 +3994,9 @@
void av1_encode_frame(AV1_COMP *cpi) {
AV1_COMMON *const cm = &cpi->common;
-#if CONFIG_EXT_TX
// Indicates whether or not to use a default reduced set for ext-tx
// rather than the potential full set of 16 transforms
cm->reduced_tx_set_used = 0;
-#endif // CONFIG_EXT_TX
#if CONFIG_ADAPT_SCAN
cm->use_adapt_scan = 1;
// TODO(angiebird): call av1_init_scan_order only when use_adapt_scan
@@ -4426,7 +4424,6 @@
TX_TYPE tx_type =
av1_get_tx_type(PLANE_TYPE_Y, xd, blk_row, blk_col, block, tx_size);
#endif
-#if CONFIG_EXT_TX
if (get_ext_tx_types(tx_size, bsize, is_inter, cm->reduced_tx_set_used) > 1 &&
cm->base_qindex > 0 && !mbmi->skip &&
!segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
@@ -4510,31 +4507,6 @@
#endif // CONFIG_LGT_FROM_PRED
}
}
-#else
- (void)bsize;
- if (tx_size < TX_32X32 &&
- ((!cm->seg.enabled && cm->base_qindex > 0) ||
- (cm->seg.enabled && xd->qindex[mbmi->segment_id] > 0)) &&
- !mbmi->skip &&
- !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
- if (is_inter) {
-#if CONFIG_ENTROPY_STATS
- ++counts->inter_ext_tx[tx_size][tx_type];
-#endif // CONFIG_ENTROPY_STATS
- update_cdf(fc->inter_ext_tx_cdf[tx_size], av1_ext_tx_ind[tx_type],
- TX_TYPES);
- } else {
-#if CONFIG_ENTROPY_STATS
- ++counts->intra_ext_tx[tx_size][intra_mode_to_tx_type_context[mbmi->mode]]
- [tx_type];
-#endif // CONFIG_ENTROPY_STATS
- update_cdf(
- fc->intra_ext_tx_cdf[tx_size]
- [intra_mode_to_tx_type_context[mbmi->mode]],
- av1_ext_tx_ind[tx_type], TX_TYPES);
- }
- }
-#endif // CONFIG_EXT_TX
}
static void encode_superblock(const AV1_COMP *const cpi, ThreadData *td,
@@ -4684,9 +4656,7 @@
#endif
}
#endif
-#if CONFIG_EXT_TX
assert(IMPLIES(is_rect_tx(tx_size), is_rect_tx_allowed(xd, mbmi)));
-#endif // CONFIG_EXT_TX
} else {
int i, j;
TX_SIZE intra_tx_size;
@@ -4698,16 +4668,10 @@
intra_tx_size = tx_size_from_tx_mode(bsize, cm->tx_mode, 1);
}
} else {
-#if CONFIG_EXT_TX
intra_tx_size = tx_size;
-#else
- intra_tx_size = (bsize >= BLOCK_8X8) ? tx_size : TX_4X4;
-#endif // CONFIG_EXT_TX
}
-#if CONFIG_EXT_TX
++td->counts->tx_size_implied[max_txsize_lookup[bsize]]
[txsize_sqr_up_map[tx_size]];
-#endif // CONFIG_EXT_TX
for (j = 0; j < mi_height; j++)
for (i = 0; i < mi_width; i++)
diff --git a/av1/encoder/encodemb.c b/av1/encoder/encodemb.c
index 760337b..5e3f236 100644
--- a/av1/encoder/encodemb.c
+++ b/av1/encoder/encodemb.c
@@ -539,11 +539,9 @@
txfm_param.tx_type = tx_type;
txfm_param.tx_size = tx_size;
txfm_param.lossless = xd->lossless[mbmi->segment_id];
-#if CONFIG_EXT_TX
txfm_param.tx_set_type =
get_ext_tx_set_type(txfm_param.tx_size, plane_bsize, is_inter_block(mbmi),
cm->reduced_tx_set_used);
-#endif // CONFIG_EXT_TX
#if CONFIG_MRC_TX || CONFIG_LGT
txfm_param.is_inter = is_inter_block(mbmi);
#endif
@@ -642,10 +640,7 @@
#if CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
mrc_mask,
#endif // CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
-#if CONFIG_EXT_TX
- plane,
-#endif // CONFIG_EXT_TX
- tx_type, tx_size, dst, pd->dst.stride,
+ plane, tx_type, tx_size, dst, pd->dst.stride,
p->eobs[block]);
}
}
@@ -743,11 +738,9 @@
txfm_param.tx_size = tx_size;
txfm_param.eob = p->eobs[block];
txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
-#if CONFIG_EXT_TX
txfm_param.tx_set_type = get_ext_tx_set_type(
txfm_param.tx_size, plane_bsize, is_inter_block(&xd->mi[0]->mbmi),
cm->reduced_tx_set_used);
-#endif // CONFIG_EXT_TX
#if CONFIG_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
av1_highbd_inv_txfm_add_4x4(dqcoeff, dst, pd->dst.stride, &txfm_param);
@@ -914,10 +907,7 @@
#if CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
mrc_mask,
#endif // CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
-#if CONFIG_EXT_TX
- plane,
-#endif // CONFIG_EXT_TX
- tx_type, tx_size, dst, dst_stride, *eob);
+ plane, tx_type, tx_size, dst, dst_stride, *eob);
if (*eob) *(args->skip) = 0;
diff --git a/av1/encoder/encodetxb.c b/av1/encoder/encodetxb.c
index 177a660..e715a54 100644
--- a/av1/encoder/encodetxb.c
+++ b/av1/encoder/encodetxb.c
@@ -2195,13 +2195,11 @@
continue;
}
-#if CONFIG_EXT_TX
const int is_inter = is_inter_block(mbmi);
const TxSetType tx_set_type =
get_ext_tx_set_type(get_min_tx_size(tx_size), mbmi->sb_type, is_inter,
cm->reduced_tx_set_used);
if (!av1_ext_tx_used[tx_set_type][tx_type]) continue;
-#endif // CONFIG_EXT_TX
RD_STATS this_rd_stats;
av1_invalid_rd_stats(&this_rd_stats);
diff --git a/av1/encoder/hybrid_fwd_txfm.c b/av1/encoder/hybrid_fwd_txfm.c
index 17c3533..f1bed84 100644
--- a/av1/encoder/hybrid_fwd_txfm.c
+++ b/av1/encoder/hybrid_fwd_txfm.c
@@ -119,31 +119,25 @@
#if CONFIG_TX64X64
static void fwd_txfm_64x64(const int16_t *src_diff, tran_low_t *coeff,
int diff_stride, TxfmParam *txfm_param) {
-#if CONFIG_EXT_TX
if (txfm_param->tx_type == IDTX)
av1_fwd_idtx_c(src_diff, coeff, diff_stride, 64, 64, txfm_param->tx_type);
else
-#endif
av1_fht64x64(src_diff, coeff, diff_stride, txfm_param);
}
static void fwd_txfm_32x64(const int16_t *src_diff, tran_low_t *coeff,
int diff_stride, TxfmParam *txfm_param) {
-#if CONFIG_EXT_TX
if (txfm_param->tx_type == IDTX)
av1_fwd_idtx_c(src_diff, coeff, diff_stride, 32, 64, txfm_param->tx_type);
else
-#endif
av1_fht32x64(src_diff, coeff, diff_stride, txfm_param);
}
static void fwd_txfm_64x32(const int16_t *src_diff, tran_low_t *coeff,
int diff_stride, TxfmParam *txfm_param) {
-#if CONFIG_EXT_TX
if (txfm_param->tx_type == IDTX)
av1_fwd_idtx_c(src_diff, coeff, diff_stride, 64, 32, txfm_param->tx_type);
else
-#endif
av1_fht64x32(src_diff, coeff, diff_stride, txfm_param);
}
#endif // CONFIG_TX64X64
@@ -204,7 +198,6 @@
// fallthrough intended
av1_fwd_txfm2d_4x4(src_diff, dst_coeff, diff_stride, tx_type, bd);
break;
-#if CONFIG_EXT_TX
case FLIPADST_DCT:
case DCT_FLIPADST:
case FLIPADST_FLIPADST:
@@ -224,7 +217,6 @@
// fallthrough intended
av1_fwd_txfm2d_4x4_c(src_diff, dst_coeff, diff_stride, tx_type, bd);
break;
-#endif // CONFIG_EXT_TX
default: assert(0);
}
}
@@ -284,7 +276,6 @@
// fallthrough intended
av1_fwd_txfm2d_8x8(src_diff, dst_coeff, diff_stride, tx_type, bd);
break;
-#if CONFIG_EXT_TX
case FLIPADST_DCT:
case DCT_FLIPADST:
case FLIPADST_FLIPADST:
@@ -304,7 +295,6 @@
// fallthrough intended
av1_fwd_txfm2d_8x8_c(src_diff, dst_coeff, diff_stride, tx_type, bd);
break;
-#endif // CONFIG_EXT_TX
default: assert(0);
}
}
@@ -322,7 +312,6 @@
// fallthrough intended
av1_fwd_txfm2d_16x16(src_diff, dst_coeff, diff_stride, tx_type, bd);
break;
-#if CONFIG_EXT_TX
case FLIPADST_DCT:
case DCT_FLIPADST:
case FLIPADST_FLIPADST:
@@ -342,7 +331,6 @@
// fallthrough intended
av1_fwd_txfm2d_16x16_c(src_diff, dst_coeff, diff_stride, tx_type, bd);
break;
-#endif // CONFIG_EXT_TX
default: assert(0);
}
}
@@ -360,7 +348,6 @@
// fallthrough intended
av1_fwd_txfm2d_32x32(src_diff, dst_coeff, diff_stride, tx_type, bd);
break;
-#if CONFIG_EXT_TX
case FLIPADST_DCT:
case DCT_FLIPADST:
case FLIPADST_FLIPADST:
@@ -380,7 +367,6 @@
// fallthrough intended
av1_fwd_txfm2d_32x32_c(src_diff, dst_coeff, diff_stride, tx_type, bd);
break;
-#endif // CONFIG_EXT_TX
default: assert(0);
}
}
@@ -395,7 +381,6 @@
case DCT_DCT:
av1_fwd_txfm2d_32x64_c(src_diff, dst_coeff, diff_stride, tx_type, bd);
break;
-#if CONFIG_EXT_TX
case ADST_DCT:
case DCT_ADST:
case ADST_ADST:
@@ -421,7 +406,6 @@
case IDTX:
av1_fwd_idtx_c(src_diff, dst_coeff, diff_stride, 32, 64, tx_type);
break;
-#endif // CONFIG_EXT_TX
default: assert(0); break;
}
}
@@ -435,7 +419,6 @@
case DCT_DCT:
av1_fwd_txfm2d_64x32_c(src_diff, dst_coeff, diff_stride, tx_type, bd);
break;
-#if CONFIG_EXT_TX
case ADST_DCT:
case DCT_ADST:
case ADST_ADST:
@@ -461,7 +444,6 @@
case IDTX:
av1_fwd_idtx_c(src_diff, dst_coeff, diff_stride, 64, 32, tx_type);
break;
-#endif // CONFIG_EXT_TX
default: assert(0); break;
}
}
@@ -474,7 +456,6 @@
case DCT_DCT:
av1_fwd_txfm2d_64x64(src_diff, dst_coeff, diff_stride, tx_type, bd);
break;
-#if CONFIG_EXT_TX
case ADST_DCT:
case DCT_ADST:
case ADST_ADST:
@@ -500,7 +481,6 @@
case IDTX:
av1_fwd_idtx_c(src_diff, dst_coeff, diff_stride, 64, 64, tx_type);
break;
-#endif // CONFIG_EXT_TX
default: assert(0); break;
}
}
@@ -508,9 +488,7 @@
void av1_fwd_txfm(const int16_t *src_diff, tran_low_t *coeff, int diff_stride,
TxfmParam *txfm_param) {
-#if CONFIG_EXT_TX
assert(av1_ext_tx_used[txfm_param->tx_set_type][txfm_param->tx_type]);
-#endif // CONFIG_EXT_TX
const TX_SIZE tx_size = txfm_param->tx_size;
#if CONFIG_LGT_FROM_PRED
if (txfm_param->use_lgt) {
@@ -574,9 +552,7 @@
void av1_highbd_fwd_txfm(const int16_t *src_diff, tran_low_t *coeff,
int diff_stride, TxfmParam *txfm_param) {
-#if CONFIG_EXT_TX
assert(av1_ext_tx_used[txfm_param->tx_set_type][txfm_param->tx_type]);
-#endif // CONFIG_EXT_TX
const TX_SIZE tx_size = txfm_param->tx_size;
switch (tx_size) {
#if CONFIG_TX64X64
diff --git a/av1/encoder/rd.c b/av1/encoder/rd.c
index 7dd4b89..dc6af9e 100644
--- a/av1/encoder/rd.c
+++ b/av1/encoder/rd.c
@@ -64,7 +64,6 @@
#endif // CONFIG_EXT_PARTITION
};
-#if CONFIG_EXT_TX
static const int use_intra_ext_tx_for_txsize[EXT_TX_SETS_INTRA][EXT_TX_SIZES] =
{
{ 1, 1, 1, 1 }, // unused
@@ -83,7 +82,6 @@
{ 0, 0, 0, 1 },
#endif // CONFIG_MRC_TX
};
-#endif // CONFIG_EXT_TX
void av1_fill_mode_rates(AV1_COMMON *const cm, MACROBLOCK *x,
FRAME_CONTEXT *fc) {
@@ -246,7 +244,6 @@
#endif
}
-#if CONFIG_EXT_TX
#if CONFIG_LGT_FROM_PRED
if (LGT_FROM_PRED_INTRA) {
for (i = 0; i < LGT_SIZES; ++i) {
@@ -282,17 +279,6 @@
}
}
}
-#else
- for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
- for (j = 0; j < TX_TYPES; ++j)
- av1_cost_tokens_from_cdf(x->intra_tx_type_costs[i][j],
- fc->intra_ext_tx_cdf[i][j], av1_ext_tx_inv);
- }
- for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
- av1_cost_tokens_from_cdf(x->inter_tx_type_costs[i], fc->inter_ext_tx_cdf[i],
- av1_ext_tx_inv);
- }
-#endif // CONFIG_EXT_TX
#if CONFIG_EXT_INTRA && CONFIG_EXT_INTRA_MOD
for (i = 0; i < DIRECTIONAL_MODES; ++i) {
av1_cost_tokens_from_cdf(x->angle_delta_cost[i], fc->angle_delta_cdf[i],
diff --git a/av1/encoder/rdopt.c b/av1/encoder/rdopt.c
index a0ddd66..ba16d06 100644
--- a/av1/encoder/rdopt.c
+++ b/av1/encoder/rdopt.c
@@ -58,9 +58,7 @@
#include "av1/encoder/rd.h"
#include "av1/encoder/rdopt.h"
#include "av1/encoder/tokenize.h"
-#if CONFIG_EXT_TX
#include "av1/encoder/tx_prune_model_weights.h"
-#endif // CONFIG_EXT_TX
#if CONFIG_DUAL_FILTER
#define DUAL_FILTER_SET_SIZE (SWITCHABLE_FILTERS * SWITCHABLE_FILTERS)
@@ -1039,7 +1037,6 @@
return prune_bitmask;
}
-#if CONFIG_EXT_TX
static void get_horver_correlation(const int16_t *diff, int stride, int w,
int h, double *hcorr, double *vcorr) {
// Returns hor/ver correlation coefficient
@@ -1123,7 +1120,6 @@
return prune;
}
-#endif // CONFIG_EXT_TX
// Performance drop: 0.3%, Speed improvement: 5%
static int prune_one_for_sby(const AV1_COMP *cpi, BLOCK_SIZE bsize,
@@ -1134,7 +1130,6 @@
pd->dst.stride);
}
-#if CONFIG_EXT_TX
// 1D Transforms used in inter set, this needs to be changed if
// ext_tx_used_inter is changed
static const int ext_tx_used_inter_1D[EXT_TX_SETS_INTER][TX_TYPES_1D] = {
@@ -1384,18 +1379,12 @@
return prune_bitmask;
}
-#endif // CONFIG_EXT_TX
static int prune_tx_types(const AV1_COMP *cpi, BLOCK_SIZE bsize, MACROBLOCK *x,
const MACROBLOCKD *const xd, int tx_set_type) {
-#if CONFIG_EXT_TX
int tx_set = ext_tx_set_index[1][tx_set_type];
assert(tx_set >= 0);
const int *tx_set_1D = ext_tx_used_inter_1D[tx_set];
-#else
- const int tx_set_1D[TX_TYPES_1D] = { 0 };
- (void)tx_set_type;
-#endif // CONFIG_EXT_TX
switch (cpi->sf.tx_type_search.prune_mode) {
case NO_PRUNE: return 0; break;
@@ -1403,7 +1392,6 @@
if (!(tx_set_1D[FLIPADST_1D] & tx_set_1D[ADST_1D])) return 0;
return prune_one_for_sby(cpi, bsize, x, xd);
break;
-#if CONFIG_EXT_TX
case PRUNE_TWO:
if (!(tx_set_1D[FLIPADST_1D] & tx_set_1D[ADST_1D])) {
if (!(tx_set_1D[DCT_1D] & tx_set_1D[IDTX_1D])) return 0;
@@ -1429,7 +1417,6 @@
else
return 0;
break;
-#endif // CONFIG_EXT_TX
}
assert(0);
return 0;
@@ -1437,23 +1424,13 @@
static int do_tx_type_search(TX_TYPE tx_type, int prune,
TX_TYPE_PRUNE_MODE mode) {
-// TODO(sarahparker) implement for non ext tx
-#if CONFIG_EXT_TX
+ // TODO(sarahparker) implement for non ext tx
if (mode >= PRUNE_2D_ACCURATE) {
return !((prune >> tx_type) & 1);
} else {
return !(((prune >> vtx_tab[tx_type]) & 1) |
((prune >> (htx_tab[tx_type] + 8)) & 1));
}
-#else
- // temporary to avoid compiler warnings
- (void)vtx_tab;
- (void)htx_tab;
- (void)tx_type;
- (void)prune;
- (void)mode;
- return 1;
-#endif // CONFIG_EXT_TX
}
static void model_rd_from_sse(const AV1_COMP *const cpi,
@@ -1976,10 +1953,8 @@
#if CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
mrc_mask,
#endif // CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
-#if CONFIG_EXT_TX
- plane,
-#endif // CONFIG_EXT_TX
- tx_type, tx_size, recon, MAX_TX_SIZE, eob);
+ plane, tx_type, tx_size, recon, MAX_TX_SIZE,
+ eob);
#if CONFIG_DIST_8X8
if (x->using_dist_8x8 && plane == 0 && (bsw < 8 || bsh < 8)) {
@@ -2349,7 +2324,6 @@
const MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
const int is_inter = is_inter_block(mbmi);
-#if CONFIG_EXT_TX
if (get_ext_tx_types(tx_size, bsize, is_inter, cm->reduced_tx_set_used) > 1 &&
!xd->lossless[xd->mi[0]->mbmi.segment_id]) {
const int ext_tx_set =
@@ -2376,20 +2350,6 @@
}
}
}
-#else
- (void)bsize;
- (void)cm;
- if (tx_size < TX_32X32 && !xd->lossless[xd->mi[0]->mbmi.segment_id] &&
- !FIXED_TX_TYPE) {
- if (is_inter) {
- return x->inter_tx_type_costs[tx_size][tx_type];
- } else {
- return x->intra_tx_type_costs[tx_size]
- [intra_mode_to_tx_type_context[mbmi->mode]]
- [tx_type];
- }
- }
-#endif // CONFIG_EXT_TX
return 0;
}
static int64_t txfm_yrd(const AV1_COMP *const cpi, MACROBLOCK *x,
@@ -2407,9 +2367,7 @@
const int r_tx_size = tx_size_cost(cm, x, bs, tx_size);
-#if CONFIG_EXT_TX
assert(IMPLIES(is_rect_tx(tx_size), is_rect_tx_allowed_bsize(bs)));
-#endif // CONFIG_EXT_TX
s0 = x->skip_cost[skip_ctx][0];
s1 = x->skip_cost[skip_ctx][1];
@@ -2480,7 +2438,6 @@
tx_type != get_default_tx_type(0, xd, 0, tx_size))
return 1;
if (max_tx_size >= TX_32X32 && tx_size == TX_4X4) return 1;
-#if CONFIG_EXT_TX
const AV1_COMMON *const cm = &cpi->common;
const TxSetType tx_set_type =
get_ext_tx_set_type(tx_size, bs, is_inter, cm->reduced_tx_set_used);
@@ -2495,12 +2452,6 @@
if (tx_type != intra_mode_to_tx_type_context[mbmi->mode]) return 1;
}
}
-#else // CONFIG_EXT_TX
- if (tx_size >= TX_32X32 && tx_type != DCT_DCT) return 1;
- if (is_inter && cpi->sf.tx_type_search.prune_mode > NO_PRUNE &&
- !do_tx_type_search(tx_type, prune, cpi->sf.tx_type_search.prune_mode))
- return 1;
-#endif // CONFIG_EXT_TX
return 0;
}
@@ -2543,20 +2494,13 @@
mbmi->tx_size = tx_size_from_tx_mode(bs, cm->tx_mode, is_inter);
mbmi->min_tx_size = get_min_tx_size(mbmi->tx_size);
-#if CONFIG_EXT_TX
const TxSetType tx_set_type =
get_ext_tx_set_type(mbmi->tx_size, bs, is_inter, cm->reduced_tx_set_used);
-#endif // CONFIG_EXT_TX
if (is_inter && cpi->sf.tx_type_search.prune_mode > NO_PRUNE &&
!x->use_default_inter_tx_type) {
-#if CONFIG_EXT_TX
prune = prune_tx_types(cpi, bs, x, xd, tx_set_type);
-#else
- prune = prune_tx_types(cpi, bs, x, xd, 0);
-#endif // CONFIG_EXT_TX
}
-#if CONFIG_EXT_TX
if (get_ext_tx_types(mbmi->tx_size, bs, is_inter, cm->reduced_tx_set_used) >
1 &&
!xd->lossless[mbmi->segment_id]) {
@@ -2637,48 +2581,6 @@
txfm_rd_in_plane(x, cpi, rd_stats, ref_best_rd, 0, bs, mbmi->tx_size,
cpi->sf.use_fast_coef_costing);
}
-#else // CONFIG_EXT_TX
- if (mbmi->tx_size < TX_32X32 && !xd->lossless[mbmi->segment_id]) {
- for (tx_type = 0; tx_type < TX_TYPES; ++tx_type) {
- RD_STATS this_rd_stats;
- if (!is_inter && x->use_default_intra_tx_type &&
- tx_type != get_default_tx_type(0, xd, 0, mbmi->tx_size))
- continue;
- if (is_inter && x->use_default_inter_tx_type &&
- tx_type != get_default_tx_type(0, xd, 0, mbmi->tx_size))
- continue;
- mbmi->tx_type = tx_type;
- txfm_rd_in_plane(x, cpi, &this_rd_stats, ref_best_rd, 0, bs,
- mbmi->tx_size, cpi->sf.use_fast_coef_costing);
- if (this_rd_stats.rate == INT_MAX) continue;
-
- av1_tx_type_cost(cm, x, xd, bs, plane, mbmi->tx_size, tx_type);
- if (is_inter) {
- if (cpi->sf.tx_type_search.prune_mode > NO_PRUNE &&
- !do_tx_type_search(tx_type, prune,
- cpi->sf.tx_type_search.prune_mode))
- continue;
- }
- if (this_rd_stats.skip)
- this_rd = RDCOST(x->rdmult, s1, this_rd_stats.sse);
- else
- this_rd =
- RDCOST(x->rdmult, this_rd_stats.rate + s0, this_rd_stats.dist);
- if (is_inter && !xd->lossless[mbmi->segment_id] && !this_rd_stats.skip)
- this_rd = AOMMIN(this_rd, RDCOST(x->rdmult, s1, this_rd_stats.sse));
-
- if (this_rd < best_rd) {
- best_rd = this_rd;
- best_tx_type = mbmi->tx_type;
- *rd_stats = this_rd_stats;
- }
- }
- } else {
- mbmi->tx_type = DCT_DCT;
- txfm_rd_in_plane(x, cpi, rd_stats, ref_best_rd, 0, bs, mbmi->tx_size,
- cpi->sf.use_fast_coef_costing);
- }
-#endif // CONFIG_EXT_TX
mbmi->tx_type = best_tx_type;
#if CONFIG_LGT_FROM_PRED
mbmi->use_lgt = is_lgt_best;
@@ -2730,7 +2632,6 @@
av1_invalid_rd_stats(rd_stats);
-#if CONFIG_EXT_TX
int evaluate_rect_tx = 0;
if (tx_select) {
evaluate_rect_tx = is_rect_tx_allowed(xd, mbmi);
@@ -2857,7 +2758,6 @@
#endif // CONFIG_LGT_FROM_PRED
}
#endif // CONFIG_RECT_TX_EXT
-#endif // CONFIG_EXT_TX
if (tx_select) {
start_tx = max_tx_size;
@@ -2872,18 +2772,12 @@
int prune = 0;
if (is_inter && cpi->sf.tx_type_search.prune_mode > NO_PRUNE &&
!x->use_default_inter_tx_type) {
-#if CONFIG_EXT_TX
prune = prune_tx_types(cpi, bs, x, xd, EXT_TX_SET_ALL16);
-#else
- prune = prune_tx_types(cpi, bs, x, xd, 0);
-#endif // CONFIG_EXT_TX
}
last_rd = INT64_MAX;
for (n = start_tx; n >= end_tx; --n) {
-#if CONFIG_EXT_TX
if (is_rect_tx(n)) break;
-#endif // CONFIG_EXT_TX
TX_TYPE tx_start = DCT_DCT;
TX_TYPE tx_end = TX_TYPES;
#if CONFIG_TXK_SEL
@@ -2954,10 +2848,6 @@
#endif
mbmi->min_tx_size = get_min_tx_size(mbmi->tx_size);
-
-#if !CONFIG_EXT_TX
- if (mbmi->tx_size >= TX_32X32) assert(mbmi->tx_type == DCT_DCT);
-#endif // !CONFIG_EXT_TX
}
static void super_block_yrd(const AV1_COMP *const cpi, MACROBLOCK *x,
@@ -4009,10 +3899,8 @@
#if CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
mrc_mask,
#endif // CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
-#if CONFIG_EXT_TX
- plane,
-#endif // CONFIG_EXT_TX
- tx_type, tx_size, rec_buffer, MAX_TX_SIZE, eob);
+ plane, tx_type, tx_size, rec_buffer, MAX_TX_SIZE,
+ eob);
if (eob > 0) {
#if CONFIG_DIST_8X8
if (x->using_dist_8x8 && plane == 0 && (bw < 8 && bh < 8)) {
@@ -4606,7 +4494,6 @@
}
#if !CONFIG_TXK_SEL
-#if CONFIG_EXT_TX
if (get_ext_tx_types(mbmi->min_tx_size, bsize, is_inter,
cm->reduced_tx_set_used) > 1 &&
!xd->lossless[xd->mi[0]->mbmi.segment_id]) {
@@ -4652,10 +4539,6 @@
#if CONFIG_LGT_FROM_PRED
}
#endif
-#else
- if (mbmi->min_tx_size < TX_32X32 && !xd->lossless[xd->mi[0]->mbmi.segment_id])
- rd_stats->rate += x->inter_tx_type_costs[mbmi->min_tx_size][mbmi->tx_type];
-#endif // CONFIG_EXT_TX
#endif // CONFIG_TXK_SEL
if (rd_stats->skip)
@@ -4908,7 +4791,6 @@
param.tx_size = max_txsize_rect_lookup[bsize];
param.bd = 8;
param.lossless = 0;
-#if CONFIG_EXT_TX
const MACROBLOCKD *xd = &x->e_mbd;
const struct macroblockd_plane *const pd = &xd->plane[0];
const BLOCK_SIZE plane_bsize =
@@ -4918,7 +4800,6 @@
// within this function.
param.tx_set_type = get_ext_tx_set_type(param.tx_size, plane_bsize,
is_inter_block(&xd->mi[0]->mbmi), 0);
-#endif // CONFIG_EXT_TX
#if CONFIG_TXMG
av1_highbd_fwd_txfm(p->src_diff, DCT_coefs, bw, ¶m);
@@ -4969,7 +4850,6 @@
rate += x->txfm_partition_cost[ctx][0];
}
#if !CONFIG_TXK_SEL
-#if CONFIG_EXT_TX
const AV1_COMMON *cm = &cpi->common;
const int ext_tx_set = get_ext_tx_set(max_txsize_lookup[bsize], bsize, 1,
cm->reduced_tx_set_used);
@@ -4981,10 +4861,6 @@
x->inter_tx_type_costs[ext_tx_set][txsize_sqr_map[mbmi->min_tx_size]]
[mbmi->tx_type];
}
-#else
- if (mbmi->min_tx_size < TX_32X32 && !xd->lossless[xd->mi[0]->mbmi.segment_id])
- rd_stats->rate += x->inter_tx_type_costs[mbmi->min_tx_size][mbmi->tx_type];
-#endif // CONFIG_EXT_TX
#endif // CONFIG_TXK_SEL
rd_stats->rate = rate;
@@ -5022,26 +4898,24 @@
const int n4 = bsize_to_num_blk(bsize);
int idx, idy;
int prune = 0;
-#if CONFIG_EXT_TX
const TX_SIZE sqr_up_tx_size =
txsize_sqr_up_map[max_txsize_rect_lookup[bsize]];
// Get the tx_size 1 level down
TX_SIZE min_tx_size = sub_tx_size_map[sqr_up_tx_size];
const TxSetType tx_set_type = get_ext_tx_set_type(
min_tx_size, bsize, is_inter, cm->reduced_tx_set_used);
-#endif // CONFIG_EXT_TX
int within_border = (mi_row + mi_size_high[bsize] <= cm->mi_rows) &&
(mi_col + mi_size_wide[bsize] <= cm->mi_cols);
av1_invalid_rd_stats(rd_stats);
-#if CONFIG_EXT_TX && CONFIG_LGT_FROM_PRED
+#if CONFIG_LGT_FROM_PRED
mbmi->use_lgt = 0;
int search_lgt = is_inter
? LGT_FROM_PRED_INTER &&
(!cpi->sf.tx_type_search.prune_mode > NO_PRUNE)
: LGT_FROM_PRED_INTRA && ALLOW_INTRA_EXT_TX;
-#endif // CONFIG_EXT_TX && CONFIG_LGT_FROM_PRED
+#endif // CONFIG_LGT_FROM_PRED
const uint32_t hash = get_block_residue_hash(x, bsize);
TX_RD_RECORD *tx_rd_record = &x->tx_rd_record;
@@ -5074,11 +4948,7 @@
if (is_inter && cpi->sf.tx_type_search.prune_mode > NO_PRUNE &&
!x->use_default_inter_tx_type && !xd->lossless[mbmi->segment_id]) {
-#if CONFIG_EXT_TX
prune = prune_tx_types(cpi, bsize, x, xd, tx_set_type);
-#else
- prune = prune_tx_types(cpi, bsize, x, xd, 0);
-#endif // CONFIG_EXT_TX
}
int found = 0;
@@ -5086,38 +4956,32 @@
for (tx_type = txk_start; tx_type < txk_end; ++tx_type) {
RD_STATS this_rd_stats;
av1_init_rd_stats(&this_rd_stats);
-#if CONFIG_EXT_TX && CONFIG_MRC_TX
+#if CONFIG_MRC_TX
// MRC_DCT only implemented for TX_32X32 so only include this tx in
// the search for TX_32X32
if (tx_type == MRC_DCT &&
(max_tx_size != TX_32X32 || (is_inter && !USE_MRC_INTER) ||
(!is_inter && !USE_MRC_INTRA)))
continue;
-#endif // CONFIG_EXT_TX && CONFIG_MRC_TX
-#if CONFIG_EXT_TX
+#endif // CONFIG_MRC_TX
if (!av1_ext_tx_used[tx_set_type][tx_type]) continue;
(void)prune;
-// TODO(sarahparker) This speed feature has been temporarily disabled
-// with ext-tx because it is not compatible with the current
-// search method. It will be fixed in a followup.
-/*
- if (is_inter) {
- if (cpi->sf.tx_type_search.prune_mode > NO_PRUNE) {
- if (!do_tx_type_search(tx_type, prune,
- cpi->sf.tx_type_search.prune_mode))
- continue;
- }
- } else {
- if (!ALLOW_INTRA_EXT_TX && bsize >= BLOCK_8X8) {
- if (tx_type != intra_mode_to_tx_type_context[mbmi->mode]) continue;
- }
- }
-*/
-#else // CONFIG_EXT_TX
- if (is_inter && cpi->sf.tx_type_search.prune_mode > NO_PRUNE &&
- !do_tx_type_search(tx_type, prune, cpi->sf.tx_type_search.prune_mode))
- continue;
-#endif // CONFIG_EXT_TX
+ // TODO(sarahparker) This speed feature has been temporarily disabled
+ // with ext-tx because it is not compatible with the current
+ // search method. It will be fixed in a followup.
+ /*
+ if (is_inter) {
+ if (cpi->sf.tx_type_search.prune_mode > NO_PRUNE) {
+ if (!do_tx_type_search(tx_type, prune,
+ cpi->sf.tx_type_search.prune_mode))
+ continue;
+ }
+ } else {
+ if (!ALLOW_INTRA_EXT_TX && bsize >= BLOCK_8X8) {
+ if (tx_type != intra_mode_to_tx_type_context[mbmi->mode]) continue;
+ }
+ }
+ */
if (is_inter && x->use_default_inter_tx_type &&
tx_type != get_default_tx_type(0, xd, 0, max_tx_size))
continue;
@@ -5127,14 +4991,12 @@
rd = select_tx_size_fix_type(cpi, x, &this_rd_stats, bsize, mi_row, mi_col,
ref_best_rd, tx_type);
-#if CONFIG_EXT_TX
// If the current tx_type is not included in the tx_set for the smallest
// tx size found, then all vartx partitions were actually transformed with
// DCT_DCT and we should avoid picking it.
const TxSetType min_tx_set_type = get_ext_tx_set_type(
mbmi->min_tx_size, bsize, is_inter, cm->reduced_tx_set_used);
if (!av1_ext_tx_used[min_tx_set_type][tx_type]) continue;
-#endif // CONFIG_EXT_TX
ref_best_rd = AOMMIN(rd, ref_best_rd);
if (rd < best_rd) {
@@ -5157,7 +5019,7 @@
assert(IMPLIES(!found, ref_best_rd != INT64_MAX));
if (!found) return;
-#if CONFIG_EXT_TX && CONFIG_LGT_FROM_PRED
+#if CONFIG_LGT_FROM_PRED
if (search_lgt && is_lgt_allowed(mbmi->mode, max_tx_size) &&
!cm->reduced_tx_set_used) {
RD_STATS this_rd_stats;
@@ -5177,7 +5039,7 @@
mbmi->use_lgt = 0;
}
}
-#endif // CONFIG_EXT_TX && CONFIG_LGT_FROM_PRED
+#endif // CONFIG_LGT_FROM_PRED
// We found a candidate transform to use. Copy our results from the "best"
// array into mbmi.
mbmi->tx_type = best_tx_type;
diff --git a/av1/encoder/speed_features.c b/av1/encoder/speed_features.c
index 82e4d03..6fe07ec 100644
--- a/av1/encoder/speed_features.c
+++ b/av1/encoder/speed_features.c
@@ -220,9 +220,7 @@
sf->allow_partition_search_skip = 1;
sf->use_upsampled_references = 0;
sf->adaptive_rd_thresh = 2;
-#if CONFIG_EXT_TX
sf->tx_type_search.prune_mode = PRUNE_2D_FAST;
-#endif
sf->gm_search_type = GM_DISABLE_SEARCH;
}
@@ -398,11 +396,7 @@
sf->cb_partition_search = 0;
sf->alt_ref_search_fp = 0;
sf->partition_search_type = SEARCH_PARTITION;
-#if CONFIG_EXT_TX
sf->tx_type_search.prune_mode = PRUNE_2D_ACCURATE;
-#else
- sf->tx_type_search.prune_mode = NO_PRUNE;
-#endif // CONFIG_EXT_TX
sf->tx_type_search.use_skip_flag_prediction = 1;
sf->tx_type_search.fast_intra_tx_type_search = 0;
sf->tx_type_search.fast_inter_tx_type_search = 0;
diff --git a/av1/encoder/speed_features.h b/av1/encoder/speed_features.h
index d886ad4..db62f02 100644
--- a/av1/encoder/speed_features.h
+++ b/av1/encoder/speed_features.h
@@ -190,7 +190,6 @@
NO_PRUNE = 0,
// eliminates one tx type in vertical and horizontal direction
PRUNE_ONE = 1,
-#if CONFIG_EXT_TX
// eliminates two tx types in each direction
PRUNE_TWO = 2,
// adaptively prunes the least perspective tx types out of all 16
@@ -198,7 +197,6 @@
PRUNE_2D_ACCURATE = 3,
// similar, but applies much more aggressive pruning to get better speed-up
PRUNE_2D_FAST = 4,
-#endif
} TX_TYPE_PRUNE_MODE;
typedef struct {
diff --git a/av1/encoder/x86/dct_intrin_sse2.c b/av1/encoder/x86/dct_intrin_sse2.c
index e5b19a4..9a3e334 100644
--- a/av1/encoder/x86/dct_intrin_sse2.c
+++ b/av1/encoder/x86/dct_intrin_sse2.c
@@ -167,7 +167,6 @@
transpose_4x4(in);
}
-#if CONFIG_EXT_TX
static void fidtx4_sse2(__m128i *in) {
const __m128i k__zero_epi16 = _mm_set1_epi16((int16_t)0);
const __m128i k__sqrt2_epi16 = _mm_set1_epi16((int16_t)Sqrt2);
@@ -200,7 +199,6 @@
in[1] = _mm_packs_epi32(u1, u3);
transpose_4x4(in);
}
-#endif // CONFIG_EXT_TX
void av1_fht4x4_sse2(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
@@ -230,7 +228,6 @@
fadst4_sse2(in);
write_buffer_4x4(output, in);
break;
-#if CONFIG_EXT_TX
case FLIPADST_DCT:
load_buffer_4x4(input, in, stride, 1, 0);
fadst4_sse2(in);
@@ -303,7 +300,6 @@
fadst4_sse2(in);
write_buffer_4x4(output, in);
break;
-#endif // CONFIG_EXT_TX
default: assert(0);
}
}
@@ -848,7 +844,6 @@
array_transpose_8x8(in, in);
}
-#if CONFIG_EXT_TX
static void fidtx8_sse2(__m128i *in) {
in[0] = _mm_slli_epi16(in[0], 1);
in[1] = _mm_slli_epi16(in[1], 1);
@@ -861,7 +856,6 @@
array_transpose_8x8(in, in);
}
-#endif // CONFIG_EXT_TX
void av1_fht8x8_sse2(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
@@ -894,7 +888,6 @@
right_shift_8x8(in, 1);
write_buffer_8x8(output, in, 8);
break;
-#if CONFIG_EXT_TX
case FLIPADST_DCT:
load_buffer_8x8(input, in, stride, 1, 0);
fadst8_sse2(in);
@@ -979,7 +972,6 @@
right_shift_8x8(in, 1);
write_buffer_8x8(output, in, 8);
break;
-#endif // CONFIG_EXT_TX
default: assert(0);
}
}
@@ -1892,13 +1884,11 @@
array_transpose_16x16(in0, in1);
}
-#if CONFIG_EXT_TX
// Forward identity transform for a 16x16 block (SSE2 path).
// The block is held as two 8-column halves (in0 = left, in1 = right);
// each half is processed by idtx16_8col (presumably a scale-only identity
// 1-D transform — confirm against its definition), then the full 16x16
// result is transposed in place so rows/columns match the caller's layout.
static void fidtx16_sse2(__m128i *in0, __m128i *in1) {
  idtx16_8col(in0);
  idtx16_8col(in1);
  array_transpose_16x16(in0, in1);
}
-#endif // CONFIG_EXT_TX
void av1_fht16x16_sse2(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
@@ -1937,7 +1927,6 @@
fadst16_sse2(in0, in1);
write_buffer_16x16(output, in0, in1, 16);
break;
-#if CONFIG_EXT_TX
case FLIPADST_DCT:
load_buffer_16x16(input, in0, in1, stride, 1, 0);
fadst16_sse2(in0, in1);
@@ -2022,7 +2011,6 @@
fadst16_sse2(in0, in1);
write_buffer_16x16(output, in0, in1, 16);
break;
-#endif // CONFIG_EXT_TX
default: assert(0); break;
}
}
@@ -2153,7 +2141,6 @@
fadst4_sse2(in + 4);
fadst8_sse2(in);
break;
-#if CONFIG_EXT_TX
case FLIPADST_DCT:
load_buffer_4x8(input, in, stride, 1, 0);
fdct4_sse2(in);
@@ -2226,7 +2213,6 @@
fadst4_sse2(in + 4);
fidtx8_sse2(in);
break;
-#endif
default: assert(0); break;
}
write_buffer_4x8(output, in);
@@ -2331,7 +2317,6 @@
fadst4_sse2(in + 4);
fadst8_sse2(in);
break;
-#if CONFIG_EXT_TX
case FLIPADST_DCT:
load_buffer_8x4(input, in, stride, 1, 0);
fadst4_sse2(in);
@@ -2404,7 +2389,6 @@
fidtx4_sse2(in + 4);
fadst8_sse2(in);
break;
-#endif
default: assert(0); break;
}
write_buffer_8x4(output, in);
@@ -2490,7 +2474,6 @@
row_8x16_rounding(in, 2);
fadst16_8col(in);
break;
-#if CONFIG_EXT_TX
case FLIPADST_DCT:
load_buffer_8x16(input, in, stride, 1, 0);
array_transpose_8x8(t, t);
@@ -2599,7 +2582,6 @@
row_8x16_rounding(in, 2);
idtx16_8col(in);
break;
-#endif
default: assert(0); break;
}
write_buffer_8x8(output, t, 8);
@@ -2668,7 +2650,6 @@
col_16x8_rounding(in, 2);
fadst16_8col(in);
break;
-#if CONFIG_EXT_TX
case FLIPADST_DCT:
load_buffer_16x8(input, in, stride, 1, 0);
fadst8_sse2(l);
@@ -2753,7 +2734,6 @@
col_16x8_rounding(in, 2);
fadst16_8col(in);
break;
-#endif
default: assert(0); break;
}
array_transpose_8x8(l, l);
@@ -2772,7 +2752,6 @@
array_transpose_16x16(bl, br);
}
-#if CONFIG_EXT_TX
static INLINE void fidtx32_16col(__m128i *tl, __m128i *tr, __m128i *bl,
__m128i *br) {
int i;
@@ -2785,7 +2764,6 @@
array_transpose_16x16(tl, tr);
array_transpose_16x16(bl, br);
}
-#endif
static INLINE void load_buffer_16x32(const int16_t *input, __m128i *intl,
__m128i *intr, __m128i *inbl,
@@ -2982,7 +2960,6 @@
round_signed_16x16(inbl, inbr);
fhalfright32_16col(intl, intr, inbl, inbr, transpose);
break;
-#if CONFIG_EXT_TX
case FLIPADST_DCT:
load_buffer_16x32(input, intl, intr, inbl, inbr, stride, 1, 0);
fdct16t_sse2(intl, intr);
@@ -3079,7 +3056,6 @@
round_signed_16x16(inbl, inbr);
fidtx32_16col(intl, intr, inbl, inbr);
break;
-#endif
default: assert(0); break;
}
write_buffer_16x32(output, intl, intr, inbl, inbr);
@@ -3172,7 +3148,6 @@
round_signed_16x16(in2, in3);
fhalfright32_16col(in0, in1, in2, in3, no_transpose);
break;
-#if CONFIG_EXT_TX
case FLIPADST_DCT:
load_buffer_32x16(input, in0, in1, in2, in3, stride, 1, 0);
fadst16_sse2(in0, in1);
@@ -3269,7 +3244,6 @@
round_signed_16x16(in2, in3);
fhalfright32_16col(in0, in1, in2, in3, no_transpose);
break;
-#endif
default: assert(0); break;
}
write_buffer_32x16(output, in0, in1, in2, in3);
@@ -3350,14 +3324,12 @@
swap_16x16(&in0[16], &in1[16], in2, in3);
}
-#if CONFIG_EXT_TX
// Forward identity transform for a 32x32 block, built from 16-column
// sub-transforms: fidtx32_16col handles the left (in0/in1) and right
// (in2/in3) 16-column halves, each paired with its lower 16 rows
// (&inX[16]).  The final swap_16x16 exchanges the off-diagonal 16x16
// quadrants — NOTE(review): presumably to complete the 32x32 transpose
// started inside fidtx32_16col; verify against that helper's definition.
static INLINE void fidtx32(__m128i *in0, __m128i *in1, __m128i *in2,
                           __m128i *in3) {
  fidtx32_16col(in0, in1, &in0[16], &in1[16]);
  fidtx32_16col(in2, in3, &in2[16], &in3[16]);
  swap_16x16(&in0[16], &in1[16], in2, in3);
}
-#endif
static INLINE void round_signed_32x32(__m128i *in0, __m128i *in1, __m128i *in2,
__m128i *in3) {
@@ -3408,7 +3380,6 @@
round_signed_32x32(in0, in1, in2, in3);
fhalfright32(in0, in1, in2, in3);
break;
-#if CONFIG_EXT_TX
case FLIPADST_DCT:
load_buffer_32x32(input, in0, in1, in2, in3, stride, 1, 0);
fhalfright32(in0, in1, in2, in3);
@@ -3476,7 +3447,6 @@
round_signed_32x32(in0, in1, in2, in3);
fhalfright32(in0, in1, in2, in3);
break;
-#endif
default: assert(0);
}
write_buffer_32x32(in0, in1, in2, in3, output);
diff --git a/av1/encoder/x86/highbd_fwd_txfm_sse4.c b/av1/encoder/x86/highbd_fwd_txfm_sse4.c
index b684f7a..e620eee 100644
--- a/av1/encoder/x86/highbd_fwd_txfm_sse4.c
+++ b/av1/encoder/x86/highbd_fwd_txfm_sse4.c
@@ -233,7 +233,6 @@
fadst4x4_sse4_1(in, row_cfg->cos_bit[2]);
write_buffer_4x4(in, coeff);
break;
-#if CONFIG_EXT_TX
case FLIPADST_DCT:
row_cfg = &fwd_txfm_1d_row_cfg_dct_4;
col_cfg = &fwd_txfm_1d_col_cfg_adst_4;
@@ -274,7 +273,6 @@
fadst4x4_sse4_1(in, row_cfg->cos_bit[2]);
write_buffer_4x4(in, coeff);
break;
-#endif
default: assert(0);
}
(void)bd;
@@ -976,7 +974,6 @@
transpose_8x8(out, in);
write_buffer_8x8(in, coeff);
break;
-#if CONFIG_EXT_TX
case FLIPADST_DCT:
row_cfg = &fwd_txfm_1d_row_cfg_dct_8;
col_cfg = &fwd_txfm_1d_col_cfg_adst_8;
@@ -1032,7 +1029,6 @@
transpose_8x8(out, in);
write_buffer_8x8(in, coeff);
break;
-#endif // CONFIG_EXT_TX
default: assert(0);
}
(void)bd;
@@ -1850,7 +1846,6 @@
transpose_16x16(out, in);
write_buffer_16x16(in, coeff);
break;
-#if CONFIG_EXT_TX
case FLIPADST_DCT:
row_cfg = &fwd_txfm_1d_row_cfg_dct_16;
col_cfg = &fwd_txfm_1d_col_cfg_adst_16;
@@ -1906,7 +1901,6 @@
transpose_16x16(out, in);
write_buffer_16x16(in, coeff);
break;
-#endif // CONFIG_EXT_TX
default: assert(0);
}
(void)bd;
diff --git a/av1/encoder/x86/hybrid_fwd_txfm_avx2.c b/av1/encoder/x86/hybrid_fwd_txfm_avx2.c
index 88621c8..d714181 100644
--- a/av1/encoder/x86/hybrid_fwd_txfm_avx2.c
+++ b/av1/encoder/x86/hybrid_fwd_txfm_avx2.c
@@ -907,11 +907,9 @@
in[15] = _mm256_sub_epi16(zero, x1);
}
-#if CONFIG_EXT_TX
// Forward identity transform for a 16x16 block (AVX2 path): the identity
// transform has no butterfly stages, so it reduces to a uniform in-place
// scaling of all coefficients by Sqrt2, delegated to txfm_scaling16_avx2.
static void fidtx16_avx2(__m256i *in) {
  txfm_scaling16_avx2((int16_t)Sqrt2, in);
}
-#endif
void av1_fht16x16_avx2(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
@@ -950,7 +948,6 @@
right_shift_16x16(in);
fadst16_avx2(in);
break;
-#if CONFIG_EXT_TX
case FLIPADST_DCT:
load_buffer_16x16(input, stride, 1, 0, in);
fadst16_avx2(in);
@@ -1035,7 +1032,6 @@
right_shift_16x16(in);
fadst16_avx2(in);
break;
-#endif // CONFIG_EXT_TX
default: assert(0); break;
}
mm256_transpose_16x16(in, in);
@@ -1405,7 +1401,6 @@
}
}
-#if CONFIG_EXT_TX
static void fhalfright32_16col_avx2(__m256i *in) {
int i = 0;
const __m256i zero = _mm256_setzero_si256();
@@ -1436,7 +1431,6 @@
mm256_vectors_swap(in1, &in1[16], 16);
mm256_transpose_32x32(in0, in1);
}
-#endif // CONFIG_EXT_TX
static INLINE void load_buffer_32x32(const int16_t *input, int stride,
int flipud, int fliplr, __m256i *in0,
@@ -1500,7 +1494,6 @@
right_shift_32x32_16col(bit, in1);
}
-#if CONFIG_EXT_TX
static void fidtx32_avx2(__m256i *in0, __m256i *in1) {
int i = 0;
while (i < 32) {
@@ -1510,7 +1503,6 @@
}
mm256_transpose_32x32(in0, in1);
}
-#endif
void av1_fht32x32_avx2(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
@@ -1528,7 +1520,6 @@
right_shift_32x32(in0, in1);
fdct32_avx2(in0, in1);
break;
-#if CONFIG_EXT_TX
case ADST_DCT:
load_buffer_32x32(input, stride, 0, 0, in0, in1);
fhalfright32_avx2(in0, in1);
@@ -1619,7 +1610,6 @@
right_shift_32x32(in0, in1);
fhalfright32_avx2(in0, in1);
break;
-#endif // CONFIG_EXT_TX
default: assert(0); break;
}
write_buffer_32x32(in0, in1, output);