Remove CONFIG_TX64X64
The experiment has been fully adopted, so the 64x64 transform code paths are now compiled unconditionally.
Change-Id: I6cc80a2acf0c93c13b0e36e6f4a2378fe5ce33c3
diff --git a/av1/common/av1_inv_txfm1d.c b/av1/common/av1_inv_txfm1d.c
index 46cfc73..7b1bee3 100644
--- a/av1/common/av1_inv_txfm1d.c
+++ b/av1/common/av1_inv_txfm1d.c
@@ -1606,7 +1606,6 @@
apply_range(0, input, output, 32, stage_range[0]);
}
-#if CONFIG_TX64X64
void av1_iidentity64_c(const int32_t *input, int32_t *output, int8_t cos_bit,
const int8_t *stage_range) {
(void)cos_bit;
@@ -1615,9 +1614,7 @@
assert(stage_range[0] + NewSqrt2Bits <= 32);
apply_range(0, input, output, 64, stage_range[0]);
}
-#endif // CONFIG_TX64X64
-#if CONFIG_TX64X64
void av1_idct64_new(const int32_t *input, int32_t *output, int8_t cos_bit,
const int8_t *stage_range) {
const int32_t size = 64;
@@ -2409,4 +2406,3 @@
bf1[63] = bf0[0] - bf0[63];
apply_range(stage, input, bf1, size, stage_range[stage]);
}
-#endif // CONFIG_TX64X64
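
With the guard gone, av1_iidentity64_c and av1_idct64_new are always built. For orientation, the 64-point identity transform is just a fixed-point scaling by 4*sqrt(2); the factor is visible in iidtx64_c further down in this patch, and the assert on NewSqrt2Bits above matches it. The following is a minimal standalone sketch of that scaling, not the library code; the constants mirror NewSqrt2/NewSqrt2Bits and the helper names are made up.

    #include <stdint.h>
    #include <stdio.h>

    /* Fixed-point sqrt(2), mirroring NewSqrt2Bits/NewSqrt2 from av1_txfm.h. */
    #define SQRT2_BITS 12
    #define SQRT2_FIX 5793 /* round(sqrt(2) * 2^12) */

    /* Round-to-nearest right shift, the rounding used throughout the transforms. */
    static int32_t round_shift(int64_t x, int bit) {
      return (int32_t)((x + (1LL << (bit - 1))) >> bit);
    }

    /* Sketch of the 64-point identity transform: scale every input by 4*sqrt(2). */
    static void identity64_sketch(const int32_t *input, int32_t *output) {
      for (int i = 0; i < 64; ++i)
        output[i] = round_shift((int64_t)input[i] * 4 * SQRT2_FIX, SQRT2_BITS);
    }

    int main(void) {
      int32_t in[64], out[64];
      for (int i = 0; i < 64; ++i) in[i] = i;
      identity64_sketch(in, out);
      /* 10 * 4 * sqrt(2) ~= 56.6, so out[10] is 57. */
      printf("in[10]=%d -> out[10]=%d\n", (int)in[10], (int)out[10]);
      return 0;
    }
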
diff --git a/av1/common/av1_inv_txfm1d.h b/av1/common/av1_inv_txfm1d.h
index 6a33352..525b779 100644
--- a/av1/common/av1_inv_txfm1d.h
+++ b/av1/common/av1_inv_txfm1d.h
@@ -26,11 +26,8 @@
const int8_t *stage_range);
void av1_idct32_new(const int32_t *input, int32_t *output, int8_t cos_bit,
const int8_t *stage_range);
-#if CONFIG_TX64X64
void av1_idct64_new(const int32_t *input, int32_t *output, int8_t cos_bit,
const int8_t *stage_range);
-#endif // CONFIG_TX64X64
-
void av1_iadst4_new(const int32_t *input, int32_t *output, int8_t cos_bit,
const int8_t *stage_range);
void av1_iadst8_new(const int32_t *input, int32_t *output, int8_t cos_bit,
@@ -47,10 +44,8 @@
const int8_t *stage_range);
void av1_iidentity32_c(const int32_t *input, int32_t *output, int8_t cos_bit,
const int8_t *stage_range);
-#if CONFIG_TX64X64
void av1_iidentity64_c(const int32_t *input, int32_t *output, int8_t cos_bit,
const int8_t *stage_range);
-#endif // CONFIG_TX64X64
#ifdef __cplusplus
}
diff --git a/av1/common/av1_inv_txfm1d_cfg.h b/av1/common/av1_inv_txfm1d_cfg.h
index 8237d40..4c600f7 100644
--- a/av1/common/av1_inv_txfm1d_cfg.h
+++ b/av1/common/av1_inv_txfm1d_cfg.h
@@ -19,27 +19,21 @@
6, // 8x8 transform
7, // 16x16 transform
7, // 32x32 transform
-#if CONFIG_TX64X64
- 7, // 64x64 transform
-#endif // CONFIG_TX64X64
- 5, // 4x8 transform
- 5, // 8x4 transform
- 6, // 8x16 transform
- 6, // 16x8 transform
- 6, // 16x32 transform
- 6, // 32x16 transform
-#if CONFIG_TX64X64
- 6, // 32x64 transform
- 6, // 64x32 transform
-#endif // CONFIG_TX64X64
- 6, // 4x16 transform
- 6, // 16x4 transform
- 7, // 8x32 transform
- 7, // 32x8 transform
-#if CONFIG_TX64X64
+ 7, // 64x64 transform
+ 5, // 4x8 transform
+ 5, // 8x4 transform
+ 6, // 8x16 transform
+ 6, // 16x8 transform
+ 6, // 16x32 transform
+ 6, // 32x16 transform
+ 6, // 32x64 transform
+ 6, // 64x32 transform
+ 6, // 4x16 transform
+ 6, // 16x4 transform
+ 7, // 8x32 transform
+ 7, // 32x8 transform
7, // 16x64 transform
7, // 64x16 transform
-#endif
};
extern const int8_t *inv_txfm_shift_ls[TX_SIZES_ALL];
diff --git a/av1/common/av1_inv_txfm2d.c b/av1/common/av1_inv_txfm2d.c
index bbbef9f..932f91f 100644
--- a/av1/common/av1_inv_txfm2d.c
+++ b/av1/common/av1_inv_txfm2d.c
@@ -33,9 +33,7 @@
case TXFM_TYPE_DCT8: return av1_idct8_new;
case TXFM_TYPE_DCT16: return av1_idct16_new;
case TXFM_TYPE_DCT32: return av1_idct32_new;
-#if CONFIG_TX64X64
case TXFM_TYPE_DCT64: return av1_idct64_new;
-#endif // CONFIG_TX64X64
case TXFM_TYPE_ADST4: return av1_iadst4_new;
case TXFM_TYPE_ADST8: return av1_iadst8_new;
case TXFM_TYPE_ADST16: return av1_iadst16_new;
@@ -44,9 +42,7 @@
case TXFM_TYPE_IDENTITY8: return av1_iidentity8_c;
case TXFM_TYPE_IDENTITY16: return av1_iidentity16_c;
case TXFM_TYPE_IDENTITY32: return av1_iidentity32_c;
-#if CONFIG_TX64X64
case TXFM_TYPE_IDENTITY64: return av1_iidentity64_c;
-#endif // CONFIG_TX64X64
default: assert(0); return NULL;
}
}
@@ -55,42 +51,28 @@
static const int8_t inv_shift_8x8[2] = { -1, -4 };
static const int8_t inv_shift_16x16[2] = { -2, -4 };
static const int8_t inv_shift_32x32[2] = { -2, -4 };
-#if CONFIG_TX64X64
static const int8_t inv_shift_64x64[2] = { -2, -4 };
-#endif
static const int8_t inv_shift_4x8[2] = { 0, -4 };
static const int8_t inv_shift_8x4[2] = { 0, -4 };
static const int8_t inv_shift_8x16[2] = { -1, -4 };
static const int8_t inv_shift_16x8[2] = { -1, -4 };
static const int8_t inv_shift_16x32[2] = { -1, -4 };
static const int8_t inv_shift_32x16[2] = { -1, -4 };
-#if CONFIG_TX64X64
static const int8_t inv_shift_32x64[2] = { -1, -4 };
static const int8_t inv_shift_64x32[2] = { -1, -4 };
-#endif
static const int8_t inv_shift_4x16[2] = { -1, -4 };
static const int8_t inv_shift_16x4[2] = { -1, -4 };
static const int8_t inv_shift_8x32[2] = { -2, -4 };
static const int8_t inv_shift_32x8[2] = { -2, -4 };
-#if CONFIG_TX64X64
static const int8_t inv_shift_16x64[2] = { -2, -4 };
static const int8_t inv_shift_64x16[2] = { -2, -4 };
-#endif // CONFIG_TX64X64
const int8_t *inv_txfm_shift_ls[TX_SIZES_ALL] = {
inv_shift_4x4, inv_shift_8x8, inv_shift_16x16, inv_shift_32x32,
-#if CONFIG_TX64X64
- inv_shift_64x64,
-#endif // CONFIG_TX64X64
- inv_shift_4x8, inv_shift_8x4, inv_shift_8x16, inv_shift_16x8,
- inv_shift_16x32, inv_shift_32x16,
-#if CONFIG_TX64X64
- inv_shift_32x64, inv_shift_64x32,
-#endif // CONFIG_TX64X64
- inv_shift_4x16, inv_shift_16x4, inv_shift_8x32, inv_shift_32x8,
-#if CONFIG_TX64X64
- inv_shift_16x64, inv_shift_64x16,
-#endif // CONFIG_TX64X64
+ inv_shift_64x64, inv_shift_4x8, inv_shift_8x4, inv_shift_8x16,
+ inv_shift_16x8, inv_shift_16x32, inv_shift_32x16, inv_shift_32x64,
+ inv_shift_64x32, inv_shift_4x16, inv_shift_16x4, inv_shift_8x32,
+ inv_shift_32x8, inv_shift_16x64, inv_shift_64x16,
};
/* clang-format off */
@@ -357,7 +339,6 @@
inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_32X32, bd);
}
-#if CONFIG_TX64X64
void av1_inv_txfm2d_add_64x64_c(const int32_t *input, uint16_t *output,
int stride, TX_TYPE tx_type, int bd) {
// TODO(urvang): Can the same array be reused, instead of using a new array?
@@ -462,7 +443,6 @@
transpose_uint16(output, stride, routput, rw, rw, rh);
#endif // NO_INV_TRANSPOSE
}
-#endif // CONFIG_TX64X64
void av1_inv_txfm2d_add_4x16_c(const int32_t *input, uint16_t *output,
int stride, TX_TYPE tx_type, int bd) {
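
The inv_shift tables above are now unconditional; each TX size carries a pair of post-pass shifts, e.g. { -2, -4 } for 64x64, and the negative entries amount to scaling down by 2^|shift| with rounding after each 1D pass (a net divide by 64 here). A standalone sketch of applying such a pair under that reading; the helper is illustrative and is not the codec's round-shift routine.

    #include <stdint.h>
    #include <stdio.h>

    /* Rounded right shift by -shift; the inverse-transform shift tables above use
     * negative entries, read here as "scale down by 2^|shift| with rounding". */
    static void round_shift_down(int32_t *arr, int size, int shift) {
      for (int i = 0; i < size; ++i)
        arr[i] = (int32_t)(((int64_t)arr[i] + (1LL << (-shift - 1))) >> -shift);
    }

    int main(void) {
      const int8_t shift_64x64[2] = { -2, -4 }; /* as in inv_shift_64x64 above */
      int32_t buf[3] = { 64000, 1000, -1000 };
      round_shift_down(buf, 3, shift_64x64[0]); /* after the first 1D pass  */
      round_shift_down(buf, 3, shift_64x64[1]); /* after the second 1D pass */
      /* Net effect: divide by 2^(2+4) = 64, with rounding at each stage. */
      printf("%d %d %d\n", (int)buf[0], (int)buf[1], (int)buf[2]);
      return 0;
    }
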
diff --git a/av1/common/av1_loopfilter.c b/av1/common/av1_loopfilter.c
index 33c787a..8799a44 100644
--- a/av1/common/av1_loopfilter.c
+++ b/av1/common/av1_loopfilter.c
@@ -67,9 +67,7 @@
0xffffffffffffffffULL, // TX_8x8
0x5555555555555555ULL, // TX_16x16
0x1111111111111111ULL, // TX_32x32
-#if CONFIG_TX64X64
0x0101010101010101ULL, // TX_64x64
-#endif // CONFIG_TX64X64
};
// 64 bit masks for above transform size. Each 1 represents a position where
@@ -94,9 +92,7 @@
0xffffffffffffffffULL, // TX_8x8
0x00ff00ff00ff00ffULL, // TX_16x16
0x000000ff000000ffULL, // TX_32x32
-#if CONFIG_TX64X64
0x00000000000000ffULL, // TX_64x64
-#endif // CONFIG_TX64X64
};
// 64 bit masks for prediction sizes (left). Each 1 represents a position
@@ -193,9 +189,7 @@
0xffff, // TX_8x8
0x5555, // TX_16x16
0x1111, // TX_32x32
-#if CONFIG_TX64X64
0x0101, // TX_64x64, never used
-#endif // CONFIG_TX64X64
};
static const uint16_t above_64x64_txform_mask_uv[TX_SIZES] = {
@@ -203,9 +197,7 @@
0xffff, // TX_8x8
0x0f0f, // TX_16x16
0x000f, // TX_32x32
-#if CONFIG_TX64X64
0x0003, // TX_64x64, never used
-#endif // CONFIG_TX64X64
};
// 16 bit left mask to shift and set for each uv prediction size.
@@ -1920,54 +1912,42 @@
8 - 1, // TX_8X8
16 - 1, // TX_16X16
32 - 1, // TX_32X32
-#if CONFIG_TX64X64
64 - 1, // TX_64X64
-#endif // CONFIG_TX64X64
4 - 1, // TX_4X8
8 - 1, // TX_8X4
8 - 1, // TX_8X16
16 - 1, // TX_16X8
16 - 1, // TX_16X32
32 - 1, // TX_32X16
-#if CONFIG_TX64X64
32 - 1, // TX_32X64
64 - 1, // TX_64X32
-#endif // CONFIG_TX64X64
4 - 1, // TX_4X16
16 - 1, // TX_16X4
8 - 1, // TX_8X32
32 - 1, // TX_32X8
-#if CONFIG_TX64X64
16 - 1, // TX_16X64
64 - 1, // TX_64X16
-#endif // CONFIG_TX64X64
},
{
4 - 1, // TX_4X4
8 - 1, // TX_8X8
16 - 1, // TX_16X16
32 - 1, // TX_32X32
-#if CONFIG_TX64X64
64 - 1, // TX_64X64
-#endif // CONFIG_TX64X64
8 - 1, // TX_4X8
4 - 1, // TX_8X4
16 - 1, // TX_8X16
8 - 1, // TX_16X8
32 - 1, // TX_16X32
16 - 1, // TX_32X16
-#if CONFIG_TX64X64
64 - 1, // TX_32X64
32 - 1, // TX_64X32
-#endif // CONFIG_TX64X64
16 - 1, // TX_4X16
4 - 1, // TX_16X4
32 - 1, // TX_8X32
8 - 1, // TX_32X8
-#if CONFIG_TX64X64
64 - 1, // TX_16X64
16 - 1, // TX_64X16
-#endif // CONFIG_TX64X64
}
};
diff --git a/av1/common/av1_rtcd_defs.pl b/av1/common/av1_rtcd_defs.pl
index a56eb94..62e1e27 100755
--- a/av1/common/av1_rtcd_defs.pl
+++ b/av1/common/av1_rtcd_defs.pl
@@ -128,13 +128,12 @@
add_proto qw/void av1_iht32x32_1024_add/, "const tran_low_t *input, uint8_t *output, int pitch, const struct txfm_param *param";
-if (aom_config("CONFIG_TX64X64") eq "yes") {
+
add_proto qw/void av1_iht64x64_4096_add/, "const tran_low_t *input, uint8_t *output, int pitch, const struct txfm_param *param";
add_proto qw/void av1_iht32x64_2048_add/, "const tran_low_t *input, uint8_t *output, int pitch, const struct txfm_param *param";
add_proto qw/void av1_iht64x32_2048_add/, "const tran_low_t *input, uint8_t *output, int pitch, const struct txfm_param *param";
add_proto qw/void av1_iht16x64_1024_add/, "const tran_low_t *input, uint8_t *output, int pitch, const struct txfm_param *param";
add_proto qw/void av1_iht64x16_1024_add/, "const tran_low_t *input, uint8_t *output, int pitch, const struct txfm_param *param";
-}
if (aom_config("CONFIG_NEW_QUANT") eq "yes") {
add_proto qw/void quantize_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, const int16_t *dequant_ptr, int dq, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr";
@@ -145,12 +144,12 @@
add_proto qw/void quantize_32x32_fp_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *dequant_ptr, int dq, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr";
- if (aom_config("CONFIG_TX64X64") eq "yes") {
+
add_proto qw/void quantize_64x64_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, const int16_t *dequant_ptr, int dq, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr";
add_proto qw/void quantize_64x64_fp_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *dequant_ptr, int dq, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr";
}
-}
+
# FILTER_INTRA predictor functions
if (aom_config("CONFIG_FILTER_INTRA") eq "yes") {
@@ -245,7 +244,7 @@
add_proto qw/void av1_inv_txfm2d_add_32x32/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
specialize qw/av1_inv_txfm2d_add_32x32 avx2/;
-if (aom_config("CONFIG_TX64X64") eq "yes") {
+
add_proto qw/void av1_inv_txfm2d_add_64x64/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
add_proto qw/void av1_inv_txfm2d_add_32x64/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
add_proto qw/void av1_inv_txfm2d_add_64x32/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
@@ -253,7 +252,7 @@
add_proto qw/void av1_inv_txfm2d_add_64x16/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
specialize qw/av1_inv_txfm2d_add_64x64 sse4_1/;
-}
+
add_proto qw/void av1_inv_txfm2d_add_4x16/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
add_proto qw/void av1_inv_txfm2d_add_16x4/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
add_proto qw/void av1_inv_txfm2d_add_8x32/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
@@ -277,9 +276,9 @@
add_proto qw/void av1_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
specialize qw/av1_quantize_fp_32x32 avx2/;
- if (aom_config("CONFIG_TX64X64") eq "yes") {
+
add_proto qw/void av1_quantize_fp_64x64/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
- }
+
# fdct functions
@@ -297,13 +296,13 @@
add_proto qw/void av1_fht32x32/, "const int16_t *input, tran_low_t *output, int stride, struct txfm_param *param";
specialize qw/av1_fht32x32 sse2 avx2/;
- if (aom_config("CONFIG_TX64X64") eq "yes") {
+
add_proto qw/void av1_fht64x64/, "const int16_t *input, tran_low_t *output, int stride, struct txfm_param *param";
add_proto qw/void av1_fht32x64/, "const int16_t *input, tran_low_t *output, int stride, struct txfm_param *param";
add_proto qw/void av1_fht64x32/, "const int16_t *input, tran_low_t *output, int stride, struct txfm_param *param";
add_proto qw/void av1_fht16x64/, "const int16_t *input, tran_low_t *output, int stride, struct txfm_param *param";
add_proto qw/void av1_fht64x16/, "const int16_t *input, tran_low_t *output, int stride, struct txfm_param *param";
- }
+
add_proto qw/void av1_fht4x8/, "const int16_t *input, tran_low_t *output, int stride, struct txfm_param *param";
specialize qw/av1_fht4x8 sse2/;
@@ -357,13 +356,13 @@
add_proto qw/void av1_fwd_txfm2d_32x32/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
specialize qw/av1_fwd_txfm2d_32x32 sse4_1/;
- if (aom_config("CONFIG_TX64X64") eq "yes") {
+
add_proto qw/void av1_fwd_txfm2d_64x64/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
add_proto qw/void av1_fwd_txfm2d_32x64/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
add_proto qw/void av1_fwd_txfm2d_64x32/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
add_proto qw/void av1_fwd_txfm2d_16x64/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
add_proto qw/void av1_fwd_txfm2d_64x16/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
- }
+
#
# Motion search
#
@@ -395,11 +394,11 @@
add_proto qw/void highbd_quantize_32x32_fp_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *dequant_ptr, int dq, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr";
- if (aom_config("CONFIG_TX64X64") eq "yes") {
+
add_proto qw/void highbd_quantize_64x64_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, const int16_t *dequant_ptr, int dq, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr";
add_proto qw/void highbd_quantize_64x64_fp_nuq/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *quant_ptr, const int16_t *dequant_ptr, int dq, const cuml_bins_type_nuq *cuml_bins_ptr, const dequant_val_type_nuq *dequant_val, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const int16_t *scan, const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr";
- }
+
}
add_proto qw/int64_t av1_highbd_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz, int bd";
diff --git a/av1/common/av1_txfm.h b/av1/common/av1_txfm.h
index 0f9dff41..3d844a4 100644
--- a/av1/common/av1_txfm.h
+++ b/av1/common/av1_txfm.h
@@ -212,11 +212,9 @@
case TX_8X8: return TX_8X8;
case TX_16X16: return TX_16X16;
case TX_32X32: return TX_32X32;
-#if CONFIG_TX64X64
case TX_64X64: return TX_64X64;
case TX_32X64: return TX_64X32;
case TX_64X32: return TX_32X64;
-#endif // CONFIG_TX64X64
case TX_4X8: return TX_8X4;
case TX_8X4: return TX_4X8;
case TX_8X16: return TX_16X8;
@@ -227,10 +225,8 @@
case TX_16X4: return TX_4X16;
case TX_8X32: return TX_32X8;
case TX_32X8: return TX_8X32;
-#if CONFIG_TX64X64
case TX_16X64: return TX_64X16;
case TX_64X16: return TX_16X64;
-#endif // CONFIG_TX64X64
default: assert(0); return TX_INVALID;
}
}
diff --git a/av1/common/blockd.h b/av1/common/blockd.h
index 41c6281..06fed67 100644
--- a/av1/common/blockd.h
+++ b/av1/common/blockd.h
@@ -231,11 +231,7 @@
COMPOUND_TYPE interinter_compound_type;
} INTERINTER_COMPOUND_DATA;
-#if CONFIG_TX64X64
#define INTER_TX_SIZE_BUF_LEN 16
-#else
-#define INTER_TX_SIZE_BUF_LEN 256
-#endif
// This structure now relates to 4x4 block regions.
typedef struct MB_MODE_INFO {
// Common for both INTER and INTRA blocks
@@ -997,7 +993,6 @@
const BLOCK_SIZE plane_bsize = ss_size_lookup[bsize][ss_x][ss_y];
assert(plane_bsize < BLOCK_SIZES_ALL);
TX_SIZE uv_tx = max_txsize_rect_lookup[is_inter][plane_bsize];
-#if CONFIG_TX64X64
switch (uv_tx) {
case TX_64X64:
case TX_64X32:
@@ -1006,7 +1001,6 @@
case TX_16X64: return TX_16X32;
default: break;
}
-#endif // CONFIG_TX64X64
return uv_tx;
}
@@ -1092,7 +1086,7 @@
? TX_4X4
: get_max_rect_tx_size(bsize, is_inter_block(&xd->mi[0]->mbmi));
-#if CONFIG_EXT_PARTITION && CONFIG_TX64X64
+#if CONFIG_EXT_PARTITION
// The decoder is designed so that it can process 64x64 luma pixels at a
// time. If this is a chroma plane with subsampling and bsize corresponds to
// a subsampled BLOCK_128X128 then the lookup above will give TX_64X64. That
@@ -1302,19 +1296,16 @@
}
static INLINE int av1_get_max_eob(TX_SIZE tx_size) {
-#if CONFIG_TX64X64
if (tx_size == TX_64X64 || tx_size == TX_64X32 || tx_size == TX_32X64) {
return 1024;
}
if (tx_size == TX_16X64 || tx_size == TX_64X16) {
return 512;
}
-#endif // CONFIG_TX64X64
return tx_size_2d[tx_size];
}
static INLINE TX_SIZE av1_get_adjusted_tx_size(TX_SIZE tx_size) {
-#if CONFIG_TX64X64
if (tx_size == TX_64X64 || tx_size == TX_64X32 || tx_size == TX_32X64) {
return TX_32X32;
}
@@ -1324,7 +1315,6 @@
if (tx_size == TX_64X16) {
return TX_32X16;
}
-#endif // CONFIG_TX64X64
return tx_size;
}
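
Both helpers above encode the same constraint: a 64-point dimension only codes its 32 low-frequency coefficients, so the effective coefficient area of a 64x64 block is 32*32 = 1024 and of a 16x64 or 64x16 block is 16*32 = 512. A standalone restatement in terms of width/height rather than the TX_SIZE enum (simplified sketch, not the library code).

    #include <stdio.h>

    /* Coded coefficient area of a w x h transform: a 64-point dimension only
     * carries its 32 low-frequency coefficients; the rest are implicitly zero. */
    static int max_eob(int w, int h) {
      if (w == 64) w = 32;
      if (h == 64) h = 32;
      return w * h;
    }

    int main(void) {
      printf("64x64 -> %d\n", max_eob(64, 64)); /* 1024, as in av1_get_max_eob */
      printf("16x64 -> %d\n", max_eob(16, 64)); /* 512 */
      printf("32x32 -> %d\n", max_eob(32, 32)); /* 1024 */
      return 0;
    }
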
diff --git a/av1/common/common_data.h b/av1/common/common_data.h
index 8b091cc..a0d78ce 100644
--- a/av1/common/common_data.h
+++ b/av1/common/common_data.h
@@ -444,21 +444,12 @@
TX_16X16, TX_16X16, TX_32X32,
// 32X64, 64X32,
TX_32X32, TX_32X32,
-#if CONFIG_TX64X64
// 64X64
TX_64X64,
#if CONFIG_EXT_PARTITION
// 64x128, 128x64, 128x128
TX_64X64, TX_64X64, TX_64X64,
#endif // CONFIG_EXT_PARTITION
-#else
- // 64X64
- TX_32X32,
-#if CONFIG_EXT_PARTITION
- // 64x128, 128x64, 128x128
- TX_32X32, TX_32X32, TX_32X32,
-#endif // CONFIG_EXT_PARTITION
-#endif // CONFIG_TX64X64
// 4x16, 16x4, 8x32
TX_4X4, TX_4X4, TX_8X8,
// 32x8, 16x64 64x16
@@ -480,7 +471,6 @@
TX_8X16, TX_16X8, TX_16X16,
// 16X32, 32X16, 32X32
TX_16X32, TX_32X16, TX_32X32,
-#if CONFIG_TX64X64
// 32X64, 64X32,
TX_32X64, TX_64X32,
// 64X64
@@ -489,29 +479,14 @@
// 64x128, 128x64, 128x128
TX_64X64, TX_64X64, TX_64X64,
#endif // CONFIG_EXT_PARTITION
-#else
- // 32X64, 64X32,
- TX_32X32, TX_32X32,
- // 64X64
- TX_32X32,
-#if CONFIG_EXT_PARTITION
- // 64x128, 128x64, 128x128
- TX_32X32, TX_32X32, TX_32X32,
-#endif // CONFIG_EXT_PARTITION
-#endif // CONFIG_TX64X64
#if CONFIG_EXT_PARTITION_TYPES && CONFIG_RECT_TX_EXT_INTRA
// 4x16, 16x4,
TX_4X16, TX_16X4,
// 8x32, 32x8
TX_8X32, TX_32X8,
-#if CONFIG_TX64X64
// 16x64, 64x16
TX_16X64, TX_64X16,
#else
- // 16x64, 64x16
- TX_16X32, TX_32X16,
-#endif // CONFIG_TX64X64
-#else
// 4x16, 16x4,
TX_4X8, TX_8X4,
// 8x32, 32x8
@@ -520,13 +495,8 @@
TX_16X32, TX_32X16,
#endif // CONFIG_EXT_PARTITION_TYPES && CONFIG_RECT_TX_EXT_INTRA
#if CONFIG_EXT_PARTITION
-#if CONFIG_TX64X64
// 32x128 128x32
TX_32X64, TX_64X32
-#else
- // 32x128 128x32
- TX_32X32, TX_32X32
-#endif // CONFIG_TX64X64
#endif // CONFIG_EXT_PARTITION
}, {
// Inter
@@ -538,7 +508,6 @@
TX_8X16, TX_16X8, TX_16X16,
// 16X32, 32X16, 32X32
TX_16X32, TX_32X16, TX_32X32,
-#if CONFIG_TX64X64
// 32X64, 64X32,
TX_32X64, TX_64X32,
// 64X64
@@ -547,29 +516,14 @@
// 64x128, 128x64, 128x128
TX_64X64, TX_64X64, TX_64X64,
#endif // CONFIG_EXT_PARTITION
-#else
- // 32X64, 64X32,
- TX_32X32, TX_32X32,
- // 64X64
- TX_32X32,
-#if CONFIG_EXT_PARTITION
- // 64x128, 128x64, 128x128
- TX_32X32, TX_32X32, TX_32X32,
-#endif // CONFIG_EXT_PARTITION
-#endif // CONFIG_TX64X64
#if CONFIG_EXT_PARTITION_TYPES && CONFIG_RECT_TX_EXT
// 4x16, 16x4, 8x32
TX_4X16, TX_16X4, TX_8X32,
// 32x8
TX_32X8,
-#if CONFIG_TX64X64
// 16x64, 64x16
TX_16X64, TX_64X16,
#else
- // 16x64, 64x16
- TX_16X32, TX_32X16,
-#endif // CONFIG_TX64X64
-#else
// 4x16, 16x4, 8x32
TX_4X8, TX_8X4, TX_8X16,
// 32x8
@@ -578,13 +532,8 @@
TX_16X32, TX_32X16,
#endif // CONFIG_EXT_PARTITION_TYPES && CONFIG_RECT_TX_EXT
#if CONFIG_EXT_PARTITION
-#if CONFIG_TX64X64
// 32x128 128x32
TX_32X64, TX_64X32
-#else
- // 32x128 128x32
- TX_32X32, TX_32X32
-#endif // CONFIG_TX64X64
#endif // CONFIG_EXT_PARTITION
},
};
@@ -612,37 +561,29 @@
TX_4X4, // TX_8X8
TX_8X8, // TX_16X16
TX_16X16, // TX_32X32
-#if CONFIG_TX64X64
TX_32X32, // TX_64X64
-#endif // CONFIG_TX64X64
TX_4X4, // TX_4X8
TX_4X4, // TX_8X4
TX_8X8, // TX_8X16
TX_8X8, // TX_16X8
TX_16X16, // TX_16X32
TX_16X16, // TX_32X16
-#if CONFIG_TX64X64
TX_32X32, // TX_32X64
TX_32X32, // TX_64X32
-#endif // CONFIG_TX64X64
#if CONFIG_EXT_PARTITION_TYPES && CONFIG_RECT_TX_EXT_INTRA
- TX_4X8, // TX_4X16
- TX_8X4, // TX_16X4
- TX_8X16, // TX_8X32
- TX_16X8, // TX_32X8
-#if CONFIG_TX64X64
+ TX_4X8, // TX_4X16
+ TX_8X4, // TX_16X4
+ TX_8X16, // TX_8X32
+ TX_16X8, // TX_32X8
TX_16X32, // TX_16X64
TX_32X16, // TX_64X16
-#endif // CONFIG_TX64X64
#else
TX_4X4, // TX_4X16
TX_4X4, // TX_16X4
TX_8X8, // TX_8X32
TX_8X8, // TX_32X8
-#if CONFIG_TX64X64
TX_16X16, // TX_16X64
TX_16X16, // TX_64X16
-#endif // CONFIG_TX64X64
#endif // CONFIG_EXT_PARTITION_TYPES && CONFIG_RECT_TX_EXT_INTRA
},
{
@@ -651,37 +592,29 @@
TX_4X4, // TX_8X8
TX_8X8, // TX_16X16
TX_16X16, // TX_32X32
-#if CONFIG_TX64X64
TX_32X32, // TX_64X64
-#endif // CONFIG_TX64X64
TX_4X4, // TX_4X8
TX_4X4, // TX_8X4
TX_8X8, // TX_8X16
TX_8X8, // TX_16X8
TX_16X16, // TX_16X32
TX_16X16, // TX_32X16
-#if CONFIG_TX64X64
TX_32X32, // TX_32X64
TX_32X32, // TX_64X32
-#endif // CONFIG_TX64X64
#if CONFIG_EXT_PARTITION_TYPES && CONFIG_RECT_TX_EXT
- TX_4X8, // TX_4X16
- TX_8X4, // TX_16X4
- TX_8X16, // TX_8X32
- TX_16X8, // TX_32X8
-#if CONFIG_TX64X64
+ TX_4X8, // TX_4X16
+ TX_8X4, // TX_16X4
+ TX_8X16, // TX_8X32
+ TX_16X8, // TX_32X8
TX_16X32, // TX_16X64
TX_32X16, // TX_64X16
-#endif // CONFIG_TX64X64
#else
TX_4X4, // TX_4X16
TX_4X4, // TX_16X4
TX_8X8, // TX_8X32
TX_8X8, // TX_32X8
-#if CONFIG_TX64X64
TX_16X16, // TX_16X64
TX_16X16, // TX_64X16
-#endif // CONFIG_TX64X64
#endif // CONFIG_EXT_PARTITION_TYPES && CONFIG_RECT_TX_EXT
},
};
@@ -691,27 +624,21 @@
TX_8X8, // TX_8X8
TX_16X16, // TX_16X16
TX_32X32, // TX_32X32
-#if CONFIG_TX64X64
TX_64X64, // TX_64X64
-#endif // CONFIG_TX64X64
TX_4X4, // TX_4X8
TX_8X8, // TX_8X4
TX_8X8, // TX_8X16
TX_16X16, // TX_16X8
TX_16X16, // TX_16X32
TX_32X32, // TX_32X16
-#if CONFIG_TX64X64
TX_32X32, // TX_32X64
TX_64X64, // TX_64X32
-#endif // CONFIG_TX64X64
TX_4X4, // TX_4X16
TX_16X16, // TX_16X4
TX_8X8, // TX_8X32
TX_32X32, // TX_32X8
-#if CONFIG_TX64X64
TX_16X16, // TX_16X64
TX_64X64, // TX_64X16
-#endif // CONFIG_TX64X64
};
static const TX_SIZE txsize_vert_map[TX_SIZES_ALL] = {
@@ -719,145 +646,63 @@
TX_8X8, // TX_8X8
TX_16X16, // TX_16X16
TX_32X32, // TX_32X32
-#if CONFIG_TX64X64
TX_64X64, // TX_64X64
-#endif // CONFIG_TX64X64
TX_8X8, // TX_4X8
TX_4X4, // TX_8X4
TX_16X16, // TX_8X16
TX_8X8, // TX_16X8
TX_32X32, // TX_16X32
TX_16X16, // TX_32X16
-#if CONFIG_TX64X64
TX_64X64, // TX_32X64
TX_32X32, // TX_64X32
-#endif // CONFIG_TX64X64
TX_16X16, // TX_4X16
TX_4X4, // TX_16X4
TX_32X32, // TX_8X32
TX_8X8, // TX_32X8
-#if CONFIG_TX64X64
TX_64X64, // TX_16X64
TX_16X16, // TX_64X16
-#endif // CONFIG_TX64X64
};
#define TX_SIZE_W_MIN 4
// Transform block width in pixels
static const int tx_size_wide[TX_SIZES_ALL] = {
- 4, 8, 16, 32,
-#if CONFIG_TX64X64
- 64,
-#endif // CONFIG_TX64X64
- 4, 8, 8, 16, 16, 32,
-#if CONFIG_TX64X64
- 32, 64,
-#endif // CONFIG_TX64X64
- 4, 16, 8, 32,
-#if CONFIG_TX64X64
- 16, 64,
-#endif // CONFIG_TX64X64
+ 4, 8, 16, 32, 64, 4, 8, 8, 16, 16, 32, 32, 64, 4, 16, 8, 32, 16, 64,
};
#define TX_SIZE_H_MIN 4
// Transform block height in pixels
static const int tx_size_high[TX_SIZES_ALL] = {
- 4, 8, 16, 32,
-#if CONFIG_TX64X64
- 64,
-#endif // CONFIG_TX64X64
- 8, 4, 16, 8, 32, 16,
-#if CONFIG_TX64X64
- 64, 32,
-#endif // CONFIG_TX64X64
- 16, 4, 32, 8,
-#if CONFIG_TX64X64
- 64, 16,
-#endif // CONFIG_TX64X64
+ 4, 8, 16, 32, 64, 8, 4, 16, 8, 32, 16, 64, 32, 16, 4, 32, 8, 64, 16,
};
// Transform block width in unit
static const int tx_size_wide_unit[TX_SIZES_ALL] = {
- 1, 2, 4, 8,
-#if CONFIG_TX64X64
- 16,
-#endif // CONFIG_TX64X64
- 1, 2, 2, 4, 4, 8,
-#if CONFIG_TX64X64
- 8, 16,
-#endif // CONFIG_TX64X64
- 1, 4, 2, 8,
-#if CONFIG_TX64X64
- 4, 16,
-#endif // CONFIG_TX64X64
+ 1, 2, 4, 8, 16, 1, 2, 2, 4, 4, 8, 8, 16, 1, 4, 2, 8, 4, 16,
};
// Transform block height in unit
static const int tx_size_high_unit[TX_SIZES_ALL] = {
- 1, 2, 4, 8,
-#if CONFIG_TX64X64
- 16,
-#endif // CONFIG_TX64X64
- 2, 1, 4, 2, 8, 4,
-#if CONFIG_TX64X64
- 16, 8,
-#endif // CONFIG_TX64X64
- 4, 1, 8, 2,
-#if CONFIG_TX64X64
- 16, 4,
-#endif // CONFIG_TX64X64
+ 1, 2, 4, 8, 16, 2, 1, 4, 2, 8, 4, 16, 8, 4, 1, 8, 2, 16, 4,
};
// Transform block width in log2
static const int tx_size_wide_log2[TX_SIZES_ALL] = {
- 2, 3, 4, 5,
-#if CONFIG_TX64X64
- 6,
-#endif // CONFIG_TX64X64
- 2, 3, 3, 4, 4, 5,
-#if CONFIG_TX64X64
- 5, 6,
-#endif // CONFIG_TX64X64
- 2, 4, 3, 5,
-#if CONFIG_TX64X64
- 4, 6,
-#endif // CONFIG_TX64X64
+ 2, 3, 4, 5, 6, 2, 3, 3, 4, 4, 5, 5, 6, 2, 4, 3, 5, 4, 6,
};
// Transform block height in log2
static const int tx_size_high_log2[TX_SIZES_ALL] = {
- 2, 3, 4, 5,
-#if CONFIG_TX64X64
- 6,
-#endif // CONFIG_TX64X64
- 3, 2, 4, 3, 5, 4,
-#if CONFIG_TX64X64
- 6, 5,
-#endif // CONFIG_TX64X64
- 4, 2, 5, 3,
-#if CONFIG_TX64X64
- 6, 4,
-#endif // CONFIG_TX64X64
+ 2, 3, 4, 5, 6, 3, 2, 4, 3, 5, 4, 6, 5, 4, 2, 5, 3, 6, 4,
};
#define TX_UNIT_WIDE_LOG2 (MI_SIZE_LOG2 - tx_size_wide_log2[0])
#define TX_UNIT_HIGH_LOG2 (MI_SIZE_LOG2 - tx_size_high_log2[0])
static const int tx_size_2d[TX_SIZES_ALL + 1] = {
- 16, 64, 256, 1024,
-#if CONFIG_TX64X64
- 4096,
-#endif // CONFIG_TX64X64
- 32, 32, 128, 128, 512, 512,
-#if CONFIG_TX64X64
- 2048, 2048,
-#endif // CONFIG_TX64X64
- 64, 64, 256, 256,
-#if CONFIG_TX64X64
- 1024, 1024,
-#endif // CONFIG_TX64X64
+ 16, 64, 256, 1024, 4096, 32, 32, 128, 128, 512,
+ 512, 2048, 2048, 64, 64, 256, 256, 1024, 1024,
};
static const BLOCK_SIZE txsize_to_bsize[TX_SIZES_ALL] = {
@@ -865,27 +710,21 @@
BLOCK_8X8, // TX_8X8
BLOCK_16X16, // TX_16X16
BLOCK_32X32, // TX_32X32
-#if CONFIG_TX64X64
BLOCK_64X64, // TX_64X64
-#endif // CONFIG_TX64X64
BLOCK_4X8, // TX_4X8
BLOCK_8X4, // TX_8X4
BLOCK_8X16, // TX_8X16
BLOCK_16X8, // TX_16X8
BLOCK_16X32, // TX_16X32
BLOCK_32X16, // TX_32X16
-#if CONFIG_TX64X64
BLOCK_32X64, // TX_32X64
BLOCK_64X32, // TX_64X32
-#endif // CONFIG_TX64X64
BLOCK_4X16, // TX_4X16
BLOCK_16X4, // TX_16X4
BLOCK_8X32, // TX_8X32
BLOCK_32X8, // TX_32X8
-#if CONFIG_TX64X64
BLOCK_16X64, // TX_16X64
BLOCK_64X16, // TX_64X16
-#endif // CONFIG_TX64X64
};
static const TX_SIZE txsize_sqr_map[TX_SIZES_ALL] = {
@@ -893,27 +732,21 @@
TX_8X8, // TX_8X8
TX_16X16, // TX_16X16
TX_32X32, // TX_32X32
-#if CONFIG_TX64X64
TX_64X64, // TX_64X64
-#endif // CONFIG_TX64X64
TX_4X4, // TX_4X8
TX_4X4, // TX_8X4
TX_8X8, // TX_8X16
TX_8X8, // TX_16X8
TX_16X16, // TX_16X32
TX_16X16, // TX_32X16
-#if CONFIG_TX64X64
TX_32X32, // TX_32X64
TX_32X32, // TX_64X32
-#endif // CONFIG_TX64X64
TX_4X4, // TX_4X16
TX_4X4, // TX_16X4
TX_8X8, // TX_8X32
TX_8X8, // TX_32X8
-#if CONFIG_TX64X64
TX_16X16, // TX_16X64
TX_16X16, // TX_64X16
-#endif // CONFIG_TX64X64
};
static const TX_SIZE txsize_sqr_up_map[TX_SIZES_ALL] = {
@@ -921,27 +754,21 @@
TX_8X8, // TX_8X8
TX_16X16, // TX_16X16
TX_32X32, // TX_32X32
-#if CONFIG_TX64X64
TX_64X64, // TX_64X64
-#endif // CONFIG_TX64X64
TX_8X8, // TX_4X8
TX_8X8, // TX_8X4
TX_16X16, // TX_8X16
TX_16X16, // TX_16X8
TX_32X32, // TX_16X32
TX_32X32, // TX_32X16
-#if CONFIG_TX64X64
TX_64X64, // TX_32X64
TX_64X64, // TX_64X32
-#endif // CONFIG_TX64X64
TX_16X16, // TX_4X16
TX_16X16, // TX_16X4
TX_32X32, // TX_8X32
TX_32X32, // TX_32X8
-#if CONFIG_TX64X64
TX_64X64, // TX_16X64
TX_64X64, // TX_64X16
-#endif // CONFIG_TX64X64
};
static const int8_t txsize_log2_minus4[TX_SIZES_ALL] = {
@@ -949,39 +776,28 @@
2, // TX_8X8
4, // TX_16X16
6, // TX_32X32
-#if CONFIG_TX64X64
- 6, // TX_64X64
-#endif // CONFIG_TX64X64
- 1, // TX_4X8
- 1, // TX_8X4
- 3, // TX_8X16
- 3, // TX_16X8
- 5, // TX_16X32
- 5, // TX_32X16
-#if CONFIG_TX64X64
- 6, // TX_32X64
- 6, // TX_64X32
-#endif // CONFIG_TX64X64
- 2, // TX_4X16
- 2, // TX_16X4
- 4, // TX_8X32
- 4, // TX_32X8
-#if CONFIG_TX64X64
- 5, // TX_16X64
- 5, // TX_64X16
-#endif // CONFIG_TX64X64
+ 6, // TX_64X64
+ 1, // TX_4X8
+ 1, // TX_8X4
+ 3, // TX_8X16
+ 3, // TX_16X8
+ 5, // TX_16X32
+ 5, // TX_32X16
+ 6, // TX_32X64
+ 6, // TX_64X32
+ 2, // TX_4X16
+ 2, // TX_16X4
+ 4, // TX_8X32
+ 4, // TX_32X8
+ 5, // TX_16X64
+ 5, // TX_64X16
};
/* clang-format off */
static const TX_SIZE tx_mode_to_biggest_tx_size[TX_MODES] = {
TX_4X4, // ONLY_4X4
-#if CONFIG_TX64X64
TX_64X64, // TX_MODE_LARGEST
TX_64X64, // TX_MODE_SELECT
-#else
- TX_32X32, // TX_MODE_LARGEST
- TX_32X32, // TX_MODE_SELECT
-#endif // CONFIG_TX64X64
};
/* clang-format on */
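
With the #if blocks removed, the per-TX_SIZE tables above are flat 19-entry arrays indexed by the TX_SIZE enum. A useful invariant when reading them is that the pixel, unit and area tables stay mutually consistent (the 4-pixel unit granularity comes from TX_SIZE_W_MIN). A small standalone check against local copies of three of the flattened tables; names are local to the sketch.

    #include <assert.h>
    #include <stdio.h>

    #define N_TX_SIZES_ALL 19

    /* Local copies of the flattened tx_size_wide / tx_size_high / tx_size_2d. */
    static const int w_px[N_TX_SIZES_ALL] = {
      4, 8, 16, 32, 64, 4, 8, 8, 16, 16, 32, 32, 64, 4, 16, 8, 32, 16, 64,
    };
    static const int h_px[N_TX_SIZES_ALL] = {
      4, 8, 16, 32, 64, 8, 4, 16, 8, 32, 16, 64, 32, 16, 4, 32, 8, 64, 16,
    };
    static const int area_px[N_TX_SIZES_ALL] = {
      16, 64, 256, 1024, 4096, 32, 32, 128, 128, 512,
      512, 2048, 2048, 64, 64, 256, 256, 1024, 1024,
    };
    static const int w_unit[N_TX_SIZES_ALL] = {
      1, 2, 4, 8, 16, 1, 2, 2, 4, 4, 8, 8, 16, 1, 4, 2, 8, 4, 16,
    };

    int main(void) {
      for (int t = 0; t < N_TX_SIZES_ALL; ++t) {
        assert(area_px[t] == w_px[t] * h_px[t]); /* tx_size_2d == wide * high */
        assert(w_unit[t] == w_px[t] / 4);        /* unit = 4-pixel columns    */
      }
      printf("all %d TX_SIZES_ALL entries are consistent\n", N_TX_SIZES_ALL);
      return 0;
    }
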
diff --git a/av1/common/entropy.c b/av1/common/entropy.c
index c837c27..b3fa2ff 100644
--- a/av1/common/entropy.c
+++ b/av1/common/entropy.c
@@ -103,8 +103,6 @@
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
- 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-#if CONFIG_TX64X64
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
@@ -223,8 +221,7 @@
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
- 5, 5, 5, 5
-#endif // CONFIG_TX64X64
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
};
const uint8_t av1_coefband_trans_4x8_8x4[32] = {
@@ -608,10 +605,8 @@
(*av1_default_qctx_coef_cdfs[index])[TX_16X16]);
av1_copy(cm->fc->coef_head_cdfs[TX_32X32],
(*av1_default_qctx_coef_cdfs[index])[TX_32X32]);
-#if CONFIG_TX64X64
av1_copy(cm->fc->coef_head_cdfs[TX_64X64],
(*av1_default_qctx_coef_cdfs[index])[TX_32X32]);
-#endif // CONFIG_TX64X64
av1_coef_pareto_cdfs(cm->fc);
#endif // CONFIG_LV_MAP
}
diff --git a/av1/common/entropy.h b/av1/common/entropy.h
index 3a91fe6..87a2eb6 100644
--- a/av1/common/entropy.h
+++ b/av1/common/entropy.h
@@ -130,10 +130,8 @@
static INLINE int av1_get_cat6_extrabits_size(TX_SIZE tx_size,
aom_bit_depth_t bit_depth) {
tx_size = txsize_sqr_up_map[tx_size];
-#if CONFIG_TX64X64
// TODO(debargha): Does TX_64X64 require an additional extrabit?
if (tx_size > TX_32X32) tx_size = TX_32X32;
-#endif
int tx_offset = (int)(tx_size - TX_4X4);
int bits = (int)bit_depth + 3 + tx_offset;
// Round up
@@ -274,7 +272,6 @@
above_ec = !!*(const uint64_t *)a;
left_ec = !!*(const uint64_t *)l;
break;
-#if CONFIG_TX64X64
case TX_64X64:
above_ec = !!(*(const uint64_t *)a | *(const uint64_t *)(a + 8));
left_ec = !!(*(const uint64_t *)l | *(const uint64_t *)(l + 8));
@@ -287,7 +284,6 @@
above_ec = !!(*(const uint64_t *)a | *(const uint64_t *)(a + 8));
left_ec = !!*(const uint64_t *)l;
break;
-#endif // CONFIG_TX64X64
case TX_4X16:
above_ec = a[0] != 0;
left_ec = !!*(const uint32_t *)l;
@@ -304,7 +300,6 @@
above_ec = !!*(const uint64_t *)a;
left_ec = !!*(const uint16_t *)l;
break;
-#if CONFIG_TX64X64
case TX_16X64:
above_ec = !!*(const uint32_t *)a;
left_ec = !!(*(const uint64_t *)l | *(const uint64_t *)(l + 8));
@@ -313,7 +308,6 @@
above_ec = !!(*(const uint64_t *)a | *(const uint64_t *)(a + 8));
left_ec = !!*(const uint32_t *)l;
break;
-#endif // CONFIG_TX64X64
default: assert(0 && "Invalid transform size."); break;
}
return combine_entropy_contexts(above_ec, left_ec);
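
The 64-point cases in get_entropy_context above follow the established pattern: each ENTROPY_CONTEXT byte covers 4 pixels along the edge, so a 64-wide edge spans 16 bytes and is tested with two 64-bit loads. Below is a standalone sketch of the same "any nonzero context byte along the edge" test, written as a plain byte loop instead of the wide loads; types are simplified for illustration.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint8_t ENTROPY_CONTEXT; /* one byte per 4-pixel unit along the edge */

    /* "Any nonzero context byte over the first txw/4 entries", as a byte loop;
     * the switch above gets the same answer with one or two 64-bit loads
     * (e.g. 16 bytes, i.e. two loads, for the 64-wide cases). */
    static int edge_has_coeffs(const ENTROPY_CONTEXT *ctx, int txw) {
      for (int i = 0; i < txw / 4; ++i)
        if (ctx[i]) return 1;
      return 0;
    }

    int main(void) {
      ENTROPY_CONTEXT above[16] = { 0 }; /* covers a 64-pixel-wide edge  */
      above[12] = 1;                     /* nonzero context 48 pixels in */
      printf("64 wide: %d\n", edge_has_coeffs(above, 64)); /* 1 */
      printf("32 wide: %d\n", edge_has_coeffs(above, 32)); /* 0 */
      return 0;
    }
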
diff --git a/av1/common/entropymode.c b/av1/common/entropymode.c
index be4a7d5..80abf85 100644
--- a/av1/common/entropymode.c
+++ b/av1/common/entropymode.c
@@ -565,7 +565,6 @@
static const aom_cdf_prob
default_txfm_partition_cdf[TXFM_PARTITION_CONTEXTS][CDF_SIZE(2)] = {
-#if CONFIG_TX64X64
{ AOM_CDF2(249 * 128) }, { AOM_CDF2(240 * 128) }, { AOM_CDF2(223 * 128) },
{ AOM_CDF2(249 * 128) }, { AOM_CDF2(229 * 128) }, { AOM_CDF2(177 * 128) },
{ AOM_CDF2(250 * 128) }, { AOM_CDF2(243 * 128) }, { AOM_CDF2(208 * 128) },
@@ -574,14 +573,6 @@
{ AOM_CDF2(183 * 128) }, { AOM_CDF2(149 * 128) }, { AOM_CDF2(125 * 128) },
{ AOM_CDF2(181 * 128) }, { AOM_CDF2(146 * 128) }, { AOM_CDF2(113 * 128) },
{ AOM_CDF2(128 * 128) }
-#else
- { AOM_CDF2(250 * 128) }, { AOM_CDF2(231 * 128) }, { AOM_CDF2(212 * 128) },
- { AOM_CDF2(241 * 128) }, { AOM_CDF2(166 * 128) }, { AOM_CDF2(66 * 128) },
- { AOM_CDF2(241 * 128) }, { AOM_CDF2(230 * 128) }, { AOM_CDF2(135 * 128) },
- { AOM_CDF2(243 * 128) }, { AOM_CDF2(154 * 128) }, { AOM_CDF2(64 * 128) },
- { AOM_CDF2(248 * 128) }, { AOM_CDF2(161 * 128) }, { AOM_CDF2(63 * 128) },
- { AOM_CDF2(128 * 128) },
-#endif // CONFIG_TX64X64
};
#if CONFIG_EXT_SKIP
@@ -626,20 +617,12 @@
static const aom_cdf_prob default_filter_intra_cdfs[TX_SIZES_ALL][CDF_SIZE(2)] =
{
{ AOM_CDF2(10985) }, { AOM_CDF2(10985) }, { AOM_CDF2(16645) },
+ { AOM_CDF2(27378) }, { AOM_CDF2(30378) }, { AOM_CDF2(10985) },
+ { AOM_CDF2(10985) }, { AOM_CDF2(15723) }, { AOM_CDF2(12373) },
+ { AOM_CDF2(27199) }, { AOM_CDF2(24217) }, { AOM_CDF2(27378) },
+ { AOM_CDF2(27378) }, { AOM_CDF2(16767) }, { AOM_CDF2(16767) },
+ { AOM_CDF2(27767) }, { AOM_CDF2(27767) }, { AOM_CDF2(27378) },
{ AOM_CDF2(27378) },
-#if CONFIG_TX64X64
- { AOM_CDF2(30378) },
-#endif // CONFIG_TX64X64
- { AOM_CDF2(10985) }, { AOM_CDF2(10985) }, { AOM_CDF2(15723) },
- { AOM_CDF2(12373) }, { AOM_CDF2(27199) }, { AOM_CDF2(24217) },
-#if CONFIG_TX64X64
- { AOM_CDF2(27378) }, { AOM_CDF2(27378) },
-#endif // CONFIG_TX64X64
- { AOM_CDF2(16767) }, { AOM_CDF2(16767) }, { AOM_CDF2(27767) },
- { AOM_CDF2(27767) },
-#if CONFIG_TX64X64
- { AOM_CDF2(27378) }, { AOM_CDF2(27378) },
-#endif // CONFIG_TX64X64
};
#endif // CONFIG_FILTER_INTRA
@@ -700,24 +683,18 @@
{ { AOM_CDF2(19968) }, { AOM_CDF2(24320) } },
{ { AOM_CDF3(12272, 30172) }, { AOM_CDF3(18677, 30848) } },
{ { AOM_CDF3(12986, 15180) }, { AOM_CDF3(24302, 25602) } },
-#if CONFIG_TX64X64
{ { AOM_CDF3(5782, 11475) }, { AOM_CDF3(16803, 22759) } },
-#endif // CONFIG_TX64X64
#elif MAX_TX_DEPTH == 3
{ { AOM_CDF2(19968) }, { AOM_CDF2(24320) } },
{ { AOM_CDF3(12272, 30172) }, { AOM_CDF3(18677, 30848) } },
{ { AOM_CDF4(12986, 15180, 32384) }, { AOM_CDF4(24302, 25602, 32128) } },
-#if CONFIG_TX64X64
{ { AOM_CDF4(5782, 11475, 24480) }, { AOM_CDF4(16803, 22759, 28560) } },
-#endif // CONFIG_TX64X64
#else
{ { AOM_CDF2(19968) }, { AOM_CDF2(24320) } },
{ { AOM_CDF3(12272, 30172) }, { AOM_CDF3(18677, 30848) } },
{ { AOM_CDF4(12986, 15180, 32384) }, { AOM_CDF4(24302, 25602, 32128) } },
-#if CONFIG_TX64X64
{ { AOM_CDF5(5782, 11475, 24480, 32640) },
{ AOM_CDF5(16803, 22759, 28560, 32640) } },
-#endif // CONFIG_TX64X64
#endif // MAX_TX_DEPTH == 2
};
diff --git a/av1/common/enums.h b/av1/common/enums.h
index 245a060..d2509ac 100644
--- a/av1/common/enums.h
+++ b/av1/common/enums.h
@@ -178,38 +178,28 @@
#else
typedef enum ATTRIBUTE_PACKED {
#endif
- TX_4X4, // 4x4 transform
- TX_8X8, // 8x8 transform
- TX_16X16, // 16x16 transform
- TX_32X32, // 32x32 transform
-#if CONFIG_TX64X64
- TX_64X64, // 64x64 transform
-#endif // CONFIG_TX64X64
- TX_4X8, // 4x8 transform
- TX_8X4, // 8x4 transform
- TX_8X16, // 8x16 transform
- TX_16X8, // 16x8 transform
- TX_16X32, // 16x32 transform
- TX_32X16, // 32x16 transform
-#if CONFIG_TX64X64
- TX_32X64, // 32x64 transform
- TX_64X32, // 64x32 transform
-#endif // CONFIG_TX64X64
- TX_4X16, // 4x16 transform
- TX_16X4, // 16x4 transform
- TX_8X32, // 8x32 transform
- TX_32X8, // 32x8 transform
-#if CONFIG_TX64X64
+ TX_4X4, // 4x4 transform
+ TX_8X8, // 8x8 transform
+ TX_16X16, // 16x16 transform
+ TX_32X32, // 32x32 transform
+ TX_64X64, // 64x64 transform
+ TX_4X8, // 4x8 transform
+ TX_8X4, // 8x4 transform
+ TX_8X16, // 8x16 transform
+ TX_16X8, // 16x8 transform
+ TX_16X32, // 16x32 transform
+ TX_32X16, // 32x16 transform
+ TX_32X64, // 32x64 transform
+ TX_64X32, // 64x32 transform
+ TX_4X16, // 4x16 transform
+ TX_16X4, // 16x4 transform
+ TX_8X32, // 8x32 transform
+ TX_32X8, // 32x8 transform
TX_16X64, // 16x64 transform
TX_64X16, // 64x16 transform
-#endif // CONFIG_TX64X64
TX_SIZES_ALL, // Includes rectangular transforms
TX_SIZES = TX_4X8, // Does NOT include rectangular transforms
-#if CONFIG_TX64X64
TX_SIZES_LARGEST = TX_64X64,
-#else
- TX_SIZES_LARGEST = TX_32X32,
-#endif
TX_INVALID = 255 // Invalid transform size
#if defined(_MSC_VER)
};
@@ -226,7 +216,7 @@
#define MAX_TX_CATS (TX_SIZES - TX_SIZE_CTX_MIN)
#define MAX_TX_DEPTH 2
-#define MAX_TX_SIZE_LOG2 (5 + CONFIG_TX64X64)
+#define MAX_TX_SIZE_LOG2 (6)
#define MAX_TX_SIZE (1 << MAX_TX_SIZE_LOG2)
#define MIN_TX_SIZE_LOG2 2
#define MIN_TX_SIZE (1 << MIN_TX_SIZE_LOG2)
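
MAX_TX_SIZE_LOG2 is now a plain 6, so MAX_TX_SIZE is 64 and any fixed-size scratch buffer dimensioned with it grows to 64x64. A trivial standalone illustration; the buffer name is made up.

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_TX_SIZE_LOG2 6 /* now unconditional, as defined above */
    #define MAX_TX_SIZE (1 << MAX_TX_SIZE_LOG2)

    int main(void) {
      /* A coefficient scratch buffer sized for the largest transform. */
      int32_t scratch[MAX_TX_SIZE * MAX_TX_SIZE];
      printf("MAX_TX_SIZE=%d, %zu coefficients, %zu bytes\n", MAX_TX_SIZE,
             sizeof(scratch) / sizeof(scratch[0]), sizeof(scratch));
      return 0;
    }
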
diff --git a/av1/common/idct.c b/av1/common/idct.c
index dc5cf45..17af82c 100644
--- a/av1/common/idct.c
+++ b/av1/common/idct.c
@@ -52,13 +52,11 @@
}
}
-#if CONFIG_TX64X64
static void iidtx64_c(const tran_low_t *input, tran_low_t *output) {
for (int i = 0; i < 64; ++i) {
output[i] = (tran_low_t)dct_const_round_shift(input[i] * 4 * Sqrt2);
}
}
-#endif // CONFIG_TX64X64
// For use in lieu of ADST
static void ihalfright32_c(const tran_low_t *input, tran_low_t *output) {
@@ -74,7 +72,6 @@
// Note overall scaling factor is 4 times orthogonal
}
-#if CONFIG_TX64X64
static const int8_t inv_stage_range_col_dct_64[12] = { 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0 };
static const int8_t inv_stage_range_row_dct_64[12] = { 0, 0, 0, 0, 0, 0,
@@ -112,7 +109,6 @@
aom_idct32_c(inputhalf, output + 32);
// Note overall scaling factor is 4 * sqrt(2) times orthogonal
}
-#endif // CONFIG_TX64X64
#define FLIPUD_PTR(dest, stride, size) \
do { \
@@ -158,7 +154,6 @@
}
}
-#if CONFIG_TX64X64
static void highbd_inv_idtx_add_c(const tran_low_t *input, uint8_t *dest8,
int stride, int bsx, int bsy, TX_TYPE tx_type,
int bd) {
@@ -175,7 +170,6 @@
}
}
}
-#endif // CONFIG_TX64X64
void av1_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride,
const TxfmParam *txfm_param) {
@@ -979,7 +973,6 @@
}
}
-#if CONFIG_TX64X64
void av1_iht64x64_4096_add_c(const tran_low_t *input, uint8_t *dest, int stride,
const TxfmParam *txfm_param) {
const TX_TYPE tx_type = txfm_param->tx_type;
@@ -1293,7 +1286,6 @@
}
}
}
-#endif // CONFIG_TX64X64
// idct
static void av1_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest,
@@ -1427,7 +1419,6 @@
txfm_param->tx_type, txfm_param->bd);
}
-#if CONFIG_TX64X64
static void highbd_inv_txfm_add_32x64(const tran_low_t *input, uint8_t *dest,
int stride, const TxfmParam *txfm_param) {
const int32_t *src = cast_to_int32(input);
@@ -1455,7 +1446,6 @@
av1_inv_txfm2d_add_64x16_c(src, CONVERT_TO_SHORTPTR(dest), stride,
txfm_param->tx_type, txfm_param->bd);
}
-#endif // CONFIG_TX64X64
static void highbd_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest,
int stride, const TxfmParam *txfm_param) {
@@ -1565,7 +1555,6 @@
}
}
-#if CONFIG_TX64X64
static void highbd_inv_txfm_add_64x64(const tran_low_t *input, uint8_t *dest,
int stride, const TxfmParam *txfm_param) {
int bd = txfm_param->bd;
@@ -1605,7 +1594,6 @@
default: assert(0); break;
}
}
-#endif // CONFIG_TX64X64
static void init_txfm_param(const MACROBLOCKD *xd, int plane, TX_SIZE tx_size,
TX_TYPE tx_type, int eob, int reduced_tx_set,
@@ -1656,7 +1644,6 @@
case TX_32X16:
highbd_inv_txfm_add_32x16(input, dest, stride, txfm_param);
break;
-#if CONFIG_TX64X64
case TX_64X64:
highbd_inv_txfm_add_64x64(input, dest, stride, txfm_param);
break;
@@ -1672,7 +1659,6 @@
case TX_64X16:
highbd_inv_txfm_add_64x16(input, dest, stride, txfm_param);
break;
-#endif // CONFIG_TX64X64
case TX_4X4:
// this is like av1_short_idct4x4 but has a special case around eob<=1
// which is significant (not just an optimization) for the lossless
diff --git a/av1/common/onyxc_int.h b/av1/common/onyxc_int.h
index 4fd933f..6bbcdac 100644
--- a/av1/common/onyxc_int.h
+++ b/av1/common/onyxc_int.h
@@ -1287,13 +1287,7 @@
#if CONFIG_EXT_PARTITION
case 128:
#endif // CONFIG_EXT_PARTITION
- case 64:
-#if CONFIG_TX64X64
- return TX_64X64;
-#else
- return TX_32X32;
-#endif // CONFIG_TX64X64
- break;
+ case 64: return TX_64X64; break;
case 32: return TX_32X32; break;
case 16: return TX_16X16; break;
case 8: return TX_8X8; break;
diff --git a/av1/common/reconintra.c b/av1/common/reconintra.c
index 8f34ea0..ab5863b 100644
--- a/av1/common/reconintra.c
+++ b/av1/common/reconintra.c
@@ -573,7 +573,6 @@
static void av1_init_intra_predictors_internal(void) {
assert(NELEMENTS(mode_to_angle_map) == INTRA_MODES);
-#if CONFIG_TX64X64
#define INIT_RECTANGULAR(p, type) \
p[TX_4X8] = aom_##type##_predictor_4x8; \
p[TX_8X4] = aom_##type##_predictor_8x4; \
@@ -589,34 +588,13 @@
p[TX_32X8] = aom_##type##_predictor_32x8; \
p[TX_16X64] = aom_##type##_predictor_16x64; \
p[TX_64X16] = aom_##type##_predictor_64x16;
-#else
-#define INIT_RECTANGULAR(p, type) \
- p[TX_4X8] = aom_##type##_predictor_4x8; \
- p[TX_8X4] = aom_##type##_predictor_8x4; \
- p[TX_8X16] = aom_##type##_predictor_8x16; \
- p[TX_16X8] = aom_##type##_predictor_16x8; \
- p[TX_16X32] = aom_##type##_predictor_16x32; \
- p[TX_32X16] = aom_##type##_predictor_32x16; \
- p[TX_4X16] = aom_##type##_predictor_4x16; \
- p[TX_16X4] = aom_##type##_predictor_16x4; \
- p[TX_8X32] = aom_##type##_predictor_8x32; \
- p[TX_32X8] = aom_##type##_predictor_32x8;
-#endif // CONFIG_TX64X64
-#if CONFIG_TX64X64
#define INIT_NO_4X4(p, type) \
p[TX_8X8] = aom_##type##_predictor_8x8; \
p[TX_16X16] = aom_##type##_predictor_16x16; \
p[TX_32X32] = aom_##type##_predictor_32x32; \
p[TX_64X64] = aom_##type##_predictor_64x64; \
INIT_RECTANGULAR(p, type)
-#else
-#define INIT_NO_4X4(p, type) \
- p[TX_8X8] = aom_##type##_predictor_8x8; \
- p[TX_16X16] = aom_##type##_predictor_16x16; \
- p[TX_32X32] = aom_##type##_predictor_32x32; \
- INIT_RECTANGULAR(p, type)
-#endif // CONFIG_TX64X64
#define INIT_ALL_SIZES(p, type) \
p[TX_4X4] = aom_##type##_predictor_4x4; \
diff --git a/av1/common/scan.c b/av1/common/scan.c
index 61ea93a..927babe 100644
--- a/av1/common/scan.c
+++ b/av1/common/scan.c
@@ -1133,7 +1133,6 @@
};
#endif
-#if CONFIG_TX64X64
// Approximate versions, which reuse the 32x32 scan and assume rest of the
// coeffs to be zero.
#define default_scan_32x64 default_scan_32x32
@@ -1141,7 +1140,6 @@
#define default_scan_64x64 default_scan_32x32
#define default_scan_16x64 default_scan_16x32
#define default_scan_64x16 default_scan_32x16
-#endif // CONFIG_TX64X64
// Neighborhood 2-tuples for various scans and blocksizes,
// in {top, left} order for each position in corresponding scan order.
@@ -3007,13 +3005,11 @@
};
#endif
-#if CONFIG_TX64X64
#define default_scan_32x64_neighbors default_scan_32x32_neighbors
#define default_scan_64x32_neighbors default_scan_32x32_neighbors
#define default_scan_64x64_neighbors default_scan_32x32_neighbors
#define default_scan_16x64_neighbors default_scan_16x32_neighbors
#define default_scan_64x16_neighbors default_scan_32x16_neighbors
-#endif // CONFIG_TX64X64
#if CONFIG_LV_MAP
DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_4x4[16]) = {
@@ -4131,22 +4127,18 @@
};
#endif
-#if CONFIG_TX64X64
#define av1_default_iscan_32x64 av1_default_iscan_32x32
#define av1_default_iscan_64x32 av1_default_iscan_32x32
#define av1_default_iscan_64x64 av1_default_iscan_32x32
#define av1_default_iscan_16x64 av1_default_iscan_16x32
#define av1_default_iscan_64x16 av1_default_iscan_32x16
-#endif // CONFIG_TX64X64
const SCAN_ORDER av1_default_scan_orders[TX_SIZES] = {
{ default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
{ default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
{ default_scan_16x16, av1_default_iscan_16x16, default_scan_16x16_neighbors },
{ default_scan_32x32, av1_default_iscan_32x32, default_scan_32x32_neighbors },
-#if CONFIG_TX64X64
{ default_scan_64x64, av1_default_iscan_64x64, default_scan_64x64_neighbors },
-#endif // CONFIG_TX64X64
};
const SCAN_ORDER av1_intra_scan_orders[TX_SIZES_ALL][TX_TYPES] = {
@@ -4242,7 +4234,6 @@
{ mrow_scan_32x32, av1_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
{ mcol_scan_32x32, av1_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
},
-#if CONFIG_TX64X64
{
// TX_64X64
{ default_scan_64x64, av1_default_iscan_64x64,
@@ -4278,7 +4269,6 @@
{ default_scan_64x64, av1_default_iscan_64x64,
default_scan_64x64_neighbors },
},
-#endif // CONFIG_TX64X64
{
// TX_4X8
{ default_scan_4x8, av1_default_iscan_4x8, default_scan_4x8_neighbors },
@@ -4421,7 +4411,6 @@
{ mrow_scan_32x16, av1_mrow_iscan_32x16, mrow_scan_32x16_neighbors },
{ mcol_scan_32x16, av1_mcol_iscan_32x16, mcol_scan_32x16_neighbors },
},
-#if CONFIG_TX64X64
{
// TX_32X64
{ default_scan_32x64, av1_default_iscan_32x64,
@@ -4492,7 +4481,6 @@
{ default_scan_64x32, av1_default_iscan_64x32,
default_scan_64x32_neighbors },
},
-#endif // CONFIG_TX64X64
{
// TX_4X16
{ default_scan_4x16, av1_default_iscan_4x16,
@@ -4597,7 +4585,6 @@
{ mrow_scan_32x8, av1_mrow_iscan_32x8, mrow_scan_32x8_neighbors },
{ mcol_scan_32x8, av1_mcol_iscan_32x8, mcol_scan_32x8_neighbors },
},
-#if CONFIG_TX64X64
{
// TX_16X64
{ default_scan_16x64, av1_default_iscan_16x64,
@@ -4668,7 +4655,6 @@
{ default_scan_64x16, av1_default_iscan_64x16,
default_scan_64x16_neighbors },
},
-#endif // CONFIG_TX64X64
};
const SCAN_ORDER av1_inter_scan_orders[TX_SIZES_ALL][TX_TYPES] = {
@@ -4781,7 +4767,6 @@
{ mrow_scan_32x32, av1_mrow_iscan_32x32, mrow_scan_32x32_neighbors },
{ mcol_scan_32x32, av1_mcol_iscan_32x32, mcol_scan_32x32_neighbors },
},
-#if CONFIG_TX64X64
{
// TX_64X64
{ default_scan_64x64, av1_default_iscan_64x64,
@@ -4817,7 +4802,6 @@
{ default_scan_64x64, av1_default_iscan_64x64,
default_scan_64x64_neighbors },
},
-#endif // CONFIG_TX64X64
{
// TX_4X8
{ default_scan_4x8, av1_default_iscan_4x8, default_scan_4x8_neighbors },
@@ -4968,7 +4952,6 @@
{ mrow_scan_32x16, av1_mrow_iscan_32x16, mrow_scan_32x16_neighbors },
{ mcol_scan_32x16, av1_mcol_iscan_32x16, mcol_scan_32x16_neighbors },
},
-#if CONFIG_TX64X64
{
// TX_32X64
{ default_scan_32x64, av1_default_iscan_32x64,
@@ -5039,7 +5022,6 @@
{ default_scan_64x32, av1_default_iscan_64x32,
default_scan_64x32_neighbors },
},
-#endif // CONFIG_TX64X64
{
// TX_4X16
{ default_scan_4x16, av1_default_iscan_4x16,
@@ -5172,7 +5154,6 @@
{ mrow_scan_32x8, av1_mrow_iscan_32x8, mrow_scan_32x8_neighbors },
{ mcol_scan_32x8, av1_mcol_iscan_32x8, mcol_scan_32x8_neighbors },
},
-#if CONFIG_TX64X64
{
// TX_16X64
{ default_scan_16x64, av1_default_iscan_16x64,
@@ -5243,5 +5224,4 @@
{ default_scan_64x16, av1_default_iscan_64x16,
default_scan_64x16_neighbors },
},
-#endif // CONFIG_TX64X64
};
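
The scan tables above follow the comment earlier in this file's hunks: 64-point sizes reuse the corresponding 32-point scan and assume the remaining coefficients are zero, which lines up with the 1024-coefficient cap in av1_get_max_eob. A standalone toy mapping that makes the "never beyond the 32x32 corner" property concrete; a row-major order stands in for the real default scan table.

    #include <stdio.h>

    /* Toy scan for a 64x64 block under the reuse-32x32-scan rule: only the 1024
     * positions of the upper-left 32x32 corner are ever visited. */
    static void scan_to_pos_64x64(int scan_idx, int *row, int *col) {
      *row = scan_idx / 32;
      *col = scan_idx % 32;
    }

    int main(void) {
      int row, col;
      scan_to_pos_64x64(1023, &row, &col);         /* last coded position        */
      printf("scan 1023 -> (%d, %d)\n", row, col); /* (31, 31), never past 32x32 */
      return 0;
    }
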
diff --git a/av1/common/token_cdfs.h b/av1/common/token_cdfs.h
index aa60a65..8ae5673 100644
--- a/av1/common/token_cdfs.h
+++ b/av1/common/token_cdfs.h
@@ -70,7 +70,6 @@
{ AOM_CDF2(15766) },
{ AOM_CDF2(26756) },
{ AOM_CDF2(29848) } },
-#if CONFIG_TX64X64
{ { AOM_CDF2(21611) },
{ AOM_CDF2(5461) },
{ AOM_CDF2(28087) },
@@ -84,7 +83,6 @@
{ AOM_CDF2(16384) },
{ AOM_CDF2(16384) },
{ AOM_CDF2(16384) } },
-#endif
},
{
{ { AOM_CDF2(31574) },
@@ -139,7 +137,6 @@
{ AOM_CDF2(8536) },
{ AOM_CDF2(24702) },
{ AOM_CDF2(30147) } },
-#if CONFIG_TX64X64
{ { AOM_CDF2(26684) },
{ AOM_CDF2(3208) },
{ AOM_CDF2(17644) },
@@ -153,7 +150,6 @@
{ AOM_CDF2(16384) },
{ AOM_CDF2(16384) },
{ AOM_CDF2(16384) } },
-#endif
},
{ { { AOM_CDF2(29941) },
{ AOM_CDF2(9962) },
@@ -207,7 +203,6 @@
{ AOM_CDF2(3421) },
{ AOM_CDF2(21167) },
{ AOM_CDF2(30760) } },
-#if CONFIG_TX64X64
{ { AOM_CDF2(30272) },
{ AOM_CDF2(7198) },
{ AOM_CDF2(22483) },
@@ -220,9 +215,7 @@
{ AOM_CDF2(16384) },
{ AOM_CDF2(16384) },
{ AOM_CDF2(16384) },
- { AOM_CDF2(16384) } }
-#endif
- },
+ { AOM_CDF2(16384) } } },
{ { { AOM_CDF2(30198) },
{ AOM_CDF2(7807) },
{ AOM_CDF2(9940) },
@@ -275,7 +268,6 @@
{ AOM_CDF2(1762) },
{ AOM_CDF2(14824) },
{ AOM_CDF2(26877) } },
-#if CONFIG_TX64X64
{ { AOM_CDF2(32374) },
{ AOM_CDF2(14704) },
{ AOM_CDF2(24688) },
@@ -288,9 +280,7 @@
{ AOM_CDF2(16384) },
{ AOM_CDF2(16384) },
{ AOM_CDF2(16384) },
- { AOM_CDF2(16384) } }
-#endif
- },
+ { AOM_CDF2(16384) } } },
};
static const aom_cdf_prob
@@ -860,7 +850,6 @@
{ AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) } } },
-#if CONFIG_TX64X64
{ { { AOM_CDF4(5206, 12013, 15839) },
{ AOM_CDF4(25287, 29818, 31398) },
{ AOM_CDF4(8979, 17375, 20640) },
@@ -944,9 +933,7 @@
{ AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) },
- { AOM_CDF4(8192, 16384, 24576) } } }
-#endif
- },
+ { AOM_CDF4(8192, 16384, 24576) } } } },
{ { { { AOM_CDF4(4124, 11059, 15633) },
{ AOM_CDF4(18394, 30703, 32248) },
{ AOM_CDF4(13398, 27103, 30987) },
@@ -1283,7 +1270,6 @@
{ AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) } } },
-#if CONFIG_TX64X64
{ { { AOM_CDF4(3024, 7914, 11762) },
{ AOM_CDF4(18244, 25626, 28987) },
{ AOM_CDF4(7226, 16180, 20955) },
@@ -1367,9 +1353,7 @@
{ AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) },
- { AOM_CDF4(8192, 16384, 24576) } } }
-#endif
- },
+ { AOM_CDF4(8192, 16384, 24576) } } } },
{ { { { AOM_CDF4(4002, 11946, 17010) },
{ AOM_CDF4(19950, 30806, 32374) },
{ AOM_CDF4(12490, 26525, 30902) },
@@ -1706,7 +1690,6 @@
{ AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) } } },
-#if CONFIG_TX64X64
{ { { AOM_CDF4(2558, 8044, 11808) },
{ AOM_CDF4(19701, 26697, 29994) },
{ AOM_CDF4(9122, 21302, 25761) },
@@ -1790,9 +1773,7 @@
{ AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) },
- { AOM_CDF4(8192, 16384, 24576) } } }
-#endif
- },
+ { AOM_CDF4(8192, 16384, 24576) } } } },
{ { { { AOM_CDF4(5036, 14072, 20572) },
{ AOM_CDF4(22265, 32059, 32670) },
{ AOM_CDF4(12492, 27733, 31751) },
@@ -2129,7 +2110,6 @@
{ AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) } } },
-#if CONFIG_TX64X64
{ { { AOM_CDF4(3604, 9595, 14487) },
{ AOM_CDF4(22240, 30555, 32310) },
{ AOM_CDF4(11403, 24475, 29405) },
@@ -2213,188 +2193,173 @@
{ AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) },
- { AOM_CDF4(8192, 16384, 24576) } } }
-#endif
- },
+ { AOM_CDF4(8192, 16384, 24576) } } } },
};
-static const aom_cdf_prob
- av1_default_coeff_base_eob_multi_cdfs[TOKEN_CDF_Q_CTXS][TX_SIZES]
- [PLANE_TYPES][SIG_COEF_CONTEXTS_EOB]
- [CDF_SIZE(NUM_BASE_LEVELS + 1)] = {
- { { { { AOM_CDF3(20597, 31183) },
- { AOM_CDF3(31273, 32627) },
- { AOM_CDF3(31825, 32680) },
- { AOM_CDF3(27224, 30407) } },
- { { AOM_CDF3(28394, 31807) },
- { AOM_CDF3(31660, 32638) },
- { AOM_CDF3(32058, 32667) },
- { AOM_CDF3(31564, 32518) } } },
- { { { AOM_CDF3(15187, 30433) },
- { AOM_CDF3(32209, 32689) },
- { AOM_CDF3(32628, 32745) },
- { AOM_CDF3(29289, 32300) } },
- { { AOM_CDF3(26024, 31469) },
- { AOM_CDF3(31866, 32605) },
- { AOM_CDF3(32328, 32726) },
- { AOM_CDF3(32366, 32703) } } },
- { { { AOM_CDF3(5796, 25887) },
- { AOM_CDF3(32280, 32684) },
- { AOM_CDF3(32718, 32756) },
- { AOM_CDF3(32341, 32689) } },
- { { AOM_CDF3(17040, 25504) },
- { AOM_CDF3(32367, 32732) },
- { AOM_CDF3(32104, 32702) },
- { AOM_CDF3(32536, 32731) } } },
- { { { AOM_CDF3(10486, 20733) },
- { AOM_CDF3(31320, 32449) },
- { AOM_CDF3(32303, 32722) },
- { AOM_CDF3(32517, 32704) } },
- { { AOM_CDF3(27125, 29437) },
- { AOM_CDF3(31058, 31898) },
- { AOM_CDF3(31343, 32056) },
- { AOM_CDF3(31555, 32742) } } },
-#if CONFIG_TX64X64
- { { { AOM_CDF3(16384, 19661) },
- { AOM_CDF3(32276, 32589) },
- { AOM_CDF3(30554, 32547) },
- { AOM_CDF3(32549, 32713) } },
- { { AOM_CDF3(10923, 21845) },
- { AOM_CDF3(10923, 21845) },
- { AOM_CDF3(10923, 21845) },
- { AOM_CDF3(10923, 21845) } } }
-#endif
- },
- { { { { AOM_CDF3(20771, 31640) },
- { AOM_CDF3(31727, 32593) },
- { AOM_CDF3(31842, 32688) },
- { AOM_CDF3(29451, 31534) } },
- { { AOM_CDF3(27842, 31527) },
- { AOM_CDF3(31658, 32647) },
- { AOM_CDF3(32108, 32699) },
- { AOM_CDF3(32038, 32662) } } },
- { { { AOM_CDF3(17666, 30666) },
- { AOM_CDF3(32305, 32679) },
- { AOM_CDF3(32591, 32751) },
- { AOM_CDF3(31284, 32594) } },
- { { AOM_CDF3(28021, 31540) },
- { AOM_CDF3(31750, 32595) },
- { AOM_CDF3(32403, 32725) },
- { AOM_CDF3(32425, 32722) } } },
- { { { AOM_CDF3(12056, 29503) },
- { AOM_CDF3(32342, 32686) },
- { AOM_CDF3(32719, 32756) },
- { AOM_CDF3(32590, 32738) } },
- { { AOM_CDF3(21009, 28705) },
- { AOM_CDF3(31851, 32617) },
- { AOM_CDF3(32262, 32743) },
- { AOM_CDF3(32547, 32721) } } },
- { { { AOM_CDF3(3890, 15934) },
- { AOM_CDF3(30573, 31743) },
- { AOM_CDF3(32509, 32749) },
- { AOM_CDF3(32625, 32744) } },
- { { AOM_CDF3(22429, 27807) },
- { AOM_CDF3(30492, 31850) },
- { AOM_CDF3(31374, 32419) },
- { AOM_CDF3(31007, 32717) } } },
-#if CONFIG_TX64X64
- { { { AOM_CDF3(4830, 5875) },
- { AOM_CDF3(29974, 31257) },
- { AOM_CDF3(31928, 32628) },
- { AOM_CDF3(31925, 32734) } },
- { { AOM_CDF3(10923, 21845) },
- { AOM_CDF3(10923, 21845) },
- { AOM_CDF3(10923, 21845) },
- { AOM_CDF3(10923, 21845) } } }
-#endif
- },
- { { { { AOM_CDF3(22884, 31609) },
- { AOM_CDF3(31723, 32529) },
- { AOM_CDF3(31905, 32699) },
- { AOM_CDF3(30839, 32165) } },
- { { AOM_CDF3(27149, 31080) },
- { AOM_CDF3(31720, 32693) },
- { AOM_CDF3(32371, 32721) },
- { AOM_CDF3(32387, 32743) } } },
- { { { AOM_CDF3(21572, 31546) },
- { AOM_CDF3(32053, 32643) },
- { AOM_CDF3(32497, 32733) },
- { AOM_CDF3(32270, 32655) } },
- { { AOM_CDF3(27952, 31297) },
- { AOM_CDF3(31833, 32635) },
- { AOM_CDF3(32444, 32747) },
- { AOM_CDF3(32513, 32742) } } },
- { { { AOM_CDF3(16700, 29042) },
- { AOM_CDF3(32185, 32665) },
- { AOM_CDF3(32667, 32740) },
- { AOM_CDF3(32619, 32747) } },
- { { AOM_CDF3(25356, 30314) },
- { AOM_CDF3(31958, 32606) },
- { AOM_CDF3(32451, 32715) },
- { AOM_CDF3(32602, 32747) } } },
- { { { AOM_CDF3(12218, 22903) },
- { AOM_CDF3(31421, 32164) },
- { AOM_CDF3(32454, 32748) },
- { AOM_CDF3(32174, 32570) } },
- { { AOM_CDF3(22272, 28451) },
- { AOM_CDF3(30802, 32293) },
- { AOM_CDF3(30093, 32099) },
- { AOM_CDF3(30899, 32725) } } },
-#if CONFIG_TX64X64
- { { { AOM_CDF3(5670, 16547) },
- { AOM_CDF3(28947, 30241) },
- { AOM_CDF3(32105, 32635) },
- { AOM_CDF3(32316, 32693) } },
- { { AOM_CDF3(10923, 21845) },
- { AOM_CDF3(10923, 21845) },
- { AOM_CDF3(10923, 21845) },
- { AOM_CDF3(10923, 21845) } } }
-#endif
- },
- { { { { AOM_CDF3(23556, 31195) },
- { AOM_CDF3(31953, 32674) },
- { AOM_CDF3(32129, 32703) },
- { AOM_CDF3(31963, 32607) } },
- { { AOM_CDF3(27624, 31497) },
- { AOM_CDF3(32234, 32756) },
- { AOM_CDF3(32558, 32740) },
- { AOM_CDF3(32640, 32747) } } },
- { { { AOM_CDF3(23114, 31301) },
- { AOM_CDF3(32277, 32729) },
- { AOM_CDF3(32435, 32723) },
- { AOM_CDF3(32395, 32706) } },
- { { AOM_CDF3(27864, 31264) },
- { AOM_CDF3(31905, 32658) },
- { AOM_CDF3(32506, 32731) },
- { AOM_CDF3(32618, 32730) } } },
- { { { AOM_CDF3(21525, 30206) },
- { AOM_CDF3(32186, 32691) },
- { AOM_CDF3(32572, 32724) },
- { AOM_CDF3(32656, 32744) } },
- { { AOM_CDF3(27055, 30657) },
- { AOM_CDF3(32025, 32586) },
- { AOM_CDF3(31279, 32272) },
- { AOM_CDF3(31858, 32313) } } },
- { { { AOM_CDF3(21773, 28489) },
- { AOM_CDF3(32495, 32732) },
- { AOM_CDF3(32332, 32728) },
- { AOM_CDF3(30806, 32184) } },
- { { AOM_CDF3(27061, 30422) },
- { AOM_CDF3(32361, 32731) },
- { AOM_CDF3(21845, 27307) },
- { AOM_CDF3(31694, 32231) } } },
-#if CONFIG_TX64X64
- { { { AOM_CDF3(12922, 23047) },
- { AOM_CDF3(31948, 32657) },
- { AOM_CDF3(32165, 32567) },
- { AOM_CDF3(31591, 32376) } },
- { { AOM_CDF3(10923, 21845) },
- { AOM_CDF3(10923, 21845) },
- { AOM_CDF3(10923, 21845) },
- { AOM_CDF3(10923, 21845) } } }
-#endif
- },
- };
+static const aom_cdf_prob av1_default_coeff_base_eob_multi_cdfs
+ [TOKEN_CDF_Q_CTXS][TX_SIZES][PLANE_TYPES][SIG_COEF_CONTEXTS_EOB]
+ [CDF_SIZE(NUM_BASE_LEVELS + 1)] = {
+ { { { { AOM_CDF3(20597, 31183) },
+ { AOM_CDF3(31273, 32627) },
+ { AOM_CDF3(31825, 32680) },
+ { AOM_CDF3(27224, 30407) } },
+ { { AOM_CDF3(28394, 31807) },
+ { AOM_CDF3(31660, 32638) },
+ { AOM_CDF3(32058, 32667) },
+ { AOM_CDF3(31564, 32518) } } },
+ { { { AOM_CDF3(15187, 30433) },
+ { AOM_CDF3(32209, 32689) },
+ { AOM_CDF3(32628, 32745) },
+ { AOM_CDF3(29289, 32300) } },
+ { { AOM_CDF3(26024, 31469) },
+ { AOM_CDF3(31866, 32605) },
+ { AOM_CDF3(32328, 32726) },
+ { AOM_CDF3(32366, 32703) } } },
+ { { { AOM_CDF3(5796, 25887) },
+ { AOM_CDF3(32280, 32684) },
+ { AOM_CDF3(32718, 32756) },
+ { AOM_CDF3(32341, 32689) } },
+ { { AOM_CDF3(17040, 25504) },
+ { AOM_CDF3(32367, 32732) },
+ { AOM_CDF3(32104, 32702) },
+ { AOM_CDF3(32536, 32731) } } },
+ { { { AOM_CDF3(10486, 20733) },
+ { AOM_CDF3(31320, 32449) },
+ { AOM_CDF3(32303, 32722) },
+ { AOM_CDF3(32517, 32704) } },
+ { { AOM_CDF3(27125, 29437) },
+ { AOM_CDF3(31058, 31898) },
+ { AOM_CDF3(31343, 32056) },
+ { AOM_CDF3(31555, 32742) } } },
+ { { { AOM_CDF3(16384, 19661) },
+ { AOM_CDF3(32276, 32589) },
+ { AOM_CDF3(30554, 32547) },
+ { AOM_CDF3(32549, 32713) } },
+ { { AOM_CDF3(10923, 21845) },
+ { AOM_CDF3(10923, 21845) },
+ { AOM_CDF3(10923, 21845) },
+ { AOM_CDF3(10923, 21845) } } } },
+ { { { { AOM_CDF3(20771, 31640) },
+ { AOM_CDF3(31727, 32593) },
+ { AOM_CDF3(31842, 32688) },
+ { AOM_CDF3(29451, 31534) } },
+ { { AOM_CDF3(27842, 31527) },
+ { AOM_CDF3(31658, 32647) },
+ { AOM_CDF3(32108, 32699) },
+ { AOM_CDF3(32038, 32662) } } },
+ { { { AOM_CDF3(17666, 30666) },
+ { AOM_CDF3(32305, 32679) },
+ { AOM_CDF3(32591, 32751) },
+ { AOM_CDF3(31284, 32594) } },
+ { { AOM_CDF3(28021, 31540) },
+ { AOM_CDF3(31750, 32595) },
+ { AOM_CDF3(32403, 32725) },
+ { AOM_CDF3(32425, 32722) } } },
+ { { { AOM_CDF3(12056, 29503) },
+ { AOM_CDF3(32342, 32686) },
+ { AOM_CDF3(32719, 32756) },
+ { AOM_CDF3(32590, 32738) } },
+ { { AOM_CDF3(21009, 28705) },
+ { AOM_CDF3(31851, 32617) },
+ { AOM_CDF3(32262, 32743) },
+ { AOM_CDF3(32547, 32721) } } },
+ { { { AOM_CDF3(3890, 15934) },
+ { AOM_CDF3(30573, 31743) },
+ { AOM_CDF3(32509, 32749) },
+ { AOM_CDF3(32625, 32744) } },
+ { { AOM_CDF3(22429, 27807) },
+ { AOM_CDF3(30492, 31850) },
+ { AOM_CDF3(31374, 32419) },
+ { AOM_CDF3(31007, 32717) } } },
+ { { { AOM_CDF3(4830, 5875) },
+ { AOM_CDF3(29974, 31257) },
+ { AOM_CDF3(31928, 32628) },
+ { AOM_CDF3(31925, 32734) } },
+ { { AOM_CDF3(10923, 21845) },
+ { AOM_CDF3(10923, 21845) },
+ { AOM_CDF3(10923, 21845) },
+ { AOM_CDF3(10923, 21845) } } } },
+ { { { { AOM_CDF3(22884, 31609) },
+ { AOM_CDF3(31723, 32529) },
+ { AOM_CDF3(31905, 32699) },
+ { AOM_CDF3(30839, 32165) } },
+ { { AOM_CDF3(27149, 31080) },
+ { AOM_CDF3(31720, 32693) },
+ { AOM_CDF3(32371, 32721) },
+ { AOM_CDF3(32387, 32743) } } },
+ { { { AOM_CDF3(21572, 31546) },
+ { AOM_CDF3(32053, 32643) },
+ { AOM_CDF3(32497, 32733) },
+ { AOM_CDF3(32270, 32655) } },
+ { { AOM_CDF3(27952, 31297) },
+ { AOM_CDF3(31833, 32635) },
+ { AOM_CDF3(32444, 32747) },
+ { AOM_CDF3(32513, 32742) } } },
+ { { { AOM_CDF3(16700, 29042) },
+ { AOM_CDF3(32185, 32665) },
+ { AOM_CDF3(32667, 32740) },
+ { AOM_CDF3(32619, 32747) } },
+ { { AOM_CDF3(25356, 30314) },
+ { AOM_CDF3(31958, 32606) },
+ { AOM_CDF3(32451, 32715) },
+ { AOM_CDF3(32602, 32747) } } },
+ { { { AOM_CDF3(12218, 22903) },
+ { AOM_CDF3(31421, 32164) },
+ { AOM_CDF3(32454, 32748) },
+ { AOM_CDF3(32174, 32570) } },
+ { { AOM_CDF3(22272, 28451) },
+ { AOM_CDF3(30802, 32293) },
+ { AOM_CDF3(30093, 32099) },
+ { AOM_CDF3(30899, 32725) } } },
+ { { { AOM_CDF3(5670, 16547) },
+ { AOM_CDF3(28947, 30241) },
+ { AOM_CDF3(32105, 32635) },
+ { AOM_CDF3(32316, 32693) } },
+ { { AOM_CDF3(10923, 21845) },
+ { AOM_CDF3(10923, 21845) },
+ { AOM_CDF3(10923, 21845) },
+ { AOM_CDF3(10923, 21845) } } } },
+ { { { { AOM_CDF3(23556, 31195) },
+ { AOM_CDF3(31953, 32674) },
+ { AOM_CDF3(32129, 32703) },
+ { AOM_CDF3(31963, 32607) } },
+ { { AOM_CDF3(27624, 31497) },
+ { AOM_CDF3(32234, 32756) },
+ { AOM_CDF3(32558, 32740) },
+ { AOM_CDF3(32640, 32747) } } },
+ { { { AOM_CDF3(23114, 31301) },
+ { AOM_CDF3(32277, 32729) },
+ { AOM_CDF3(32435, 32723) },
+ { AOM_CDF3(32395, 32706) } },
+ { { AOM_CDF3(27864, 31264) },
+ { AOM_CDF3(31905, 32658) },
+ { AOM_CDF3(32506, 32731) },
+ { AOM_CDF3(32618, 32730) } } },
+ { { { AOM_CDF3(21525, 30206) },
+ { AOM_CDF3(32186, 32691) },
+ { AOM_CDF3(32572, 32724) },
+ { AOM_CDF3(32656, 32744) } },
+ { { AOM_CDF3(27055, 30657) },
+ { AOM_CDF3(32025, 32586) },
+ { AOM_CDF3(31279, 32272) },
+ { AOM_CDF3(31858, 32313) } } },
+ { { { AOM_CDF3(21773, 28489) },
+ { AOM_CDF3(32495, 32732) },
+ { AOM_CDF3(32332, 32728) },
+ { AOM_CDF3(30806, 32184) } },
+ { { AOM_CDF3(27061, 30422) },
+ { AOM_CDF3(32361, 32731) },
+ { AOM_CDF3(21845, 27307) },
+ { AOM_CDF3(31694, 32231) } } },
+ { { { AOM_CDF3(12922, 23047) },
+ { AOM_CDF3(31948, 32657) },
+ { AOM_CDF3(32165, 32567) },
+ { AOM_CDF3(31591, 32376) } },
+ { { AOM_CDF3(10923, 21845) },
+ { AOM_CDF3(10923, 21845) },
+ { AOM_CDF3(10923, 21845) },
+ { AOM_CDF3(10923, 21845) } } } },
+ };
static const aom_cdf_prob av1_default_eob_extra_cdfs
[TOKEN_CDF_Q_CTXS][TX_SIZES][PLANE_TYPES][EOB_COEF_CONTEXTS][CDF_SIZE(2)] =
@@ -2463,7 +2428,6 @@
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(16384) } } },
-#if CONFIG_TX64X64
{ { { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(19181) }, { AOM_CDF2(16733) }, { AOM_CDF2(18422) },
{ AOM_CDF2(15124) }, { AOM_CDF2(22686) }, { AOM_CDF2(20549) },
@@ -2479,9 +2443,7 @@
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
- { AOM_CDF2(16384) } } }
-#endif
- },
+ { AOM_CDF2(16384) } } } },
{ { { { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(14911) }, { AOM_CDF2(15414) }, { AOM_CDF2(9035) },
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
@@ -2546,7 +2508,6 @@
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(16384) } } },
-#if CONFIG_TX64X64
{ { { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(26354) }, { AOM_CDF2(20723) }, { AOM_CDF2(20647) },
{ AOM_CDF2(15541) }, { AOM_CDF2(21027) }, { AOM_CDF2(22805) },
@@ -2562,9 +2523,7 @@
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
- { AOM_CDF2(16384) } } }
-#endif
- },
+ { AOM_CDF2(16384) } } } },
{ { { { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(15912) }, { AOM_CDF2(16777) }, { AOM_CDF2(13160) },
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
@@ -2629,7 +2588,6 @@
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(16384) } } },
-#if CONFIG_TX64X64
{ { { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(27530) }, { AOM_CDF2(20747) }, { AOM_CDF2(21651) },
{ AOM_CDF2(18812) }, { AOM_CDF2(17457) }, { AOM_CDF2(22176) },
@@ -2645,9 +2603,7 @@
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
- { AOM_CDF2(16384) } } }
-#endif
- },
+ { AOM_CDF2(16384) } } } },
{ { { { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(15381) }, { AOM_CDF2(17118) }, { AOM_CDF2(18874) },
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
@@ -2712,7 +2668,6 @@
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(16384) } } },
-#if CONFIG_TX64X64
{ { { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(25699) }, { AOM_CDF2(21787) }, { AOM_CDF2(21964) },
{ AOM_CDF2(17686) }, { AOM_CDF2(21433) }, { AOM_CDF2(16384) },
@@ -2728,9 +2683,7 @@
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
- { AOM_CDF2(16384) } } }
-#endif
- },
+ { AOM_CDF2(16384) } } } },
};
static const aom_cdf_prob av1_default_coeff_lps_multi_cdfs
@@ -2904,7 +2857,6 @@
{ AOM_CDF4(14407, 21936, 25727) },
{ AOM_CDF4(13899, 21999, 26141) },
{ AOM_CDF4(9849, 16897, 23246) } } },
-#if CONFIG_TX64X64
{ { { AOM_CDF4(8192, 16384, 24576) }, { AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) }, { AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) }, { AOM_CDF4(8192, 16384, 24576) },
@@ -2926,9 +2878,7 @@
{ AOM_CDF4(8192, 16384, 24576) }, { AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) }, { AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) }, { AOM_CDF4(8192, 16384, 24576) },
- { AOM_CDF4(8192, 16384, 24576) } } }
-#endif
- },
+ { AOM_CDF4(8192, 16384, 24576) } } } },
{ { { { AOM_CDF4(19774, 26358, 29082) },
{ AOM_CDF4(14793, 22128, 26184) },
{ AOM_CDF4(8992, 15426, 19832) },
@@ -3097,7 +3047,6 @@
{ AOM_CDF4(11133, 19325, 24471) },
{ AOM_CDF4(10412, 18221, 22356) },
{ AOM_CDF4(7025, 12331, 16310) } } },
-#if CONFIG_TX64X64
{ { { AOM_CDF4(8192, 16384, 24576) }, { AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) }, { AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) }, { AOM_CDF4(8192, 16384, 24576) },
@@ -3119,9 +3068,7 @@
{ AOM_CDF4(8192, 16384, 24576) }, { AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) }, { AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) }, { AOM_CDF4(8192, 16384, 24576) },
- { AOM_CDF4(8192, 16384, 24576) } } }
-#endif
- },
+ { AOM_CDF4(8192, 16384, 24576) } } } },
{ { { { AOM_CDF4(18958, 25307, 28119) },
{ AOM_CDF4(14936, 22106, 26063) },
{ AOM_CDF4(8199, 15365, 20003) },
@@ -3290,7 +3237,6 @@
{ AOM_CDF4(10835, 17970, 24576) },
{ AOM_CDF4(10639, 20852, 24257) },
{ AOM_CDF4(6642, 11661, 15498) } } },
-#if CONFIG_TX64X64
{ { { AOM_CDF4(8192, 16384, 24576) }, { AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) }, { AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) }, { AOM_CDF4(8192, 16384, 24576) },
@@ -3312,9 +3258,7 @@
{ AOM_CDF4(8192, 16384, 24576) }, { AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) }, { AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) }, { AOM_CDF4(8192, 16384, 24576) },
- { AOM_CDF4(8192, 16384, 24576) } } }
-#endif
- },
+ { AOM_CDF4(8192, 16384, 24576) } } } },
{ { { { AOM_CDF4(20286, 26127, 29060) },
{ AOM_CDF4(16807, 23937, 27782) },
{ AOM_CDF4(10158, 17838, 23197) },
@@ -3483,7 +3427,6 @@
{ AOM_CDF4(9362, 18725, 23406) },
{ AOM_CDF4(10923, 16384, 21845) },
{ AOM_CDF4(10923, 18204, 21845) } } },
-#if CONFIG_TX64X64
{ { { AOM_CDF4(8192, 16384, 24576) }, { AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) }, { AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) }, { AOM_CDF4(8192, 16384, 24576) },
@@ -3505,9 +3448,7 @@
{ AOM_CDF4(8192, 16384, 24576) }, { AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) }, { AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) }, { AOM_CDF4(8192, 16384, 24576) },
- { AOM_CDF4(8192, 16384, 24576) } } }
-#endif
- },
+ { AOM_CDF4(8192, 16384, 24576) } } } },
};
#else
static const aom_cdf_prob av1_default_txb_skip_cdf[TX_SIZES][TXB_SKIP_CONTEXTS]
@@ -3576,7 +3517,6 @@
{ AOM_CDF2(128 * 220) },
},
-#if CONFIG_TX64X64
{
{ AOM_CDF2(128 * 243) },
{ AOM_CDF2(128 * 46) },
@@ -3592,7 +3532,6 @@
{ AOM_CDF2(128 * 100) },
{ AOM_CDF2(128 * 220) },
},
-#endif
};
static const aom_cdf_prob av1_default_dc_sign_cdf[PLANE_TYPES][DC_SIGN_CONTEXTS]
@@ -3999,7 +3938,6 @@
{ AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) } } },
-#if CONFIG_TX64X64
{ { { AOM_CDF4(4262, 11039, 15994) },
{ AOM_CDF4(20472, 29927, 31691) },
{ AOM_CDF4(11874, 24067, 28293) },
@@ -4084,56 +4022,50 @@
{ AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) } } }
-#endif
};
-static const aom_cdf_prob
- av1_default_coeff_base_eob_multi[TX_SIZES][PLANE_TYPES]
- [SIG_COEF_CONTEXTS_EOB]
- [CDF_SIZE(NUM_BASE_LEVELS + 1)] = {
- { { { AOM_CDF3(26406, 31407) },
- { AOM_CDF3(31541, 32669) },
- { AOM_CDF3(31825, 32677) },
- { AOM_CDF3(31793, 32623) } },
- { { AOM_CDF3(28736, 32063) },
- { AOM_CDF3(31937, 32727) },
- { AOM_CDF3(32407, 32754) },
- { AOM_CDF3(32532, 32756) } } },
- { { { AOM_CDF3(25824, 31212) },
- { AOM_CDF3(32123, 32717) },
- { AOM_CDF3(32414, 32744) },
- { AOM_CDF3(32274, 32715) } },
- { { AOM_CDF3(29276, 32110) },
- { AOM_CDF3(31990, 32707) },
- { AOM_CDF3(32408, 32752) },
- { AOM_CDF3(32626, 32756) } } },
- { { { AOM_CDF3(24649, 30865) },
- { AOM_CDF3(32220, 32717) },
- { AOM_CDF3(32643, 32756) },
- { AOM_CDF3(32670, 32756) } },
- { { AOM_CDF3(27932, 31415) },
- { AOM_CDF3(32361, 32726) },
- { AOM_CDF3(32681, 32756) },
- { AOM_CDF3(32725, 32756) } } },
- { { { AOM_CDF3(26749, 31719) },
- { AOM_CDF3(32418, 32740) },
- { AOM_CDF3(32670, 32756) },
- { AOM_CDF3(32717, 32756) } },
- { { AOM_CDF3(29650, 32131) },
- { AOM_CDF3(32017, 32613) },
- { AOM_CDF3(32753, 32757) },
- { AOM_CDF3(32721, 32756) } } },
-#if CONFIG_TX64X64
- { { { AOM_CDF3(26028, 31648) },
- { AOM_CDF3(31035, 32098) },
- { AOM_CDF3(32251, 32736) },
- { AOM_CDF3(32314, 32741) } },
- { { AOM_CDF3(10923, 21845) },
- { AOM_CDF3(10923, 21845) },
- { AOM_CDF3(10923, 21845) },
- { AOM_CDF3(10923, 21845) } } }
-#endif
- };
+static const aom_cdf_prob av1_default_coeff_base_eob_multi
+ [TX_SIZES][PLANE_TYPES][SIG_COEF_CONTEXTS_EOB]
+ [CDF_SIZE(NUM_BASE_LEVELS + 1)] = { { { { AOM_CDF3(26406, 31407) },
+ { AOM_CDF3(31541, 32669) },
+ { AOM_CDF3(31825, 32677) },
+ { AOM_CDF3(31793, 32623) } },
+ { { AOM_CDF3(28736, 32063) },
+ { AOM_CDF3(31937, 32727) },
+ { AOM_CDF3(32407, 32754) },
+ { AOM_CDF3(32532, 32756) } } },
+ { { { AOM_CDF3(25824, 31212) },
+ { AOM_CDF3(32123, 32717) },
+ { AOM_CDF3(32414, 32744) },
+ { AOM_CDF3(32274, 32715) } },
+ { { AOM_CDF3(29276, 32110) },
+ { AOM_CDF3(31990, 32707) },
+ { AOM_CDF3(32408, 32752) },
+ { AOM_CDF3(32626, 32756) } } },
+ { { { AOM_CDF3(24649, 30865) },
+ { AOM_CDF3(32220, 32717) },
+ { AOM_CDF3(32643, 32756) },
+ { AOM_CDF3(32670, 32756) } },
+ { { AOM_CDF3(27932, 31415) },
+ { AOM_CDF3(32361, 32726) },
+ { AOM_CDF3(32681, 32756) },
+ { AOM_CDF3(32725, 32756) } } },
+ { { { AOM_CDF3(26749, 31719) },
+ { AOM_CDF3(32418, 32740) },
+ { AOM_CDF3(32670, 32756) },
+ { AOM_CDF3(32717, 32756) } },
+ { { AOM_CDF3(29650, 32131) },
+ { AOM_CDF3(32017, 32613) },
+ { AOM_CDF3(32753, 32757) },
+ { AOM_CDF3(32721, 32756) } } },
+ { { { AOM_CDF3(26028, 31648) },
+ { AOM_CDF3(31035, 32098) },
+ { AOM_CDF3(32251, 32736) },
+ { AOM_CDF3(32314, 32741) } },
+ { { AOM_CDF3(10923, 21845) },
+ { AOM_CDF3(10923, 21845) },
+ { AOM_CDF3(10923, 21845) },
+ { AOM_CDF3(10923, 21845) } } } };
static const aom_cdf_prob av1_default_eob_extra_cdf
[TX_SIZES][PLANE_TYPES][EOB_COEF_CONTEXTS][CDF_SIZE(2)] = {
@@ -4252,7 +4184,6 @@
{ AOM_CDF2(128 * 128) }, { AOM_CDF2(128 * 128) },
},
},
-#if CONFIG_TX64X64
{
{
{ AOM_CDF2(128 * 128) }, { AOM_CDF2(128 * 128) },
@@ -4281,7 +4212,6 @@
{ AOM_CDF2(128 * 128) }, { AOM_CDF2(128 * 128) },
},
},
-#endif
};
static const aom_cdf_prob
@@ -4375,7 +4305,6 @@
{ AOM_CDF4(19117, 26550, 29504) }, { AOM_CDF4(15829, 23871, 27889) },
{ AOM_CDF4(12944, 21092, 25623) }, { AOM_CDF4(11432, 19053, 23889) },
{ AOM_CDF4(7507, 13964, 18716) } } },
-#if CONFIG_TX64X64
{ { { AOM_CDF4(8192, 16384, 24576) }, { AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) }, { AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) }, { AOM_CDF4(8192, 16384, 24576) },
@@ -4398,7 +4327,6 @@
{ AOM_CDF4(8192, 16384, 24576) }, { AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) }, { AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(8192, 16384, 24576) } } }
-#endif
};
#endif // CONFIG_Q_ADAPT_PROBS
#else
diff --git a/av1/common/txb_common.c b/av1/common/txb_common.c
index db989a2..1d70be8 100644
--- a/av1/common/txb_common.c
+++ b/av1/common/txb_common.c
@@ -123,14 +123,12 @@
{ 6, 6, 21, 21, 21 },
{ 6, 21, 21, 21, 21 },
{ 21, 21, 21, 21, 21 } },
-#if CONFIG_TX64X64
// TX_64X64
{ { 0, 1, 6, 6, 21 },
{ 1, 6, 6, 21, 21 },
{ 6, 6, 21, 21, 21 },
{ 6, 21, 21, 21, 21 },
{ 21, 21, 21, 21, 21 } },
-#endif // CONFIG_TX64X64
// TX_4X8
{ { 0, 11, 11, 11, 0 },
{ 11, 11, 11, 11, 0 },
@@ -167,7 +165,6 @@
{ 16, 16, 21, 21, 21 },
{ 16, 16, 21, 21, 21 },
{ 16, 16, 21, 21, 21 } },
-#if CONFIG_TX64X64
// TX_32X64
{ { 0, 11, 11, 11, 11 },
{ 11, 11, 11, 11, 11 },
@@ -180,7 +177,6 @@
{ 16, 16, 21, 21, 21 },
{ 16, 16, 21, 21, 21 },
{ 16, 16, 21, 21, 21 } },
-#endif // CONFIG_TX64X64
// TX_4X16
{ { 0, 11, 11, 11, 0 },
{ 11, 11, 11, 11, 0 },
@@ -205,7 +201,6 @@
{ 16, 16, 21, 21, 21 },
{ 16, 16, 21, 21, 21 },
{ 16, 16, 21, 21, 21 } },
-#if CONFIG_TX64X64
// TX_16X64
{ { 0, 11, 11, 11, 11 },
{ 11, 11, 11, 11, 11 },
@@ -218,7 +213,6 @@
{ 16, 16, 21, 21, 21 },
{ 16, 16, 21, 21, 21 },
{ 16, 16, 21, 21, 21 } }
-#endif // CONFIG_TX64X64
};
void av1_init_lv_map(AV1_COMMON *cm) {
diff --git a/av1/common/x86/av1_inv_txfm_sse2.c b/av1/common/x86/av1_inv_txfm_sse2.c
index 9eaadb2..8ba8210 100644
--- a/av1/common/x86/av1_inv_txfm_sse2.c
+++ b/av1/common/x86/av1_inv_txfm_sse2.c
@@ -1726,7 +1726,6 @@
}
}
-#if CONFIG_TX64X64
static void iidentity64_new_sse2(const __m128i *input, __m128i *output,
int8_t cos_bit) {
(void)cos_bit;
@@ -1744,7 +1743,6 @@
output[i] = _mm_packs_epi32(c_lo, c_hi);
}
}
-#endif
static INLINE __m128i lowbd_get_recon_8x8_sse2(const __m128i pred,
__m128i res) {
@@ -1784,9 +1782,7 @@
{ idct8_new_sse2, iadst8_new_sse2, iadst8_new_sse2, iidentity8_new_sse2 },
{ idct16_new_sse2, iadst16_new_sse2, iadst16_new_sse2, iidentity16_new_sse2 },
{ idct32_new_sse2, NULL, NULL, iidentity32_new_sse2 },
-#if CONFIG_TX64X64
{ idct64_new_sse2, NULL, NULL, iidentity64_new_sse2 },
-#endif
};
// TODO(binpengsmail@gmail.com): Replace 1D txfm functions with functions which
@@ -1982,7 +1978,6 @@
lowbd_inv_txfm2d_add_internal_sse2(input, output, stride, tx_type, TX_32X32);
}
-#if CONFIG_TX64X64
void av1_lowbd_inv_txfm2d_add_64x64_sse2(const int32_t *input, uint8_t *output,
int stride, TX_TYPE tx_type, int bd) {
(void)bd;
@@ -2002,7 +1997,6 @@
lowbd_inv_txfm2d_add_internal_sse2(mod_input, output, stride, tx_type,
TX_64X64);
}
-#endif
void av1_lowbd_inv_txfm2d_add_4x8_sse2(const int32_t *input, uint8_t *output,
int stride, TX_TYPE tx_type, int bd) {
@@ -2102,7 +2096,6 @@
lowbd_inv_txfm2d_add_internal_sse2(input, output, stride, tx_type, TX_32X16);
}
-#if CONFIG_TX64X64
void av1_lowbd_inv_txfm2d_add_32x64_sse2(const int32_t *input, uint8_t *output,
int stride, TX_TYPE tx_type, int bd) {
(void)bd;
@@ -2130,7 +2123,6 @@
lowbd_inv_txfm2d_add_internal_sse2(mod_input, output, stride, tx_type,
TX_64X32);
}
-#endif
void av1_lowbd_inv_txfm2d_add_4x16_sse2(const int32_t *input, uint8_t *output,
int stride, TX_TYPE tx_type, int bd) {
@@ -2234,7 +2226,7 @@
(void)bd;
lowbd_inv_txfm2d_add_internal_sse2(input, output, stride, tx_type, TX_32X8);
}
-#if CONFIG_TX64X64
+
void av1_lowbd_inv_txfm2d_add_16x64_sse2(const int32_t *input, uint8_t *output,
int stride, TX_TYPE tx_type, int bd) {
(void)bd;
@@ -2262,7 +2254,6 @@
lowbd_inv_txfm2d_add_internal_sse2(mod_input, output, stride, tx_type,
TX_64X16);
}
-#endif
typedef void (*inv_txfm_func)(const int32_t *input, uint8_t *output, int stride,
TX_TYPE tx_type, int bd);
@@ -2272,27 +2263,21 @@
av1_lowbd_inv_txfm2d_add_8x8_sse2, // 8x8
av1_lowbd_inv_txfm2d_add_16x16_sse2, // 16x16
av1_lowbd_inv_txfm2d_add_32x32_sse2, // 32x32
-#if CONFIG_TX64X64
av1_lowbd_inv_txfm2d_add_64x64_sse2, // 64x64
-#endif // CONFIG_TX64X64
av1_lowbd_inv_txfm2d_add_4x8_sse2, // 4x8
av1_lowbd_inv_txfm2d_add_8x4_sse2, // 8x4
av1_lowbd_inv_txfm2d_add_8x16_sse2, // 8x16
av1_lowbd_inv_txfm2d_add_16x8_sse2, // 16x8
av1_lowbd_inv_txfm2d_add_16x32_sse2, // 16x32
av1_lowbd_inv_txfm2d_add_32x16_sse2, // 32x16
-#if CONFIG_TX64X64
av1_lowbd_inv_txfm2d_add_32x64_sse2, // 32x64
av1_lowbd_inv_txfm2d_add_64x32_sse2, // 64x32
-#endif // CONFIG_TX64X64
av1_lowbd_inv_txfm2d_add_4x16_sse2, // 4x16
av1_lowbd_inv_txfm2d_add_16x4_sse2, // 16x4
av1_lowbd_inv_txfm2d_add_8x32_sse2, // 8x32
av1_lowbd_inv_txfm2d_add_32x8_sse2, // 32x8
-#if CONFIG_TX64X64
av1_lowbd_inv_txfm2d_add_16x64_sse2, // 16x64
av1_lowbd_inv_txfm2d_add_64x16_sse2, // 64x16
-#endif // CONFIG_TX64X64
};
void av1_inv_txfm_add_sse2(const tran_low_t *dqcoeff, uint8_t *dst, int stride,
diff --git a/av1/common/x86/av1_txfm_sse2.h b/av1/common/x86/av1_txfm_sse2.h
index c2a03a4..77c84e6 100644
--- a/av1/common/x86/av1_txfm_sse2.h
+++ b/av1/common/x86/av1_txfm_sse2.h
@@ -293,10 +293,8 @@
void av1_lowbd_inv_txfm2d_add_32x32_sse2(const int32_t *input, uint8_t *output,
int stride, TX_TYPE tx_type, int bd);
-#if CONFIG_TX64X64
void av1_lowbd_inv_txfm2d_add_64x64_sse2(const int32_t *input, uint8_t *output,
int stride, TX_TYPE tx_type, int bd);
-#endif
void av1_lowbd_inv_txfm2d_add_4x8_sse2(const int32_t *input, uint8_t *output,
int stride, TX_TYPE tx_type, int bd);
@@ -316,13 +314,11 @@
void av1_lowbd_inv_txfm2d_add_32x16_sse2(const int32_t *input, uint8_t *output,
int stride, TX_TYPE tx_type, int bd);
-#if CONFIG_TX64X64
void av1_lowbd_inv_txfm2d_add_32x64_sse2(const int32_t *input, uint8_t *output,
int stride, TX_TYPE tx_type, int bd);
void av1_lowbd_inv_txfm2d_add_64x32_sse2(const int32_t *input, uint8_t *output,
int stride, TX_TYPE tx_type, int bd);
-#endif
void av1_lowbd_inv_txfm2d_add_4x16_sse2(const int32_t *input, uint8_t *output,
int stride, TX_TYPE tx_type, int bd);
@@ -336,13 +332,11 @@
void av1_lowbd_inv_txfm2d_add_32x8_sse2(const int32_t *input, uint8_t *output,
int stride, TX_TYPE tx_type, int bd);
-#if CONFIG_TX64X64
void av1_lowbd_inv_txfm2d_add_16x64_sse2(const int32_t *input, uint8_t *output,
int stride, TX_TYPE tx_type, int bd);
void av1_lowbd_inv_txfm2d_add_64x16_sse2(const int32_t *input, uint8_t *output,
int stride, TX_TYPE tx_type, int bd);
-#endif
#ifdef __cplusplus
}
diff --git a/av1/common/x86/highbd_inv_txfm_sse4.c b/av1/common/x86/highbd_inv_txfm_sse4.c
index cf976d9..7eed8cd 100644
--- a/av1/common/x86/highbd_inv_txfm_sse4.c
+++ b/av1/common/x86/highbd_inv_txfm_sse4.c
@@ -1623,7 +1623,6 @@
}
}
-#if CONFIG_TX64X64
static void load_buffer_64x64_lower_32x32(const int32_t *coeff, __m128i *in) {
int i, j;
@@ -2264,4 +2263,3 @@
break;
}
}
-#endif
diff --git a/av1/encoder/av1_fwd_txfm1d.c b/av1/encoder/av1_fwd_txfm1d.c
index aaa3a5d..f0bf190 100644
--- a/av1/encoder/av1_fwd_txfm1d.c
+++ b/av1/encoder/av1_fwd_txfm1d.c
@@ -1546,7 +1546,6 @@
range_check(0, input, output, 32, stage_range[0]);
}
-#if CONFIG_TX64X64
void av1_fidentity64_c(const int32_t *input, int32_t *output, int8_t cos_bit,
const int8_t *stage_range) {
(void)cos_bit;
@@ -1555,9 +1554,7 @@
assert(stage_range[0] + NewSqrt2Bits <= 32);
range_check(0, input, output, 64, stage_range[0]);
}
-#endif // CONFIG_TX64X64
-#if CONFIG_TX64X64
void av1_fdct64_new(const int32_t *input, int32_t *output, int8_t cos_bit,
const int8_t *stage_range) {
const int32_t size = 64;
@@ -2348,4 +2345,3 @@
bf1[63] = bf0[63];
range_check(stage, input, bf1, size, stage_range[stage]);
}
-#endif // CONFIG_TX64X64
diff --git a/av1/encoder/av1_fwd_txfm1d.h b/av1/encoder/av1_fwd_txfm1d.h
index be3a6d9..f4573dd 100644
--- a/av1/encoder/av1_fwd_txfm1d.h
+++ b/av1/encoder/av1_fwd_txfm1d.h
@@ -26,11 +26,8 @@
const int8_t *stage_range);
void av1_fdct32_new(const int32_t *input, int32_t *output, int8_t cos_bit,
const int8_t *stage_range);
-#if CONFIG_TX64X64
void av1_fdct64_new(const int32_t *input, int32_t *output, int8_t cos_bit,
const int8_t *stage_range);
-#endif // CONFIG_TX64X64
-
void av1_fadst4_new(const int32_t *input, int32_t *output, int8_t cos_bit,
const int8_t *stage_range);
void av1_fadst8_new(const int32_t *input, int32_t *output, int8_t cos_bit,
@@ -47,11 +44,8 @@
const int8_t *stage_range);
void av1_fidentity32_c(const int32_t *input, int32_t *output, int8_t cos_bit,
const int8_t *stage_range);
-#if CONFIG_TX64X64
void av1_fidentity64_c(const int32_t *input, int32_t *output, int8_t cos_bit,
const int8_t *stage_range);
-#endif // CONFIG_TX64X64
-
#ifdef __cplusplus
}
#endif
diff --git a/av1/encoder/av1_fwd_txfm2d.c b/av1/encoder/av1_fwd_txfm2d.c
index 7835de6..001ffc6 100644
--- a/av1/encoder/av1_fwd_txfm2d.c
+++ b/av1/encoder/av1_fwd_txfm2d.c
@@ -27,9 +27,7 @@
case TXFM_TYPE_DCT8: return av1_fdct8_new;
case TXFM_TYPE_DCT16: return av1_fdct16_new;
case TXFM_TYPE_DCT32: return av1_fdct32_new;
-#if CONFIG_TX64X64
case TXFM_TYPE_DCT64: return av1_fdct64_new;
-#endif // CONFIG_TX64X64
case TXFM_TYPE_ADST4: return av1_fadst4_new;
case TXFM_TYPE_ADST8: return av1_fadst8_new;
case TXFM_TYPE_ADST16: return av1_fadst16_new;
@@ -38,9 +36,7 @@
case TXFM_TYPE_IDENTITY8: return av1_fidentity8_c;
case TXFM_TYPE_IDENTITY16: return av1_fidentity16_c;
case TXFM_TYPE_IDENTITY32: return av1_fidentity32_c;
-#if CONFIG_TX64X64
case TXFM_TYPE_IDENTITY64: return av1_fidentity64_c;
-#endif // CONFIG_TX64X64
default: assert(0); return NULL;
}
}
@@ -314,7 +310,6 @@
fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf, bd);
}
-#if CONFIG_TX64X64
void av1_fwd_txfm2d_64x64_c(const int16_t *input, int32_t *output, int stride,
TX_TYPE tx_type, int bd) {
int32_t txfm_buf[64 * 64];
@@ -419,48 +414,33 @@
memcpy(output + row * 32, output + row * 64, 32 * sizeof(*output));
}
}
-#endif // CONFIG_TX64X64
static const int8_t fwd_shift_4x4[3] = { 2, 0, 0 };
static const int8_t fwd_shift_8x8[3] = { 2, -1, 0 };
static const int8_t fwd_shift_16x16[3] = { 2, -2, 0 };
static const int8_t fwd_shift_32x32[3] = { 2, -4, 0 };
-#if CONFIG_TX64X64
static const int8_t fwd_shift_64x64[3] = { 0, -2, -2 };
-#endif
static const int8_t fwd_shift_4x8[3] = { 2, -1, 0 };
static const int8_t fwd_shift_8x4[3] = { 2, -1, 0 };
static const int8_t fwd_shift_8x16[3] = { 2, -2, 0 };
static const int8_t fwd_shift_16x8[3] = { 2, -2, 0 };
static const int8_t fwd_shift_16x32[3] = { 2, -4, 0 };
static const int8_t fwd_shift_32x16[3] = { 2, -4, 0 };
-#if CONFIG_TX64X64
static const int8_t fwd_shift_32x64[3] = { 0, -2, -2 };
static const int8_t fwd_shift_64x32[3] = { 2, -4, -2 };
-#endif
static const int8_t fwd_shift_4x16[3] = { 2, -1, 0 };
static const int8_t fwd_shift_16x4[3] = { 2, -1, 0 };
static const int8_t fwd_shift_8x32[3] = { 2, -2, 0 };
static const int8_t fwd_shift_32x8[3] = { 2, -2, 0 };
-#if CONFIG_TX64X64
static const int8_t fwd_shift_16x64[3] = { 0, -2, 0 };
static const int8_t fwd_shift_64x16[3] = { 2, -4, 0 };
-#endif // CONFIG_TX64X64
const int8_t *fwd_txfm_shift_ls[TX_SIZES_ALL] = {
fwd_shift_4x4, fwd_shift_8x8, fwd_shift_16x16, fwd_shift_32x32,
-#if CONFIG_TX64X64
- fwd_shift_64x64,
-#endif // CONFIG_TX64X64
- fwd_shift_4x8, fwd_shift_8x4, fwd_shift_8x16, fwd_shift_16x8,
- fwd_shift_16x32, fwd_shift_32x16,
-#if CONFIG_TX64X64
- fwd_shift_32x64, fwd_shift_64x32,
-#endif // CONFIG_TX64X64
- fwd_shift_4x16, fwd_shift_16x4, fwd_shift_8x32, fwd_shift_32x8,
-#if CONFIG_TX64X64
- fwd_shift_16x64, fwd_shift_64x16,
-#endif // CONFIG_TX64X64
+ fwd_shift_64x64, fwd_shift_4x8, fwd_shift_8x4, fwd_shift_8x16,
+ fwd_shift_16x8, fwd_shift_16x32, fwd_shift_32x16, fwd_shift_32x64,
+ fwd_shift_64x32, fwd_shift_4x16, fwd_shift_16x4, fwd_shift_8x32,
+ fwd_shift_32x8, fwd_shift_16x64, fwd_shift_64x16,
};
const int8_t fwd_cos_bit_col[MAX_TXWH_IDX /*txw_idx*/]
diff --git a/av1/encoder/av1_quantize.c b/av1/encoder/av1_quantize.c
index f767ddb..7b3ca2f 100644
--- a/av1/encoder/av1_quantize.c
+++ b/av1/encoder/av1_quantize.c
@@ -272,7 +272,6 @@
*eob_ptr = eob + 1;
}
-#if CONFIG_TX64X64
void quantize_dc_64x64_nuq(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t *zbin_ptr,
const int16_t quant, const int16_t quant_shift,
@@ -329,7 +328,6 @@
}
*eob_ptr = eob + 1;
}
-#endif // CONFIG_TX64X64
void quantize_nuq_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t *zbin_ptr,
@@ -464,7 +462,6 @@
*eob_ptr = eob + 1;
}
-#if CONFIG_TX64X64
void quantize_64x64_nuq_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t *zbin_ptr,
const int16_t *quant_ptr,
@@ -531,7 +528,6 @@
}
*eob_ptr = eob + 1;
}
-#endif // CONFIG_TX64X64
#endif // CONFIG_NEW_QUANT
void av1_quantize_skip(intptr_t n_coeffs, tran_low_t *qcoeff_ptr,
@@ -656,7 +652,6 @@
dequant_ptr, eob_ptr, scan, iscan, NULL, NULL, 1);
}
-#if CONFIG_TX64X64
void av1_quantize_fp_64x64_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t *zbin_ptr,
const int16_t *round_ptr, const int16_t *quant_ptr,
@@ -668,7 +663,6 @@
quant_ptr, quant_shift_ptr, qcoeff_ptr, dqcoeff_ptr,
dequant_ptr, eob_ptr, scan, iscan, NULL, NULL, 2);
}
-#endif // CONFIG_TX64X64
void av1_quantize_fp_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
const MACROBLOCK_PLANE *p, tran_low_t *qcoeff_ptr,
@@ -712,27 +706,23 @@
}
break;
case 1:
-#if CONFIG_TX64X64
if (qparam->tx_size == TX_16X64 || qparam->tx_size == TX_64X16)
av1_quantize_fp_32x32_c(coeff_ptr, n_coeffs, skip_block, p->zbin_QTX,
p->round_fp_QTX, p->quant_fp_QTX,
p->quant_shift_QTX, qcoeff_ptr, dqcoeff_ptr,
p->dequant_QTX, eob_ptr, sc->scan, sc->iscan);
else
-#endif // CONFIG_RECT_TX_EXT && CONFIG_TX64X64
av1_quantize_fp_32x32(coeff_ptr, n_coeffs, skip_block, p->zbin_QTX,
p->round_fp_QTX, p->quant_fp_QTX,
p->quant_shift_QTX, qcoeff_ptr, dqcoeff_ptr,
p->dequant_QTX, eob_ptr, sc->scan, sc->iscan);
break;
-#if CONFIG_TX64X64
case 2:
av1_quantize_fp_64x64(coeff_ptr, n_coeffs, skip_block, p->zbin_QTX,
p->round_fp_QTX, p->quant_fp_QTX,
p->quant_shift_QTX, qcoeff_ptr, dqcoeff_ptr,
p->dequant_QTX, eob_ptr, sc->scan, sc->iscan);
break;
-#endif // CONFIG_TX64X64
default: assert(0);
}
#if CONFIG_AOM_QM
@@ -771,14 +761,12 @@
qcoeff_ptr, dqcoeff_ptr, p->dequant_QTX, eob_ptr,
sc->scan, sc->iscan);
break;
-#if CONFIG_TX64X64
case 2:
aom_quantize_b_64x64(coeff_ptr, n_coeffs, skip_block, p->zbin_QTX,
p->round_QTX, p->quant_QTX, p->quant_shift_QTX,
qcoeff_ptr, dqcoeff_ptr, p->dequant_QTX, eob_ptr,
sc->scan, sc->iscan);
break;
-#endif // CONFIG_TX64X64
default: assert(0);
}
#if CONFIG_AOM_QM
@@ -825,7 +813,7 @@
// obsolete skip_block
const int skip_block = 0;
(void)sc;
- assert(qparam->log_scale >= 0 && qparam->log_scale < (2 + CONFIG_TX64X64));
+ assert(qparam->log_scale >= 0 && qparam->log_scale < 3);
#if CONFIG_AOM_QM
const qm_val_t *qm_ptr = qparam->qmatrix;
const qm_val_t *iqm_ptr = qparam->iqmatrix;
@@ -888,7 +876,6 @@
qcoeff_ptr, dqcoeff_ptr, eob_ptr, sc->scan, qm_ptr,
iqm_ptr);
break;
-#if CONFIG_TX64X64
case 2:
quantize_64x64_nuq(coeff_ptr, n_coeffs, skip_block, p->zbin_QTX,
p->quant_QTX, p->quant_shift_QTX, p->dequant_QTX, dq,
@@ -902,7 +889,6 @@
qcoeff_ptr, dqcoeff_ptr, eob_ptr, sc->scan, qm_ptr,
iqm_ptr);
break;
-#endif // CONFIG_TX64X64
default: assert(0);
}
}
@@ -954,7 +940,6 @@
#endif // CONFIG_AOM_QM
qcoeff_ptr, dqcoeff_ptr, eob_ptr, sc->scan, qm_ptr, iqm_ptr);
break;
-#if CONFIG_TX64X64
case 2:
quantize_64x64_fp_nuq(
coeff_ptr, n_coeffs, skip_block, p->quant_fp_QTX, p->dequant_QTX, dq,
@@ -966,7 +951,6 @@
#endif // CONFIG_AOM_QM
qcoeff_ptr, dqcoeff_ptr, eob_ptr, sc->scan, qm_ptr, iqm_ptr);
break;
-#endif // CONFIG_TX64X64
default: assert(0);
}
}
@@ -1018,7 +1002,6 @@
#endif // CONFIG_AOM_QM
qcoeff_ptr, dqcoeff_ptr, eob_ptr, qm_ptr, iqm_ptr);
break;
-#if CONFIG_TX64X64
case 2:
quantize_dc_64x64_fp_nuq(
coeff_ptr, n_coeffs, skip_block, p->quant_fp_QTX[0],
@@ -1030,7 +1013,6 @@
#endif // CONFIG_AOM_QM
qcoeff_ptr, dqcoeff_ptr, eob_ptr, qm_ptr, iqm_ptr);
break;
-#endif // CONFIG_TX64X64
default: assert(0);
}
}
@@ -1117,14 +1099,12 @@
p->quant_QTX, p->quant_shift_QTX, qcoeff_ptr, dqcoeff_ptr,
p->dequant_QTX, eob_ptr, sc->scan, sc->iscan);
break;
-#if CONFIG_TX64X64
case 2:
aom_highbd_quantize_b_64x64(
coeff_ptr, n_coeffs, skip_block, p->zbin_QTX, p->round_QTX,
p->quant_QTX, p->quant_shift_QTX, qcoeff_ptr, dqcoeff_ptr,
p->dequant_QTX, eob_ptr, sc->scan, sc->iscan);
break;
-#endif // CONFIG_TX64X64
default: assert(0);
}
#if CONFIG_AOM_QM
@@ -1477,7 +1457,6 @@
*eob_ptr = eob + 1;
}
-#if CONFIG_TX64X64
void highbd_quantize_64x64_nuq_c(
const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
const int16_t *zbin_ptr, const int16_t *quant_ptr,
@@ -1541,7 +1520,6 @@
}
*eob_ptr = eob + 1;
}
-#endif // CONFIG_TX64X64
void highbd_quantize_fp_nuq_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t *quant_ptr,
@@ -1628,7 +1606,6 @@
*eob_ptr = eob + 1;
}
-#if CONFIG_TX64X64
void highbd_quantize_dc_64x64_nuq(
const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
const int16_t *zbin_ptr, const int16_t quant, const int16_t quant_shift,
@@ -1681,7 +1658,6 @@
}
*eob_ptr = eob + 1;
}
-#endif // CONFIG_TX64X64
void av1_highbd_quantize_b_nuq_facade(
const tran_low_t *coeff_ptr, intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
@@ -1732,7 +1708,6 @@
#endif // CONFIG_AOM_QM
qcoeff_ptr, dqcoeff_ptr, eob_ptr, sc->scan, qm_ptr, iqm_ptr);
break;
-#if CONFIG_TX64X64
case 2:
highbd_quantize_64x64_nuq(
coeff_ptr, n_coeffs, skip_block, p->zbin_QTX, p->quant_QTX,
@@ -1745,7 +1720,6 @@
#endif // CONFIG_AOM_QM
qcoeff_ptr, dqcoeff_ptr, eob_ptr, sc->scan, qm_ptr, iqm_ptr);
break;
-#endif // CONFIG_TX64X64
default: assert(0);
}
}
@@ -1796,7 +1770,6 @@
#endif // CONFIG_AOM_QM
qcoeff_ptr, dqcoeff_ptr, eob_ptr, sc->scan, qm_ptr, iqm_ptr);
break;
-#if CONFIG_TX64X64
case 2:
highbd_quantize_64x64_fp_nuq(
coeff_ptr, n_coeffs, skip_block, p->quant_fp_QTX, p->dequant_QTX, dq,
@@ -1808,7 +1781,6 @@
#endif // CONFIG_AOM_QM
qcoeff_ptr, dqcoeff_ptr, eob_ptr, sc->scan, qm_ptr, iqm_ptr);
break;
-#endif // CONFIG_TX64X64
default: assert(0);
}
}
@@ -1860,7 +1832,6 @@
#endif // CONFIG_AOM_QM
qcoeff_ptr, dqcoeff_ptr, eob_ptr, qm_ptr, iqm_ptr);
break;
-#if CONFIG_TX64X64
case 2:
highbd_quantize_dc_64x64_fp_nuq(
coeff_ptr, n_coeffs, skip_block, p->quant_fp_QTX[0],
@@ -1872,7 +1843,6 @@
#endif // CONFIG_AOM_QM
qcoeff_ptr, dqcoeff_ptr, eob_ptr, qm_ptr, iqm_ptr);
break;
-#endif // CONFIG_TX64X64
default: assert(0);
}
}
diff --git a/av1/encoder/block.h b/av1/encoder/block.h
index 123f9af..646edb5 100644
--- a/av1/encoder/block.h
+++ b/av1/encoder/block.h
@@ -194,10 +194,8 @@
tx_size_rd_record_16X16[(MAX_MIB_SIZE >> 2) * (MAX_MIB_SIZE >> 2)];
TX_SIZE_RD_RECORD
tx_size_rd_record_32X32[(MAX_MIB_SIZE >> 3) * (MAX_MIB_SIZE >> 3)];
-#if CONFIG_TX64X64
TX_SIZE_RD_RECORD
tx_size_rd_record_64X64[(MAX_MIB_SIZE >> 4) * (MAX_MIB_SIZE >> 4)];
-#endif
MACROBLOCKD e_mbd;
MB_MODE_INFO_EXT *mbmi_ext;
diff --git a/av1/encoder/dct.c b/av1/encoder/dct.c
index b13e3d8..031ab00 100644
--- a/av1/encoder/dct.c
+++ b/av1/encoder/dct.c
@@ -1959,7 +1959,6 @@
}
}
-#if CONFIG_TX64X64
static void fidtx64(const tran_low_t *input, tran_low_t *output) {
int i;
for (i = 0; i < 64; ++i)
@@ -2284,7 +2283,6 @@
}
// Note: overall scale factor of transform is 4 times unitary
}
-#endif // CONFIG_TX64X64
// Forward identity transform.
void av1_fwd_idtx_c(const int16_t *src_diff, tran_low_t *coeff, int stride,
diff --git a/av1/encoder/encodeframe.c b/av1/encoder/encodeframe.c
index f8a2000..b4e2623 100644
--- a/av1/encoder/encodeframe.c
+++ b/av1/encoder/encodeframe.c
@@ -3642,9 +3642,7 @@
av1_zero(x->tx_size_rd_record_8X8);
av1_zero(x->tx_size_rd_record_16X16);
av1_zero(x->tx_size_rd_record_32X32);
-#if CONFIG_TX64X64
av1_zero(x->tx_size_rd_record_64X64);
-#endif
av1_zero(x->pred_mv);
pc_root->index = 0;
@@ -3767,9 +3765,7 @@
av1_zero(x->tx_size_rd_record_8X8);
av1_zero(x->tx_size_rd_record_16X16);
av1_zero(x->tx_size_rd_record_32X32);
-#if CONFIG_TX64X64
av1_zero(x->tx_size_rd_record_64X64);
-#endif
av1_zero(x->pred_mv);
pc_root->index = 0;
diff --git a/av1/encoder/hybrid_fwd_txfm.c b/av1/encoder/hybrid_fwd_txfm.c
index b8c33a7..897ff25 100644
--- a/av1/encoder/hybrid_fwd_txfm.c
+++ b/av1/encoder/hybrid_fwd_txfm.c
@@ -72,7 +72,6 @@
av1_fht32x32(src_diff, coeff, diff_stride, txfm_param);
}
-#if CONFIG_TX64X64
static void fwd_txfm_64x64(const int16_t *src_diff, tran_low_t *coeff,
int diff_stride, TxfmParam *txfm_param) {
if (txfm_param->tx_type == IDTX)
@@ -112,7 +111,6 @@
else
av1_fht64x16(src_diff, coeff, diff_stride, txfm_param);
}
-#endif // CONFIG_TX64X64
static void fwd_txfm_16x4(const int16_t *src_diff, tran_low_t *coeff,
int diff_stride, TxfmParam *txfm_param) {
@@ -341,7 +339,6 @@
}
}
-#if CONFIG_TX64X64
static void highbd_fwd_txfm_32x64(const int16_t *src_diff, tran_low_t *coeff,
int diff_stride, TxfmParam *txfm_param) {
int32_t *dst_coeff = (int32_t *)coeff;
@@ -531,7 +528,6 @@
default: assert(0); break;
}
}
-#endif // CONFIG_TX64X64
void av1_fwd_txfm(const int16_t *src_diff, tran_low_t *coeff, int diff_stride,
TxfmParam *txfm_param) {
@@ -551,7 +547,6 @@
assert(av1_ext_tx_used[txfm_param->tx_set_type][txfm_param->tx_type]);
const TX_SIZE tx_size = txfm_param->tx_size;
switch (tx_size) {
-#if CONFIG_TX64X64
case TX_64X64:
highbd_fwd_txfm_64x64(src_diff, coeff, diff_stride, txfm_param);
break;
@@ -567,7 +562,6 @@
case TX_64X16:
highbd_fwd_txfm_64x16(src_diff, coeff, diff_stride, txfm_param);
break;
-#endif // CONFIG_TX64X64
case TX_32X32:
highbd_fwd_txfm_32x32(src_diff, coeff, diff_stride, txfm_param);
break;
diff --git a/av1/encoder/rd.c b/av1/encoder/rd.c
index 14c1eb9..401fd03 100644
--- a/av1/encoder/rd.c
+++ b/av1/encoder/rd.c
@@ -764,7 +764,6 @@
for (i = 0; i < num_4x4_h; i += 8)
t_left[i] = !!*(const uint64_t *)&left[i];
break;
-#if CONFIG_TX64X64
case TX_32X64:
for (i = 0; i < num_4x4_w; i += 8)
t_above[i] = !!*(const uint64_t *)&above[i];
@@ -801,7 +800,6 @@
t_left[i] =
!!(*(const uint64_t *)&left[i] | *(const uint64_t *)&left[i + 8]);
break;
-#endif // CONFIG_TX64X64
case TX_4X8:
memcpy(t_above, above, sizeof(ENTROPY_CONTEXT) * num_4x4_w);
for (i = 0; i < num_4x4_h; i += 2)
diff --git a/av1/encoder/rdopt.c b/av1/encoder/rdopt.c
index 3122aff..b789456 100644
--- a/av1/encoder/rdopt.c
+++ b/av1/encoder/rdopt.c
@@ -1734,12 +1734,10 @@
#endif // CONFIG_DIST_8X8
if (cpi->sf.use_transform_domain_distortion
-#if CONFIG_TX64X64
// Any 64-pt transforms only preserves half the coefficients.
// Therefore transform domain distortion is not valid for these
// transform sizes.
&& txsize_sqr_up_map[tx_size] != TX_64X64
-#endif // CONFIG_TX64X64
#if CONFIG_DIST_8X8
&& !x->using_dist_8x8
#endif
@@ -4673,16 +4671,10 @@
static int find_tx_size_rd_records(MACROBLOCK *x, BLOCK_SIZE bsize, int mi_row,
int mi_col,
TX_SIZE_RD_INFO_NODE *dst_rd_info) {
-#if CONFIG_TX64X64
TX_SIZE_RD_RECORD *rd_records_table[4] = { x->tx_size_rd_record_8X8,
x->tx_size_rd_record_16X16,
x->tx_size_rd_record_32X32,
x->tx_size_rd_record_64X64 };
-#else
- TX_SIZE_RD_RECORD *rd_records_table[3] = { x->tx_size_rd_record_8X8,
- x->tx_size_rd_record_16X16,
- x->tx_size_rd_record_32X32 };
-#endif
const TX_SIZE max_square_tx_size = max_txsize_lookup[bsize];
const int bw = block_size_wide[bsize];
const int bh = block_size_high[bsize];
diff --git a/av1/encoder/rdopt.h b/av1/encoder/rdopt.h
index 0a71b78..a53d193 100644
--- a/av1/encoder/rdopt.h
+++ b/av1/encoder/rdopt.h
@@ -88,20 +88,14 @@
const uint16_t band_count_table[TX_SIZES_ALL][8] = {
{ 1, 2, 3, 4, 3, 16 - 13, 0 }, { 1, 2, 3, 4, 11, 64 - 21, 0 },
{ 1, 2, 3, 4, 11, 256 - 21, 0 }, { 1, 2, 3, 4, 11, 1024 - 21, 0 },
-#if CONFIG_TX64X64
- { 1, 2, 3, 4, 11, 4096 - 21, 0 },
-#endif // CONFIG_TX64X64
- { 1, 2, 3, 4, 8, 32 - 18, 0 }, { 1, 2, 3, 4, 8, 32 - 18, 0 },
- { 1, 2, 3, 4, 11, 128 - 21, 0 }, { 1, 2, 3, 4, 11, 128 - 21, 0 },
- { 1, 2, 3, 4, 11, 512 - 21, 0 }, { 1, 2, 3, 4, 11, 512 - 21, 0 },
-#if CONFIG_TX64X64
- { 1, 2, 3, 4, 11, 2048 - 21, 0 }, { 1, 2, 3, 4, 11, 2048 - 21, 0 },
-#endif // CONFIG_TX64X64
- { 1, 2, 3, 4, 11, 64 - 21, 0 }, { 1, 2, 3, 4, 11, 64 - 21, 0 },
- { 1, 2, 3, 4, 11, 256 - 21, 0 }, { 1, 2, 3, 4, 11, 256 - 21, 0 },
-#if CONFIG_TX64X64
- { 1, 2, 3, 4, 11, 1024 - 21, 0 }, { 1, 2, 3, 4, 11, 1024 - 21, 0 },
-#endif // CONFIG_TX64X64
+ { 1, 2, 3, 4, 11, 4096 - 21, 0 }, { 1, 2, 3, 4, 8, 32 - 18, 0 },
+ { 1, 2, 3, 4, 8, 32 - 18, 0 }, { 1, 2, 3, 4, 11, 128 - 21, 0 },
+ { 1, 2, 3, 4, 11, 128 - 21, 0 }, { 1, 2, 3, 4, 11, 512 - 21, 0 },
+ { 1, 2, 3, 4, 11, 512 - 21, 0 }, { 1, 2, 3, 4, 11, 2048 - 21, 0 },
+ { 1, 2, 3, 4, 11, 2048 - 21, 0 }, { 1, 2, 3, 4, 11, 64 - 21, 0 },
+ { 1, 2, 3, 4, 11, 64 - 21, 0 }, { 1, 2, 3, 4, 11, 256 - 21, 0 },
+ { 1, 2, 3, 4, 11, 256 - 21, 0 }, { 1, 2, 3, 4, 11, 1024 - 21, 0 },
+ { 1, 2, 3, 4, 11, 1024 - 21, 0 },
};
static INLINE int cost_coeffs(const AV1_COMMON *const cm, MACROBLOCK *x,
diff --git a/av1/encoder/speed_features.c b/av1/encoder/speed_features.c
index cbfa541..1a246d5 100644
--- a/av1/encoder/speed_features.c
+++ b/av1/encoder/speed_features.c
@@ -187,14 +187,12 @@
sf->adaptive_pred_interp_filter = 1;
sf->recode_loop = ALLOW_RECODE_KFARFGF;
-#if CONFIG_TX64X64
sf->intra_y_mode_mask[TX_64X64] = INTRA_DC_H_V;
#if CONFIG_CFL
sf->intra_uv_mode_mask[TX_64X64] = UV_INTRA_DC_H_V_CFL;
#else
sf->intra_uv_mode_mask[TX_64X64] = INTRA_DC_H_V;
#endif // CONFIG_CFL
-#endif // CONFIG_TX64X64
sf->intra_y_mode_mask[TX_32X32] = INTRA_DC_H_V;
#if CONFIG_CFL
sf->intra_uv_mode_mask[TX_32X32] = UV_INTRA_DC_H_V_CFL;
@@ -251,14 +249,12 @@
sf->recode_loop = ALLOW_RECODE_KFMAXBW;
sf->adaptive_rd_thresh = 3;
sf->mode_skip_start = 6;
-#if CONFIG_TX64X64
sf->intra_y_mode_mask[TX_64X64] = INTRA_DC;
#if CONFIG_CFL
sf->intra_uv_mode_mask[TX_64X64] = UV_INTRA_DC_CFL;
#else
sf->intra_uv_mode_mask[TX_64X64] = INTRA_DC;
#endif // CONFIG_CFL
-#endif // CONFIG_TX64X64
sf->intra_y_mode_mask[TX_32X32] = INTRA_DC;
#if CONFIG_CFL
sf->intra_uv_mode_mask[TX_32X32] = UV_INTRA_DC_CFL;
@@ -304,9 +300,7 @@
const int frames_since_key = is_keyframe ? 0 : cpi->rc.frames_since_key;
sf->default_max_partition_size = BLOCK_32X32;
sf->default_min_partition_size = BLOCK_8X8;
-#if CONFIG_TX64X64
sf->intra_y_mode_mask[TX_64X64] = INTRA_DC;
-#endif // CONFIG_TX64X64
sf->intra_y_mode_mask[TX_32X32] = INTRA_DC;
sf->frame_parameter_update = 0;
sf->mv.search_method = FAST_HEX;
diff --git a/av1/encoder/x86/av1_fwd_txfm_sse2.c b/av1/encoder/x86/av1_fwd_txfm_sse2.c
index ce5109a..52a0d01 100644
--- a/av1/encoder/x86/av1_fwd_txfm_sse2.c
+++ b/av1/encoder/x86/av1_fwd_txfm_sse2.c
@@ -2650,27 +2650,21 @@
av1_lowbd_fwd_txfm2d_8x8_sse2, // 8x8 transform
av1_lowbd_fwd_txfm2d_16x16_sse2, // 16x16 transform
av1_lowbd_fwd_txfm2d_32x32_sse2, // 32x32 transform
-#if CONFIG_TX64X64
NULL, // 64x64 transform
-#endif // CONFIG_TX64X64
av1_lowbd_fwd_txfm2d_4x8_sse2, // 4x8 transform
av1_lowbd_fwd_txfm2d_8x4_sse2, // 8x4 transform
av1_lowbd_fwd_txfm2d_8x16_sse2, // 8x16 transform
av1_lowbd_fwd_txfm2d_16x8_sse2, // 16x8 transform
av1_lowbd_fwd_txfm2d_16x32_sse2, // 16x32 transform
av1_lowbd_fwd_txfm2d_32x16_sse2, // 32x16 transform
-#if CONFIG_TX64X64
- NULL, // 32x64 transform
- NULL, // 64x32 transform
-#endif // CONFIG_TX64X64
- NULL, // 4x16 transform
- NULL, // 16x4 transform
- av1_lowbd_fwd_txfm2d_8x32_sse2, // 8x32 transform
- av1_lowbd_fwd_txfm2d_32x8_sse2, // 32x8 transform
-#if CONFIG_TX64X64
- NULL, // 16x64 transform
- NULL, // 64x16 transform
-#endif // CONFIG_TX64X64
+ NULL, // 32x64 transform
+ NULL, // 64x32 transform
+ NULL, // 4x16 transform
+ NULL, // 16x4 transform
+ av1_lowbd_fwd_txfm2d_8x32_sse2, // 8x32 transform
+ av1_lowbd_fwd_txfm2d_32x8_sse2, // 32x8 transform
+ NULL, // 16x64 transform
+ NULL, // 64x16 transform
};
void av1_lowbd_fwd_txfm_sse2(const int16_t *src_diff, tran_low_t *coeff,