Use TX_TYPE consistently instead of int.
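
Plain int drops the type information carried by the TX_TYPE enum
(av1/common/enums.h): call sites can silently swap tx_type and
tx_size arguments, and a switch over an int gives the compiler no
basis for -Wswitch diagnostics. This patch threads TX_TYPE (plus
TX_SIZE and TX_TYPE_1D where they apply) through the forward and
inverse transform paths, the rtcd prototypes, and the tests.

A minimal sketch of the benefit, not part of the patch itself (the
function name and case bodies below are illustrative only, assuming
TX_TYPE is the enum declared in av1/common/enums.h):

    // Switching on a TX_TYPE instead of an int lets the compiler
    // (with -Wswitch and no default label) flag any enum member
    // the switch forgets to handle.
    static void dispatch(TX_TYPE tx_type) {
      switch (tx_type) {
        case DCT_DCT: /* 2D DCT path */ break;
        case ADST_DCT: /* hybrid path */ break;
        case DCT_ADST: /* hybrid path */ break;
        case ADST_ADST: /* 2D ADST path */ break;
        // With int, a missing case here compiles silently;
        // with TX_TYPE, -Wswitch reports the omission.
      }
    }
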
Change-Id: Idf01b14bed4701ce84fa1c127e01560f4764fadb
diff --git a/aom_dsp/txfm_common.h b/aom_dsp/txfm_common.h
index 2bf0403..de95a40 100644
--- a/aom_dsp/txfm_common.h
+++ b/aom_dsp/txfm_common.h
@@ -13,6 +13,7 @@
#define AOM_DSP_TXFM_COMMON_H_
#include "aom_dsp/aom_dsp_common.h"
+#include "av1/common/enums.h"
// Constants and Macros used by all idct/dct functions
#define DCT_CONST_BITS 14
@@ -23,8 +24,8 @@
typedef struct txfm_param {
// for both forward and inverse transforms
- int tx_type;
- int tx_size;
+ TX_TYPE tx_type;
+ TX_SIZE tx_size;
int lossless;
int bd;
#if CONFIG_MRC_TX || CONFIG_LGT
diff --git a/av1/common/arm/neon/iht4x4_add_neon.c b/av1/common/arm/neon/iht4x4_add_neon.c
index 68184c5..b29228e 100644
--- a/av1/common/arm/neon/iht4x4_add_neon.c
+++ b/av1/common/arm/neon/iht4x4_add_neon.c
@@ -148,13 +148,13 @@
TRANSPOSE4X4(&q8s16, &q9s16);
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
switch (tx_type) {
- case 0: // idct_idct is not supported. Fall back to C
+ case DCT_DCT: // idct_idct is not supported. Fall back to C
av1_iht4x4_16_add_c(input, dest, dest_stride, txfm_param);
return;
break;
- case 1: // iadst_idct
+ case ADST_DCT: // iadst_idct
// generate constants
GENERATE_COSINE_CONSTANTS(&d0s16, &d1s16, &d2s16);
GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16);
@@ -168,7 +168,7 @@
// then transform columns
IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16);
break;
- case 2: // idct_iadst
+ case DCT_ADST: // idct_iadst
// generate constants
GENERATE_COSINE_CONSTANTS(&d0s16, &d1s16, &d2s16);
GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16);
@@ -182,7 +182,7 @@
// then transform columns
IDCT4x4_1D(&d0s16, &d1s16, &d2s16, &q8s16, &q9s16);
break;
- case 3: // iadst_iadst
+ case ADST_ADST: // iadst_iadst
// generate constants
GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16);
diff --git a/av1/common/arm/neon/iht8x8_add_neon.c b/av1/common/arm/neon/iht8x8_add_neon.c
index a984495..4cd43a9 100644
--- a/av1/common/arm/neon/iht8x8_add_neon.c
+++ b/av1/common/arm/neon/iht8x8_add_neon.c
@@ -478,13 +478,13 @@
TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
&q15s16);
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
switch (tx_type) {
- case 0: // idct_idct is not supported. Fall back to C
+ case DCT_DCT: // idct_idct is not supported. Fall back to C
av1_iht8x8_64_add_c(input, dest, dest_stride, txfm_param);
return;
break;
- case 1: // iadst_idct
+ case ADST_DCT: // iadst_idct
// generate IDCT constants
// GENERATE_IDCT_CONSTANTS
@@ -503,7 +503,7 @@
IADST8X8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
&q15s16);
break;
- case 2: // idct_iadst
+ case DCT_ADST: // idct_iadst
// generate IADST constants
// GENERATE_IADST_CONSTANTS
@@ -522,7 +522,7 @@
IDCT8x8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
&q15s16);
break;
- case 3: // iadst_iadst
+ case ADST_ADST: // iadst_iadst
// generate IADST constants
// GENERATE_IADST_CONSTANTS
diff --git a/av1/common/av1_fwd_txfm2d.c b/av1/common/av1_fwd_txfm2d.c
index d4ff86b..a46aae3 100644
--- a/av1/common/av1_fwd_txfm2d.c
+++ b/av1/common/av1_fwd_txfm2d.c
@@ -143,13 +143,13 @@
}
void av1_fwd_txfm2d_4x8_c(const int16_t *input, int32_t *output, int stride,
- int tx_type, int bd) {
+ TX_TYPE tx_type, int bd) {
#if CONFIG_TXMG
int32_t txfm_buf[4 * 8];
int16_t rinput[4 * 8];
int tx_size = TX_4X8;
int rtx_size = av1_rotate_tx_size(tx_size);
- int rtx_type = av1_rotate_tx_type(tx_type);
+ TX_TYPE rtx_type = av1_rotate_tx_type(tx_type);
int w = tx_size_wide[tx_size];
int h = tx_size_high[tx_size];
int rw = h;
@@ -166,20 +166,20 @@
}
void av1_fwd_txfm2d_8x4_c(const int16_t *input, int32_t *output, int stride,
- int tx_type, int bd) {
+ TX_TYPE tx_type, int bd) {
int32_t txfm_buf[8 * 4];
TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_8X4);
fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf, bd);
}
void av1_fwd_txfm2d_8x16_c(const int16_t *input, int32_t *output, int stride,
- int tx_type, int bd) {
+ TX_TYPE tx_type, int bd) {
#if CONFIG_TXMG
int32_t txfm_buf[8 * 16];
int16_t rinput[8 * 16];
int tx_size = TX_8X16;
int rtx_size = av1_rotate_tx_size(tx_size);
- int rtx_type = av1_rotate_tx_type(tx_type);
+ TX_TYPE rtx_type = av1_rotate_tx_type(tx_type);
int w = tx_size_wide[tx_size];
int h = tx_size_high[tx_size];
int rw = h;
@@ -196,20 +196,20 @@
}
void av1_fwd_txfm2d_16x8_c(const int16_t *input, int32_t *output, int stride,
- int tx_type, int bd) {
+ TX_TYPE tx_type, int bd) {
int32_t txfm_buf[16 * 8];
TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_16X8);
fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf, bd);
}
void av1_fwd_txfm2d_16x32_c(const int16_t *input, int32_t *output, int stride,
- int tx_type, int bd) {
+ TX_TYPE tx_type, int bd) {
#if CONFIG_TXMG
int32_t txfm_buf[16 * 32];
int16_t rinput[16 * 32];
int tx_size = TX_16X32;
int rtx_size = av1_rotate_tx_size(tx_size);
- int rtx_type = av1_rotate_tx_type(tx_type);
+ TX_TYPE rtx_type = av1_rotate_tx_type(tx_type);
int w = tx_size_wide[tx_size];
int h = tx_size_high[tx_size];
int rw = h;
@@ -226,35 +226,35 @@
}
void av1_fwd_txfm2d_32x16_c(const int16_t *input, int32_t *output, int stride,
- int tx_type, int bd) {
+ TX_TYPE tx_type, int bd) {
int32_t txfm_buf[32 * 16];
TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_32X16);
fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf, bd);
}
void av1_fwd_txfm2d_4x4_c(const int16_t *input, int32_t *output, int stride,
- int tx_type, int bd) {
+ TX_TYPE tx_type, int bd) {
int32_t txfm_buf[4 * 4];
TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_4X4);
fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf, bd);
}
void av1_fwd_txfm2d_8x8_c(const int16_t *input, int32_t *output, int stride,
- int tx_type, int bd) {
+ TX_TYPE tx_type, int bd) {
int32_t txfm_buf[8 * 8];
TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_8X8);
fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf, bd);
}
void av1_fwd_txfm2d_16x16_c(const int16_t *input, int32_t *output, int stride,
- int tx_type, int bd) {
+ TX_TYPE tx_type, int bd) {
int32_t txfm_buf[16 * 16];
TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_16X16);
fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf, bd);
}
void av1_fwd_txfm2d_32x32_c(const int16_t *input, int32_t *output, int stride,
- int tx_type, int bd) {
+ TX_TYPE tx_type, int bd) {
int32_t txfm_buf[32 * 32];
TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_32X32);
fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf, bd);
@@ -262,21 +262,21 @@
#if CONFIG_TX64X64
void av1_fwd_txfm2d_64x64_c(const int16_t *input, int32_t *output, int stride,
- int tx_type, int bd) {
+ TX_TYPE tx_type, int bd) {
int32_t txfm_buf[64 * 64];
TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_64x64_cfg(tx_type);
fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf, bd);
}
void av1_fwd_txfm2d_32x64_c(const int16_t *input, int32_t *output, int stride,
- int tx_type, int bd) {
+ TX_TYPE tx_type, int bd) {
int32_t txfm_buf[32 * 64];
TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_32x64_cfg(tx_type);
fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf, bd);
}
void av1_fwd_txfm2d_64x32_c(const int16_t *input, int32_t *output, int stride,
- int tx_type, int bd) {
+ TX_TYPE tx_type, int bd) {
int32_t txfm_buf[64 * 32];
TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_64x32_cfg(tx_type);
fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf, bd);
@@ -349,11 +349,11 @@
#endif // CONFIG_EXT_TX
};
-TXFM_2D_FLIP_CFG av1_get_fwd_txfm_cfg(int tx_type, int tx_size) {
+TXFM_2D_FLIP_CFG av1_get_fwd_txfm_cfg(TX_TYPE tx_type, int tx_size) {
TXFM_2D_FLIP_CFG cfg;
set_flip_cfg(tx_type, &cfg);
- const int tx_type_col = vtx_tab[tx_type];
- const int tx_type_row = htx_tab[tx_type];
+ const TX_TYPE_1D tx_type_col = vtx_tab[tx_type];
+ const TX_TYPE_1D tx_type_row = htx_tab[tx_type];
const int tx_size_col = txsize_vert_map[tx_size];
const int tx_size_row = txsize_horz_map[tx_size];
cfg.col_cfg = fwd_txfm_col_cfg_ls[tx_type_col][tx_size_col];
@@ -362,9 +362,9 @@
}
#if CONFIG_TX64X64
-TXFM_2D_FLIP_CFG av1_get_fwd_txfm_32x64_cfg(int tx_type) {
+TXFM_2D_FLIP_CFG av1_get_fwd_txfm_32x64_cfg(TX_TYPE tx_type) {
TXFM_2D_FLIP_CFG cfg;
- const int tx_type_row = htx_tab[tx_type];
+ const TX_TYPE_1D tx_type_row = htx_tab[tx_type];
const int tx_size_row = txsize_horz_map[TX_32X64];
switch (tx_type) {
case DCT_DCT:
@@ -378,9 +378,9 @@
return cfg;
}
-TXFM_2D_FLIP_CFG av1_get_fwd_txfm_64x32_cfg(int tx_type) {
+TXFM_2D_FLIP_CFG av1_get_fwd_txfm_64x32_cfg(TX_TYPE tx_type) {
TXFM_2D_FLIP_CFG cfg;
- const int tx_type_col = vtx_tab[tx_type];
+ const TX_TYPE_1D tx_type_col = vtx_tab[tx_type];
const int tx_size_col = txsize_vert_map[TX_64X32];
switch (tx_type) {
case DCT_DCT:
@@ -394,7 +394,7 @@
return cfg;
}
-TXFM_2D_FLIP_CFG av1_get_fwd_txfm_64x64_cfg(int tx_type) {
+TXFM_2D_FLIP_CFG av1_get_fwd_txfm_64x64_cfg(TX_TYPE tx_type) {
TXFM_2D_FLIP_CFG cfg;
switch (tx_type) {
case DCT_DCT:
diff --git a/av1/common/av1_inv_txfm2d.c b/av1/common/av1_inv_txfm2d.c
index 2c01f46..e59907c 100644
--- a/av1/common/av1_inv_txfm2d.c
+++ b/av1/common/av1_inv_txfm2d.c
@@ -140,11 +140,11 @@
#endif // CONFIG_EXT_TX
};
-TXFM_2D_FLIP_CFG av1_get_inv_txfm_cfg(int tx_type, int tx_size) {
+TXFM_2D_FLIP_CFG av1_get_inv_txfm_cfg(TX_TYPE tx_type, int tx_size) {
TXFM_2D_FLIP_CFG cfg;
set_flip_cfg(tx_type, &cfg);
- const int tx_type_col = vtx_tab[tx_type];
- const int tx_type_row = htx_tab[tx_type];
+ const TX_TYPE_1D tx_type_col = vtx_tab[tx_type];
+ const TX_TYPE_1D tx_type_row = htx_tab[tx_type];
const int tx_size_col = txsize_vert_map[tx_size];
const int tx_size_row = txsize_horz_map[tx_size];
cfg.col_cfg = inv_txfm_col_cfg_ls[tx_type_col][tx_size_col];
@@ -153,7 +153,7 @@
}
#if CONFIG_TX64X64
-TXFM_2D_FLIP_CFG av1_get_inv_txfm_64x64_cfg(int tx_type) {
+TXFM_2D_FLIP_CFG av1_get_inv_txfm_64x64_cfg(TX_TYPE tx_type) {
TXFM_2D_FLIP_CFG cfg = { 0, 0, NULL, NULL };
switch (tx_type) {
case DCT_DCT:
@@ -294,7 +294,7 @@
static INLINE void inv_txfm2d_add_facade(const int32_t *input, uint16_t *output,
int stride, int32_t *txfm_buf,
- int tx_type, int tx_size, int bd) {
+ TX_TYPE tx_type, int tx_size, int bd) {
TXFM_2D_FLIP_CFG cfg = av1_get_inv_txfm_cfg(tx_type, tx_size);
int tx_size_sqr = txsize_sqr_map[tx_size];
inv_txfm2d_add_c(input, output, stride, &cfg, txfm_buf,
@@ -302,20 +302,20 @@
}
void av1_inv_txfm2d_add_4x8_c(const int32_t *input, uint16_t *output,
- int stride, int tx_type, int bd) {
+ int stride, TX_TYPE tx_type, int bd) {
int txfm_buf[4 * 8 + 8 + 8];
inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_4X8, bd);
}
void av1_inv_txfm2d_add_8x4_c(const int32_t *input, uint16_t *output,
- int stride, int tx_type, int bd) {
+ int stride, TX_TYPE tx_type, int bd) {
#if CONFIG_TXMG
int txfm_buf[8 * 4 + 8 + 8];
int32_t rinput[8 * 4];
uint16_t routput[8 * 4];
int tx_size = TX_8X4;
int rtx_size = av1_rotate_tx_size(tx_size);
- int rtx_type = av1_rotate_tx_type(tx_type);
+ TX_TYPE rtx_type = av1_rotate_tx_type(tx_type);
int w = tx_size_wide[tx_size];
int h = tx_size_high[tx_size];
int rw = h;
@@ -331,20 +331,20 @@
}
void av1_inv_txfm2d_add_8x16_c(const int32_t *input, uint16_t *output,
- int stride, int tx_type, int bd) {
+ int stride, TX_TYPE tx_type, int bd) {
int txfm_buf[8 * 16 + 16 + 16];
inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_8X16, bd);
}
void av1_inv_txfm2d_add_16x8_c(const int32_t *input, uint16_t *output,
- int stride, int tx_type, int bd) {
+ int stride, TX_TYPE tx_type, int bd) {
#if CONFIG_TXMG
int txfm_buf[16 * 8 + 16 + 16];
int32_t rinput[16 * 8];
uint16_t routput[16 * 8];
int tx_size = TX_16X8;
int rtx_size = av1_rotate_tx_size(tx_size);
- int rtx_type = av1_rotate_tx_type(tx_type);
+ TX_TYPE rtx_type = av1_rotate_tx_type(tx_type);
int w = tx_size_wide[tx_size];
int h = tx_size_high[tx_size];
int rw = h;
@@ -360,20 +360,20 @@
}
void av1_inv_txfm2d_add_16x32_c(const int32_t *input, uint16_t *output,
- int stride, int tx_type, int bd) {
+ int stride, TX_TYPE tx_type, int bd) {
int txfm_buf[16 * 32 + 32 + 32];
inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_16X32, bd);
}
void av1_inv_txfm2d_add_32x16_c(const int32_t *input, uint16_t *output,
- int stride, int tx_type, int bd) {
+ int stride, TX_TYPE tx_type, int bd) {
#if CONFIG_TXMG
int txfm_buf[32 * 16 + 32 + 32];
int32_t rinput[32 * 16];
uint16_t routput[32 * 16];
int tx_size = TX_32X16;
int rtx_size = av1_rotate_tx_size(tx_size);
- int rtx_type = av1_rotate_tx_type(tx_type);
+ TX_TYPE rtx_type = av1_rotate_tx_type(tx_type);
int w = tx_size_wide[tx_size];
int h = tx_size_high[tx_size];
int rw = h;
@@ -389,45 +389,45 @@
}
void av1_inv_txfm2d_add_4x4_c(const int32_t *input, uint16_t *output,
- int stride, int tx_type, int bd) {
+ int stride, TX_TYPE tx_type, int bd) {
int txfm_buf[4 * 4 + 4 + 4];
inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_4X4, bd);
}
void av1_inv_txfm2d_add_8x8_c(const int32_t *input, uint16_t *output,
- int stride, int tx_type, int bd) {
+ int stride, TX_TYPE tx_type, int bd) {
int txfm_buf[8 * 8 + 8 + 8];
inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_8X8, bd);
}
void av1_inv_txfm2d_add_16x16_c(const int32_t *input, uint16_t *output,
- int stride, int tx_type, int bd) {
+ int stride, TX_TYPE tx_type, int bd) {
int txfm_buf[16 * 16 + 16 + 16];
inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_16X16, bd);
}
void av1_inv_txfm2d_add_32x32_c(const int32_t *input, uint16_t *output,
- int stride, int tx_type, int bd) {
+ int stride, TX_TYPE tx_type, int bd) {
int txfm_buf[32 * 32 + 32 + 32];
inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_32X32, bd);
}
#if CONFIG_TX64X64
void av1_inv_txfm2d_add_64x64_c(const int32_t *input, uint16_t *output,
- int stride, int tx_type, int bd) {
+ int stride, TX_TYPE tx_type, int bd) {
int txfm_buf[64 * 64 + 64 + 64];
inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_64X64, bd);
}
void av1_inv_txfm2d_add_64x32_c(const int32_t *input, uint16_t *output,
- int stride, int tx_type, int bd) {
+ int stride, TX_TYPE tx_type, int bd) {
#if CONFIG_TXMG
int txfm_buf[64 * 32 + 64 + 64];
int32_t rinput[64 * 32];
uint16_t routput[64 * 32];
int tx_size = TX_64X32;
int rtx_size = av1_rotate_tx_size(tx_size);
- int rtx_type = av1_rotate_tx_type(tx_type);
+ TX_TYPE rtx_type = av1_rotate_tx_type(tx_type);
int w = tx_size_wide[tx_size];
int h = tx_size_high[tx_size];
int rw = h;
@@ -443,7 +443,7 @@
}
void av1_inv_txfm2d_add_32x64_c(const int32_t *input, uint16_t *output,
- int stride, int tx_type, int bd) {
+ int stride, TX_TYPE tx_type, int bd) {
int txfm_buf[64 * 32 + 64 + 64];
inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_32X64, bd);
}
diff --git a/av1/common/av1_rtcd_defs.pl b/av1/common/av1_rtcd_defs.pl
index c4026bf..5f8f96c 100755
--- a/av1/common/av1_rtcd_defs.pl
+++ b/av1/common/av1_rtcd_defs.pl
@@ -265,32 +265,32 @@
}
#inv txfm
-add_proto qw/void av1_inv_txfm2d_add_4x8/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
-add_proto qw/void av1_inv_txfm2d_add_8x4/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
-add_proto qw/void av1_inv_txfm2d_add_8x16/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
-add_proto qw/void av1_inv_txfm2d_add_16x8/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
-add_proto qw/void av1_inv_txfm2d_add_16x32/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
-add_proto qw/void av1_inv_txfm2d_add_32x16/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
-add_proto qw/void av1_inv_txfm2d_add_4x4/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
+add_proto qw/void av1_inv_txfm2d_add_4x8/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
+add_proto qw/void av1_inv_txfm2d_add_8x4/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
+add_proto qw/void av1_inv_txfm2d_add_8x16/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
+add_proto qw/void av1_inv_txfm2d_add_16x8/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
+add_proto qw/void av1_inv_txfm2d_add_16x32/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
+add_proto qw/void av1_inv_txfm2d_add_32x16/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
+add_proto qw/void av1_inv_txfm2d_add_4x4/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
if (aom_config("CONFIG_DAALA_DCT4") ne "yes") {
specialize qw/av1_inv_txfm2d_add_4x4 sse4_1/;
}
-add_proto qw/void av1_inv_txfm2d_add_8x8/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
+add_proto qw/void av1_inv_txfm2d_add_8x8/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
if (aom_config("CONFIG_DAALA_DCT8") ne "yes") {
specialize qw/av1_inv_txfm2d_add_8x8 sse4_1/;
}
-add_proto qw/void av1_inv_txfm2d_add_16x16/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
+add_proto qw/void av1_inv_txfm2d_add_16x16/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
if (aom_config("CONFIG_DAALA_DCT16") ne "yes") {
specialize qw/av1_inv_txfm2d_add_16x16 sse4_1/;
}
-add_proto qw/void av1_inv_txfm2d_add_32x32/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
+add_proto qw/void av1_inv_txfm2d_add_32x32/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
if (aom_config("CONFIG_DAALA_DCT32") ne "yes") {
specialize qw/av1_inv_txfm2d_add_32x32 avx2/;
}
if (aom_config("CONFIG_TX64X64") eq "yes") {
- add_proto qw/void av1_inv_txfm2d_add_64x64/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
- add_proto qw/void av1_inv_txfm2d_add_64x32/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
- add_proto qw/void av1_inv_txfm2d_add_32x64/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
+ add_proto qw/void av1_inv_txfm2d_add_64x64/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
+ add_proto qw/void av1_inv_txfm2d_add_64x32/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
+ add_proto qw/void av1_inv_txfm2d_add_32x64/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
}
#
@@ -404,36 +404,36 @@
}
}
- add_proto qw/void av1_fwd_idtx/, "const int16_t *src_diff, tran_low_t *coeff, int stride, int bsx, int bsy, int tx_type";
+ add_proto qw/void av1_fwd_idtx/, "const int16_t *src_diff, tran_low_t *coeff, int stride, int bsx, int bsy, TX_TYPE tx_type";
#fwd txfm
- add_proto qw/void av1_fwd_txfm2d_4x8/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
- add_proto qw/void av1_fwd_txfm2d_8x4/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
- add_proto qw/void av1_fwd_txfm2d_8x16/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
- add_proto qw/void av1_fwd_txfm2d_16x8/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
- add_proto qw/void av1_fwd_txfm2d_16x32/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
- add_proto qw/void av1_fwd_txfm2d_32x16/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
- add_proto qw/void av1_fwd_txfm2d_4x4/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
+ add_proto qw/void av1_fwd_txfm2d_4x8/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
+ add_proto qw/void av1_fwd_txfm2d_8x4/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
+ add_proto qw/void av1_fwd_txfm2d_8x16/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
+ add_proto qw/void av1_fwd_txfm2d_16x8/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
+ add_proto qw/void av1_fwd_txfm2d_16x32/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
+ add_proto qw/void av1_fwd_txfm2d_32x16/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
+ add_proto qw/void av1_fwd_txfm2d_4x4/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
if (aom_config("CONFIG_DAALA_DCT4") ne "yes") {
specialize qw/av1_fwd_txfm2d_4x4 sse4_1/;
}
- add_proto qw/void av1_fwd_txfm2d_8x8/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
+ add_proto qw/void av1_fwd_txfm2d_8x8/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
if (aom_config("CONFIG_DAALA_DCT8") ne "yes") {
specialize qw/av1_fwd_txfm2d_8x8 sse4_1/;
}
- add_proto qw/void av1_fwd_txfm2d_16x16/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
+ add_proto qw/void av1_fwd_txfm2d_16x16/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
if (aom_config("CONFIG_DAALA_DCT16") ne "yes") {
specialize qw/av1_fwd_txfm2d_16x16 sse4_1/;
}
- add_proto qw/void av1_fwd_txfm2d_32x32/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
+ add_proto qw/void av1_fwd_txfm2d_32x32/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
if (aom_config("CONFIG_DAALA_DCT32") ne "yes") {
specialize qw/av1_fwd_txfm2d_32x32 sse4_1/;
}
if (aom_config("CONFIG_TX64X64") eq "yes") {
- add_proto qw/void av1_fwd_txfm2d_32x64/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
- add_proto qw/void av1_fwd_txfm2d_64x32/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
- add_proto qw/void av1_fwd_txfm2d_64x64/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
+ add_proto qw/void av1_fwd_txfm2d_32x64/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
+ add_proto qw/void av1_fwd_txfm2d_64x32/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
+ add_proto qw/void av1_fwd_txfm2d_64x64/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
}
#
# Motion search
diff --git a/av1/common/av1_txfm.h b/av1/common/av1_txfm.h
index 4c0a2d1..bcc37d2 100644
--- a/av1/common/av1_txfm.h
+++ b/av1/common/av1_txfm.h
@@ -154,7 +154,7 @@
const TXFM_1D_CFG *row_cfg;
} TXFM_2D_FLIP_CFG;
-static INLINE void set_flip_cfg(int tx_type, TXFM_2D_FLIP_CFG *cfg) {
+static INLINE void set_flip_cfg(TX_TYPE tx_type, TXFM_2D_FLIP_CFG *cfg) {
switch (tx_type) {
case DCT_DCT:
case ADST_DCT:
@@ -225,7 +225,7 @@
}
}
-static INLINE int av1_rotate_tx_type(int tx_type) {
+static INLINE TX_TYPE av1_rotate_tx_type(TX_TYPE tx_type) {
switch (tx_type) {
case DCT_DCT: return DCT_DCT;
case ADST_DCT: return DCT_ADST;
@@ -354,13 +354,13 @@
const TXFM_2D_FLIP_CFG *cfg, int8_t fwd_shift,
int bd);
-TXFM_2D_FLIP_CFG av1_get_fwd_txfm_cfg(int tx_type, int tx_size);
+TXFM_2D_FLIP_CFG av1_get_fwd_txfm_cfg(TX_TYPE tx_type, int tx_size);
#if CONFIG_TX64X64
-TXFM_2D_FLIP_CFG av1_get_fwd_txfm_64x64_cfg(int tx_type);
-TXFM_2D_FLIP_CFG av1_get_fwd_txfm_64x32_cfg(int tx_type);
-TXFM_2D_FLIP_CFG av1_get_fwd_txfm_32x64_cfg(int tx_type);
+TXFM_2D_FLIP_CFG av1_get_fwd_txfm_64x64_cfg(TX_TYPE tx_type);
+TXFM_2D_FLIP_CFG av1_get_fwd_txfm_64x32_cfg(TX_TYPE tx_type);
+TXFM_2D_FLIP_CFG av1_get_fwd_txfm_32x64_cfg(TX_TYPE tx_type);
#endif // CONFIG_TX64X64
-TXFM_2D_FLIP_CFG av1_get_inv_txfm_cfg(int tx_type, int tx_size);
+TXFM_2D_FLIP_CFG av1_get_inv_txfm_cfg(TX_TYPE tx_type, int tx_size);
#ifdef __cplusplus
}
#endif // __cplusplus
diff --git a/av1/common/idct.c b/av1/common/idct.c
index 9d64afb..b5f5c43 100644
--- a/av1/common/idct.c
+++ b/av1/common/idct.c
@@ -122,7 +122,7 @@
// Inverse identity transform and add.
#if CONFIG_EXT_TX
static void inv_idtx_add_c(const tran_low_t *input, uint8_t *dest, int stride,
- int bsx, int bsy, int tx_type) {
+ int bsx, int bsy, TX_TYPE tx_type) {
int r, c;
const int pels = bsx * bsy;
const int shift = 3 - ((pels > 256) + (pels > 1024));
@@ -145,7 +145,7 @@
#if CONFIG_EXT_TX
static void maybe_flip_strides(uint8_t **dst, int *dstride, tran_low_t **src,
- int *sstride, int tx_type, int sizey,
+ int *sstride, TX_TYPE tx_type, int sizey,
int sizex) {
// Note that the transpose of src will be added to dst. In order to LR
// flip the addends (in dst coordinates), we UD flip the src. To UD flip
@@ -186,7 +186,7 @@
#if CONFIG_HIGHBITDEPTH
#if CONFIG_EXT_TX && CONFIG_TX64X64
static void highbd_inv_idtx_add_c(const tran_low_t *input, uint8_t *dest8,
- int stride, int bsx, int bsy, int tx_type,
+ int stride, int bsx, int bsy, TX_TYPE tx_type,
int bd) {
int r, c;
const int pels = bsx * bsy;
@@ -263,7 +263,7 @@
void av1_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride,
const TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -386,7 +386,7 @@
void av1_iht4x8_32_add_c(const tran_low_t *input, uint8_t *dest, int stride,
const TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -467,7 +467,7 @@
void av1_iht8x4_32_add_c(const tran_low_t *input, uint8_t *dest, int stride,
const TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -549,7 +549,7 @@
void av1_iht4x16_64_add_c(const tran_low_t *input, uint8_t *dest, int stride,
const TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -622,7 +622,7 @@
void av1_iht16x4_64_add_c(const tran_low_t *input, uint8_t *dest, int stride,
const TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -696,7 +696,7 @@
void av1_iht8x16_128_add_c(const tran_low_t *input, uint8_t *dest, int stride,
const TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -770,7 +770,7 @@
void av1_iht16x8_128_add_c(const tran_low_t *input, uint8_t *dest, int stride,
const TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -845,7 +845,7 @@
void av1_iht8x32_256_add_c(const tran_low_t *input, uint8_t *dest, int stride,
const TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -918,7 +918,7 @@
void av1_iht32x8_256_add_c(const tran_low_t *input, uint8_t *dest, int stride,
const TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -992,7 +992,7 @@
void av1_iht16x32_512_add_c(const tran_low_t *input, uint8_t *dest, int stride,
const TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -1054,7 +1054,7 @@
void av1_iht32x16_512_add_c(const tran_low_t *input, uint8_t *dest, int stride,
const TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -1116,7 +1116,7 @@
void av1_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride,
const TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -1232,7 +1232,7 @@
void av1_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest, int stride,
const TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -1330,7 +1330,7 @@
#if CONFIG_EXT_TX || CONFIG_DAALA_DCT32
void av1_iht32x32_1024_add_c(const tran_low_t *input, uint8_t *dest, int stride,
const TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_DCT_ONLY
assert(tx_type == DCT_DCT);
#endif
@@ -1428,7 +1428,7 @@
#if CONFIG_TX64X64
void av1_iht64x64_4096_add_c(const tran_low_t *input, uint8_t *dest, int stride,
const TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -1527,7 +1527,7 @@
void av1_iht64x32_2048_add_c(const tran_low_t *input, uint8_t *dest, int stride,
const TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -1589,7 +1589,7 @@
void av1_iht32x64_2048_add_c(const tran_low_t *input, uint8_t *dest, int stride,
const TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -2155,7 +2155,7 @@
int eob = txfm_param->eob;
int bd = txfm_param->bd;
int lossless = txfm_param->lossless;
- TX_TYPE tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
tran_high_t a1 = input[0] >> UNIT_QUANT_SHIFT;
tran_high_t b1 = input[1] >> UNIT_QUANT_SHIFT;
tran_high_t c1 = input[2] >> UNIT_QUANT_SHIFT;
@@ -2195,7 +2195,7 @@
int bd = txfm_param->bd;
int lossless = txfm_param->lossless;
const int32_t *src = cast_to_int32(input);
- TX_TYPE tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
if (lossless) {
assert(tx_type == DCT_DCT);
av1_highbd_iwht4x4_add(input, dest, stride, eob, bd);
@@ -2295,7 +2295,7 @@
static void highbd_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest,
int stride, const TxfmParam *txfm_param) {
int bd = txfm_param->bd;
- TX_TYPE tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
const int32_t *src = cast_to_int32(input);
switch (tx_type) {
case DCT_DCT:
@@ -2333,7 +2333,7 @@
static void highbd_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest,
int stride, const TxfmParam *txfm_param) {
int bd = txfm_param->bd;
- TX_TYPE tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
const int32_t *src = cast_to_int32(input);
switch (tx_type) {
case DCT_DCT:
@@ -2371,7 +2371,7 @@
static void highbd_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest,
int stride, const TxfmParam *txfm_param) {
int bd = txfm_param->bd;
- TX_TYPE tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
const int32_t *src = cast_to_int32(input);
switch (tx_type) {
case DCT_DCT:
@@ -2410,7 +2410,7 @@
static void highbd_inv_txfm_add_64x64(const tran_low_t *input, uint8_t *dest,
int stride, const TxfmParam *txfm_param) {
int bd = txfm_param->bd;
- TX_TYPE tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
const int32_t *src = cast_to_int32(input);
switch (tx_type) {
case DCT_DCT:
diff --git a/av1/common/mips/msa/av1_idct16x16_msa.c b/av1/common/mips/msa/av1_idct16x16_msa.c
index 522cce0..ff461b9 100644
--- a/av1/common/mips/msa/av1_idct16x16_msa.c
+++ b/av1/common/mips/msa/av1_idct16x16_msa.c
@@ -19,7 +19,7 @@
int32_t i;
DECLARE_ALIGNED(32, int16_t, out[16 * 16]);
int16_t *out_ptr = &out[0];
- int32_t tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
switch (tx_type) {
case DCT_DCT:
diff --git a/av1/common/mips/msa/av1_idct4x4_msa.c b/av1/common/mips/msa/av1_idct4x4_msa.c
index 7a68dbb..37f7fd7 100644
--- a/av1/common/mips/msa/av1_idct4x4_msa.c
+++ b/av1/common/mips/msa/av1_idct4x4_msa.c
@@ -17,7 +17,7 @@
void av1_iht4x4_16_add_msa(const int16_t *input, uint8_t *dst,
int32_t dst_stride, TxfmParam *txfm_param) {
v8i16 in0, in1, in2, in3;
- int32_t tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
/* load vector elements of 4x4 block */
LD4x4_SH(input, in0, in1, in2, in3);
diff --git a/av1/common/mips/msa/av1_idct8x8_msa.c b/av1/common/mips/msa/av1_idct8x8_msa.c
index c6ef61e..7410f7b 100644
--- a/av1/common/mips/msa/av1_idct8x8_msa.c
+++ b/av1/common/mips/msa/av1_idct8x8_msa.c
@@ -17,7 +17,7 @@
void av1_iht8x8_64_add_msa(const int16_t *input, uint8_t *dst,
int32_t dst_stride, TxfmParam *txfm_param) {
v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
- int32_t tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
/* load vector elements of 8x8 block */
LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
diff --git a/av1/common/x86/av1_fwd_txfm2d_sse4.c b/av1/common/x86/av1_fwd_txfm2d_sse4.c
index 07a7de4..58ede02 100644
--- a/av1/common/x86/av1_fwd_txfm2d_sse4.c
+++ b/av1/common/x86/av1_fwd_txfm2d_sse4.c
@@ -74,7 +74,7 @@
}
void av1_fwd_txfm2d_32x32_sse4_1(const int16_t *input, int32_t *output,
- int stride, int tx_type, int bd) {
+ int stride, TX_TYPE tx_type, int bd) {
DECLARE_ALIGNED(16, int32_t, txfm_buf[1024]);
TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_32X32);
(void)bd;
diff --git a/av1/common/x86/highbd_inv_txfm_avx2.c b/av1/common/x86/highbd_inv_txfm_avx2.c
index dd2a681..0e833e6 100644
--- a/av1/common/x86/highbd_inv_txfm_avx2.c
+++ b/av1/common/x86/highbd_inv_txfm_avx2.c
@@ -599,7 +599,7 @@
}
void av1_inv_txfm2d_add_32x32_avx2(const int32_t *coeff, uint16_t *output,
- int stride, int tx_type, int bd) {
+ int stride, TX_TYPE tx_type, int bd) {
__m256i in[128], out[128];
const TXFM_1D_CFG *row_cfg = NULL;
const TXFM_1D_CFG *col_cfg = NULL;
diff --git a/av1/common/x86/highbd_inv_txfm_sse4.c b/av1/common/x86/highbd_inv_txfm_sse4.c
index a93699f..8613bed 100644
--- a/av1/common/x86/highbd_inv_txfm_sse4.c
+++ b/av1/common/x86/highbd_inv_txfm_sse4.c
@@ -230,7 +230,7 @@
}
void av1_inv_txfm2d_add_4x4_sse4_1(const int32_t *coeff, uint16_t *output,
- int stride, int tx_type, int bd) {
+ int stride, TX_TYPE tx_type, int bd) {
__m128i in[4];
const TXFM_1D_CFG *row_cfg = NULL;
const TXFM_1D_CFG *col_cfg = NULL;
@@ -706,7 +706,7 @@
}
void av1_inv_txfm2d_add_8x8_sse4_1(const int32_t *coeff, uint16_t *output,
- int stride, int tx_type, int bd) {
+ int stride, TX_TYPE tx_type, int bd) {
__m128i in[16], out[16];
const TXFM_1D_CFG *row_cfg = NULL;
const TXFM_1D_CFG *col_cfg = NULL;
@@ -1316,7 +1316,7 @@
}
void av1_inv_txfm2d_add_16x16_sse4_1(const int32_t *coeff, uint16_t *output,
- int stride, int tx_type, int bd) {
+ int stride, TX_TYPE tx_type, int bd) {
__m128i in[64], out[64];
const TXFM_1D_CFG *row_cfg = NULL;
const TXFM_1D_CFG *col_cfg = NULL;
diff --git a/av1/common/x86/hybrid_inv_txfm_avx2.c b/av1/common/x86/hybrid_inv_txfm_avx2.c
index 0648b95..c440d0f 100644
--- a/av1/common/x86/hybrid_inv_txfm_avx2.c
+++ b/av1/common/x86/hybrid_inv_txfm_avx2.c
@@ -366,7 +366,7 @@
void av1_iht16x16_256_add_avx2(const tran_low_t *input, uint8_t *dest,
int stride, const TxfmParam *txfm_param) {
__m256i in[16];
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
load_buffer_16x16(input, in);
switch (tx_type) {
diff --git a/av1/common/x86/idct_intrin_sse2.c b/av1/common/x86/idct_intrin_sse2.c
index bf12a26..541165c 100644
--- a/av1/common/x86/idct_intrin_sse2.c
+++ b/av1/common/x86/idct_intrin_sse2.c
@@ -63,7 +63,7 @@
__m128i in[2];
const __m128i zero = _mm_setzero_si128();
const __m128i eight = _mm_set1_epi16(8);
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
in[0] = load_input_data(input);
in[1] = load_input_data(input + 8);
@@ -155,7 +155,7 @@
__m128i in[8];
const __m128i zero = _mm_setzero_si128();
const __m128i final_rounding = _mm_set1_epi16(1 << 4);
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
// load input data
in[0] = load_input_data(input);
@@ -257,7 +257,7 @@
__m128i in[32];
__m128i *in0 = &in[0];
__m128i *in1 = &in[16];
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
load_buffer_8x16(input, in0);
input += 8;
@@ -393,7 +393,7 @@
void av1_iht8x16_128_add_sse2(const tran_low_t *input, uint8_t *dest,
int stride, const TxfmParam *txfm_param) {
__m128i in[16];
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
in[0] = load_input_data(input + 0 * 8);
in[1] = load_input_data(input + 1 * 8);
@@ -559,7 +559,7 @@
void av1_iht16x8_128_add_sse2(const tran_low_t *input, uint8_t *dest,
int stride, const TxfmParam *txfm_param) {
__m128i in[16];
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
// Transpose 16x8 input into in[]
in[0] = load_input_data(input + 0 * 16);
@@ -720,7 +720,7 @@
void av1_iht8x4_32_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
const TxfmParam *txfm_param) {
__m128i in[8];
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
in[0] = load_input_data(input + 0 * 8);
in[1] = load_input_data(input + 1 * 8);
@@ -905,7 +905,7 @@
void av1_iht4x8_32_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
const TxfmParam *txfm_param) {
__m128i in[8];
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
// Load rows, packed two per element of 'in'.
// We pack into the bottom half of 'in' so that the
@@ -1128,7 +1128,7 @@
void av1_iht16x32_512_add_sse2(const tran_low_t *input, uint8_t *dest,
int stride, const TxfmParam *txfm_param) {
__m128i intl[16], intr[16], inbl[16], inbr[16];
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
int i;
for (i = 0; i < 16; ++i) {
@@ -1282,7 +1282,7 @@
void av1_iht32x16_512_add_sse2(const tran_low_t *input, uint8_t *dest,
int stride, const TxfmParam *txfm_param) {
__m128i in0[16], in1[16], in2[16], in3[16];
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
int i;
for (i = 0; i < 16; ++i) {
diff --git a/av1/encoder/dct.c b/av1/encoder/dct.c
index af2bf06..c91e289 100644
--- a/av1/encoder/dct.c
+++ b/av1/encoder/dct.c
@@ -1189,7 +1189,7 @@
}
static void maybe_flip_input(const int16_t **src, int *src_stride, int l, int w,
- int16_t *buff, int tx_type) {
+ int16_t *buff, TX_TYPE tx_type) {
switch (tx_type) {
#if CONFIG_MRC_TX
case MRC_DCT:
@@ -1229,7 +1229,7 @@
void av1_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif
@@ -1341,7 +1341,7 @@
void av1_fht4x8_c(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -1417,7 +1417,7 @@
void av1_fht8x4_c(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -1493,7 +1493,7 @@
void av1_fht4x16_c(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -1560,7 +1560,7 @@
void av1_fht16x4_c(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -1627,7 +1627,7 @@
void av1_fht8x16_c(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -1696,7 +1696,7 @@
void av1_fht16x8_c(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -1765,7 +1765,7 @@
void av1_fht8x32_c(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -1832,7 +1832,7 @@
void av1_fht32x8_c(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -1899,7 +1899,7 @@
void av1_fht16x32_c(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -1958,7 +1958,7 @@
void av1_fht32x16_c(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -2017,7 +2017,7 @@
void av1_fht8x8_c(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -2183,7 +2183,7 @@
void av1_fht16x16_c(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -2281,7 +2281,7 @@
void av1_fht32x32_c(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_DCT_ONLY
assert(tx_type == DCT_DCT);
#endif
@@ -2420,7 +2420,7 @@
void av1_fht64x64_c(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -2509,7 +2509,7 @@
void av1_fht64x32_c(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -2568,7 +2568,7 @@
void av1_fht32x64_c(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif // CONFIG_MRC_TX
@@ -2628,7 +2628,7 @@
#if CONFIG_EXT_TX
// Forward identity transform.
void av1_fwd_idtx_c(const int16_t *src_diff, tran_low_t *coeff, int stride,
- int bsx, int bsy, int tx_type) {
+ int bsx, int bsy, TX_TYPE tx_type) {
int r, c;
const int pels = bsx * bsy;
const int shift = 3 - ((pels > 256) + (pels > 1024));
diff --git a/av1/encoder/hybrid_fwd_txfm.c b/av1/encoder/hybrid_fwd_txfm.c
index cc3bb07..c36b177 100644
--- a/av1/encoder/hybrid_fwd_txfm.c
+++ b/av1/encoder/hybrid_fwd_txfm.c
@@ -235,7 +235,7 @@
static void highbd_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
int diff_stride, TxfmParam *txfm_param) {
int32_t *dst_coeff = (int32_t *)coeff;
- const int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
const int bd = txfm_param->bd;
if (txfm_param->lossless) {
assert(tx_type == DCT_DCT);
@@ -320,7 +320,7 @@
static void highbd_fwd_txfm_8x8(const int16_t *src_diff, tran_low_t *coeff,
int diff_stride, TxfmParam *txfm_param) {
int32_t *dst_coeff = (int32_t *)coeff;
- const int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
const int bd = txfm_param->bd;
switch (tx_type) {
case DCT_DCT:
@@ -358,7 +358,7 @@
static void highbd_fwd_txfm_16x16(const int16_t *src_diff, tran_low_t *coeff,
int diff_stride, TxfmParam *txfm_param) {
int32_t *dst_coeff = (int32_t *)coeff;
- const int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
const int bd = txfm_param->bd;
switch (tx_type) {
case DCT_DCT:
@@ -396,7 +396,7 @@
static void highbd_fwd_txfm_32x32(const int16_t *src_diff, tran_low_t *coeff,
int diff_stride, TxfmParam *txfm_param) {
int32_t *dst_coeff = (int32_t *)coeff;
- const int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
const int bd = txfm_param->bd;
switch (tx_type) {
case DCT_DCT:
@@ -435,7 +435,7 @@
static void highbd_fwd_txfm_32x64(const int16_t *src_diff, tran_low_t *coeff,
int diff_stride, TxfmParam *txfm_param) {
int32_t *dst_coeff = (int32_t *)coeff;
- const int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
const int bd = txfm_param->bd;
switch (tx_type) {
case DCT_DCT:
@@ -475,7 +475,7 @@
static void highbd_fwd_txfm_64x32(const int16_t *src_diff, tran_low_t *coeff,
int diff_stride, TxfmParam *txfm_param) {
int32_t *dst_coeff = (int32_t *)coeff;
- const int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
const int bd = txfm_param->bd;
switch (tx_type) {
case DCT_DCT:
@@ -514,7 +514,7 @@
static void highbd_fwd_txfm_64x64(const int16_t *src_diff, tran_low_t *coeff,
int diff_stride, TxfmParam *txfm_param) {
int32_t *dst_coeff = (int32_t *)coeff;
- const int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
const int bd = txfm_param->bd;
switch (tx_type) {
case DCT_DCT:
diff --git a/av1/encoder/x86/dct_intrin_sse2.c b/av1/encoder/x86/dct_intrin_sse2.c
index 35e7062..e5b19a4 100644
--- a/av1/encoder/x86/dct_intrin_sse2.c
+++ b/av1/encoder/x86/dct_intrin_sse2.c
@@ -205,7 +205,7 @@
void av1_fht4x4_sse2(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
__m128i in[4];
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif
@@ -866,7 +866,7 @@
void av1_fht8x8_sse2(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
__m128i in[8];
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif
@@ -1903,7 +1903,7 @@
void av1_fht16x16_sse2(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
__m128i in0[16], in1[16];
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif
@@ -2123,7 +2123,7 @@
void av1_fht4x8_sse2(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
__m128i in[8];
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif
@@ -2301,7 +2301,7 @@
void av1_fht8x4_sse2(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
__m128i in[8];
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif
@@ -2445,7 +2445,7 @@
void av1_fht8x16_sse2(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
__m128i in[16];
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif
@@ -2630,7 +2630,7 @@
void av1_fht16x8_sse2(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
__m128i in[16];
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif
@@ -2944,7 +2944,7 @@
void av1_fht16x32_sse2(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
__m128i intl[16], intr[16], inbl[16], inbr[16];
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif
@@ -3137,7 +3137,7 @@
void av1_fht32x16_sse2(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
__m128i in0[16], in1[16], in2[16], in3[16];
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif
@@ -3381,7 +3381,7 @@
void av1_fht32x32_sse2(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
__m128i in0[32], in1[32], in2[32], in3[32];
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "No 32x32 sse2 MRC_DCT implementation");
#endif
diff --git a/av1/encoder/x86/highbd_fwd_txfm_sse4.c b/av1/encoder/x86/highbd_fwd_txfm_sse4.c
index cab36f2..b684f7a 100644
--- a/av1/encoder/x86/highbd_fwd_txfm_sse4.c
+++ b/av1/encoder/x86/highbd_fwd_txfm_sse4.c
@@ -195,7 +195,7 @@
}
void av1_fwd_txfm2d_4x4_sse4_1(const int16_t *input, int32_t *coeff,
- int input_stride, int tx_type, int bd) {
+ int input_stride, TX_TYPE tx_type, int bd) {
__m128i in[4];
const TXFM_1D_CFG *row_cfg = NULL;
const TXFM_1D_CFG *col_cfg = NULL;
@@ -926,7 +926,7 @@
}
void av1_fwd_txfm2d_8x8_sse4_1(const int16_t *input, int32_t *coeff, int stride,
- int tx_type, int bd) {
+ TX_TYPE tx_type, int bd) {
__m128i in[16], out[16];
const TXFM_1D_CFG *row_cfg = NULL;
const TXFM_1D_CFG *col_cfg = NULL;
@@ -1800,7 +1800,7 @@
}
void av1_fwd_txfm2d_16x16_sse4_1(const int16_t *input, int32_t *coeff,
- int stride, int tx_type, int bd) {
+ int stride, TX_TYPE tx_type, int bd) {
__m128i in[64], out[64];
const TXFM_1D_CFG *row_cfg = NULL;
const TXFM_1D_CFG *col_cfg = NULL;
diff --git a/av1/encoder/x86/hybrid_fwd_txfm_avx2.c b/av1/encoder/x86/hybrid_fwd_txfm_avx2.c
index af8e9a5..88621c8 100644
--- a/av1/encoder/x86/hybrid_fwd_txfm_avx2.c
+++ b/av1/encoder/x86/hybrid_fwd_txfm_avx2.c
@@ -916,7 +916,7 @@
void av1_fht16x16_avx2(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
__m256i in[16];
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
#endif
@@ -1516,7 +1516,7 @@
TxfmParam *txfm_param) {
__m256i in0[32]; // left 32 columns
__m256i in1[32]; // right 32 columns
- int tx_type = txfm_param->tx_type;
+ const TX_TYPE tx_type = txfm_param->tx_type;
#if CONFIG_MRC_TX
assert(tx_type != MRC_DCT && "No avx2 32x32 implementation of MRC_DCT");
#endif
diff --git a/test/av1_fht16x16_test.cc b/test/av1_fht16x16_test.cc
index 5a9fcc2..21235a8 100644
--- a/test/av1_fht16x16_test.cc
+++ b/test/av1_fht16x16_test.cc
@@ -28,7 +28,7 @@
const TxfmParam *txfm_param);
using std::tr1::tuple;
using libaom_test::FhtFunc;
-typedef tuple<FhtFunc, IhtFunc, int, aom_bit_depth_t, int> Ht16x16Param;
+typedef tuple<FhtFunc, IhtFunc, TX_TYPE, aom_bit_depth_t, int> Ht16x16Param;
void fht16x16_ref(const int16_t *in, tran_low_t *out, int stride,
TxfmParam *txfm_param) {
@@ -42,15 +42,15 @@
#if CONFIG_HIGHBITDEPTH
typedef void (*IHbdHtFunc)(const tran_low_t *in, uint8_t *out, int stride,
- int tx_type, int bd);
+ TX_TYPE tx_type, int bd);
typedef void (*HbdHtFunc)(const int16_t *input, int32_t *output, int stride,
- int tx_type, int bd);
+ TX_TYPE tx_type, int bd);
// Target optimized function, tx_type, bit depth
-typedef tuple<HbdHtFunc, int, int> HighbdHt16x16Param;
+typedef tuple<HbdHtFunc, TX_TYPE, int> HighbdHt16x16Param;
void highbd_fht16x16_ref(const int16_t *in, int32_t *out, int stride,
- int tx_type, int bd) {
+ TX_TYPE tx_type, int bd) {
av1_fwd_txfm2d_16x16_c(in, out, stride, tx_type, bd);
}
#endif // CONFIG_HIGHBITDEPTH
@@ -128,7 +128,7 @@
private:
HbdHtFunc fwd_txfm_;
HbdHtFunc fwd_txfm_ref_;
- int tx_type_;
+ TX_TYPE tx_type_;
int bit_depth_;
int mask_;
int num_coeffs_;
@@ -166,39 +166,39 @@
#if HAVE_SSE2 && !CONFIG_DAALA_DCT16
const Ht16x16Param kArrayHt16x16Param_sse2[] = {
- make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 0, AOM_BITS_8,
- 256),
- make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 1, AOM_BITS_8,
- 256),
- make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 2, AOM_BITS_8,
- 256),
- make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 3, AOM_BITS_8,
- 256),
+ make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, DCT_DCT,
+ AOM_BITS_8, 256),
+ make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, ADST_DCT,
+ AOM_BITS_8, 256),
+ make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, DCT_ADST,
+ AOM_BITS_8, 256),
+ make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, ADST_ADST,
+ AOM_BITS_8, 256),
#if CONFIG_EXT_TX
- make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 4, AOM_BITS_8,
+ make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, FLIPADST_DCT,
+ AOM_BITS_8, 256),
+ make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, DCT_FLIPADST,
+ AOM_BITS_8, 256),
+ make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, FLIPADST_FLIPADST,
+ AOM_BITS_8, 256),
+ make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, ADST_FLIPADST,
+ AOM_BITS_8, 256),
+ make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, FLIPADST_ADST,
+ AOM_BITS_8, 256),
+ make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, IDTX, AOM_BITS_8,
256),
- make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 5, AOM_BITS_8,
+ make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, V_DCT, AOM_BITS_8,
256),
- make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 6, AOM_BITS_8,
+ make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, H_DCT, AOM_BITS_8,
256),
- make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 7, AOM_BITS_8,
+ make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, V_ADST, AOM_BITS_8,
256),
- make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 8, AOM_BITS_8,
+ make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, H_ADST, AOM_BITS_8,
256),
- make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 9, AOM_BITS_8,
- 256),
- make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 10, AOM_BITS_8,
- 256),
- make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 11, AOM_BITS_8,
- 256),
- make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 12, AOM_BITS_8,
- 256),
- make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 13, AOM_BITS_8,
- 256),
- make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 14, AOM_BITS_8,
- 256),
- make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, 15, AOM_BITS_8,
- 256)
+ make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, V_FLIPADST,
+ AOM_BITS_8, 256),
+ make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2, H_FLIPADST,
+ AOM_BITS_8, 256)
#endif // CONFIG_EXT_TX
};
INSTANTIATE_TEST_CASE_P(SSE2, AV1Trans16x16HT,
@@ -207,39 +207,39 @@
#if HAVE_AVX2 && !CONFIG_DAALA_DCT16
const Ht16x16Param kArrayHt16x16Param_avx2[] = {
- make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, 0, AOM_BITS_8,
- 256),
- make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, 1, AOM_BITS_8,
- 256),
- make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, 2, AOM_BITS_8,
- 256),
- make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, 3, AOM_BITS_8,
- 256),
+ make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, DCT_DCT,
+ AOM_BITS_8, 256),
+ make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, ADST_DCT,
+ AOM_BITS_8, 256),
+ make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, DCT_ADST,
+ AOM_BITS_8, 256),
+ make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, ADST_ADST,
+ AOM_BITS_8, 256),
#if CONFIG_EXT_TX
- make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, 4, AOM_BITS_8,
+ make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, FLIPADST_DCT,
+ AOM_BITS_8, 256),
+ make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, DCT_FLIPADST,
+ AOM_BITS_8, 256),
+ make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, FLIPADST_FLIPADST,
+ AOM_BITS_8, 256),
+ make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, ADST_FLIPADST,
+ AOM_BITS_8, 256),
+ make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, FLIPADST_ADST,
+ AOM_BITS_8, 256),
+ make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, IDTX, AOM_BITS_8,
256),
- make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, 5, AOM_BITS_8,
+ make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, V_DCT, AOM_BITS_8,
256),
- make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, 6, AOM_BITS_8,
+ make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, H_DCT, AOM_BITS_8,
256),
- make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, 7, AOM_BITS_8,
+ make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, V_ADST, AOM_BITS_8,
256),
- make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, 8, AOM_BITS_8,
+ make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, H_ADST, AOM_BITS_8,
256),
- make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, 9, AOM_BITS_8,
- 256),
- make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, 10, AOM_BITS_8,
- 256),
- make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, 11, AOM_BITS_8,
- 256),
- make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, 12, AOM_BITS_8,
- 256),
- make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, 13, AOM_BITS_8,
- 256),
- make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, 14, AOM_BITS_8,
- 256),
- make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, 15, AOM_BITS_8,
- 256)
+ make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, V_FLIPADST,
+ AOM_BITS_8, 256),
+ make_tuple(&av1_fht16x16_avx2, &av1_iht16x16_256_add_avx2, H_FLIPADST,
+ AOM_BITS_8, 256)
#endif // CONFIG_EXT_TX
};
INSTANTIATE_TEST_CASE_P(AVX2, AV1Trans16x16HT,
@@ -248,25 +248,25 @@
#if HAVE_SSE4_1 && CONFIG_HIGHBITDEPTH && !CONFIG_DAALA_DCT16
const HighbdHt16x16Param kArrayHBDHt16x16Param_sse4_1[] = {
- make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 0, 10),
- make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 0, 12),
- make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 1, 10),
- make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 1, 12),
- make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 2, 10),
- make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 2, 12),
- make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 3, 10),
- make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 3, 12),
+ make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, DCT_DCT, 10),
+ make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, DCT_DCT, 12),
+ make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, ADST_DCT, 10),
+ make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, ADST_DCT, 12),
+ make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, DCT_ADST, 10),
+ make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, DCT_ADST, 12),
+ make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, ADST_ADST, 10),
+ make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, ADST_ADST, 12),
#if CONFIG_EXT_TX
- make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 4, 10),
- make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 4, 12),
- make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 5, 10),
- make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 5, 12),
- make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 6, 10),
- make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 6, 12),
- make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 7, 10),
- make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 7, 12),
- make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 8, 10),
- make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, 8, 12),
+ make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, FLIPADST_DCT, 10),
+ make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, FLIPADST_DCT, 12),
+ make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, DCT_FLIPADST, 10),
+ make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, DCT_FLIPADST, 12),
+ make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, FLIPADST_FLIPADST, 10),
+ make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, FLIPADST_FLIPADST, 12),
+ make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, ADST_FLIPADST, 10),
+ make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, ADST_FLIPADST, 12),
+ make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, FLIPADST_ADST, 10),
+ make_tuple(&av1_fwd_txfm2d_16x16_sse4_1, FLIPADST_ADST, 12),
#endif // CONFIG_EXT_TX
};
INSTANTIATE_TEST_CASE_P(SSE4_1, AV1HighbdTrans16x16HT,
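The literal-to-name substitutions throughout these tables follow the
declaration order of TX_TYPE in av1/common/enums.h, which the paired -/+
lines above make explicit (0 -> DCT_DCT ... 15 -> H_FLIPADST). A sketch of
that enumeration with the values written out for reference; the exact
attributes and config gating in the header may differ:

typedef enum {
  DCT_DCT = 0,
  ADST_DCT = 1,
  DCT_ADST = 2,
  ADST_ADST = 3,
#if CONFIG_EXT_TX
  FLIPADST_DCT = 4,
  DCT_FLIPADST = 5,
  FLIPADST_FLIPADST = 6,
  ADST_FLIPADST = 7,
  FLIPADST_ADST = 8,
  IDTX = 9,
  V_DCT = 10,
  H_DCT = 11,
  V_ADST = 12,
  H_ADST = 13,
  V_FLIPADST = 14,
  H_FLIPADST = 15,
#endif  // CONFIG_EXT_TX
  TX_TYPES,
} TX_TYPE;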
diff --git a/test/av1_fht16x32_test.cc b/test/av1_fht16x32_test.cc
index 099a312..0b3928f 100644
--- a/test/av1_fht16x32_test.cc
+++ b/test/av1_fht16x32_test.cc
@@ -28,7 +28,7 @@
const TxfmParam *txfm_param);
using std::tr1::tuple;
using libaom_test::FhtFunc;
-typedef tuple<FhtFunc, IhtFunc, int, aom_bit_depth_t, int> Ht16x32Param;
+typedef tuple<FhtFunc, IhtFunc, TX_TYPE, aom_bit_depth_t, int> Ht16x32Param;
void fht16x32_ref(const int16_t *in, tran_low_t *out, int stride,
TxfmParam *txfm_param) {
@@ -80,23 +80,34 @@
using std::tr1::make_tuple;
const Ht16x32Param kArrayHt16x32Param_c[] = {
- make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, 0, AOM_BITS_8, 512),
- make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, 1, AOM_BITS_8, 512),
- make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, 2, AOM_BITS_8, 512),
- make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, 3, AOM_BITS_8, 512),
+ make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, DCT_DCT, AOM_BITS_8,
+ 512),
+ make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, ADST_DCT, AOM_BITS_8,
+ 512),
+ make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, DCT_ADST, AOM_BITS_8,
+ 512),
+ make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, ADST_ADST, AOM_BITS_8,
+ 512),
#if CONFIG_EXT_TX
- make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, 4, AOM_BITS_8, 512),
- make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, 5, AOM_BITS_8, 512),
- make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, 6, AOM_BITS_8, 512),
- make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, 7, AOM_BITS_8, 512),
- make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, 8, AOM_BITS_8, 512),
- make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, 9, AOM_BITS_8, 512),
- make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, 10, AOM_BITS_8, 512),
- make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, 11, AOM_BITS_8, 512),
- make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, 12, AOM_BITS_8, 512),
- make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, 13, AOM_BITS_8, 512),
- make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, 14, AOM_BITS_8, 512),
- make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, 15, AOM_BITS_8, 512)
+ make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, FLIPADST_DCT, AOM_BITS_8,
+ 512),
+ make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, DCT_FLIPADST, AOM_BITS_8,
+ 512),
+ make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, FLIPADST_FLIPADST,
+ AOM_BITS_8, 512),
+ make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, ADST_FLIPADST,
+ AOM_BITS_8, 512),
+ make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, FLIPADST_ADST,
+ AOM_BITS_8, 512),
+ make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, IDTX, AOM_BITS_8, 512),
+ make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, V_DCT, AOM_BITS_8, 512),
+ make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, H_DCT, AOM_BITS_8, 512),
+ make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, V_ADST, AOM_BITS_8, 512),
+ make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, H_ADST, AOM_BITS_8, 512),
+ make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, V_FLIPADST, AOM_BITS_8,
+ 512),
+ make_tuple(&av1_fht16x32_c, &av1_iht16x32_512_add_c, H_FLIPADST, AOM_BITS_8,
+ 512)
#endif // CONFIG_EXT_TX
};
INSTANTIATE_TEST_CASE_P(C, AV1Trans16x32HT,
@@ -104,39 +115,39 @@
#if HAVE_SSE2
const Ht16x32Param kArrayHt16x32Param_sse2[] = {
- make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 0, AOM_BITS_8,
- 512),
- make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 1, AOM_BITS_8,
- 512),
- make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 2, AOM_BITS_8,
- 512),
- make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 3, AOM_BITS_8,
- 512),
+ make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, DCT_DCT,
+ AOM_BITS_8, 512),
+ make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, ADST_DCT,
+ AOM_BITS_8, 512),
+ make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, DCT_ADST,
+ AOM_BITS_8, 512),
+ make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, ADST_ADST,
+ AOM_BITS_8, 512),
#if CONFIG_EXT_TX
- make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 4, AOM_BITS_8,
+ make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, FLIPADST_DCT,
+ AOM_BITS_8, 512),
+ make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, DCT_FLIPADST,
+ AOM_BITS_8, 512),
+ make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, FLIPADST_FLIPADST,
+ AOM_BITS_8, 512),
+ make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, ADST_FLIPADST,
+ AOM_BITS_8, 512),
+ make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, FLIPADST_ADST,
+ AOM_BITS_8, 512),
+ make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, IDTX, AOM_BITS_8,
512),
- make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 5, AOM_BITS_8,
+ make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, V_DCT, AOM_BITS_8,
512),
- make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 6, AOM_BITS_8,
+ make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, H_DCT, AOM_BITS_8,
512),
- make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 7, AOM_BITS_8,
+ make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, V_ADST, AOM_BITS_8,
512),
- make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 8, AOM_BITS_8,
+ make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, H_ADST, AOM_BITS_8,
512),
- make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 9, AOM_BITS_8,
- 512),
- make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 10, AOM_BITS_8,
- 512),
- make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 11, AOM_BITS_8,
- 512),
- make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 12, AOM_BITS_8,
- 512),
- make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 13, AOM_BITS_8,
- 512),
- make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 14, AOM_BITS_8,
- 512),
- make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, 15, AOM_BITS_8,
- 512)
+ make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, V_FLIPADST,
+ AOM_BITS_8, 512),
+ make_tuple(&av1_fht16x32_sse2, &av1_iht16x32_512_add_sse2, H_FLIPADST,
+ AOM_BITS_8, 512)
#endif // CONFIG_EXT_TX
};
INSTANTIATE_TEST_CASE_P(SSE2, AV1Trans16x32HT,
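Retyping the third tuple element from int to TX_TYPE is a compile-time check,
not just a rename: C++ performs no implicit int-to-enum conversion, so any
stale numeric literal left in one of these tables now fails to build instead
of silently aliasing a transform type. A minimal sketch under the typedefs
above (not part of the patch):

// tuple conversion is element-wise; int is not convertible to TX_TYPE.
const Ht16x32Param ok = std::tr1::make_tuple(
    &av1_fht16x32_c, &av1_iht16x32_512_add_c, DCT_DCT, AOM_BITS_8, 512);
// const Ht16x32Param bad = std::tr1::make_tuple(
//     &av1_fht16x32_c, &av1_iht16x32_512_add_c, 0, AOM_BITS_8, 512);
// error: tuple<..., int, ...> does not convert to tuple<..., TX_TYPE, ...>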
diff --git a/test/av1_fht16x8_test.cc b/test/av1_fht16x8_test.cc
index 8277e28..3ee1a08 100644
--- a/test/av1_fht16x8_test.cc
+++ b/test/av1_fht16x8_test.cc
@@ -28,7 +28,7 @@
const TxfmParam *txfm_param);
using std::tr1::tuple;
using libaom_test::FhtFunc;
-typedef tuple<FhtFunc, IhtFunc, int, aom_bit_depth_t, int> Ht16x8Param;
+typedef tuple<FhtFunc, IhtFunc, TX_TYPE, aom_bit_depth_t, int> Ht16x8Param;
void fht16x8_ref(const int16_t *in, tran_low_t *out, int stride,
TxfmParam *txfm_param) {
@@ -81,23 +81,31 @@
using std::tr1::make_tuple;
const Ht16x8Param kArrayHt16x8Param_c[] = {
- make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, 0, AOM_BITS_8, 128),
- make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, 1, AOM_BITS_8, 128),
- make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, 2, AOM_BITS_8, 128),
- make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, 3, AOM_BITS_8, 128),
+ make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, DCT_DCT, AOM_BITS_8, 128),
+ make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, ADST_DCT, AOM_BITS_8, 128),
+ make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, DCT_ADST, AOM_BITS_8, 128),
+ make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, ADST_ADST, AOM_BITS_8,
+ 128),
#if CONFIG_EXT_TX
- make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, 4, AOM_BITS_8, 128),
- make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, 5, AOM_BITS_8, 128),
- make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, 6, AOM_BITS_8, 128),
- make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, 7, AOM_BITS_8, 128),
- make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, 8, AOM_BITS_8, 128),
- make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, 9, AOM_BITS_8, 128),
- make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, 10, AOM_BITS_8, 128),
- make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, 11, AOM_BITS_8, 128),
- make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, 12, AOM_BITS_8, 128),
- make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, 13, AOM_BITS_8, 128),
- make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, 14, AOM_BITS_8, 128),
- make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, 15, AOM_BITS_8, 128)
+ make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, FLIPADST_DCT, AOM_BITS_8,
+ 128),
+ make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, DCT_FLIPADST, AOM_BITS_8,
+ 128),
+ make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, FLIPADST_FLIPADST,
+ AOM_BITS_8, 128),
+ make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, ADST_FLIPADST, AOM_BITS_8,
+ 128),
+ make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, FLIPADST_ADST, AOM_BITS_8,
+ 128),
+ make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, IDTX, AOM_BITS_8, 128),
+ make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, V_DCT, AOM_BITS_8, 128),
+ make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, H_DCT, AOM_BITS_8, 128),
+ make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, V_ADST, AOM_BITS_8, 128),
+ make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, H_ADST, AOM_BITS_8, 128),
+ make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, V_FLIPADST, AOM_BITS_8,
+ 128),
+ make_tuple(&av1_fht16x8_c, &av1_iht16x8_128_add_c, H_FLIPADST, AOM_BITS_8,
+ 128)
#endif // CONFIG_EXT_TX
};
INSTANTIATE_TEST_CASE_P(C, AV1Trans16x8HT,
@@ -105,23 +113,39 @@
#if HAVE_SSE2
const Ht16x8Param kArrayHt16x8Param_sse2[] = {
- make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, 0, AOM_BITS_8, 128),
- make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, 1, AOM_BITS_8, 128),
- make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, 2, AOM_BITS_8, 128),
- make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, 3, AOM_BITS_8, 128),
+ make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, DCT_DCT, AOM_BITS_8,
+ 128),
+ make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, ADST_DCT, AOM_BITS_8,
+ 128),
+ make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, DCT_ADST, AOM_BITS_8,
+ 128),
+ make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, ADST_ADST,
+ AOM_BITS_8, 128),
#if CONFIG_EXT_TX
- make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, 4, AOM_BITS_8, 128),
- make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, 5, AOM_BITS_8, 128),
- make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, 6, AOM_BITS_8, 128),
- make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, 7, AOM_BITS_8, 128),
- make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, 8, AOM_BITS_8, 128),
- make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, 9, AOM_BITS_8, 128),
- make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, 10, AOM_BITS_8, 128),
- make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, 11, AOM_BITS_8, 128),
- make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, 12, AOM_BITS_8, 128),
- make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, 13, AOM_BITS_8, 128),
- make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, 14, AOM_BITS_8, 128),
- make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, 15, AOM_BITS_8, 128)
+ make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, FLIPADST_DCT,
+ AOM_BITS_8, 128),
+ make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, DCT_FLIPADST,
+ AOM_BITS_8, 128),
+ make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, FLIPADST_FLIPADST,
+ AOM_BITS_8, 128),
+ make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, ADST_FLIPADST,
+ AOM_BITS_8, 128),
+ make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, FLIPADST_ADST,
+ AOM_BITS_8, 128),
+ make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, IDTX, AOM_BITS_8,
+ 128),
+ make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, V_DCT, AOM_BITS_8,
+ 128),
+ make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, H_DCT, AOM_BITS_8,
+ 128),
+ make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, V_ADST, AOM_BITS_8,
+ 128),
+ make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, H_ADST, AOM_BITS_8,
+ 128),
+ make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, V_FLIPADST,
+ AOM_BITS_8, 128),
+ make_tuple(&av1_fht16x8_sse2, &av1_iht16x8_128_add_sse2, H_FLIPADST,
+ AOM_BITS_8, 128)
#endif // CONFIG_EXT_TX
};
INSTANTIATE_TEST_CASE_P(SSE2, AV1Trans16x8HT,
diff --git a/test/av1_fht32x16_test.cc b/test/av1_fht32x16_test.cc
index 1c70fd4..cbce074 100644
--- a/test/av1_fht32x16_test.cc
+++ b/test/av1_fht32x16_test.cc
@@ -28,7 +28,7 @@
const TxfmParam *txfm_param);
using std::tr1::tuple;
using libaom_test::FhtFunc;
-typedef tuple<FhtFunc, IhtFunc, int, aom_bit_depth_t, int> Ht32x16Param;
+typedef tuple<FhtFunc, IhtFunc, TX_TYPE, aom_bit_depth_t, int> Ht32x16Param;
void fht32x16_ref(const int16_t *in, tran_low_t *out, int stride,
TxfmParam *txfm_param) {
@@ -80,23 +80,34 @@
using std::tr1::make_tuple;
const Ht32x16Param kArrayHt32x16Param_c[] = {
- make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, 0, AOM_BITS_8, 512),
- make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, 1, AOM_BITS_8, 512),
- make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, 2, AOM_BITS_8, 512),
- make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, 3, AOM_BITS_8, 512),
+ make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, DCT_DCT, AOM_BITS_8,
+ 512),
+ make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, ADST_DCT, AOM_BITS_8,
+ 512),
+ make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, DCT_ADST, AOM_BITS_8,
+ 512),
+ make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, ADST_ADST, AOM_BITS_8,
+ 512),
#if CONFIG_EXT_TX
- make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, 4, AOM_BITS_8, 512),
- make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, 5, AOM_BITS_8, 512),
- make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, 6, AOM_BITS_8, 512),
- make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, 7, AOM_BITS_8, 512),
- make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, 8, AOM_BITS_8, 512),
- make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, 9, AOM_BITS_8, 512),
- make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, 10, AOM_BITS_8, 512),
- make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, 11, AOM_BITS_8, 512),
- make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, 12, AOM_BITS_8, 512),
- make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, 13, AOM_BITS_8, 512),
- make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, 14, AOM_BITS_8, 512),
- make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, 15, AOM_BITS_8, 512)
+ make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, FLIPADST_DCT, AOM_BITS_8,
+ 512),
+ make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, DCT_FLIPADST, AOM_BITS_8,
+ 512),
+ make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, FLIPADST_FLIPADST,
+ AOM_BITS_8, 512),
+ make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, ADST_FLIPADST,
+ AOM_BITS_8, 512),
+ make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, FLIPADST_ADST,
+ AOM_BITS_8, 512),
+ make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, IDTX, AOM_BITS_8, 512),
+ make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, V_DCT, AOM_BITS_8, 512),
+ make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, H_DCT, AOM_BITS_8, 512),
+ make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, V_ADST, AOM_BITS_8, 512),
+ make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, H_ADST, AOM_BITS_8, 512),
+ make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, V_FLIPADST, AOM_BITS_8,
+ 512),
+ make_tuple(&av1_fht32x16_c, &av1_iht32x16_512_add_c, H_FLIPADST, AOM_BITS_8,
+ 512)
#endif // CONFIG_EXT_TX
};
INSTANTIATE_TEST_CASE_P(C, AV1Trans32x16HT,
@@ -104,39 +115,39 @@
#if HAVE_SSE2
const Ht32x16Param kArrayHt32x16Param_sse2[] = {
- make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 0, AOM_BITS_8,
- 512),
- make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 1, AOM_BITS_8,
- 512),
- make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 2, AOM_BITS_8,
- 512),
- make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 3, AOM_BITS_8,
- 512),
+ make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, DCT_DCT,
+ AOM_BITS_8, 512),
+ make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, ADST_DCT,
+ AOM_BITS_8, 512),
+ make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, DCT_ADST,
+ AOM_BITS_8, 512),
+ make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, ADST_ADST,
+ AOM_BITS_8, 512),
#if CONFIG_EXT_TX
- make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 4, AOM_BITS_8,
+ make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, FLIPADST_DCT,
+ AOM_BITS_8, 512),
+ make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, DCT_FLIPADST,
+ AOM_BITS_8, 512),
+ make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, FLIPADST_FLIPADST,
+ AOM_BITS_8, 512),
+ make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, ADST_FLIPADST,
+ AOM_BITS_8, 512),
+ make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, FLIPADST_ADST,
+ AOM_BITS_8, 512),
+ make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, IDTX, AOM_BITS_8,
512),
- make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 5, AOM_BITS_8,
+ make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, V_DCT, AOM_BITS_8,
512),
- make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 6, AOM_BITS_8,
+ make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, H_DCT, AOM_BITS_8,
512),
- make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 7, AOM_BITS_8,
+ make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, V_ADST, AOM_BITS_8,
512),
- make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 8, AOM_BITS_8,
+ make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, H_ADST, AOM_BITS_8,
512),
- make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 9, AOM_BITS_8,
- 512),
- make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 10, AOM_BITS_8,
- 512),
- make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 11, AOM_BITS_8,
- 512),
- make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 12, AOM_BITS_8,
- 512),
- make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 13, AOM_BITS_8,
- 512),
- make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 14, AOM_BITS_8,
- 512),
- make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, 15, AOM_BITS_8,
- 512)
+ make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, V_FLIPADST,
+ AOM_BITS_8, 512),
+ make_tuple(&av1_fht32x16_sse2, &av1_iht32x16_512_add_sse2, H_FLIPADST,
+ AOM_BITS_8, 512)
#endif // CONFIG_EXT_TX
};
INSTANTIATE_TEST_CASE_P(SSE2, AV1Trans32x16HT,
diff --git a/test/av1_fht32x32_test.cc b/test/av1_fht32x32_test.cc
index e56611e..613bc91 100644
--- a/test/av1_fht32x32_test.cc
+++ b/test/av1_fht32x32_test.cc
@@ -28,7 +28,7 @@
const TxfmParam *txfm_param);
using std::tr1::tuple;
using libaom_test::FhtFunc;
-typedef tuple<FhtFunc, IhtFunc, int, aom_bit_depth_t, int> Ht32x32Param;
+typedef tuple<FhtFunc, IhtFunc, TX_TYPE, aom_bit_depth_t, int> Ht32x32Param;
void fht32x32_ref(const int16_t *in, tran_low_t *out, int stride,
TxfmParam *txfm_param) {
@@ -37,15 +37,15 @@
#if CONFIG_HIGHBITDEPTH
typedef void (*IHbdHtFunc)(const tran_low_t *in, uint8_t *out, int stride,
- int tx_type, int bd);
+ TX_TYPE tx_type, int bd);
typedef void (*HbdHtFunc)(const int16_t *input, int32_t *output, int stride,
- int tx_type, int bd);
+ TX_TYPE tx_type, int bd);
// Target optimized function, tx_type, bit depth
-typedef tuple<HbdHtFunc, int, int> HighbdHt32x32Param;
+typedef tuple<HbdHtFunc, TX_TYPE, int> HighbdHt32x32Param;
void highbd_fht32x32_ref(const int16_t *in, int32_t *out, int stride,
- int tx_type, int bd) {
+ TX_TYPE tx_type, int bd) {
av1_fwd_txfm2d_32x32_c(in, out, stride, tx_type, bd);
}
#endif // CONFIG_HIGHBITDEPTH
@@ -129,7 +129,7 @@
private:
HbdHtFunc fwd_txfm_;
HbdHtFunc fwd_txfm_ref_;
- int tx_type_;
+ TX_TYPE tx_type_;
int bit_depth_;
int mask_;
int num_coeffs_;
@@ -167,23 +167,28 @@
#if HAVE_SSE2 && !CONFIG_DAALA_DCT32
const Ht32x32Param kArrayHt32x32Param_sse2[] = {
- make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, 0, AOM_BITS_8, 1024),
- make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, 1, AOM_BITS_8, 1024),
- make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, 2, AOM_BITS_8, 1024),
- make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, 3, AOM_BITS_8, 1024),
+ make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, DCT_DCT, AOM_BITS_8, 1024),
+ make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, ADST_DCT, AOM_BITS_8, 1024),
+ make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, DCT_ADST, AOM_BITS_8, 1024),
+ make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, ADST_ADST, AOM_BITS_8, 1024),
#if CONFIG_EXT_TX
- make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, 4, AOM_BITS_8, 1024),
- make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, 5, AOM_BITS_8, 1024),
- make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, 6, AOM_BITS_8, 1024),
- make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, 7, AOM_BITS_8, 1024),
- make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, 8, AOM_BITS_8, 1024),
- make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, 9, AOM_BITS_8, 1024),
- make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, 10, AOM_BITS_8, 1024),
- make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, 11, AOM_BITS_8, 1024),
- make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, 12, AOM_BITS_8, 1024),
- make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, 13, AOM_BITS_8, 1024),
- make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, 14, AOM_BITS_8, 1024),
- make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, 15, AOM_BITS_8, 1024)
+ make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, FLIPADST_DCT, AOM_BITS_8,
+ 1024),
+ make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, DCT_FLIPADST, AOM_BITS_8,
+ 1024),
+ make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, FLIPADST_FLIPADST, AOM_BITS_8,
+ 1024),
+ make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, ADST_FLIPADST, AOM_BITS_8,
+ 1024),
+ make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, FLIPADST_ADST, AOM_BITS_8,
+ 1024),
+ make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, IDTX, AOM_BITS_8, 1024),
+ make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, V_DCT, AOM_BITS_8, 1024),
+ make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, H_DCT, AOM_BITS_8, 1024),
+ make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, V_ADST, AOM_BITS_8, 1024),
+ make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, H_ADST, AOM_BITS_8, 1024),
+ make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, V_FLIPADST, AOM_BITS_8, 1024),
+ make_tuple(&av1_fht32x32_sse2, &dummy_inv_txfm, H_FLIPADST, AOM_BITS_8, 1024)
#endif // CONFIG_EXT_TX
};
INSTANTIATE_TEST_CASE_P(SSE2, AV1Trans32x32HT,
@@ -192,23 +197,28 @@
#if HAVE_AVX2 && !CONFIG_DAALA_DCT32
const Ht32x32Param kArrayHt32x32Param_avx2[] = {
- make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, 0, AOM_BITS_8, 1024),
- make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, 1, AOM_BITS_8, 1024),
- make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, 2, AOM_BITS_8, 1024),
- make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, 3, AOM_BITS_8, 1024),
+ make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, DCT_DCT, AOM_BITS_8, 1024),
+ make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, ADST_DCT, AOM_BITS_8, 1024),
+ make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, DCT_ADST, AOM_BITS_8, 1024),
+ make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, ADST_ADST, AOM_BITS_8, 1024),
#if CONFIG_EXT_TX
- make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, 4, AOM_BITS_8, 1024),
- make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, 5, AOM_BITS_8, 1024),
- make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, 6, AOM_BITS_8, 1024),
- make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, 7, AOM_BITS_8, 1024),
- make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, 8, AOM_BITS_8, 1024),
- make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, 9, AOM_BITS_8, 1024),
- make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, 10, AOM_BITS_8, 1024),
- make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, 11, AOM_BITS_8, 1024),
- make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, 12, AOM_BITS_8, 1024),
- make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, 13, AOM_BITS_8, 1024),
- make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, 14, AOM_BITS_8, 1024),
- make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, 15, AOM_BITS_8, 1024)
+ make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, FLIPADST_DCT, AOM_BITS_8,
+ 1024),
+ make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, DCT_FLIPADST, AOM_BITS_8,
+ 1024),
+ make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, FLIPADST_FLIPADST, AOM_BITS_8,
+ 1024),
+ make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, ADST_FLIPADST, AOM_BITS_8,
+ 1024),
+ make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, FLIPADST_ADST, AOM_BITS_8,
+ 1024),
+ make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, IDTX, AOM_BITS_8, 1024),
+ make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, V_DCT, AOM_BITS_8, 1024),
+ make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, H_DCT, AOM_BITS_8, 1024),
+ make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, V_ADST, AOM_BITS_8, 1024),
+ make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, H_ADST, AOM_BITS_8, 1024),
+ make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, V_FLIPADST, AOM_BITS_8, 1024),
+ make_tuple(&av1_fht32x32_avx2, &dummy_inv_txfm, H_FLIPADST, AOM_BITS_8, 1024)
#endif // CONFIG_EXT_TX
};
INSTANTIATE_TEST_CASE_P(AVX2, AV1Trans32x32HT,
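The same typing discipline applies to the function-pointer typedefs: once the
reference wrappers and HbdHtFunc agree on TX_TYPE, the pointers bind without
casts and the transform-type argument is checked at every call site. A
hypothetical usage sketch, assuming the 32x32 declarations above (buffer
sizes and bit depth chosen only for illustration):

HbdHtFunc ref = &highbd_fht32x32_ref;
int16_t input[32 * 32] = { 0 };  // all-zero source block
int32_t output[32 * 32];
ref(input, output, /*stride=*/32, ADST_DCT, /*bd=*/10);  // TX_TYPE checked
// ref(input, output, 32, 1, 10);  // no longer compiles: 1 is an int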
diff --git a/test/av1_fht4x4_test.cc b/test/av1_fht4x4_test.cc
index d516e05..1d4fc13 100644
--- a/test/av1_fht4x4_test.cc
+++ b/test/av1_fht4x4_test.cc
@@ -28,7 +28,7 @@
const TxfmParam *txfm_param);
using std::tr1::tuple;
using libaom_test::FhtFunc;
-typedef tuple<FhtFunc, IhtFunc, int, aom_bit_depth_t, int> Ht4x4Param;
+typedef tuple<FhtFunc, IhtFunc, TX_TYPE, aom_bit_depth_t, int> Ht4x4Param;
void fht4x4_ref(const int16_t *in, tran_low_t *out, int stride,
TxfmParam *txfm_param) {
@@ -42,16 +42,16 @@
#if CONFIG_HIGHBITDEPTH
typedef void (*IhighbdHtFunc)(const tran_low_t *in, uint8_t *out, int stride,
- int tx_type, int bd);
+ TX_TYPE tx_type, int bd);
typedef void (*HBDFhtFunc)(const int16_t *input, int32_t *output, int stride,
- int tx_type, int bd);
+ TX_TYPE tx_type, int bd);
// HighbdHt4x4Param argument list:
// <Target optimized function, tx_type, bit depth>
-typedef tuple<HBDFhtFunc, int, int> HighbdHt4x4Param;
+typedef tuple<HBDFhtFunc, TX_TYPE, int> HighbdHt4x4Param;
-void highbe_fht4x4_ref(const int16_t *in, int32_t *out, int stride, int tx_type,
- int bd) {
+void highbe_fht4x4_ref(const int16_t *in, int32_t *out, int stride,
+ TX_TYPE tx_type, int bd) {
av1_fwd_txfm2d_4x4_c(in, out, stride, tx_type, bd);
}
#endif // CONFIG_HIGHBITDEPTH
@@ -131,7 +131,7 @@
private:
HBDFhtFunc fwd_txfm_;
HBDFhtFunc fwd_txfm_ref_;
- int tx_type_;
+ TX_TYPE tx_type_;
int bit_depth_;
int mask_;
int num_coeffs_;
@@ -169,23 +169,34 @@
#if HAVE_SSE2 && !CONFIG_DAALA_DCT4
const Ht4x4Param kArrayHt4x4Param_sse2[] = {
- make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 0, AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 1, AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 2, AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 3, AOM_BITS_8, 16),
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, DCT_DCT, AOM_BITS_8,
+ 16),
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, ADST_DCT, AOM_BITS_8,
+ 16),
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, DCT_ADST, AOM_BITS_8,
+ 16),
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, ADST_ADST, AOM_BITS_8,
+ 16),
#if CONFIG_EXT_TX
- make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 4, AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 5, AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 6, AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 7, AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 8, AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 9, AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 10, AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 11, AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 12, AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 13, AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 14, AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 15, AOM_BITS_8, 16)
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, FLIPADST_DCT,
+ AOM_BITS_8, 16),
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, DCT_FLIPADST,
+ AOM_BITS_8, 16),
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, FLIPADST_FLIPADST,
+ AOM_BITS_8, 16),
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, ADST_FLIPADST,
+ AOM_BITS_8, 16),
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, FLIPADST_ADST,
+ AOM_BITS_8, 16),
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, IDTX, AOM_BITS_8, 16),
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, V_DCT, AOM_BITS_8, 16),
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, H_DCT, AOM_BITS_8, 16),
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, V_ADST, AOM_BITS_8, 16),
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, H_ADST, AOM_BITS_8, 16),
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, V_FLIPADST, AOM_BITS_8,
+ 16),
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, H_FLIPADST, AOM_BITS_8,
+ 16)
#endif // CONFIG_EXT_TX
};
INSTANTIATE_TEST_CASE_P(SSE2, AV1Trans4x4HT,
@@ -194,25 +205,25 @@
#if HAVE_SSE4_1 && CONFIG_HIGHBITDEPTH && !CONFIG_DAALA_DCT4
const HighbdHt4x4Param kArrayHighbdHt4x4Param[] = {
- make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 0, 10),
- make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 0, 12),
- make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 1, 10),
- make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 1, 12),
- make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 2, 10),
- make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 2, 12),
- make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 3, 10),
- make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 3, 12),
+ make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, DCT_DCT, 10),
+ make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, DCT_DCT, 12),
+ make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, ADST_DCT, 10),
+ make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, ADST_DCT, 12),
+ make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, DCT_ADST, 10),
+ make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, DCT_ADST, 12),
+ make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, ADST_ADST, 10),
+ make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, ADST_ADST, 12),
#if CONFIG_EXT_TX
- make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 4, 10),
- make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 4, 12),
- make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 5, 10),
- make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 5, 12),
- make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 6, 10),
- make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 6, 12),
- make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 7, 10),
- make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 7, 12),
- make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 8, 10),
- make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, 8, 12),
+ make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, FLIPADST_DCT, 10),
+ make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, FLIPADST_DCT, 12),
+ make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, DCT_FLIPADST, 10),
+ make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, DCT_FLIPADST, 12),
+ make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, FLIPADST_FLIPADST, 10),
+ make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, FLIPADST_FLIPADST, 12),
+ make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, ADST_FLIPADST, 10),
+ make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, ADST_FLIPADST, 12),
+ make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, FLIPADST_ADST, 10),
+ make_tuple(&av1_fwd_txfm2d_4x4_sse4_1, FLIPADST_ADST, 12),
#endif // CONFIG_EXT_TX
};
diff --git a/test/av1_fht4x8_test.cc b/test/av1_fht4x8_test.cc
index e447d8e..f9d2120 100644
--- a/test/av1_fht4x8_test.cc
+++ b/test/av1_fht4x8_test.cc
@@ -28,7 +28,7 @@
const TxfmParam *txfm_param);
using std::tr1::tuple;
using libaom_test::FhtFunc;
-typedef tuple<FhtFunc, IhtFunc, int, aom_bit_depth_t, int> Ht4x8Param;
+typedef tuple<FhtFunc, IhtFunc, TX_TYPE, aom_bit_depth_t, int> Ht4x8Param;
void fht4x8_ref(const int16_t *in, tran_low_t *out, int stride,
TxfmParam *txfm_param) {
@@ -81,23 +81,26 @@
using std::tr1::make_tuple;
const Ht4x8Param kArrayHt4x8Param_c[] = {
- make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, 0, AOM_BITS_8, 32),
- make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, 1, AOM_BITS_8, 32),
- make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, 2, AOM_BITS_8, 32),
- make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, 3, AOM_BITS_8, 32),
+ make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, DCT_DCT, AOM_BITS_8, 32),
+ make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, ADST_DCT, AOM_BITS_8, 32),
+ make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, DCT_ADST, AOM_BITS_8, 32),
+ make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, ADST_ADST, AOM_BITS_8, 32),
#if CONFIG_EXT_TX
- make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, 4, AOM_BITS_8, 32),
- make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, 5, AOM_BITS_8, 32),
- make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, 6, AOM_BITS_8, 32),
- make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, 7, AOM_BITS_8, 32),
- make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, 8, AOM_BITS_8, 32),
- make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, 9, AOM_BITS_8, 32),
- make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, 10, AOM_BITS_8, 32),
- make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, 11, AOM_BITS_8, 32),
- make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, 12, AOM_BITS_8, 32),
- make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, 13, AOM_BITS_8, 32),
- make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, 14, AOM_BITS_8, 32),
- make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, 15, AOM_BITS_8, 32)
+ make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, FLIPADST_DCT, AOM_BITS_8, 32),
+ make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, DCT_FLIPADST, AOM_BITS_8, 32),
+ make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, FLIPADST_FLIPADST, AOM_BITS_8,
+ 32),
+ make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, ADST_FLIPADST, AOM_BITS_8,
+ 32),
+ make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, FLIPADST_ADST, AOM_BITS_8,
+ 32),
+ make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, IDTX, AOM_BITS_8, 32),
+ make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, V_DCT, AOM_BITS_8, 32),
+ make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, H_DCT, AOM_BITS_8, 32),
+ make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, V_ADST, AOM_BITS_8, 32),
+ make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, H_ADST, AOM_BITS_8, 32),
+ make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, V_FLIPADST, AOM_BITS_8, 32),
+ make_tuple(&av1_fht4x8_c, &av1_iht4x8_32_add_c, H_FLIPADST, AOM_BITS_8, 32)
#endif // CONFIG_EXT_TX
};
INSTANTIATE_TEST_CASE_P(C, AV1Trans4x8HT,
@@ -105,23 +108,34 @@
#if HAVE_SSE2
const Ht4x8Param kArrayHt4x8Param_sse2[] = {
- make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 0, AOM_BITS_8, 32),
- make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 1, AOM_BITS_8, 32),
- make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 2, AOM_BITS_8, 32),
- make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 3, AOM_BITS_8, 32),
+ make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, DCT_DCT, AOM_BITS_8,
+ 32),
+ make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, ADST_DCT, AOM_BITS_8,
+ 32),
+ make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, DCT_ADST, AOM_BITS_8,
+ 32),
+ make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, ADST_ADST, AOM_BITS_8,
+ 32),
#if CONFIG_EXT_TX
- make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 4, AOM_BITS_8, 32),
- make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 5, AOM_BITS_8, 32),
- make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 6, AOM_BITS_8, 32),
- make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 7, AOM_BITS_8, 32),
- make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 8, AOM_BITS_8, 32),
- make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 9, AOM_BITS_8, 32),
- make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 10, AOM_BITS_8, 32),
- make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 11, AOM_BITS_8, 32),
- make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 12, AOM_BITS_8, 32),
- make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 13, AOM_BITS_8, 32),
- make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 14, AOM_BITS_8, 32),
- make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, 15, AOM_BITS_8, 32)
+ make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, FLIPADST_DCT,
+ AOM_BITS_8, 32),
+ make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, DCT_FLIPADST,
+ AOM_BITS_8, 32),
+ make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, FLIPADST_FLIPADST,
+ AOM_BITS_8, 32),
+ make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, ADST_FLIPADST,
+ AOM_BITS_8, 32),
+ make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, FLIPADST_ADST,
+ AOM_BITS_8, 32),
+ make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, IDTX, AOM_BITS_8, 32),
+ make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, V_DCT, AOM_BITS_8, 32),
+ make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, H_DCT, AOM_BITS_8, 32),
+ make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, V_ADST, AOM_BITS_8, 32),
+ make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, H_ADST, AOM_BITS_8, 32),
+ make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, V_FLIPADST, AOM_BITS_8,
+ 32),
+ make_tuple(&av1_fht4x8_sse2, &av1_iht4x8_32_add_sse2, H_FLIPADST, AOM_BITS_8,
+ 32)
#endif // CONFIG_EXT_TX
};
INSTANTIATE_TEST_CASE_P(SSE2, AV1Trans4x8HT,
diff --git a/test/av1_fht64x64_test.cc b/test/av1_fht64x64_test.cc
index 61ea9f1..f2a03e7 100644
--- a/test/av1_fht64x64_test.cc
+++ b/test/av1_fht64x64_test.cc
@@ -29,7 +29,7 @@
const TxfmParam *txfm_param);
using std::tr1::tuple;
using libaom_test::FhtFunc;
-typedef tuple<FhtFunc, IhtFunc, int, aom_bit_depth_t, int> Ht64x64Param;
+typedef tuple<FhtFunc, IhtFunc, TX_TYPE, aom_bit_depth_t, int> Ht64x64Param;
void fht64x64_ref(const int16_t *in, tran_low_t *out, int stride,
TxfmParam *txfm_param) {
@@ -82,23 +82,38 @@
using std::tr1::make_tuple;
const Ht64x64Param kArrayHt64x64Param_c[] = {
- make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, 0, AOM_BITS_8, 4096),
- make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, 1, AOM_BITS_8, 4096),
- make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, 2, AOM_BITS_8, 4096),
- make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, 3, AOM_BITS_8, 4096),
+ make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, DCT_DCT, AOM_BITS_8,
+ 4096),
+ make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, ADST_DCT, AOM_BITS_8,
+ 4096),
+ make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, DCT_ADST, AOM_BITS_8,
+ 4096),
+ make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, ADST_ADST, AOM_BITS_8,
+ 4096),
#if CONFIG_EXT_TX
- make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, 4, AOM_BITS_8, 4096),
- make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, 5, AOM_BITS_8, 4096),
- make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, 6, AOM_BITS_8, 4096),
- make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, 7, AOM_BITS_8, 4096),
- make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, 8, AOM_BITS_8, 4096),
- make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, 9, AOM_BITS_8, 4096),
- make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, 10, AOM_BITS_8, 4096),
- make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, 11, AOM_BITS_8, 4096),
- make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, 12, AOM_BITS_8, 4096),
- make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, 13, AOM_BITS_8, 4096),
- make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, 14, AOM_BITS_8, 4096),
- make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, 15, AOM_BITS_8, 4096)
+ make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, FLIPADST_DCT,
+ AOM_BITS_8, 4096),
+ make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, DCT_FLIPADST,
+ AOM_BITS_8, 4096),
+ make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, FLIPADST_FLIPADST,
+ AOM_BITS_8, 4096),
+ make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, ADST_FLIPADST,
+ AOM_BITS_8, 4096),
+ make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, FLIPADST_ADST,
+ AOM_BITS_8, 4096),
+ make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, IDTX, AOM_BITS_8, 4096),
+ make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, V_DCT, AOM_BITS_8,
+ 4096),
+ make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, H_DCT, AOM_BITS_8,
+ 4096),
+ make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, V_ADST, AOM_BITS_8,
+ 4096),
+ make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, H_ADST, AOM_BITS_8,
+ 4096),
+ make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, V_FLIPADST, AOM_BITS_8,
+ 4096),
+ make_tuple(&av1_fht64x64_c, &av1_iht64x64_4096_add_c, H_FLIPADST, AOM_BITS_8,
+ 4096)
#endif // CONFIG_EXT_TX
};
INSTANTIATE_TEST_CASE_P(C, AV1Trans64x64HT,
diff --git a/test/av1_fht8x16_test.cc b/test/av1_fht8x16_test.cc
index 11f0858..689cb0b 100644
--- a/test/av1_fht8x16_test.cc
+++ b/test/av1_fht8x16_test.cc
@@ -27,7 +27,7 @@
const TxfmParam *txfm_param);
using std::tr1::tuple;
using libaom_test::FhtFunc;
-typedef tuple<FhtFunc, IhtFunc, int, aom_bit_depth_t, int> Ht8x16Param;
+typedef tuple<FhtFunc, IhtFunc, TX_TYPE, aom_bit_depth_t, int> Ht8x16Param;
void fht8x16_ref(const int16_t *in, tran_low_t *out, int stride,
TxfmParam *txfm_param) {
@@ -80,23 +80,31 @@
using std::tr1::make_tuple;
const Ht8x16Param kArrayHt8x16Param_c[] = {
- make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, 0, AOM_BITS_8, 128),
- make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, 1, AOM_BITS_8, 128),
- make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, 2, AOM_BITS_8, 128),
- make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, 3, AOM_BITS_8, 128),
+ make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, DCT_DCT, AOM_BITS_8, 128),
+ make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, ADST_DCT, AOM_BITS_8, 128),
+ make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, DCT_ADST, AOM_BITS_8, 128),
+ make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, ADST_ADST, AOM_BITS_8,
+ 128),
#if CONFIG_EXT_TX
- make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, 4, AOM_BITS_8, 128),
- make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, 5, AOM_BITS_8, 128),
- make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, 6, AOM_BITS_8, 128),
- make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, 7, AOM_BITS_8, 128),
- make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, 8, AOM_BITS_8, 128),
- make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, 9, AOM_BITS_8, 128),
- make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, 10, AOM_BITS_8, 128),
- make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, 11, AOM_BITS_8, 128),
- make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, 12, AOM_BITS_8, 128),
- make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, 13, AOM_BITS_8, 128),
- make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, 14, AOM_BITS_8, 128),
- make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, 15, AOM_BITS_8, 128)
+ make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, FLIPADST_DCT, AOM_BITS_8,
+ 128),
+ make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, DCT_FLIPADST, AOM_BITS_8,
+ 128),
+ make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, FLIPADST_FLIPADST,
+ AOM_BITS_8, 128),
+ make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, ADST_FLIPADST, AOM_BITS_8,
+ 128),
+ make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, FLIPADST_ADST, AOM_BITS_8,
+ 128),
+ make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, IDTX, AOM_BITS_8, 128),
+ make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, V_DCT, AOM_BITS_8, 128),
+ make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, H_DCT, AOM_BITS_8, 128),
+ make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, V_ADST, AOM_BITS_8, 128),
+ make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, H_ADST, AOM_BITS_8, 128),
+ make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, V_FLIPADST, AOM_BITS_8,
+ 128),
+ make_tuple(&av1_fht8x16_c, &av1_iht8x16_128_add_c, H_FLIPADST, AOM_BITS_8,
+ 128)
#endif // CONFIG_EXT_TX
};
INSTANTIATE_TEST_CASE_P(C, AV1Trans8x16HT,
@@ -104,23 +112,39 @@
#if HAVE_SSE2
const Ht8x16Param kArrayHt8x16Param_sse2[] = {
- make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, 0, AOM_BITS_8, 128),
- make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, 1, AOM_BITS_8, 128),
- make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, 2, AOM_BITS_8, 128),
- make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, 3, AOM_BITS_8, 128),
+ make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, DCT_DCT, AOM_BITS_8,
+ 128),
+ make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, ADST_DCT, AOM_BITS_8,
+ 128),
+ make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, DCT_ADST, AOM_BITS_8,
+ 128),
+ make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, ADST_ADST,
+ AOM_BITS_8, 128),
#if CONFIG_EXT_TX
- make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, 4, AOM_BITS_8, 128),
- make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, 5, AOM_BITS_8, 128),
- make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, 6, AOM_BITS_8, 128),
- make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, 7, AOM_BITS_8, 128),
- make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, 8, AOM_BITS_8, 128),
- make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, 9, AOM_BITS_8, 128),
- make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, 10, AOM_BITS_8, 128),
- make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, 11, AOM_BITS_8, 128),
- make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, 12, AOM_BITS_8, 128),
- make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, 13, AOM_BITS_8, 128),
- make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, 14, AOM_BITS_8, 128),
- make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, 15, AOM_BITS_8, 128)
+ make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, FLIPADST_DCT,
+ AOM_BITS_8, 128),
+ make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, DCT_FLIPADST,
+ AOM_BITS_8, 128),
+ make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, FLIPADST_FLIPADST,
+ AOM_BITS_8, 128),
+ make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, ADST_FLIPADST,
+ AOM_BITS_8, 128),
+ make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, FLIPADST_ADST,
+ AOM_BITS_8, 128),
+ make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, IDTX, AOM_BITS_8,
+ 128),
+ make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, V_DCT, AOM_BITS_8,
+ 128),
+ make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, H_DCT, AOM_BITS_8,
+ 128),
+ make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, V_ADST, AOM_BITS_8,
+ 128),
+ make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, H_ADST, AOM_BITS_8,
+ 128),
+ make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, V_FLIPADST,
+ AOM_BITS_8, 128),
+ make_tuple(&av1_fht8x16_sse2, &av1_iht8x16_128_add_sse2, H_FLIPADST,
+ AOM_BITS_8, 128)
#endif // CONFIG_EXT_TX
};
INSTANTIATE_TEST_CASE_P(SSE2, AV1Trans8x16HT,
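The INSTANTIATE_TEST_CASE_P context lines are truncated at each hunk
boundary; in the source they continue with a ValuesIn over the preceding
table in the usual gtest form. A sketch for the 8x16 SSE2 table (assumed
continuation, not shown in the diff):

INSTANTIATE_TEST_CASE_P(SSE2, AV1Trans8x16HT,
                        ::testing::ValuesIn(kArrayHt8x16Param_sse2));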
diff --git a/test/av1_fht8x4_test.cc b/test/av1_fht8x4_test.cc
index c797421..e50a694 100644
--- a/test/av1_fht8x4_test.cc
+++ b/test/av1_fht8x4_test.cc
@@ -27,7 +27,7 @@
const TxfmParam *txfm_param);
using std::tr1::tuple;
using libaom_test::FhtFunc;
-typedef tuple<FhtFunc, IhtFunc, int, aom_bit_depth_t, int> Ht8x4Param;
+typedef tuple<FhtFunc, IhtFunc, TX_TYPE, aom_bit_depth_t, int> Ht8x4Param;
void fht8x4_ref(const int16_t *in, tran_low_t *out, int stride,
TxfmParam *txfm_param) {
@@ -80,23 +80,26 @@
using std::tr1::make_tuple;
const Ht8x4Param kArrayHt8x4Param_c[] = {
- make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, 0, AOM_BITS_8, 32),
- make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, 1, AOM_BITS_8, 32),
- make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, 2, AOM_BITS_8, 32),
- make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, 3, AOM_BITS_8, 32),
+ make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, DCT_DCT, AOM_BITS_8, 32),
+ make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, ADST_DCT, AOM_BITS_8, 32),
+ make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, DCT_ADST, AOM_BITS_8, 32),
+ make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, ADST_ADST, AOM_BITS_8, 32),
#if CONFIG_EXT_TX
- make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, 4, AOM_BITS_8, 32),
- make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, 5, AOM_BITS_8, 32),
- make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, 6, AOM_BITS_8, 32),
- make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, 7, AOM_BITS_8, 32),
- make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, 8, AOM_BITS_8, 32),
- make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, 9, AOM_BITS_8, 32),
- make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, 10, AOM_BITS_8, 32),
- make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, 11, AOM_BITS_8, 32),
- make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, 12, AOM_BITS_8, 32),
- make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, 13, AOM_BITS_8, 32),
- make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, 14, AOM_BITS_8, 32),
- make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, 15, AOM_BITS_8, 32)
+ make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, FLIPADST_DCT, AOM_BITS_8, 32),
+ make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, DCT_FLIPADST, AOM_BITS_8, 32),
+ make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, FLIPADST_FLIPADST, AOM_BITS_8,
+ 32),
+ make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, ADST_FLIPADST, AOM_BITS_8,
+ 32),
+ make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, FLIPADST_ADST, AOM_BITS_8,
+ 32),
+ make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, IDTX, AOM_BITS_8, 32),
+ make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, V_DCT, AOM_BITS_8, 32),
+ make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, H_DCT, AOM_BITS_8, 32),
+ make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, V_ADST, AOM_BITS_8, 32),
+ make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, H_ADST, AOM_BITS_8, 32),
+ make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, V_FLIPADST, AOM_BITS_8, 32),
+ make_tuple(&av1_fht8x4_c, &av1_iht8x4_32_add_c, H_FLIPADST, AOM_BITS_8, 32)
#endif // CONFIG_EXT_TX
};
INSTANTIATE_TEST_CASE_P(C, AV1Trans8x4HT,
@@ -104,23 +107,34 @@
#if HAVE_SSE2
const Ht8x4Param kArrayHt8x4Param_sse2[] = {
- make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 0, AOM_BITS_8, 32),
- make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 1, AOM_BITS_8, 32),
- make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 2, AOM_BITS_8, 32),
- make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 3, AOM_BITS_8, 32),
+ make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, DCT_DCT, AOM_BITS_8,
+ 32),
+ make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, ADST_DCT, AOM_BITS_8,
+ 32),
+ make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, DCT_ADST, AOM_BITS_8,
+ 32),
+ make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, ADST_ADST, AOM_BITS_8,
+ 32),
#if CONFIG_EXT_TX
- make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 4, AOM_BITS_8, 32),
- make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 5, AOM_BITS_8, 32),
- make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 6, AOM_BITS_8, 32),
- make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 7, AOM_BITS_8, 32),
- make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 8, AOM_BITS_8, 32),
- make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 9, AOM_BITS_8, 32),
- make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 10, AOM_BITS_8, 32),
- make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 11, AOM_BITS_8, 32),
- make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 12, AOM_BITS_8, 32),
- make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 13, AOM_BITS_8, 32),
- make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 14, AOM_BITS_8, 32),
- make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, 15, AOM_BITS_8, 32)
+ make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, FLIPADST_DCT,
+ AOM_BITS_8, 32),
+ make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, DCT_FLIPADST,
+ AOM_BITS_8, 32),
+ make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, FLIPADST_FLIPADST,
+ AOM_BITS_8, 32),
+ make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, ADST_FLIPADST,
+ AOM_BITS_8, 32),
+ make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, FLIPADST_ADST,
+ AOM_BITS_8, 32),
+ make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, IDTX, AOM_BITS_8, 32),
+ make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, V_DCT, AOM_BITS_8, 32),
+ make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, H_DCT, AOM_BITS_8, 32),
+ make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, V_ADST, AOM_BITS_8, 32),
+ make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, H_ADST, AOM_BITS_8, 32),
+ make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, V_FLIPADST, AOM_BITS_8,
+ 32),
+ make_tuple(&av1_fht8x4_sse2, &av1_iht8x4_32_add_sse2, H_FLIPADST, AOM_BITS_8,
+ 32)
#endif // CONFIG_EXT_TX
};
INSTANTIATE_TEST_CASE_P(SSE2, AV1Trans8x4HT,
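For reference, the literal-to-enumerator mapping used throughout these test arrays follows directly from the replacements themselves: 0 becomes DCT_DCT, 1 becomes ADST_DCT, and so on through 15 becoming H_FLIPADST. A minimal sketch of that ordering (the authoritative definition lives in av1/common/enums.h; the members from FLIPADST_DCT onward are the extended transform set and are only exercised under CONFIG_EXT_TX):

    // Sketch of the TX_TYPE ordering implied by the replacements above;
    // see av1/common/enums.h for the canonical definition.
    typedef enum {
      DCT_DCT,            // 0
      ADST_DCT,           // 1
      DCT_ADST,           // 2
      ADST_ADST,          // 3
      FLIPADST_DCT,       // 4  (extended set begins here;
      DCT_FLIPADST,       // 5   compiled in only when
      FLIPADST_FLIPADST,  // 6   CONFIG_EXT_TX is enabled)
      ADST_FLIPADST,      // 7
      FLIPADST_ADST,      // 8
      IDTX,               // 9
      V_DCT,              // 10
      H_DCT,              // 11
      V_ADST,             // 12
      H_ADST,             // 13
      V_FLIPADST,         // 14
      H_FLIPADST          // 15
    } TX_TYPE;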
diff --git a/test/av1_fht8x8_test.cc b/test/av1_fht8x8_test.cc
index cc284ef..499fcc3 100644
--- a/test/av1_fht8x8_test.cc
+++ b/test/av1_fht8x8_test.cc
@@ -29,7 +29,7 @@
using libaom_test::FhtFunc;
using std::tr1::tuple;
-typedef tuple<FhtFunc, IhtFunc, int, aom_bit_depth_t, int> Ht8x8Param;
+typedef tuple<FhtFunc, IhtFunc, TX_TYPE, aom_bit_depth_t, int> Ht8x8Param;
void fht8x8_ref(const int16_t *in, tran_low_t *out, int stride,
TxfmParam *txfm_param) {
@@ -43,14 +43,14 @@
#if CONFIG_HIGHBITDEPTH
typedef void (*IHbdHtFunc)(const tran_low_t *in, uint8_t *out, int stride,
- int tx_type, int bd);
+ TX_TYPE tx_type, int bd);
typedef void (*HbdHtFunc)(const int16_t *input, int32_t *output, int stride,
- int tx_type, int bd);
+ TX_TYPE tx_type, int bd);
// Target optimized function, tx_type, bit depth
-typedef tuple<HbdHtFunc, int, int> HighbdHt8x8Param;
+typedef tuple<HbdHtFunc, TX_TYPE, int> HighbdHt8x8Param;
-void highbd_fht8x8_ref(const int16_t *in, int32_t *out, int stride, int tx_type,
- int bd) {
+void highbd_fht8x8_ref(const int16_t *in, int32_t *out, int stride,
+ TX_TYPE tx_type, int bd) {
av1_fwd_txfm2d_8x8_c(in, out, stride, tx_type, bd);
}
#endif // CONFIG_HIGHBITDEPTH
@@ -130,7 +130,7 @@
private:
HbdHtFunc fwd_txfm_;
HbdHtFunc fwd_txfm_ref_;
- int tx_type_;
+ TX_TYPE tx_type_;
int bit_depth_;
int mask_;
int num_coeffs_;
@@ -169,23 +169,34 @@
#if HAVE_SSE2 && !CONFIG_DAALA_DCT8
const Ht8x8Param kArrayHt8x8Param_sse2[] = {
- make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 0, AOM_BITS_8, 64),
- make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 1, AOM_BITS_8, 64),
- make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 2, AOM_BITS_8, 64),
- make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 3, AOM_BITS_8, 64),
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, DCT_DCT, AOM_BITS_8,
+ 64),
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, ADST_DCT, AOM_BITS_8,
+ 64),
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, DCT_ADST, AOM_BITS_8,
+ 64),
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, ADST_ADST, AOM_BITS_8,
+ 64),
#if CONFIG_EXT_TX
- make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 4, AOM_BITS_8, 64),
- make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 5, AOM_BITS_8, 64),
- make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 6, AOM_BITS_8, 64),
- make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 7, AOM_BITS_8, 64),
- make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 8, AOM_BITS_8, 64),
- make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 9, AOM_BITS_8, 64),
- make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 10, AOM_BITS_8, 64),
- make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 11, AOM_BITS_8, 64),
- make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 12, AOM_BITS_8, 64),
- make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 13, AOM_BITS_8, 64),
- make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 14, AOM_BITS_8, 64),
- make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 15, AOM_BITS_8, 64)
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, FLIPADST_DCT,
+ AOM_BITS_8, 64),
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, DCT_FLIPADST,
+ AOM_BITS_8, 64),
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, FLIPADST_FLIPADST,
+ AOM_BITS_8, 64),
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, ADST_FLIPADST,
+ AOM_BITS_8, 64),
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, FLIPADST_ADST,
+ AOM_BITS_8, 64),
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, IDTX, AOM_BITS_8, 64),
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, V_DCT, AOM_BITS_8, 64),
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, H_DCT, AOM_BITS_8, 64),
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, V_ADST, AOM_BITS_8, 64),
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, H_ADST, AOM_BITS_8, 64),
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, V_FLIPADST, AOM_BITS_8,
+ 64),
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, H_FLIPADST, AOM_BITS_8,
+ 64)
#endif // CONFIG_EXT_TX
};
INSTANTIATE_TEST_CASE_P(SSE2, AV1Trans8x8HT,
@@ -194,25 +205,25 @@
#if HAVE_SSE4_1 && CONFIG_HIGHBITDEPTH && !CONFIG_DAALA_DCT8
const HighbdHt8x8Param kArrayHBDHt8x8Param_sse4_1[] = {
- make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 0, 10),
- make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 0, 12),
- make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 1, 10),
- make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 1, 12),
- make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 2, 10),
- make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 2, 12),
- make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 3, 10),
- make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 3, 12),
+ make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, DCT_DCT, 10),
+ make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, DCT_DCT, 12),
+ make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, ADST_DCT, 10),
+ make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, ADST_DCT, 12),
+ make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, DCT_ADST, 10),
+ make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, DCT_ADST, 12),
+ make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, ADST_ADST, 10),
+ make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, ADST_ADST, 12),
#if CONFIG_EXT_TX
- make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 4, 10),
- make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 4, 12),
- make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 5, 10),
- make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 5, 12),
- make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 6, 10),
- make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 6, 12),
- make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 7, 10),
- make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 7, 12),
- make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 8, 10),
- make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, 8, 12),
+ make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, FLIPADST_DCT, 10),
+ make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, FLIPADST_DCT, 12),
+ make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, DCT_FLIPADST, 10),
+ make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, DCT_FLIPADST, 12),
+ make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, FLIPADST_FLIPADST, 10),
+ make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, FLIPADST_FLIPADST, 12),
+ make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, ADST_FLIPADST, 10),
+ make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, ADST_FLIPADST, 12),
+ make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, FLIPADST_ADST, 10),
+ make_tuple(&av1_fwd_txfm2d_8x8_sse4_1, FLIPADST_ADST, 12),
#endif // CONFIG_EXT_TX
};
INSTANTIATE_TEST_CASE_P(SSE4_1, AV1HighbdTrans8x8HT,
diff --git a/test/av1_fwd_txfm2d_test.cc b/test/av1_fwd_txfm2d_test.cc
index 74557e9..adf9a80 100644
--- a/test/av1_fwd_txfm2d_test.cc
+++ b/test/av1_fwd_txfm2d_test.cc
@@ -185,7 +185,8 @@
// TODO(angiebird): include rect txfm in this test
for (int tx_size = 0; tx_size < TX_SIZES; ++tx_size) {
for (int tx_type = 0; tx_type < TX_TYPES; ++tx_type) {
- TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, tx_size);
+ TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(
+ static_cast<TX_TYPE>(tx_type), static_cast<TX_SIZE>(tx_size));
int8_t stage_range_col[MAX_TXFM_STAGE_NUM];
int8_t stage_range_row[MAX_TXFM_STAGE_NUM];
av1_gen_fwd_stage_range(stage_range_col, stage_range_row, &cfg, bd);
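The static_casts added in this hunk (and in the matching hunk in av1_inv_txfm2d_test.cc below) are needed because the loop counters stay plain ints: C++ converts an enum to int implicitly but never the reverse, so once av1_get_fwd_txfm_cfg() takes TX_TYPE and TX_SIZE the conversions must be explicit. A minimal standalone sketch of the pattern, using a hypothetical enum and function rather than the real API:

    #include <cstdio>

    enum Color { RED, GREEN, BLUE, COLORS };  // hypothetical stand-in for TX_TYPE

    static void paint(Color c) { std::printf("color %d\n", static_cast<int>(c)); }

    int main() {
      for (int c = 0; c < COLORS; ++c) {
        // paint(c);                   // would not compile in C++: no implicit
        paint(static_cast<Color>(c));  // int -> enum conversion; cast explicitly
      }
      return 0;
    }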
diff --git a/test/av1_highbd_iht_test.cc b/test/av1_highbd_iht_test.cc
index fa5dff1..45df5ed 100644
--- a/test/av1_highbd_iht_test.cc
+++ b/test/av1_highbd_iht_test.cc
@@ -26,10 +26,10 @@
using libaom_test::ACMRandom;
typedef void (*HbdHtFunc)(const int16_t *input, int32_t *output, int stride,
- int tx_type, int bd);
+ TX_TYPE tx_type, int bd);
typedef void (*IHbdHtFunc)(const int32_t *coeff, uint16_t *output, int stride,
- int tx_type, int bd);
+ TX_TYPE tx_type, int bd);
// Test parameter argument list:
// <transform reference function,
@@ -38,7 +38,7 @@
// num_coeffs,
// tx_type,
// bit_depth>
-typedef tuple<HbdHtFunc, IHbdHtFunc, IHbdHtFunc, int, int, int> IHbdHtParam;
+typedef tuple<HbdHtFunc, IHbdHtFunc, IHbdHtFunc, int, TX_TYPE, int> IHbdHtParam;
class AV1HighbdInvHTNxN : public ::testing::TestWithParam<IHbdHtParam> {
public:
@@ -97,7 +97,7 @@
IHbdHtFunc inv_txfm_;
IHbdHtFunc inv_txfm_ref_;
int num_coeffs_;
- int tx_type_;
+ TX_TYPE tx_type_;
int bit_depth_;
int16_t *input_;
diff --git a/test/av1_inv_txfm2d_test.cc b/test/av1_inv_txfm2d_test.cc
index f8d8e87..bccbdee 100644
--- a/test/av1_inv_txfm2d_test.cc
+++ b/test/av1_inv_txfm2d_test.cc
@@ -193,7 +193,8 @@
// TODO(angiebird): include rect txfm in this test
for (int tx_size = 0; tx_size < TX_SIZES; ++tx_size) {
for (int tx_type = 0; tx_type < TX_TYPES; ++tx_type) {
- TXFM_2D_FLIP_CFG cfg = av1_get_inv_txfm_cfg(tx_type, tx_size);
+ TXFM_2D_FLIP_CFG cfg = av1_get_inv_txfm_cfg(
+ static_cast<TX_TYPE>(tx_type), static_cast<TX_SIZE>(tx_size));
int8_t stage_range_col[MAX_TXFM_STAGE_NUM];
int8_t stage_range_row[MAX_TXFM_STAGE_NUM];
av1_gen_inv_stage_range(stage_range_col, stage_range_row, &cfg,
diff --git a/test/av1_txfm_test.h b/test/av1_txfm_test.h
index 326542c..3e64e36 100644
--- a/test/av1_txfm_test.h
+++ b/test/av1_txfm_test.h
@@ -71,8 +71,8 @@
typedef void (*TxfmFunc)(const int32_t *in, int32_t *out, const int8_t *cos_bit,
const int8_t *range_bit);
-typedef void (*Fwd_Txfm2d_Func)(const int16_t *, int32_t *, int, int, int);
-typedef void (*Inv_Txfm2d_Func)(const int32_t *, uint16_t *, int, int, int);
+typedef void (*Fwd_Txfm2d_Func)(const int16_t *, int32_t *, int, TX_TYPE, int);
+typedef void (*Inv_Txfm2d_Func)(const int32_t *, uint16_t *, int, TX_TYPE, int);
static const int bd = 10;
static const int input_base = (1 << bd);
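Retyping a parameter inside a function-pointer typedef, as Fwd_Txfm2d_Func and Inv_Txfm2d_Func are retyped here, is an all-or-nothing change: a pointer to a function whose corresponding parameter is still int is a distinct, incompatible type, so every function registered through these typedefs has to be converted in the same patch. A small sketch of why, with hypothetical names throughout:

    #include <stdint.h>

    enum TxType { DCT_DCT_SKETCH, ADST_DCT_SKETCH };  // hypothetical stand-in

    // Mirrors the shape of Fwd_Txfm2d_Func after this patch.
    typedef void (*FwdFunc)(const int16_t *, int32_t *, int, TxType, int);

    static void fwd_new(const int16_t *, int32_t *, int, TxType, int) {}
    static void fwd_old(const int16_t *, int32_t *, int, int, int) {}

    int main() {
      FwdFunc ok = &fwd_new;   // matches the updated signature
      // FwdFunc bad = &fwd_old;  // would not compile: int parameter != TxType
      (void)ok;
      (void)fwd_old;
      return 0;
    }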
diff --git a/test/dct16x16_test.cc b/test/dct16x16_test.cc
index dfcebf5..3cc0ed8 100644
--- a/test/dct16x16_test.cc
+++ b/test/dct16x16_test.cc
@@ -230,9 +230,11 @@
typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
const TxfmParam *txfm_param);
-typedef std::tr1::tuple<FdctFunc, IdctFunc, int, aom_bit_depth_t> Dct16x16Param;
-typedef std::tr1::tuple<FhtFunc, IhtFunc, int, aom_bit_depth_t> Ht16x16Param;
-typedef std::tr1::tuple<IdctFunc, IdctFunc, int, aom_bit_depth_t>
+typedef std::tr1::tuple<FdctFunc, IdctFunc, TX_TYPE, aom_bit_depth_t>
+ Dct16x16Param;
+typedef std::tr1::tuple<FhtFunc, IhtFunc, TX_TYPE, aom_bit_depth_t>
+ Ht16x16Param;
+typedef std::tr1::tuple<IdctFunc, IdctFunc, TX_TYPE, aom_bit_depth_t>
Idct16x16Param;
void fdct16x16_ref(const int16_t *in, tran_low_t *out, int stride,
@@ -778,63 +780,70 @@
INSTANTIATE_TEST_CASE_P(C, Trans16x16DCT,
::testing::Values(make_tuple(&aom_fdct16x16_c,
&aom_idct16x16_256_add_c,
- 0, AOM_BITS_8)));
+ DCT_DCT, AOM_BITS_8)));
#else
INSTANTIATE_TEST_CASE_P(C, Trans16x16DCT,
::testing::Values(make_tuple(&aom_fdct16x16_c,
&aom_idct16x16_256_add_c,
- 0, AOM_BITS_8)));
+ DCT_DCT, AOM_BITS_8)));
#endif // CONFIG_HIGHBITDEPTH
#if CONFIG_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans16x16HT,
::testing::Values(
- make_tuple(&fht16x16_10, &iht16x16_10, 0, AOM_BITS_10),
- make_tuple(&fht16x16_10, &iht16x16_10, 1, AOM_BITS_10),
- make_tuple(&fht16x16_10, &iht16x16_10, 2, AOM_BITS_10),
- make_tuple(&fht16x16_10, &iht16x16_10, 3, AOM_BITS_10),
- make_tuple(&fht16x16_12, &iht16x16_12, 0, AOM_BITS_12),
- make_tuple(&fht16x16_12, &iht16x16_12, 1, AOM_BITS_12),
- make_tuple(&fht16x16_12, &iht16x16_12, 2, AOM_BITS_12),
- make_tuple(&fht16x16_12, &iht16x16_12, 3, AOM_BITS_12),
- make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 0, AOM_BITS_8),
- make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 1, AOM_BITS_8),
- make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 2, AOM_BITS_8),
- make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 3, AOM_BITS_8)));
+ make_tuple(&fht16x16_10, &iht16x16_10, DCT_DCT, AOM_BITS_10),
+ make_tuple(&fht16x16_10, &iht16x16_10, ADST_DCT, AOM_BITS_10),
+ make_tuple(&fht16x16_10, &iht16x16_10, DCT_ADST, AOM_BITS_10),
+ make_tuple(&fht16x16_10, &iht16x16_10, ADST_ADST, AOM_BITS_10),
+ make_tuple(&fht16x16_12, &iht16x16_12, DCT_DCT, AOM_BITS_12),
+ make_tuple(&fht16x16_12, &iht16x16_12, ADST_DCT, AOM_BITS_12),
+ make_tuple(&fht16x16_12, &iht16x16_12, DCT_ADST, AOM_BITS_12),
+ make_tuple(&fht16x16_12, &iht16x16_12, ADST_ADST, AOM_BITS_12),
+ make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, DCT_DCT,
+ AOM_BITS_8),
+ make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, ADST_DCT,
+ AOM_BITS_8),
+ make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, DCT_ADST,
+ AOM_BITS_8),
+ make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, ADST_ADST,
+ AOM_BITS_8)));
#else
INSTANTIATE_TEST_CASE_P(
C, Trans16x16HT,
- ::testing::Values(
- make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 0, AOM_BITS_8),
- make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 1, AOM_BITS_8),
- make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 2, AOM_BITS_8),
- make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 3, AOM_BITS_8)));
+ ::testing::Values(make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c,
+ DCT_DCT, AOM_BITS_8),
+ make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c,
+ ADST_DCT, AOM_BITS_8),
+ make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c,
+ DCT_ADST, AOM_BITS_8),
+ make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c,
+ ADST_ADST, AOM_BITS_8)));
#endif // CONFIG_HIGHBITDEPTH
#if HAVE_NEON_ASM && !CONFIG_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
NEON, Trans16x16DCT,
::testing::Values(make_tuple(&aom_fdct16x16_c, &aom_idct16x16_256_add_neon,
- 0, AOM_BITS_8)));
+ DCT_DCT, AOM_BITS_8)));
#endif
#if HAVE_SSE2 && !CONFIG_HIGHBITDEPTH
-INSTANTIATE_TEST_CASE_P(
- SSE2, Trans16x16DCT,
- ::testing::Values(make_tuple(&aom_fdct16x16_sse2,
- &aom_idct16x16_256_add_sse2, 0, AOM_BITS_8)));
+INSTANTIATE_TEST_CASE_P(SSE2, Trans16x16DCT,
+ ::testing::Values(make_tuple(
+ &aom_fdct16x16_sse2, &aom_idct16x16_256_add_sse2,
+ DCT_DCT, AOM_BITS_8)));
#if !CONFIG_DAALA_DCT16
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16HT,
::testing::Values(make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2,
- 0, AOM_BITS_8),
+ DCT_DCT, AOM_BITS_8),
make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2,
- 1, AOM_BITS_8),
+ ADST_DCT, AOM_BITS_8),
make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2,
- 2, AOM_BITS_8),
+ DCT_ADST, AOM_BITS_8),
make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_sse2,
- 3, AOM_BITS_8)));
+ ADST_ADST, AOM_BITS_8)));
#endif  // !CONFIG_DAALA_DCT16
#endif // HAVE_SSE2 && !CONFIG_HIGHBITDEPTH
@@ -842,16 +851,18 @@
INSTANTIATE_TEST_CASE_P(SSE2, Trans16x16DCT,
::testing::Values(make_tuple(&aom_fdct16x16_sse2,
&aom_idct16x16_256_add_c,
- 0, AOM_BITS_8)));
+ DCT_DCT, AOM_BITS_8)));
#if !CONFIG_DAALA_DCT16
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16HT,
- ::testing::Values(
- make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_c, 0, AOM_BITS_8),
- make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_c, 1, AOM_BITS_8),
- make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_c, 2, AOM_BITS_8),
- make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_c, 3,
- AOM_BITS_8)));
+ ::testing::Values(make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_c,
+ DCT_DCT, AOM_BITS_8),
+ make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_c,
+ ADST_DCT, AOM_BITS_8),
+ make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_c,
+ DCT_ADST, AOM_BITS_8),
+ make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_c,
+ ADST_ADST, AOM_BITS_8)));
#endif
#endif // HAVE_SSE2 && CONFIG_HIGHBITDEPTH
@@ -859,17 +870,19 @@
INSTANTIATE_TEST_CASE_P(MSA, Trans16x16DCT,
::testing::Values(make_tuple(&aom_fdct16x16_msa,
&aom_idct16x16_256_add_msa,
- 0, AOM_BITS_8)));
+ DCT_DCT, AOM_BITS_8)));
#if !CONFIG_EXT_TX && !CONFIG_DAALA_DCT16
// TODO(yaowu): re-enable this after msa versions are updated to match C.
INSTANTIATE_TEST_CASE_P(
DISABLED_MSA, Trans16x16HT,
- ::testing::Values(
- make_tuple(&av1_fht16x16_msa, &av1_iht16x16_256_add_msa, 0, AOM_BITS_8),
- make_tuple(&av1_fht16x16_msa, &av1_iht16x16_256_add_msa, 1, AOM_BITS_8),
- make_tuple(&av1_fht16x16_msa, &av1_iht16x16_256_add_msa, 2, AOM_BITS_8),
- make_tuple(&av1_fht16x16_msa, &av1_iht16x16_256_add_msa, 3,
- AOM_BITS_8)));
+ ::testing::Values(make_tuple(&av1_fht16x16_msa, &av1_iht16x16_256_add_msa,
+ DCT_DCT, AOM_BITS_8),
+ make_tuple(&av1_fht16x16_msa, &av1_iht16x16_256_add_msa,
+ ADST_DCT, AOM_BITS_8),
+ make_tuple(&av1_fht16x16_msa, &av1_iht16x16_256_add_msa,
+ DCT_ADST, AOM_BITS_8),
+ make_tuple(&av1_fht16x16_msa, &av1_iht16x16_256_add_msa,
+ ADST_ADST, AOM_BITS_8)));
#endif // !CONFIG_EXT_TX && !CONFIG_DAALA_DCT16
#endif // HAVE_MSA && !CONFIG_HIGHBITDEPTH
} // namespace
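With the tuple typedefs carrying TX_TYPE instead of int, the fixtures can read the transform type out of GetParam() without a cast, and the enumerator names in the ::testing::Values(...) lists make each instantiation self-describing. A minimal sketch of the retrieval pattern, using std::tuple in place of the std::tr1::tuple these tests use, hypothetical names, and only the two relevant tuple elements (the real params carry more, at different indices):

    #include <tuple>

    enum TxType { DCT_DCT_SKETCH, ADST_DCT_SKETCH };  // hypothetical stand-in

    typedef std::tuple<TxType, int> HtParam;  // <tx_type, bit depth>

    int main() {
      // In a gtest fixture this would read std::get<N>(GetParam()).
      HtParam p = std::make_tuple(ADST_DCT_SKETCH, 8);
      TxType tx_type = std::get<0>(p);  // typed element: no static_cast needed
      return tx_type == ADST_DCT_SKETCH ? 0 : 1;
    }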
diff --git a/test/dct32x32_test.cc b/test/dct32x32_test.cc
index 0a30f7f..02a723a 100644
--- a/test/dct32x32_test.cc
+++ b/test/dct32x32_test.cc
@@ -363,53 +363,63 @@
INSTANTIATE_TEST_CASE_P(
NEON, Trans32x32Test,
::testing::Values(make_tuple(&aom_fdct32x32_c, &aom_idct32x32_1024_add_neon,
- 0, AOM_BITS_8),
+ DCT_DCT, AOM_BITS_8),
make_tuple(&aom_fdct32x32_rd_c,
- &aom_idct32x32_1024_add_neon, 1, AOM_BITS_8)));
+ &aom_idct32x32_1024_add_neon, ADST_DCT,
+ AOM_BITS_8)));
#endif // HAVE_NEON && !CONFIG_HIGHBITDEPTH
#if HAVE_SSE2 && !CONFIG_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
SSE2, Trans32x32Test,
::testing::Values(make_tuple(&aom_fdct32x32_sse2,
- &aom_idct32x32_1024_add_sse2, 0, AOM_BITS_8),
+ &aom_idct32x32_1024_add_sse2, DCT_DCT,
+ AOM_BITS_8),
make_tuple(&aom_fdct32x32_rd_sse2,
- &aom_idct32x32_1024_add_sse2, 1, AOM_BITS_8)));
+ &aom_idct32x32_1024_add_sse2, ADST_DCT,
+ AOM_BITS_8)));
#endif // HAVE_SSE2 && !CONFIG_HIGHBITDEPTH
#if HAVE_SSE2 && CONFIG_HIGHBITDEPTH
-INSTANTIATE_TEST_CASE_P(
- SSE2, Trans32x32Test,
- ::testing::Values(make_tuple(&aom_fdct32x32_sse2, &aom_idct32x32_1024_add_c,
- 0, AOM_BITS_8),
- make_tuple(&aom_fdct32x32_rd_sse2,
- &aom_idct32x32_1024_add_c, 1, AOM_BITS_8)));
+INSTANTIATE_TEST_CASE_P(SSE2, Trans32x32Test,
+ ::testing::Values(make_tuple(&aom_fdct32x32_sse2,
+ &aom_idct32x32_1024_add_c,
+ DCT_DCT, AOM_BITS_8),
+ make_tuple(&aom_fdct32x32_rd_sse2,
+ &aom_idct32x32_1024_add_c,
+ ADST_DCT, AOM_BITS_8)));
#endif // HAVE_SSE2 && CONFIG_HIGHBITDEPTH
#if HAVE_AVX2 && !CONFIG_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
AVX2, Trans32x32Test,
::testing::Values(make_tuple(&aom_fdct32x32_avx2,
- &aom_idct32x32_1024_add_sse2, 0, AOM_BITS_8),
+ &aom_idct32x32_1024_add_sse2, DCT_DCT,
+ AOM_BITS_8),
make_tuple(&aom_fdct32x32_rd_avx2,
- &aom_idct32x32_1024_add_sse2, 1, AOM_BITS_8)));
+ &aom_idct32x32_1024_add_sse2, ADST_DCT,
+ AOM_BITS_8)));
#endif // HAVE_AVX2 && !CONFIG_HIGHBITDEPTH
#if HAVE_AVX2 && CONFIG_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
AVX2, Trans32x32Test,
::testing::Values(make_tuple(&aom_fdct32x32_avx2,
- &aom_idct32x32_1024_add_sse2, 0, AOM_BITS_8),
+ &aom_idct32x32_1024_add_sse2, DCT_DCT,
+ AOM_BITS_8),
make_tuple(&aom_fdct32x32_rd_avx2,
- &aom_idct32x32_1024_add_sse2, 1, AOM_BITS_8)));
+ &aom_idct32x32_1024_add_sse2, ADST_DCT,
+ AOM_BITS_8)));
#endif // HAVE_AVX2 && CONFIG_HIGHBITDEPTH
#if HAVE_MSA && !CONFIG_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
MSA, Trans32x32Test,
::testing::Values(make_tuple(&aom_fdct32x32_msa,
- &aom_idct32x32_1024_add_msa, 0, AOM_BITS_8),
+ &aom_idct32x32_1024_add_msa, DCT_DCT,
+ AOM_BITS_8),
make_tuple(&aom_fdct32x32_rd_msa,
- &aom_idct32x32_1024_add_msa, 1, AOM_BITS_8)));
+ &aom_idct32x32_1024_add_msa, ADST_DCT,
+ AOM_BITS_8)));
#endif // HAVE_MSA && !CONFIG_HIGHBITDEPTH
} // namespace
diff --git a/test/fdct4x4_test.cc b/test/fdct4x4_test.cc
index a45b81d..5fad166 100644
--- a/test/fdct4x4_test.cc
+++ b/test/fdct4x4_test.cc
@@ -36,9 +36,10 @@
const TxfmParam *txfm_param);
using libaom_test::FhtFunc;
-typedef std::tr1::tuple<FdctFunc, IdctFunc, int, aom_bit_depth_t, int>
+typedef std::tr1::tuple<FdctFunc, IdctFunc, TX_TYPE, aom_bit_depth_t, int>
Dct4x4Param;
-typedef std::tr1::tuple<FhtFunc, IhtFunc, int, aom_bit_depth_t, int> Ht4x4Param;
+typedef std::tr1::tuple<FhtFunc, IhtFunc, TX_TYPE, aom_bit_depth_t, int>
+ Ht4x4Param;
void fdct4x4_ref(const int16_t *in, tran_low_t *out, int stride,
TxfmParam * /*txfm_param*/) {
@@ -211,121 +212,139 @@
INSTANTIATE_TEST_CASE_P(C, Trans4x4DCT,
::testing::Values(make_tuple(&aom_fdct4x4_c,
- &aom_idct4x4_16_add_c, 0,
- AOM_BITS_8, 16)));
+ &aom_idct4x4_16_add_c,
+ DCT_DCT, AOM_BITS_8, 16)));
#if CONFIG_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
DISABLED_C, Trans4x4HT,
- ::testing::Values(make_tuple(&fht4x4_12, &iht4x4_12, 0, AOM_BITS_12, 16),
- make_tuple(&fht4x4_12, &iht4x4_12, 1, AOM_BITS_12, 16),
- make_tuple(&fht4x4_12, &iht4x4_12, 2, AOM_BITS_12, 16),
- make_tuple(&fht4x4_12, &iht4x4_12, 3, AOM_BITS_12, 16)));
+ ::testing::Values(
+ make_tuple(&fht4x4_12, &iht4x4_12, DCT_DCT, AOM_BITS_12, 16),
+ make_tuple(&fht4x4_12, &iht4x4_12, ADST_DCT, AOM_BITS_12, 16),
+ make_tuple(&fht4x4_12, &iht4x4_12, DCT_ADST, AOM_BITS_12, 16),
+ make_tuple(&fht4x4_12, &iht4x4_12, ADST_ADST, AOM_BITS_12, 16)));
INSTANTIATE_TEST_CASE_P(
C, Trans4x4HT,
::testing::Values(
- make_tuple(&fht4x4_10, &iht4x4_10, 0, AOM_BITS_10, 16),
- make_tuple(&fht4x4_10, &iht4x4_10, 1, AOM_BITS_10, 16),
- make_tuple(&fht4x4_10, &iht4x4_10, 2, AOM_BITS_10, 16),
- make_tuple(&fht4x4_10, &iht4x4_10, 3, AOM_BITS_10, 16),
- make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 0, AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 1, AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 2, AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 3, AOM_BITS_8, 16)));
+ make_tuple(&fht4x4_10, &iht4x4_10, DCT_DCT, AOM_BITS_10, 16),
+ make_tuple(&fht4x4_10, &iht4x4_10, ADST_DCT, AOM_BITS_10, 16),
+ make_tuple(&fht4x4_10, &iht4x4_10, DCT_ADST, AOM_BITS_10, 16),
+ make_tuple(&fht4x4_10, &iht4x4_10, ADST_ADST, AOM_BITS_10, 16),
+ make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, DCT_DCT, AOM_BITS_8,
+ 16),
+ make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, ADST_DCT, AOM_BITS_8,
+ 16),
+ make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, DCT_ADST, AOM_BITS_8,
+ 16),
+ make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, ADST_ADST, AOM_BITS_8,
+ 16)));
#else
INSTANTIATE_TEST_CASE_P(
C, Trans4x4HT,
- ::testing::Values(
- make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 0, AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 1, AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 2, AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 3, AOM_BITS_8, 16)));
+ ::testing::Values(make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, DCT_DCT,
+ AOM_BITS_8, 16),
+ make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, ADST_DCT,
+ AOM_BITS_8, 16),
+ make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, DCT_ADST,
+ AOM_BITS_8, 16),
+ make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, ADST_ADST,
+ AOM_BITS_8, 16)));
#endif // CONFIG_HIGHBITDEPTH
#if CONFIG_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans4x4WHT,
- ::testing::Values(
- make_tuple(&av1_highbd_fwht4x4_c, &iwht4x4_10, 0, AOM_BITS_10, 16),
- make_tuple(&av1_highbd_fwht4x4_c, &iwht4x4_12, 0, AOM_BITS_12, 16),
- make_tuple(&av1_fwht4x4_c, &aom_iwht4x4_16_add_c, 0, AOM_BITS_8, 16)));
+ ::testing::Values(make_tuple(&av1_highbd_fwht4x4_c, &iwht4x4_10, DCT_DCT,
+ AOM_BITS_10, 16),
+ make_tuple(&av1_highbd_fwht4x4_c, &iwht4x4_12, DCT_DCT,
+ AOM_BITS_12, 16),
+ make_tuple(&av1_fwht4x4_c, &aom_iwht4x4_16_add_c, DCT_DCT,
+ AOM_BITS_8, 16)));
#else
INSTANTIATE_TEST_CASE_P(C, Trans4x4WHT,
::testing::Values(make_tuple(&av1_fwht4x4_c,
- &aom_iwht4x4_16_add_c, 0,
- AOM_BITS_8, 16)));
+ &aom_iwht4x4_16_add_c,
+ DCT_DCT, AOM_BITS_8, 16)));
#endif // CONFIG_HIGHBITDEPTH
#if HAVE_NEON_ASM && !CONFIG_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(NEON, Trans4x4DCT,
::testing::Values(make_tuple(&aom_fdct4x4_c,
&aom_idct4x4_16_add_neon,
- 0, AOM_BITS_8, 16)));
+ DCT_DCT, AOM_BITS_8, 16)));
#endif // HAVE_NEON_ASM && !CONFIG_HIGHBITDEPTH
#if HAVE_NEON && !CONFIG_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
NEON, Trans4x4HT,
- ::testing::Values(
- make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_neon, 0, AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_neon, 1, AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_neon, 2, AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_neon, 3, AOM_BITS_8, 16)));
+ ::testing::Values(make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_neon,
+ DCT_DCT, AOM_BITS_8, 16),
+ make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_neon,
+ ADST_DCT, AOM_BITS_8, 16),
+ make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_neon,
+ DCT_ADST, AOM_BITS_8, 16),
+ make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_neon,
+ ADST_ADST, AOM_BITS_8, 16)));
#endif // HAVE_NEON && !CONFIG_HIGHBITDEPTH
#if HAVE_SSE2 && !CONFIG_DAALA_DCT4
INSTANTIATE_TEST_CASE_P(
SSE2, Trans4x4WHT,
- ::testing::Values(make_tuple(&av1_fwht4x4_c, &aom_iwht4x4_16_add_c, 0,
+ ::testing::Values(make_tuple(&av1_fwht4x4_c, &aom_iwht4x4_16_add_c, DCT_DCT,
AOM_BITS_8, 16),
- make_tuple(&av1_fwht4x4_c, &aom_iwht4x4_16_add_sse2, 0,
- AOM_BITS_8, 16)));
+ make_tuple(&av1_fwht4x4_c, &aom_iwht4x4_16_add_sse2,
+ DCT_DCT, AOM_BITS_8, 16)));
#endif
#if HAVE_SSE2 && !CONFIG_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(SSE2, Trans4x4DCT,
::testing::Values(make_tuple(&aom_fdct4x4_sse2,
&aom_idct4x4_16_add_sse2,
- 0, AOM_BITS_8, 16)));
+ DCT_DCT, AOM_BITS_8, 16)));
#if !CONFIG_DAALA_DCT4
INSTANTIATE_TEST_CASE_P(
SSE2, Trans4x4HT,
- ::testing::Values(make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 0,
- AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 1,
- AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 2,
- AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 3,
- AOM_BITS_8, 16)));
+ ::testing::Values(make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2,
+ DCT_DCT, AOM_BITS_8, 16),
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2,
+ ADST_DCT, AOM_BITS_8, 16),
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2,
+ DCT_ADST, AOM_BITS_8, 16),
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2,
+ ADST_ADST, AOM_BITS_8, 16)));
#endif // !CONFIG_DAALA_DCT4
#endif // HAVE_SSE2 && !CONFIG_HIGHBITDEPTH
#if HAVE_SSE2 && CONFIG_HIGHBITDEPTH && !CONFIG_DAALA_DCT4
INSTANTIATE_TEST_CASE_P(
SSE2, Trans4x4HT,
- ::testing::Values(
- make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_c, 0, AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_c, 1, AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_c, 2, AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_c, 3, AOM_BITS_8, 16)));
+ ::testing::Values(make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_c,
+ DCT_DCT, AOM_BITS_8, 16),
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_c,
+ ADST_DCT, AOM_BITS_8, 16),
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_c,
+ DCT_ADST, AOM_BITS_8, 16),
+ make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_c,
+ ADST_ADST, AOM_BITS_8, 16)));
#endif // HAVE_SSE2 && CONFIG_HIGHBITDEPTH && !CONFIG_DAALA_DCT4
#if HAVE_MSA && !CONFIG_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(MSA, Trans4x4DCT,
::testing::Values(make_tuple(&aom_fdct4x4_msa,
- &aom_idct4x4_16_add_msa, 0,
- AOM_BITS_8, 16)));
+ &aom_idct4x4_16_add_msa,
+ DCT_DCT, AOM_BITS_8, 16)));
#if !CONFIG_EXT_TX && !CONFIG_DAALA_DCT4
INSTANTIATE_TEST_CASE_P(
MSA, Trans4x4HT,
- ::testing::Values(
- make_tuple(&av1_fht4x4_msa, &av1_iht4x4_16_add_msa, 0, AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_msa, &av1_iht4x4_16_add_msa, 1, AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_msa, &av1_iht4x4_16_add_msa, 2, AOM_BITS_8, 16),
- make_tuple(&av1_fht4x4_msa, &av1_iht4x4_16_add_msa, 3, AOM_BITS_8,
- 16)));
+ ::testing::Values(make_tuple(&av1_fht4x4_msa, &av1_iht4x4_16_add_msa,
+ DCT_DCT, AOM_BITS_8, 16),
+ make_tuple(&av1_fht4x4_msa, &av1_iht4x4_16_add_msa,
+ ADST_DCT, AOM_BITS_8, 16),
+ make_tuple(&av1_fht4x4_msa, &av1_iht4x4_16_add_msa,
+ DCT_ADST, AOM_BITS_8, 16),
+ make_tuple(&av1_fht4x4_msa, &av1_iht4x4_16_add_msa,
+ ADST_ADST, AOM_BITS_8, 16)));
#endif  // !CONFIG_EXT_TX && !CONFIG_DAALA_DCT4
#endif // HAVE_MSA && !CONFIG_HIGHBITDEPTH
} // namespace
diff --git a/test/fdct8x8_test.cc b/test/fdct8x8_test.cc
index 8a6d6d3..99ae8d6 100644
--- a/test/fdct8x8_test.cc
+++ b/test/fdct8x8_test.cc
@@ -44,8 +44,9 @@
typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
const TxfmParam *txfm_param);
-typedef std::tr1::tuple<FdctFunc, IdctFunc, int, aom_bit_depth_t> Dct8x8Param;
-typedef std::tr1::tuple<FhtFunc, IhtFunc, int, aom_bit_depth_t> Ht8x8Param;
+typedef std::tr1::tuple<FdctFunc, IdctFunc, TX_TYPE, aom_bit_depth_t>
+ Dct8x8Param;
+typedef std::tr1::tuple<FhtFunc, IhtFunc, TX_TYPE, aom_bit_depth_t> Ht8x8Param;
typedef std::tr1::tuple<IdctFunc, IdctFunc, int, aom_bit_depth_t> Idct8x8Param;
void reference_8x8_dct_1d(const double in[8], double out[8]) {
@@ -614,87 +615,98 @@
#if CONFIG_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(C, FwdTrans8x8DCT,
::testing::Values(make_tuple(&aom_fdct8x8_c,
- &aom_idct8x8_64_add_c, 0,
- AOM_BITS_8)));
+ &aom_idct8x8_64_add_c,
+ DCT_DCT, AOM_BITS_8)));
#else
INSTANTIATE_TEST_CASE_P(C, FwdTrans8x8DCT,
::testing::Values(make_tuple(&aom_fdct8x8_c,
- &aom_idct8x8_64_add_c, 0,
- AOM_BITS_8)));
+ &aom_idct8x8_64_add_c,
+ DCT_DCT, AOM_BITS_8)));
#endif // CONFIG_HIGHBITDEPTH
#if CONFIG_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, FwdTrans8x8HT,
::testing::Values(
- make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 0, AOM_BITS_8),
- make_tuple(&fht8x8_10, &iht8x8_10, 0, AOM_BITS_10),
- make_tuple(&fht8x8_10, &iht8x8_10, 1, AOM_BITS_10),
- make_tuple(&fht8x8_10, &iht8x8_10, 2, AOM_BITS_10),
- make_tuple(&fht8x8_10, &iht8x8_10, 3, AOM_BITS_10),
- make_tuple(&fht8x8_12, &iht8x8_12, 0, AOM_BITS_12),
- make_tuple(&fht8x8_12, &iht8x8_12, 1, AOM_BITS_12),
- make_tuple(&fht8x8_12, &iht8x8_12, 2, AOM_BITS_12),
- make_tuple(&fht8x8_12, &iht8x8_12, 3, AOM_BITS_12),
- make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 1, AOM_BITS_8),
- make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 2, AOM_BITS_8),
- make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 3, AOM_BITS_8)));
+ make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, DCT_DCT, AOM_BITS_8),
+ make_tuple(&fht8x8_10, &iht8x8_10, DCT_DCT, AOM_BITS_10),
+ make_tuple(&fht8x8_10, &iht8x8_10, ADST_DCT, AOM_BITS_10),
+ make_tuple(&fht8x8_10, &iht8x8_10, DCT_ADST, AOM_BITS_10),
+ make_tuple(&fht8x8_10, &iht8x8_10, ADST_ADST, AOM_BITS_10),
+ make_tuple(&fht8x8_12, &iht8x8_12, DCT_DCT, AOM_BITS_12),
+ make_tuple(&fht8x8_12, &iht8x8_12, ADST_DCT, AOM_BITS_12),
+ make_tuple(&fht8x8_12, &iht8x8_12, DCT_ADST, AOM_BITS_12),
+ make_tuple(&fht8x8_12, &iht8x8_12, ADST_ADST, AOM_BITS_12),
+ make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, ADST_DCT, AOM_BITS_8),
+ make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, DCT_ADST, AOM_BITS_8),
+ make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, ADST_ADST,
+ AOM_BITS_8)));
#else
INSTANTIATE_TEST_CASE_P(
C, FwdTrans8x8HT,
::testing::Values(
- make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 0, AOM_BITS_8),
- make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 1, AOM_BITS_8),
- make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 2, AOM_BITS_8),
- make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 3, AOM_BITS_8)));
+ make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, DCT_DCT, AOM_BITS_8),
+ make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, ADST_DCT, AOM_BITS_8),
+ make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, DCT_ADST, AOM_BITS_8),
+ make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, ADST_ADST,
+ AOM_BITS_8)));
#endif // CONFIG_HIGHBITDEPTH
#if HAVE_NEON_ASM && !CONFIG_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(NEON, FwdTrans8x8DCT,
::testing::Values(make_tuple(&aom_fdct8x8_neon,
&aom_idct8x8_64_add_neon,
- 0, AOM_BITS_8)));
+ DCT_DCT, AOM_BITS_8)));
#endif // HAVE_NEON_ASM && !CONFIG_HIGHBITDEPTH
#if HAVE_NEON && !CONFIG_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
NEON, FwdTrans8x8HT,
- ::testing::Values(
- make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_neon, 0, AOM_BITS_8),
- make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_neon, 1, AOM_BITS_8),
- make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_neon, 2, AOM_BITS_8),
- make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_neon, 3, AOM_BITS_8)));
+ ::testing::Values(make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_neon,
+ DCT_DCT, AOM_BITS_8),
+ make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_neon,
+ ADST_DCT, AOM_BITS_8),
+ make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_neon,
+ DCT_ADST, AOM_BITS_8),
+ make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_neon,
+ ADST_ADST, AOM_BITS_8)));
#endif // HAVE_NEON && !CONFIG_HIGHBITDEPTH
#if HAVE_SSE2 && !CONFIG_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(SSE2, FwdTrans8x8DCT,
::testing::Values(make_tuple(&aom_fdct8x8_sse2,
&aom_idct8x8_64_add_sse2,
- 0, AOM_BITS_8)));
+ DCT_DCT, AOM_BITS_8)));
#if !CONFIG_DAALA_DCT8
INSTANTIATE_TEST_CASE_P(
SSE2, FwdTrans8x8HT,
- ::testing::Values(
- make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 0, AOM_BITS_8),
- make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 1, AOM_BITS_8),
- make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 2, AOM_BITS_8),
- make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 3, AOM_BITS_8)));
+ ::testing::Values(make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2,
+ DCT_DCT, AOM_BITS_8),
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2,
+ ADST_DCT, AOM_BITS_8),
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2,
+ DCT_ADST, AOM_BITS_8),
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2,
+ ADST_ADST, AOM_BITS_8)));
#endif // !CONFIG_DAALA_DCT8
#endif // HAVE_SSE2 && !CONFIG_HIGHBITDEPTH
#if HAVE_SSE2 && CONFIG_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(SSE2, FwdTrans8x8DCT,
::testing::Values(make_tuple(&aom_fdct8x8_sse2,
- &aom_idct8x8_64_add_c, 0,
- AOM_BITS_8)));
+ &aom_idct8x8_64_add_c,
+ DCT_DCT, AOM_BITS_8)));
#if !CONFIG_DAALA_DCT8
INSTANTIATE_TEST_CASE_P(
SSE2, FwdTrans8x8HT,
- ::testing::Values(
- make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_c, 0, AOM_BITS_8),
- make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_c, 1, AOM_BITS_8),
- make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_c, 2, AOM_BITS_8),
- make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_c, 3, AOM_BITS_8)));
+ ::testing::Values(make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_c,
+ DCT_DCT, AOM_BITS_8),
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_c,
+ ADST_DCT, AOM_BITS_8),
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_c,
+ DCT_ADST, AOM_BITS_8),
+ make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_c,
+ ADST_ADST, AOM_BITS_8)));
#endif // !CONFIG_DAALA_DCT8
#endif // HAVE_SSE2 && CONFIG_HIGHBITDEPTH
@@ -702,22 +714,25 @@
INSTANTIATE_TEST_CASE_P(SSSE3, FwdTrans8x8DCT,
::testing::Values(make_tuple(&aom_fdct8x8_ssse3,
&aom_idct8x8_64_add_ssse3,
- 0, AOM_BITS_8)));
+ DCT_DCT, AOM_BITS_8)));
#endif
#if HAVE_MSA && !CONFIG_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(MSA, FwdTrans8x8DCT,
::testing::Values(make_tuple(&aom_fdct8x8_msa,
- &aom_idct8x8_64_add_msa, 0,
- AOM_BITS_8)));
+ &aom_idct8x8_64_add_msa,
+ DCT_DCT, AOM_BITS_8)));
#if !CONFIG_EXT_TX && !CONFIG_DAALA_DCT8
INSTANTIATE_TEST_CASE_P(
MSA, FwdTrans8x8HT,
- ::testing::Values(
- make_tuple(&av1_fht8x8_msa, &av1_iht8x8_64_add_msa, 0, AOM_BITS_8),
- make_tuple(&av1_fht8x8_msa, &av1_iht8x8_64_add_msa, 1, AOM_BITS_8),
- make_tuple(&av1_fht8x8_msa, &av1_iht8x8_64_add_msa, 2, AOM_BITS_8),
- make_tuple(&av1_fht8x8_msa, &av1_iht8x8_64_add_msa, 3, AOM_BITS_8)));
+ ::testing::Values(make_tuple(&av1_fht8x8_msa, &av1_iht8x8_64_add_msa,
+ DCT_DCT, AOM_BITS_8),
+ make_tuple(&av1_fht8x8_msa, &av1_iht8x8_64_add_msa,
+ ADST_DCT, AOM_BITS_8),
+ make_tuple(&av1_fht8x8_msa, &av1_iht8x8_64_add_msa,
+ DCT_ADST, AOM_BITS_8),
+ make_tuple(&av1_fht8x8_msa, &av1_iht8x8_64_add_msa,
+ ADST_ADST, AOM_BITS_8)));
#endif // !CONFIG_EXT_TX && !CONFIG_DAALA_DCT8
#endif // HAVE_MSA && !CONFIG_HIGHBITDEPTH
} // namespace