Add unit test for highbd flip transforms

Cover FLIPADST_DCT, DCT_FLIPADST, FLIPADST_FLIPADST, ADST_FLIPADST and
FLIPADST_ADST for TX_4X4 through TX_32X32, and add fliplr/flipud/fliplrud
reference helpers so the test can mirror the input block before applying
the reference hybrid 2D transform.

Change-Id: I368d365ee0f58373bc399b615febd790addb2c36
diff --git a/test/vp10_fwd_txfm2d_test.cc b/test/vp10_fwd_txfm2d_test.cc
index 945062e..34e232f 100644
--- a/test/vp10_fwd_txfm2d_test.cc
+++ b/test/vp10_fwd_txfm2d_test.cc
@@ -48,6 +48,8 @@
const TXFM_2D_CFG *fwd_txfm_cfg = fwd_txfm_flip_cfg.cfg;
int amplify_bit = fwd_txfm_cfg->shift[0] + fwd_txfm_cfg->shift[1] +
fwd_txfm_cfg->shift[2];
+ ud_flip_ = fwd_txfm_flip_cfg.ud_flip;
+ lr_flip_ = fwd_txfm_flip_cfg.lr_flip;
amplify_factor_ =
amplify_bit >= 0 ? (1 << amplify_bit) : (1.0 / (1 << -amplify_bit));
@@ -77,6 +79,14 @@
}
fwd_txfm_(input_, output_, txfm1d_size_, tx_type_, bd);
+
+ if (lr_flip_ && ud_flip_)
+ libvpx_test::fliplrud(ref_input_, txfm1d_size_, txfm1d_size_);
+ else if (lr_flip_)
+ libvpx_test::fliplr(ref_input_, txfm1d_size_, txfm1d_size_);
+ else if (ud_flip_)
+ libvpx_test::flipud(ref_input_, txfm1d_size_, txfm1d_size_);
+
reference_hybrid_2d(ref_input_, ref_output_, txfm1d_size_,
type0_, type1_);
@@ -120,6 +130,8 @@
int32_t* output_;
double* ref_input_;
double* ref_output_;
+ int ud_flip_; // flip upside down
+ int lr_flip_; // flip left to right
};
TEST_P(VP10FwdTxfm2d, RunFwdAccuracyCheck) {
@@ -129,6 +141,28 @@
INSTANTIATE_TEST_CASE_P(
C, VP10FwdTxfm2d,
::testing::Values(
+#if CONFIG_EXT_TX
+ VP10FwdTxfm2dParam(FLIPADST_DCT, TX_4X4, 2, 0.2),
+ VP10FwdTxfm2dParam(DCT_FLIPADST, TX_4X4, 2, 0.2),
+ VP10FwdTxfm2dParam(FLIPADST_FLIPADST, TX_4X4, 2, 0.2),
+ VP10FwdTxfm2dParam(ADST_FLIPADST, TX_4X4, 2, 0.2),
+ VP10FwdTxfm2dParam(FLIPADST_ADST, TX_4X4, 2, 0.2),
+ VP10FwdTxfm2dParam(FLIPADST_DCT, TX_8X8, 5, 0.6),
+ VP10FwdTxfm2dParam(DCT_FLIPADST, TX_8X8, 5, 0.6),
+ VP10FwdTxfm2dParam(FLIPADST_FLIPADST, TX_8X8, 5, 0.6),
+ VP10FwdTxfm2dParam(ADST_FLIPADST, TX_8X8, 5, 0.6),
+ VP10FwdTxfm2dParam(FLIPADST_ADST, TX_8X8, 5, 0.6),
+ VP10FwdTxfm2dParam(FLIPADST_DCT, TX_16X16, 11, 1.5),
+ VP10FwdTxfm2dParam(DCT_FLIPADST, TX_16X16, 11, 1.5),
+ VP10FwdTxfm2dParam(FLIPADST_FLIPADST, TX_16X16, 11, 1.5),
+ VP10FwdTxfm2dParam(ADST_FLIPADST, TX_16X16, 11, 1.5),
+ VP10FwdTxfm2dParam(FLIPADST_ADST, TX_16X16, 11, 1.5),
+ VP10FwdTxfm2dParam(FLIPADST_DCT, TX_32X32, 70, 7),
+ VP10FwdTxfm2dParam(DCT_FLIPADST, TX_32X32, 70, 7),
+ VP10FwdTxfm2dParam(FLIPADST_FLIPADST, TX_32X32, 70, 7),
+ VP10FwdTxfm2dParam(ADST_FLIPADST, TX_32X32, 70, 7),
+ VP10FwdTxfm2dParam(FLIPADST_ADST, TX_32X32, 70, 7),
+#endif
VP10FwdTxfm2dParam(DCT_DCT, TX_4X4, 2, 0.2),
VP10FwdTxfm2dParam(ADST_DCT, TX_4X4, 2, 0.2),
VP10FwdTxfm2dParam(DCT_ADST, TX_4X4, 2, 0.2),
diff --git a/test/vp10_txfm_test.cc b/test/vp10_txfm_test.cc
index a1fef0b..6b36126 100644
--- a/test/vp10_txfm_test.cc
+++ b/test/vp10_txfm_test.cc
@@ -36,6 +36,28 @@
*type0 = TYPE_ADST;
*type1 = TYPE_ADST;
break;
+#if CONFIG_EXT_TX
+ case FLIPADST_DCT:
+ *type0 = TYPE_ADST;
+ *type1 = TYPE_DCT;
+ break;
+ case DCT_FLIPADST:
+ *type0 = TYPE_DCT;
+ *type1 = TYPE_ADST;
+ break;
+ case FLIPADST_FLIPADST:
+ *type0 = TYPE_ADST;
+ *type1 = TYPE_ADST;
+ break;
+ case ADST_FLIPADST:
+ *type0 = TYPE_ADST;
+ *type1 = TYPE_ADST;
+ break;
+ case FLIPADST_ADST:
+ *type0 = TYPE_ADST;
+ *type1 = TYPE_ADST;
+ break;
+#endif // CONFIG_EXT_TX
default:
*type0 = TYPE_DCT;
*type1 = TYPE_DCT;
@@ -100,4 +122,45 @@
}
delete[] tempOut;
}
+
+template<typename Type>
+void fliplr(Type *dest, int stride, int length) {
+ int i, j;
+ for (i = 0; i < length; ++i) {
+ for (j = 0; j < length / 2; ++j) {
+ const Type tmp = dest[i * stride + j];
+ dest[i * stride + j] = dest[i * stride + length - 1 - j];
+ dest[i * stride + length - 1 - j] = tmp;
+ }
+ }
+}
+
+template<typename Type>
+void flipud(Type *dest, int stride, int length) {
+ int i, j;
+ for (j = 0; j < length; ++j) {
+ for (i = 0; i < length / 2; ++i) {
+ const Type tmp = dest[i * stride + j];
+ dest[i * stride + j] = dest[(length - 1 - i) * stride + j];
+ dest[(length - 1 - i) * stride + j] = tmp;
+ }
+ }
+}
+
+template<typename Type>
+void fliplrud(Type *dest, int stride, int length) {
+ int i, j;
+ for (i = 0; i < length / 2; ++i) {
+ for (j = 0; j < length; ++j) {
+ const Type tmp = dest[i * stride + j];
+ dest[i * stride + j] = dest[(length - 1 - i) * stride + length - 1 - j];
+ dest[(length - 1 - i) * stride + length - 1 - j] = tmp;
+ }
+ }
+}
+
+template void fliplr<double>(double *dest, int stride, int length);
+template void flipud<double>(double *dest, int stride, int length);
+template void fliplrud<double>(double *dest, int stride, int length);
+
} // namespace libvpx_test
diff --git a/test/vp10_txfm_test.h b/test/vp10_txfm_test.h
index 3849f51..c75c810 100644
--- a/test/vp10_txfm_test.h
+++ b/test/vp10_txfm_test.h
@@ -45,8 +45,7 @@
void reference_hybrid_1d(double* in, double* out, int size, int type);
void reference_hybrid_2d(double* in, double* out, int size,
- int type0, int type1);
-
+ int type0, int type1);
template <typename Type1, typename Type2>
static double compute_avg_abs_error(const Type1* a, const Type2* b,
const int size) {
@@ -58,6 +57,15 @@
return error;
}
+template<typename Type>
+void fliplr(Type *dest, int stride, int length);
+
+template<typename Type>
+void flipud(Type *dest, int stride, int length);
+
+template<typename Type>
+void fliplrud(Type *dest, int stride, int length);
+
typedef void (*TxfmFunc)(const int32_t* in, int32_t* out, const int8_t* cos_bit,
const int8_t* range_bit);
diff --git a/vp10/common/vp10_fwd_txfm2d.c b/vp10/common/vp10_fwd_txfm2d.c
index 371b4df..c61cb07 100644
--- a/vp10/common/vp10_fwd_txfm2d.c
+++ b/vp10/common/vp10_fwd_txfm2d.c
@@ -90,8 +90,8 @@
}
void vp10_fwd_txfm2d_4x4_c(const int16_t *input, int32_t *output,
- const int stride, int tx_type,
- const int bd) {
+ const int stride, int tx_type,
+ const int bd) {
int32_t txfm_buf[4 * 4];
TXFM_2D_FLIP_CFG cfg = vp10_get_fwd_txfm_cfg(tx_type, TX_4X4);
(void)bd;
@@ -99,8 +99,8 @@
}
void vp10_fwd_txfm2d_8x8_c(const int16_t *input, int32_t *output,
- const int stride, int tx_type,
- const int bd) {
+ const int stride, int tx_type,
+ const int bd) {
int32_t txfm_buf[8 * 8];
TXFM_2D_FLIP_CFG cfg = vp10_get_fwd_txfm_cfg(tx_type, TX_8X8);
(void)bd;
@@ -108,8 +108,8 @@
}
void vp10_fwd_txfm2d_16x16_c(const int16_t *input, int32_t *output,
- const int stride, int tx_type,
- const int bd) {
+ const int stride, int tx_type,
+ const int bd) {
int32_t txfm_buf[16 * 16];
TXFM_2D_FLIP_CFG cfg = vp10_get_fwd_txfm_cfg(tx_type, TX_16X16);
(void)bd;
@@ -117,8 +117,8 @@
}
void vp10_fwd_txfm2d_32x32_c(const int16_t *input, int32_t *output,
- const int stride, int tx_type,
- const int bd) {
+ const int stride, int tx_type,
+ const int bd) {
int32_t txfm_buf[32 * 32];
TXFM_2D_FLIP_CFG cfg = vp10_get_fwd_txfm_cfg(tx_type, TX_32X32);
(void)bd;
@@ -126,15 +126,37 @@
}
void vp10_fwd_txfm2d_64x64_c(const int16_t *input, int32_t *output,
- const int stride, int tx_type,
- const int bd) {
+ const int stride, int tx_type,
+ const int bd) {
int32_t txfm_buf[64 * 64];
TXFM_2D_FLIP_CFG cfg = vp10_get_fwd_txfm_64x64_cfg(tx_type);
(void)bd;
fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf);
}
-static const TXFM_2D_CFG* fwd_txfm_cfg_ls[4][TX_SIZES] = {
+#if CONFIG_EXT_TX
+static const TXFM_2D_CFG* fwd_txfm_cfg_ls[FLIPADST_ADST + 1][TX_SIZES] = {
+ {&fwd_txfm_2d_cfg_dct_dct_4 , &fwd_txfm_2d_cfg_dct_dct_8,
+ &fwd_txfm_2d_cfg_dct_dct_16 , &fwd_txfm_2d_cfg_dct_dct_32},
+ {&fwd_txfm_2d_cfg_adst_dct_4 , &fwd_txfm_2d_cfg_adst_dct_8,
+ &fwd_txfm_2d_cfg_adst_dct_16 , &fwd_txfm_2d_cfg_adst_dct_32},
+ {&fwd_txfm_2d_cfg_dct_adst_4 , &fwd_txfm_2d_cfg_dct_adst_8,
+ &fwd_txfm_2d_cfg_dct_adst_16 , &fwd_txfm_2d_cfg_dct_adst_32},
+ {&fwd_txfm_2d_cfg_adst_adst_4, &fwd_txfm_2d_cfg_adst_adst_8,
+ &fwd_txfm_2d_cfg_adst_adst_16, &fwd_txfm_2d_cfg_adst_adst_32},
+ {&fwd_txfm_2d_cfg_adst_dct_4 , &fwd_txfm_2d_cfg_adst_dct_8,
+ &fwd_txfm_2d_cfg_adst_dct_16 , &fwd_txfm_2d_cfg_adst_dct_32},
+ {&fwd_txfm_2d_cfg_dct_adst_4 , &fwd_txfm_2d_cfg_dct_adst_8,
+ &fwd_txfm_2d_cfg_dct_adst_16 , &fwd_txfm_2d_cfg_dct_adst_32},
+ {&fwd_txfm_2d_cfg_adst_adst_4, &fwd_txfm_2d_cfg_adst_adst_8,
+ &fwd_txfm_2d_cfg_adst_adst_16, &fwd_txfm_2d_cfg_adst_adst_32},
+ {&fwd_txfm_2d_cfg_adst_adst_4, &fwd_txfm_2d_cfg_adst_adst_8,
+ &fwd_txfm_2d_cfg_adst_adst_16, &fwd_txfm_2d_cfg_adst_adst_32},
+ {&fwd_txfm_2d_cfg_adst_adst_4, &fwd_txfm_2d_cfg_adst_adst_8,
+ &fwd_txfm_2d_cfg_adst_adst_16, &fwd_txfm_2d_cfg_adst_adst_32},
+};
+#else // CONFIG_EXT_TX
+static const TXFM_2D_CFG* fwd_txfm_cfg_ls[TX_TYPES][TX_SIZES] = {
{&fwd_txfm_2d_cfg_dct_dct_4 , &fwd_txfm_2d_cfg_dct_dct_8,
&fwd_txfm_2d_cfg_dct_dct_16 , &fwd_txfm_2d_cfg_dct_dct_32},
{&fwd_txfm_2d_cfg_adst_dct_4 , &fwd_txfm_2d_cfg_adst_dct_8,
@@ -144,6 +166,8 @@
{&fwd_txfm_2d_cfg_adst_adst_4, &fwd_txfm_2d_cfg_adst_adst_8,
&fwd_txfm_2d_cfg_adst_adst_16, &fwd_txfm_2d_cfg_adst_adst_32},
};
+#endif // CONFIG_EXT_TX
+
void set_flip_cfg(int tx_type, TXFM_2D_FLIP_CFG* cfg) {
switch (tx_type) {
@@ -154,7 +178,31 @@
cfg->ud_flip = 0;
cfg->lr_flip = 0;
break;
+#if CONFIG_EXT_TX
+ case FLIPADST_DCT:
+ cfg->ud_flip = 1;
+ cfg->lr_flip = 0;
+ break;
+ case DCT_FLIPADST:
+ cfg->ud_flip = 0;
+ cfg->lr_flip = 1;
+ break;
+ case FLIPADST_FLIPADST:
+ cfg->ud_flip = 1;
+ cfg->lr_flip = 1;
+ break;
+ case ADST_FLIPADST:
+ cfg->ud_flip = 0;
+ cfg->lr_flip = 1;
+ break;
+ case FLIPADST_ADST:
+ cfg->ud_flip = 1;
+ cfg->lr_flip = 0;
+ break;
+#endif // CONFIG_EXT_TX
default:
+ cfg->ud_flip = 0;
+ cfg->lr_flip = 0;
assert(0);
}
}
@@ -178,6 +226,8 @@
case DCT_ADST:
case ADST_ADST:
default:
+ cfg.ud_flip = 0;
+ cfg.lr_flip = 0;
assert(0);
}
return cfg;