Implement av1_lowbd_inv_txfm2d_add_8x8_sse2
1. Change the output type from uint16_t to uint8_t;
2. The sse2 version achieves a 4x~9x speedup compared to
av1_inv_txfm2d_add_8x8_c.
Change-Id: I08675a018d11f9904d5b0a40aa896fe8fd437352
diff --git a/av1/common/x86/av1_inv_txfm_sse2.c b/av1/common/x86/av1_inv_txfm_sse2.c
index aacd283..73d8153 100644
--- a/av1/common/x86/av1_inv_txfm_sse2.c
+++ b/av1/common/x86/av1_inv_txfm_sse2.c
@@ -9,6 +9,9 @@
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
+#include "./aom_config.h"
+#include "./av1_rtcd.h"
+#include "av1/common/av1_inv_txfm1d_cfg.h"
#include "av1/common/x86/av1_txfm_sse2.h"
void idct4_new_sse2(const __m128i *input, __m128i *output, int8_t cos_bit) {
@@ -1595,3 +1598,106 @@
output[14] = x8[9];
output[15] = _mm_subs_epi16(__zero, x8[1]);
}
+
+static void iidentity8_new_sse2(const __m128i *input, __m128i *output,
+ int8_t cos_bit) {
+ (void)cos_bit;
+
+ output[0] = _mm_adds_epi16(input[0], input[0]);
+ output[1] = _mm_adds_epi16(input[1], input[1]);
+ output[2] = _mm_adds_epi16(input[2], input[2]);
+ output[3] = _mm_adds_epi16(input[3], input[3]);
+ output[4] = _mm_adds_epi16(input[4], input[4]);
+ output[5] = _mm_adds_epi16(input[5], input[5]);
+ output[6] = _mm_adds_epi16(input[6], input[6]);
+ output[7] = _mm_adds_epi16(input[7], input[7]);
+}
+
+static INLINE __m128i lowbd_get_recon_8x8_sse2(const __m128i pred, __m128i res,
+ int fliplr) {
+ const __m128i zero = _mm_setzero_si128();
+ __m128i x0 = _mm_unpacklo_epi8(pred, zero);
+ if (fliplr) {
+ // TODO(binpengsmail@gmail.com):
+ // Replace these shuffles with two transposes (along different diagonals)
+ // using flipped reads, since shuffles are usually expensive on low-end CPUs
+ res = _mm_shufflelo_epi16(res, 0x1B);
+ res = _mm_shufflehi_epi16(res, 0x1B);
+ res = _mm_shuffle_epi32(res, 0x4E);
+ }
+ x0 = _mm_adds_epi16(res, x0);
+ return _mm_packus_epi16(x0, x0);
+}
+
+static void lowbd_write_buffer_8x8_sse2(__m128i *in, uint8_t *output,
+ int stride, int fliplr, int flipud) {
+ __m128i u0, u1, u2, u3, u4, u5, u6, u7;
+ __m128i v0, v1, v2, v3, v4, v5, v6, v7;
+ v0 = _mm_loadl_epi64((__m128i const *)(output + 0 * stride));
+ v1 = _mm_loadl_epi64((__m128i const *)(output + 1 * stride));
+ v2 = _mm_loadl_epi64((__m128i const *)(output + 2 * stride));
+ v3 = _mm_loadl_epi64((__m128i const *)(output + 3 * stride));
+ v4 = _mm_loadl_epi64((__m128i const *)(output + 4 * stride));
+ v5 = _mm_loadl_epi64((__m128i const *)(output + 5 * stride));
+ v6 = _mm_loadl_epi64((__m128i const *)(output + 6 * stride));
+ v7 = _mm_loadl_epi64((__m128i const *)(output + 7 * stride));
+
+ if (flipud) {
+ u0 = lowbd_get_recon_8x8_sse2(v0, in[7], fliplr);
+ u1 = lowbd_get_recon_8x8_sse2(v1, in[6], fliplr);
+ u2 = lowbd_get_recon_8x8_sse2(v2, in[5], fliplr);
+ u3 = lowbd_get_recon_8x8_sse2(v3, in[4], fliplr);
+ u4 = lowbd_get_recon_8x8_sse2(v4, in[3], fliplr);
+ u5 = lowbd_get_recon_8x8_sse2(v5, in[2], fliplr);
+ u6 = lowbd_get_recon_8x8_sse2(v6, in[1], fliplr);
+ u7 = lowbd_get_recon_8x8_sse2(v7, in[0], fliplr);
+ } else {
+ u0 = lowbd_get_recon_8x8_sse2(v0, in[0], fliplr);
+ u1 = lowbd_get_recon_8x8_sse2(v1, in[1], fliplr);
+ u2 = lowbd_get_recon_8x8_sse2(v2, in[2], fliplr);
+ u3 = lowbd_get_recon_8x8_sse2(v3, in[3], fliplr);
+ u4 = lowbd_get_recon_8x8_sse2(v4, in[4], fliplr);
+ u5 = lowbd_get_recon_8x8_sse2(v5, in[5], fliplr);
+ u6 = lowbd_get_recon_8x8_sse2(v6, in[6], fliplr);
+ u7 = lowbd_get_recon_8x8_sse2(v7, in[7], fliplr);
+ }
+
+ _mm_storel_epi64((__m128i *)(output + 0 * stride), u0);
+ _mm_storel_epi64((__m128i *)(output + 1 * stride), u1);
+ _mm_storel_epi64((__m128i *)(output + 2 * stride), u2);
+ _mm_storel_epi64((__m128i *)(output + 3 * stride), u3);
+ _mm_storel_epi64((__m128i *)(output + 4 * stride), u4);
+ _mm_storel_epi64((__m128i *)(output + 5 * stride), u5);
+ _mm_storel_epi64((__m128i *)(output + 6 * stride), u6);
+ _mm_storel_epi64((__m128i *)(output + 7 * stride), u7);
+}
+
+static const transform_1d_sse2 lowbd_txfm8_1d_arr[TX_TYPES_1D] = {
+ idct8_new_sse2, iadst8_new_sse2, iadst8_new_sse2, iidentity8_new_sse2,
+};
+
+void av1_lowbd_inv_txfm2d_add_8x8_sse2(const int32_t *input, uint8_t *output,
+ int stride, TX_TYPE tx_type, int bd) {
+ (void)bd;
+ __m128i buf[8];
+ const int8_t *shift = inv_txfm_shift_ls[TX_8X8];
+ const int txw_idx = get_txw_idx(TX_8X8);
+ const int txh_idx = get_txh_idx(TX_8X8);
+ const int cos_bit_col = inv_cos_bit_col[txw_idx][txh_idx];
+ const int cos_bit_row = inv_cos_bit_row[txw_idx][txh_idx];
+ const int buf_size = 8;
+
+ const transform_1d_sse2 col_txfm = lowbd_txfm8_1d_arr[vtx_tab[tx_type]];
+ const transform_1d_sse2 row_txfm = lowbd_txfm8_1d_arr[htx_tab[tx_type]];
+
+ int ud_flip, lr_flip;
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+ load_buffer_32bit_to_16bit(input, 8, buf, buf_size);
+ transpose_16bit_8x8(buf, buf);
+ row_txfm(buf, buf, cos_bit_row);
+ transpose_16bit_8x8(buf, buf);
+ round_shift_16bit(buf, 8, shift[0]);
+ col_txfm(buf, buf, cos_bit_col);
+ round_shift_16bit(buf, 8, shift[1]);
+ lowbd_write_buffer_8x8_sse2(buf, output, stride, lr_flip, ud_flip);
+}
diff --git a/av1/common/x86/av1_txfm_sse2.h b/av1/common/x86/av1_txfm_sse2.h
index b0a9690..368bca9 100644
--- a/av1/common/x86/av1_txfm_sse2.h
+++ b/av1/common/x86/av1_txfm_sse2.h
@@ -107,6 +107,14 @@
}
}
+static INLINE void load_buffer_32bit_to_16bit_flip(const int32_t *in,
+ int stride, __m128i *out,
+ int out_size) {
+ for (int i = 0; i < out_size; ++i) {
+ out[out_size - i - 1] = load_32bit_to_16bit(in + i * stride);
+ }
+}
+
static INLINE void store_buffer_16bit_to_32bit_8x8(const __m128i *const in,
int32_t *const out,
const int stride) {
@@ -124,9 +132,10 @@
}
static INLINE void store_buffer_16bit_to_16bit_8x8(const __m128i *in,
- int16_t *out) {
+ uint16_t *out,
+ const int stride) {
for (int i = 0; i < 8; ++i) {
- _mm_store_si128((__m128i *)(out + i * 8), in[i]);
+ _mm_store_si128((__m128i *)(out + i * stride), in[i]);
}
}
@@ -178,6 +187,10 @@
typedef struct {
transform_1d_sse2 col, row; // vertical and horizontal
} transform_2d_sse2;
+
+void av1_lowbd_inv_txfm2d_add_8x8_sse2(const int32_t *input, uint8_t *output,
+ int stride, TX_TYPE tx_type, int bd);
+
#ifdef __cplusplus
}
#endif // __cplusplus
diff --git a/test/av1_fwd_txfm2d_test.cc b/test/av1_fwd_txfm2d_test.cc
index 88721a6..b962189 100644
--- a/test/av1_fwd_txfm2d_test.cc
+++ b/test/av1_fwd_txfm2d_test.cc
@@ -21,7 +21,7 @@
#include "./av1_rtcd.h"
using libaom_test::ACMRandom;
-using libaom_test::Fwd_Txfm2d_Func;
+using libaom_test::FwdTxfm2dFunc;
using libaom_test::TYPE_TXFM;
using libaom_test::bd;
using libaom_test::compute_avg_abs_error;
@@ -125,7 +125,7 @@
int tx_width_;
int tx_height_;
int txfm2d_size_;
- Fwd_Txfm2d_Func fwd_txfm_;
+ FwdTxfm2dFunc fwd_txfm_;
int16_t *input_;
int32_t *output_;
double *ref_input_;
@@ -210,7 +210,7 @@
#if HAVE_SSE2 && defined(__SSE2__)
#include "av1/common/x86/av1_txfm_sse2.h"
-Fwd_Txfm2d_Func fwd_func_sse2_list[TX_SIZES_ALL][2] = {
+FwdTxfm2dFunc fwd_func_sse2_list[TX_SIZES_ALL][2] = {
{ NULL, NULL }, // TX_4X4
{ av1_fwd_txfm2d_8x8_c,
av1_fwd_txfm2d_8x8_sse2 }, // TX_8X8 // 8x8 transform
@@ -250,8 +250,8 @@
// No ADST for large size transforms.
continue;
}
- Fwd_Txfm2d_Func ref_func = fwd_func_sse2_list[tx_size][0];
- Fwd_Txfm2d_Func target_func = fwd_func_sse2_list[tx_size][1];
+ FwdTxfm2dFunc ref_func = fwd_func_sse2_list[tx_size][0];
+ FwdTxfm2dFunc target_func = fwd_func_sse2_list[tx_size][1];
if (ref_func != NULL && target_func != NULL) {
DECLARE_ALIGNED(16, int16_t, input[64 * 64]) = { 0 };
DECLARE_ALIGNED(16, int32_t, output[64 * 64]) = { 0 };
diff --git a/test/av1_inv_txfm2d_test.cc b/test/av1_inv_txfm2d_test.cc
index d6a4622..f4ee7d3 100644
--- a/test/av1_inv_txfm2d_test.cc
+++ b/test/av1_inv_txfm2d_test.cc
@@ -15,14 +15,17 @@
#include <vector>
#include "./av1_rtcd.h"
-#include "test/acm_random.h"
-#include "test/util.h"
-#include "test/av1_txfm_test.h"
+
+#include "aom_ports/aom_timer.h"
#include "av1/common/av1_inv_txfm1d_cfg.h"
+#include "test/acm_random.h"
+#include "test/av1_txfm_test.h"
+#include "test/util.h"
using libaom_test::ACMRandom;
-using libaom_test::Fwd_Txfm2d_Func;
-using libaom_test::Inv_Txfm2d_Func;
+using libaom_test::FwdTxfm2dFunc;
+using libaom_test::InvTxfm2dFunc;
+using libaom_test::LbdInvTxfm2dFunc;
using libaom_test::bd;
using libaom_test::compute_avg_abs_error;
using libaom_test::input_base;
@@ -48,10 +51,8 @@
int tx_w = tx_size_wide[tx_size_];
int tx_h = tx_size_high[tx_size_];
int txfm2d_size = tx_w * tx_h;
- const Fwd_Txfm2d_Func fwd_txfm_func =
- libaom_test::fwd_txfm_func_ls[tx_size_];
- const Inv_Txfm2d_Func inv_txfm_func =
- libaom_test::inv_txfm_func_ls[tx_size_];
+ const FwdTxfm2dFunc fwd_txfm_func = libaom_test::fwd_txfm_func_ls[tx_size_];
+ const InvTxfm2dFunc inv_txfm_func = libaom_test::inv_txfm_func_ls[tx_size_];
double avg_abs_error = 0;
ACMRandom rnd(ACMRandom::DeterministicSeed());
@@ -213,4 +214,123 @@
}
}
}
+
+typedef std::tr1::tuple<LbdInvTxfm2dFunc *> AV1LbdInvTxfm2dParam;
+class AV1LbdInvTxfm2d : public ::testing::TestWithParam<AV1LbdInvTxfm2dParam> {
+ public:
+ void RunAV1InvTxfm2dTest(LbdInvTxfm2dFunc *test_list, int run_times);
+};
+
+void AV1LbdInvTxfm2d::RunAV1InvTxfm2dTest(LbdInvTxfm2dFunc *test_list,
+ int run_times) {
+ const int bd = 8;
+ for (int tx_size = TX_4X4; tx_size < TX_SIZES_ALL; ++tx_size) {
+ for (int tx_type = 0; tx_type < TX_TYPES; ++tx_type) {
+ FwdTxfm2dFunc fwd_func = libaom_test::fwd_txfm_func_ls[tx_size];
+ InvTxfm2dFunc ref_func = libaom_test::inv_txfm_func_ls[tx_size];
+ LbdInvTxfm2dFunc target_func = test_list[tx_size];
+ if (ref_func != NULL && target_func != NULL) {
+ const int BLK_WIDTH = 64;
+ const int BLK_SIZE = BLK_WIDTH * BLK_WIDTH;
+ DECLARE_ALIGNED(16, int16_t, input[BLK_SIZE]) = { 0 };
+ DECLARE_ALIGNED(32, int32_t, inv_input[BLK_SIZE]) = { 0 };
+ DECLARE_ALIGNED(16, uint8_t, output[BLK_SIZE]) = { 0 };
+ DECLARE_ALIGNED(16, uint16_t, ref_output[BLK_SIZE]) = { 0 };
+ int stride = BLK_WIDTH;
+ ACMRandom rnd(ACMRandom::DeterministicSeed());
+ int rows = tx_size_high[tx_size];
+ int cols = tx_size_wide[tx_size];
+ int randTimes = run_times == 1 ? 500 : 2;
+ for (int cnt = 0; cnt < randTimes; ++cnt) {
+ if (cnt == 0) {
+ const int16_t max_in = (1 << (bd + 1)) - 1;
+ for (int r = 0; r < rows; ++r) {
+ for (int c = 0; c < cols; ++c) {
+ input[r * cols + c] = max_in;
+ output[r * stride + c] = ref_output[r * stride + c] = 128;
+ }
+ }
+ } else {
+ for (int r = 0; r < rows; ++r) {
+ for (int c = 0; c < cols; ++c) {
+ input[r * cols + c] = rnd.Rand8Extremes();
+ output[r * stride + c] = ref_output[r * stride + c] =
+ rnd.Rand8();
+ }
+ }
+ }
+ fwd_func(input, inv_input, stride, (TX_TYPE)tx_type, bd);
+ aom_usec_timer timer;
+ aom_usec_timer_start(&timer);
+ for (int i = 0; i < run_times; ++i) {
+ ref_func(inv_input, ref_output, stride, (TX_TYPE)tx_type, bd);
+ }
+ aom_usec_timer_mark(&timer);
+ double time1 = static_cast<double>(aom_usec_timer_elapsed(&timer));
+ aom_usec_timer_start(&timer);
+ for (int i = 0; i < run_times; ++i) {
+ target_func(inv_input, output, stride, (TX_TYPE)tx_type, bd);
+ }
+ aom_usec_timer_mark(&timer);
+ double time2 = static_cast<double>(aom_usec_timer_elapsed(&timer));
+ if (run_times > 10) {
+ printf("txfm[%d] %3dx%-3d:%7.2f/%7.2fns", tx_type, cols, rows,
+ time1, time2);
+ printf("(%3.2f)\n", time1 / time2);
+ }
+ for (int r = 0; r < rows; ++r) {
+ for (int c = 0; c < cols; ++c) {
+ ASSERT_EQ((uint8_t)ref_output[r * stride + c],
+ output[r * stride + c])
+ << "[" << r << "," << c << "] " << cnt
+ << " tx_size: " << tx_size << " tx_type: " << tx_type;
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST_P(AV1LbdInvTxfm2d, match) { RunAV1InvTxfm2dTest(GET_PARAM(0), 1); }
+TEST_P(AV1LbdInvTxfm2d, DISABLED_Speed) {
+ RunAV1InvTxfm2dTest(GET_PARAM(0), 10000000);
+}
+
+#if HAVE_SSE2 && defined(__SSE2__)
+#include "av1/common/x86/av1_txfm_sse2.h"
+
+LbdInvTxfm2dFunc lbd_inv_func_sse2_list[TX_SIZES_ALL] = {
+ NULL, // TX_4X4
+ av1_lowbd_inv_txfm2d_add_8x8_sse2, // TX_8X8
+ NULL, // TX_16X16
+ NULL, // TX_32X32
+#if CONFIG_TX64X64
+ NULL, // TX_64X64
+#endif // CONFIG_TX64X64
+ NULL, // TX_4X8
+ NULL, // TX_8X4
+ NULL, // TX_8X16
+ NULL, // TX_16X8
+ NULL, // TX_16X32
+ NULL, // TX_32X16
+#if CONFIG_TX64X64
+ NULL, // TX_32X64
+ NULL, // TX_64X32
+#endif // CONFIG_TX64X64
+ NULL, // TX_4X16
+ NULL, // TX_16X4
+ NULL, // TX_8X32
+ NULL, // TX_32X8
+#if CONFIG_TX64X64
+ NULL, // TX_16X64
+ NULL, // TX_64X16
+#endif // CONFIG_TX64X64
+};
+
+INSTANTIATE_TEST_CASE_P(SSE2, AV1LbdInvTxfm2d,
+ ::testing::Values(lbd_inv_func_sse2_list));
+
+#endif // HAVE_SSE2
+
} // namespace
diff --git a/test/av1_txfm_test.h b/test/av1_txfm_test.h
index dc9e5a0..f1028e6 100644
--- a/test/av1_txfm_test.h
+++ b/test/av1_txfm_test.h
@@ -74,15 +74,16 @@
typedef void (*TxfmFunc)(const int32_t *in, int32_t *out, const int8_t cos_bit,
const int8_t *range_bit);
-typedef void (*Fwd_Txfm2d_Func)(const int16_t *, int32_t *, int, TX_TYPE, int);
-typedef void (*Inv_Txfm2d_Func)(const int32_t *, uint16_t *, int, TX_TYPE, int);
+typedef void (*FwdTxfm2dFunc)(const int16_t *, int32_t *, int, TX_TYPE, int);
+typedef void (*InvTxfm2dFunc)(const int32_t *, uint16_t *, int, TX_TYPE, int);
+typedef void (*LbdInvTxfm2dFunc)(const int32_t *, uint8_t *, int, TX_TYPE, int);
static const int bd = 10;
static const int input_base = (1 << bd);
#if CONFIG_AV1_ENCODER
-static const Fwd_Txfm2d_Func fwd_txfm_func_ls[TX_SIZES_ALL] = {
+static const FwdTxfm2dFunc fwd_txfm_func_ls[TX_SIZES_ALL] = {
av1_fwd_txfm2d_4x4_c, av1_fwd_txfm2d_8x8_c, av1_fwd_txfm2d_16x16_c,
av1_fwd_txfm2d_32x32_c,
#if CONFIG_TX64X64
@@ -101,7 +102,7 @@
};
#endif
-static const Inv_Txfm2d_Func inv_txfm_func_ls[TX_SIZES_ALL] = {
+static const InvTxfm2dFunc inv_txfm_func_ls[TX_SIZES_ALL] = {
av1_inv_txfm2d_add_4x4_c, av1_inv_txfm2d_add_8x8_c,
av1_inv_txfm2d_add_16x16_c, av1_inv_txfm2d_add_32x32_c,
#if CONFIG_TX64X64