Merge "Fix to conform Google's coding convention" into nextgenv2
diff --git a/test/vp10_iht4x4_test.cc b/test/vp10_iht4x4_test.cc
index 1cad402..3960b5a 100644
--- a/test/vp10_iht4x4_test.cc
+++ b/test/vp10_iht4x4_test.cc
@@ -15,6 +15,7 @@
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
+#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_ports/mem.h"
namespace {
@@ -34,24 +35,27 @@
// <target optimization function, tx_type, bit_depth>
typedef tuple<IHbdHtFunc, int, int> IHbdHt4x4Param;
-class VP10HighbdInvTrans4x4HT :
- public ::testing::TestWithParam<IHbdHt4x4Param> {
+class VP10HighbdInvTrans4x4HT
+ : public ::testing::TestWithParam<IHbdHt4x4Param> {
public:
virtual ~VP10HighbdInvTrans4x4HT() {}
virtual void SetUp() {
inv_txfm_ = GET_PARAM(0);
- inv_txfm_ref_ = iht4x4_ref;
tx_type_ = GET_PARAM(1);
bit_depth_ = GET_PARAM(2);
num_coeffs_ = 4 * 4;
+    // Note:
+    // The inverse transform input buffer is 32-byte aligned;
+    // see alloc_mode_context() in
+    // vp10/encoder/context_tree.c.
coeffs_ = reinterpret_cast<int32_t *>(
- vpx_memalign(16, sizeof(int32_t) * num_coeffs_));
+ vpx_memalign(32, sizeof(coeffs_[0]) * num_coeffs_));
output_ = reinterpret_cast<uint16_t *>(
- vpx_memalign(16, sizeof(uint16_t) * num_coeffs_));
+ vpx_memalign(32, sizeof(output_[0]) * num_coeffs_));
output_ref_ = reinterpret_cast<uint16_t *>(
- vpx_memalign(16, sizeof(uint16_t) * num_coeffs_));
+ vpx_memalign(32, sizeof(output_ref_[0]) * num_coeffs_));
}
virtual void TearDown() {
@@ -65,49 +69,39 @@
void RunBitexactCheck();
private:
+ static int32_t ClampCoeffs(int number, int bit) {
+ const int max = (1 << bit) - 1;
+ const int min = -max;
+ return clamp(number, min, max);
+ }
+
IHbdHtFunc inv_txfm_;
- IHbdHtFunc inv_txfm_ref_;
int tx_type_;
int bit_depth_;
int num_coeffs_;
int32_t *coeffs_;
uint16_t *output_;
uint16_t *output_ref_;
-
- int32_t clamp(int32_t number, int bit) {
- int32_t ret = number;
- const int32_t max = (int32_t)(1 << bit) - 1;
- const int32_t min = -max;
-
- if (number > max) {
- ret = max;
- } else if (number < min) {
- ret = min;
- }
- return ret;
- }
};
void VP10HighbdInvTrans4x4HT::RunBitexactCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int stride = 4;
const int num_tests = 2000000;
- int i;
- int j;
const uint16_t mask = (1 << bit_depth_) - 1;
- for (i = 0; i < num_tests; ++i) {
- for (j = 0; j < num_coeffs_; ++j) {
- coeffs_[j] = clamp((rnd.Rand16() - rnd.Rand16()) << 2, 18);
+ for (int i = 0; i < num_tests; ++i) {
+ for (int j = 0; j < num_coeffs_; ++j) {
+ coeffs_[j] = ClampCoeffs((rnd.Rand16() - rnd.Rand16()) << 2, 18);
output_ref_[j] = rnd.Rand16() & mask;
output_[j] = output_ref_[j];
}
- inv_txfm_ref_(coeffs_, output_ref_, stride, tx_type_, bit_depth_);
+ iht4x4_ref(coeffs_, output_ref_, stride, tx_type_, bit_depth_);
ASM_REGISTER_STATE_CHECK(inv_txfm_(coeffs_, output_, stride, tx_type_,
bit_depth_));
- for (j = 0; j < num_coeffs_; ++j) {
+ for (int j = 0; j < num_coeffs_; ++j) {
EXPECT_EQ(output_ref_[j], output_[j])
<< "Not bit-exact result at index: " << j
<< "At test block: " << i;
diff --git a/vp10/common/x86/highbd_inv_txfm_sse4.c b/vp10/common/x86/highbd_inv_txfm_sse4.c
index 0c623df..80d4c4f 100644
--- a/vp10/common/x86/highbd_inv_txfm_sse4.c
+++ b/vp10/common/x86/highbd_inv_txfm_sse4.c
@@ -9,18 +9,17 @@
*/
#include <assert.h>
-#include <smmintrin.h> /* SSE4.1 */
+#include <smmintrin.h>  /* SSE4.1 */
#include "./vp10_rtcd.h"
#include "./vpx_config.h"
#include "vp10/common/vp10_inv_txfm2d_cfg.h"
-
static INLINE void load_buffer_4x4(const int32_t *coeff, __m128i *in) {
- in[0] = _mm_loadu_si128((const __m128i *)(coeff + 0));
- in[1] = _mm_loadu_si128((const __m128i *)(coeff + 4));
- in[2] = _mm_loadu_si128((const __m128i *)(coeff + 8));
- in[3] = _mm_loadu_si128((const __m128i *)(coeff + 12));
+ in[0] = _mm_load_si128((const __m128i *)(coeff + 0));
+ in[1] = _mm_load_si128((const __m128i *)(coeff + 4));
+ in[2] = _mm_load_si128((const __m128i *)(coeff + 8));
+ in[3] = _mm_load_si128((const __m128i *)(coeff + 12));
}
static void idct4x4_sse4_1(__m128i *in, int bit) {
@@ -176,7 +175,7 @@
}
static void write_buffer_4x4(__m128i *in, uint16_t *output, int stride,
- int flipud, int fliplr, int shift, int bd) {
+ int shift, int bd) {
const __m128i zero = _mm_setzero_si128();
__m128i u0, u1, u2, u3;
__m128i v0, v1, v2, v3;
@@ -213,9 +212,6 @@
_mm_storel_epi64((__m128i *)(output + 1 * stride), v1);
_mm_storel_epi64((__m128i *)(output + 2 * stride), v2);
_mm_storel_epi64((__m128i *)(output + 3 * stride), v3);
-
- (void) flipud;
- (void) fliplr;
}
void vp10_inv_txfm2d_add_4x4_sse4_1(const int32_t *coeff, uint16_t *output,
@@ -229,28 +225,28 @@
load_buffer_4x4(coeff, in);
idct4x4_sse4_1(in, cfg->cos_bit_row[2]);
idct4x4_sse4_1(in, cfg->cos_bit_row[2]);
- write_buffer_4x4(in, output, stride, 0, 0, -cfg->shift[1], bd);
+ write_buffer_4x4(in, output, stride, -cfg->shift[1], bd);
break;
case ADST_DCT:
cfg = &inv_txfm_2d_cfg_adst_dct_4;
load_buffer_4x4(coeff, in);
idct4x4_sse4_1(in, cfg->cos_bit_row[2]);
iadst4x4_sse4_1(in, cfg->cos_bit_row[2]);
- write_buffer_4x4(in, output, stride, 0, 0, -cfg->shift[1], bd);
+ write_buffer_4x4(in, output, stride, -cfg->shift[1], bd);
break;
case DCT_ADST:
cfg = &inv_txfm_2d_cfg_dct_adst_4;
load_buffer_4x4(coeff, in);
iadst4x4_sse4_1(in, cfg->cos_bit_row[2]);
idct4x4_sse4_1(in, cfg->cos_bit_row[2]);
- write_buffer_4x4(in, output, stride, 0, 0, -cfg->shift[1], bd);
+ write_buffer_4x4(in, output, stride, -cfg->shift[1], bd);
break;
case ADST_ADST:
cfg = &inv_txfm_2d_cfg_adst_adst_4;
load_buffer_4x4(coeff, in);
iadst4x4_sse4_1(in, cfg->cos_bit_row[2]);
iadst4x4_sse4_1(in, cfg->cos_bit_row[2]);
- write_buffer_4x4(in, output, stride, 0, 0, -cfg->shift[1], bd);
+ write_buffer_4x4(in, output, stride, -cfg->shift[1], bd);
break;
default:
assert(0);
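
The switch from _mm_loadu_si128() to _mm_load_si128() in load_buffer_4x4() is only valid because the coefficient buffer is guaranteed 32-byte aligned: vpx_memalign(32, ...) in the test, and, per the comment added there, alloc_mode_context() in vp10/encoder/context_tree.c. Aligned SSE loads require at least 16-byte alignment. A standalone sketch of the same pattern, using C11 aligned_alloc() instead of vpx_memalign() purely for illustration:

  #include <smmintrin.h>  /* pulls in the SSE2 _mm_load_si128 / _mm_storel_epi64 */
  #include <stdint.h>
  #include <stdio.h>
  #include <stdlib.h>

  int main(void) {
    /* 16 coefficients of a 4x4 block. 32-byte alignment also satisfies the
     * 16-byte requirement of the aligned loads below, and the size (64 bytes)
     * is a multiple of the alignment, as aligned_alloc() requires. */
    int32_t *coeff = (int32_t *)aligned_alloc(32, sizeof(int32_t) * 16);
    if (coeff == NULL) return 1;
    for (int i = 0; i < 16; ++i) coeff[i] = i;

    __m128i in[4];
    /* Aligned loads: legal only because `coeff` is 16-byte aligned or better.
     * With an unaligned pointer these would fault, unlike _mm_loadu_si128. */
    in[0] = _mm_load_si128((const __m128i *)(coeff + 0));
    in[1] = _mm_load_si128((const __m128i *)(coeff + 4));
    in[2] = _mm_load_si128((const __m128i *)(coeff + 8));
    in[3] = _mm_load_si128((const __m128i *)(coeff + 12));

    printf("first lane of row 0: %d\n", _mm_cvtsi128_si32(in[0]));
    free(coeff);
    return 0;
  }
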